trie: replace bigcache with fastcache (#19971)

gary rong
2019-11-25 16:58:15 +08:00
committed by Péter Szilágyi
parent f5a68a40bf
commit 9d67222f4e
3 changed files with 20 additions and 17 deletions

@@ -25,7 +25,7 @@ import (
 	"sync"
 	"time"
 
-	"github.com/allegro/bigcache"
+	"github.com/VictoriaMetrics/fastcache"
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/ethdb"
 	"github.com/ethereum/go-ethereum/log"
@@ -69,7 +69,7 @@ const secureKeyLength = 11 + 32
 type Database struct {
 	diskdb ethdb.KeyValueStore // Persistent storage for matured trie nodes
 
-	cleans *bigcache.BigCache // GC friendly memory cache of clean node RLPs
+	cleans *fastcache.Cache // GC friendly memory cache of clean node RLPs
 	dirties map[common.Hash]*cachedNode // Data and references relationships of dirty nodes
 	oldest common.Hash // Oldest tracked node, flush-list head
 	newest common.Hash // Newest tracked node, flush-list tail
@@ -296,16 +296,9 @@ func NewDatabase(diskdb ethdb.KeyValueStore) *Database {
 // before its written out to disk or garbage collected. It also acts as a read cache
 // for nodes loaded from disk.
 func NewDatabaseWithCache(diskdb ethdb.KeyValueStore, cache int) *Database {
-	var cleans *bigcache.BigCache
+	var cleans *fastcache.Cache
 	if cache > 0 {
-		cleans, _ = bigcache.NewBigCache(bigcache.Config{
-			Shards: 1024,
-			LifeWindow: time.Hour,
-			MaxEntriesInWindow: cache * 1024,
-			MaxEntrySize: 512,
-			HardMaxCacheSize: cache,
-			Hasher: trienodeHasher{},
-		})
+		cleans = fastcache.New(cache * 1024 * 1024)
 	}
 	return &Database{
 		diskdb: diskdb,
@@ -381,7 +374,7 @@ func (db *Database) insertPreimage(hash common.Hash, preimage []byte) {
 func (db *Database) node(hash common.Hash) node {
 	// Retrieve the node from the clean cache if available
 	if db.cleans != nil {
-		if enc, err := db.cleans.Get(string(hash[:])); err == nil && enc != nil {
+		if enc := db.cleans.Get(nil, hash[:]); enc != nil {
 			memcacheCleanHitMeter.Mark(1)
 			memcacheCleanReadMeter.Mark(int64(len(enc)))
 			return mustDecodeNode(hash[:], enc)
@@ -401,7 +394,7 @@ func (db *Database) node(hash common.Hash) node {
 		return nil
 	}
 	if db.cleans != nil {
-		db.cleans.Set(string(hash[:]), enc)
+		db.cleans.Set(hash[:], enc)
 		memcacheCleanMissMeter.Mark(1)
 		memcacheCleanWriteMeter.Mark(int64(len(enc)))
 	}
@@ -417,7 +410,7 @@ func (db *Database) Node(hash common.Hash) ([]byte, error) {
 	}
 	// Retrieve the node from the clean cache if available
 	if db.cleans != nil {
-		if enc, err := db.cleans.Get(string(hash[:])); err == nil && enc != nil {
+		if enc := db.cleans.Get(nil, hash[:]); enc != nil {
 			memcacheCleanHitMeter.Mark(1)
 			memcacheCleanReadMeter.Mark(int64(len(enc)))
 			return enc, nil
@@ -435,7 +428,7 @@ func (db *Database) Node(hash common.Hash) ([]byte, error) {
 	enc, err := db.diskdb.Get(hash[:])
 	if err == nil && enc != nil {
 		if db.cleans != nil {
-			db.cleans.Set(string(hash[:]), enc)
+			db.cleans.Set(hash[:], enc)
 			memcacheCleanMissMeter.Mark(1)
 			memcacheCleanWriteMeter.Mark(int64(len(enc)))
 		}
@@ -832,7 +825,7 @@ func (c *cleaner) Put(key []byte, rlp []byte) error {
 	}
 	// Move the flushed node into the clean cache to prevent insta-reloads
 	if c.db.cleans != nil {
-		c.db.cleans.Set(string(hash[:]), rlp)
+		c.db.cleans.Set(hash[:], rlp)
 	}
 	return nil
 }
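
For context on the API swap above, here is a minimal standalone sketch (not part of this diff; the key and value literals are purely illustrative) of the fastcache calls the new code relies on: fastcache.New takes a capacity in bytes, which is why NewDatabaseWithCache now converts its megabyte-denominated cache parameter with cache * 1024 * 1024, and Get appends a hit into a caller-supplied buffer instead of returning a (value, error) pair the way bigcache's string-keyed Get does.

package main

import (
	"fmt"

	"github.com/VictoriaMetrics/fastcache"
)

func main() {
	// Capacity is specified in bytes, so a 16 MB cache is 16 * 1024 * 1024.
	cache := fastcache.New(16 * 1024 * 1024)

	// Keys and values are raw byte slices; no string conversion is needed.
	key := []byte("node-hash") // illustrative key
	cache.Set(key, []byte("node-rlp")) // illustrative value

	// Get appends the cached value to the supplied buffer (nil here) and
	// returns it; a nil result signals a miss rather than an error value.
	if enc := cache.Get(nil, key); enc != nil {
		fmt.Printf("hit: %s\n", enc)
	} else {
		fmt.Println("miss")
	}
}

Note that with a nil destination buffer an empty stored value is indistinguishable from a miss; that is harmless in this code path because an RLP-encoded trie node is never zero-length.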