core: implement background trie prefetcher
Squashed from the following commits:

  core/state: lazily init snapshot storage map
  core/state: fix flawed meter on storage reads
  core/state: make statedb/stateobjects reuse a hasher
  core/blockchain, core/state: implement new trie prefetcher
  core: make trie prefetcher deliver tries to statedb
  core/state: refactor trie_prefetcher, export storage tries
  blockchain: re-enable the next-block-prefetcher
  state: remove panics in trie prefetcher
  core/state/trie_prefetcher: address some review concerns
  sq
Committed by: Péter Szilágyi
Parent: 93a89b2681
Commit: 1e1865b73f
@@ -157,11 +157,20 @@ func (s *stateObject) touch() {
 
 func (s *stateObject) getTrie(db Database) Trie {
 	if s.trie == nil {
-		var err error
-		s.trie, err = db.OpenStorageTrie(s.addrHash, s.data.Root)
-		if err != nil {
-			s.trie, _ = db.OpenStorageTrie(s.addrHash, common.Hash{})
-			s.setError(fmt.Errorf("can't create storage trie: %v", err))
-		}
+		// Try fetching from prefetcher first
+		// We don't prefetch empty tries
+		if s.data.Root != emptyRoot && s.db.prefetcher != nil {
+			// When the miner is creating the pending state, there is no
+			// prefetcher
+			s.trie = s.db.prefetcher.GetTrie(s.data.Root)
+		}
+		if s.trie == nil {
+			var err error
+			s.trie, err = db.OpenStorageTrie(s.addrHash, s.data.Root)
+			if err != nil {
+				s.trie, _ = db.OpenStorageTrie(s.addrHash, common.Hash{})
+				s.setError(fmt.Errorf("can't create storage trie: %v", err))
+			}
+		}
 	}
 	return s.trie
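The getTrie hunk above is the read side of the prefetcher: before opening the storage trie from the database, the stateObject (core/state/state_object.go) first asks the statedb's prefetcher whether the trie for s.data.Root has already been warmed in the background, and only on a miss (or when no prefetcher is attached, e.g. while the miner builds the pending state) falls back to OpenStorageTrie. A minimal sketch of the surface this hunk relies on follows; only GetTrie and PrefetchStorage appear in this diff, the rest is illustrative and not the actual core/state/trie_prefetcher implementation.

// Illustrative sketch only -- the real prefetcher added by this commit lives
// in core/state/trie_prefetcher.go and is more involved. This is just the
// contract the stateObject hunks shown here depend on.
type triePrefetcher interface {
	// GetTrie returns a trie that the background prefetcher has already
	// resolved for the given storage root, or nil if it is not ready.
	GetTrie(root common.Hash) Trie

	// PrefetchStorage schedules the given storage slots for background
	// resolution under the trie rooted at root.
	PrefetchStorage(root common.Hash, slots []common.Hash)
}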
@@ -197,12 +206,24 @@ func (s *stateObject) GetCommittedState(db Database, key common.Hash) common.Hash {
 	}
 	// If no live objects are available, attempt to use snapshots
 	var (
-		enc []byte
-		err error
+		enc   []byte
+		err   error
+		meter *time.Duration
 	)
+	readStart := time.Now()
+	if metrics.EnabledExpensive {
+		// If the snap is 'under construction', the first lookup may fail. If that
+		// happens, we don't want to double-count the time elapsed. Thus this
+		// dance with the metering.
+		defer func() {
+			if meter != nil {
+				*meter += time.Since(readStart)
+			}
+		}()
+	}
 	if s.db.snap != nil {
 		if metrics.EnabledExpensive {
-			defer func(start time.Time) { s.db.SnapshotStorageReads += time.Since(start) }(time.Now())
+			meter = &s.db.SnapshotStorageReads
 		}
 		// If the object was destructed in *this* block (and potentially resurrected),
 		// the storage has been cleared out, and we should *not* consult the previous
@@ -217,8 +238,14 @@ func (s *stateObject) GetCommittedState(db Database, key common.Hash) common.Hash {
 	}
 	// If snapshot unavailable or reading from it failed, load from the database
 	if s.db.snap == nil || err != nil {
+		if meter != nil {
+			// If we already spent time checking the snapshot, account for it
+			// and reset the readStart
+			*meter += time.Since(readStart)
+			readStart = time.Now()
+		}
 		if metrics.EnabledExpensive {
-			defer func(start time.Time) { s.db.StorageReads += time.Since(start) }(time.Now())
+			meter = &s.db.StorageReads
 		}
 		if enc, err = s.getTrie(db).TryGet(key.Bytes()); err != nil {
 			s.setError(err)
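The two GetCommittedState hunks above fix the flawed read meter: previously, if the snapshot lookup failed and the code fell through to the trie, both deferred timers fired and the elapsed time was double-counted. Now a single deferred closure charges whichever counter the meter pointer targets at return time, and the fallback path books the snapshot time and restarts the clock before retargeting the pointer. A standalone sketch of the pattern (the names here are placeholders, not geth APIs):

package main

import (
	"fmt"
	"time"
)

// read charges elapsed time to exactly one counter per phase: one deferred
// closure adds the final phase, and the fallback branch books the first
// phase explicitly before restarting the clock.
func read(snapshotTime, databaseTime *time.Duration, snapshotFails bool) {
	var meter *time.Duration
	readStart := time.Now()
	defer func() {
		if meter != nil {
			*meter += time.Since(readStart)
		}
	}()

	meter = snapshotTime
	time.Sleep(time.Millisecond) // stand-in for the snapshot lookup
	if snapshotFails {
		*meter += time.Since(readStart) // account for the snapshot attempt
		readStart = time.Now()          // ...and reset the clock
		meter = databaseTime
		time.Sleep(2 * time.Millisecond) // stand-in for the trie lookup
	}
}

func main() {
	var snap, db time.Duration
	read(&snap, &db, true)
	fmt.Println("snapshot:", snap, "database:", db)
}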
@@ -283,8 +310,13 @@ func (s *stateObject) setState(key, value common.Hash) {
 // finalise moves all dirty storage slots into the pending area to be hashed or
 // committed later. It is invoked at the end of every transaction.
 func (s *stateObject) finalise() {
+	trieChanges := make([]common.Hash, 0, len(s.dirtyStorage))
 	for key, value := range s.dirtyStorage {
 		s.pendingStorage[key] = value
+		trieChanges = append(trieChanges, key)
 	}
+	if len(trieChanges) > 0 && s.db.prefetcher != nil && s.data.Root != emptyRoot {
+		s.db.prefetcher.PrefetchStorage(s.data.Root, trieChanges)
+	}
 	if len(s.dirtyStorage) > 0 {
 		s.dirtyStorage = make(Storage)
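The finalise hunk is the write side: the slots dirtied by the transaction are collected and, when a prefetcher is attached and the account has a non-empty storage root, handed to PrefetchStorage so the relevant trie paths can be resolved on a background goroutine while the remaining transactions execute. Roughly the block-processing flow this enables, as a sketch (applyTx, block and statedb are stand-ins; Finalise and IntermediateRoot are existing core/state.StateDB methods):

// Sketch of the intended pipeline, not actual geth code.
for _, tx := range block.Transactions() {
	applyTx(statedb, tx)
	// Per touched account, finalise() moves dirty slots into pendingStorage
	// and passes the changed keys to prefetcher.PrefetchStorage.
	statedb.Finalise(true)
}
// By the time the state root is computed, updateTrie fetches its trie via
// prefetcher.GetTrie and finds most of the needed nodes already resolved.
root := statedb.IntermediateRoot(true)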
@@ -303,18 +335,11 @@ func (s *stateObject) updateTrie(db Database) Trie {
 	if metrics.EnabledExpensive {
 		defer func(start time.Time) { s.db.StorageUpdates += time.Since(start) }(time.Now())
 	}
-	// Retrieve the snapshot storage map for the object
+	// The snapshot storage map for the object
 	var storage map[common.Hash][]byte
-	if s.db.snap != nil {
-		// Retrieve the old storage map, if available, create a new one otherwise
-		storage = s.db.snapStorage[s.addrHash]
-		if storage == nil {
-			storage = make(map[common.Hash][]byte)
-			s.db.snapStorage[s.addrHash] = storage
-		}
-	}
 	// Insert all the pending updates into the trie
 	tr := s.getTrie(db)
+	hasher := s.db.hasher
 	for key, value := range s.pendingStorage {
 		// Skip noop changes, persist actual changes
 		if value == s.originStorage[key] {
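The hasher := s.db.hasher line above is the "reuse a hasher" part of this commit: slot keys written into the snapshot storage map are hashed with a keccak state owned by the StateDB instead of allocating a fresh hasher per slot; the next hunk switches the call from crypto.Keccak256Hash to crypto.HashData accordingly. A self-contained sketch of the difference, assuming the crypto package's NewKeccakState/HashData helpers:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	key := common.HexToHash("0x01")

	// One-shot helper: allocates a fresh keccak state on every call.
	h1 := crypto.Keccak256Hash(key[:])

	// Reused hasher: the StateDB keeps a single crypto.KeccakState
	// (s.db.hasher) and threads it through; here we just make one locally.
	hasher := crypto.NewKeccakState()
	h2 := crypto.HashData(hasher, key[:])

	fmt.Println(h1 == h2) // true: same digest, fewer allocations
}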
@@ -331,8 +356,15 @@ func (s *stateObject) updateTrie(db Database) Trie {
 			s.setError(tr.TryUpdate(key[:], v))
 		}
 		// If state snapshotting is active, cache the data til commit
-		if storage != nil {
-			storage[crypto.Keccak256Hash(key[:])] = v // v will be nil if value is 0x00
+		if s.db.snap != nil {
+			if storage == nil {
+				// Retrieve the old storage map, if available, create a new one otherwise
+				if storage = s.db.snapStorage[s.addrHash]; storage == nil {
+					storage = make(map[common.Hash][]byte)
+					s.db.snapStorage[s.addrHash] = storage
+				}
+			}
+			storage[crypto.HashData(hasher, key[:])] = v // v will be nil if value is 0x00
 		}
 	}
 	if len(s.pendingStorage) > 0 {
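The final hunk also covers the "lazily init snapshot storage map" item from the squash list: the per-account map for snapshot data is no longer allocated up front for every object updateTrie visits, but only when the first slot actually has to be cached. The pattern in isolation, as a small sketch with illustrative names:

package main

import "fmt"

// store caches slot values per account; the inner map is only allocated
// the first time an account actually needs to cache something.
type store struct {
	perAccount map[string]map[string][]byte
}

func (s *store) put(account, slot string, val []byte) {
	m := s.perAccount[account]
	if m == nil {
		m = make(map[string][]byte)
		s.perAccount[account] = m
	}
	m[slot] = val
}

func main() {
	s := &store{perAccount: make(map[string]map[string][]byte)}
	s.put("0xaa", "slot1", []byte{0x01})
	fmt.Println(len(s.perAccount), len(s.perAccount["0xaa"])) // 1 1
}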