core, eth/downloader: commit block data using batches (#15115)
* ethdb: add Putter interface and Has method
* ethdb: improve docs and add IdealBatchSize
* ethdb: remove memory batch lock
  Batches are not safe for concurrent use.
* core: use ethdb.Putter for Write* functions
  This covers the easy cases.
* core/state: simplify StateSync
* trie: optimize local node check
* ethdb: add ValueSize to Batch
* core: optimize HasHeader check
  This avoids one random database read to get the block number. For many uses
  of HasHeader, the expectation is that the header is actually there. Using
  Has avoids a load + decode of the value.
* core: write fast sync block data in batches
  Collect writes into batches up to the ideal size instead of issuing many
  small, concurrent writes.
* eth/downloader: commit larger state batches
  Collect nodes into a batch up to the ideal size instead of committing
  whenever a node is received.
* core: optimize HasBlock check
  This avoids a random database read to get the number.
* core: use numberCache in HasHeader
  numberCache has higher capacity, increasing the odds of finding the header
  without a database lookup.
* core: write imported block data using a batch
  Restore batch writes of state and add blocks, tx entries and receipts to
  the same batch. The change also simplifies the miner. This commit also
  removes posting of logs when a forked block is imported.
* core: fix DB write error handling
* ethdb: use RLock for Has
* core: fix HasBlock comment
Committed by: Péter Szilágyi
Parent: ac193e36ce
Commit: 10181b57a9
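The recurring pattern in this change is to accumulate writes in an ethdb batch and flush it once it grows past the ideal size, instead of issuing many small writes. Below is a minimal sketch of that accumulate-and-flush pattern, assuming the ethdb.Batch methods (Put, ValueSize, Write) and the ethdb.IdealBatchSize constant introduced by this change; the writeInBatches helper and its input map are illustrative only, not code from the commit:

```go
package example

import "github.com/ethereum/go-ethereum/ethdb"

// writeInBatches collects key/value writes into a single batch and flushes it
// whenever the accumulated value size passes ethdb.IdealBatchSize, mirroring
// the fast-sync and state-commit paths touched by this change.
func writeInBatches(db ethdb.Database, items map[string][]byte) error {
	batch := db.NewBatch()
	for key, value := range items {
		if err := batch.Put([]byte(key), value); err != nil {
			return err
		}
		// Flush once the batch has grown to the ideal size, then start a new one.
		if batch.ValueSize() >= ethdb.IdealBatchSize {
			if err := batch.Write(); err != nil {
				return err
			}
			batch = db.NewBatch()
		}
	}
	// Flush whatever is left over.
	return batch.Write()
}
```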
@@ -138,7 +138,7 @@ func (s *TrieSync) AddRawEntry(hash common.Hash, depth int, parent common.Hash)
 	if _, ok := s.membatch.batch[hash]; ok {
 		return
 	}
-	if blob, _ := s.database.Get(hash.Bytes()); blob != nil {
+	if ok, _ := s.database.Has(hash.Bytes()); ok {
 		return
 	}
 	// Assemble the new sub-trie sync request
@@ -296,8 +296,7 @@ func (s *TrieSync) children(req *request, object node) ([]*request, error) {
 			if _, ok := s.membatch.batch[hash]; ok {
 				continue
 			}
-			blob, _ := s.database.Get(node)
-			if local, err := decodeNode(node[:], blob, 0); local != nil && err == nil {
+			if ok, _ := s.database.Has(node); ok {
 				continue
 			}
 			// Locally unknown node, schedule for retrieval
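The same Get-to-Has conversion shown in these two hunks is what the HasHeader/HasBlock items in the commit message describe: presence is checked against the key alone, so the value never has to be loaded and decoded just to answer yes or no. A hedged sketch of that pattern follows; headerKey is a hypothetical stand-in for the header key layout and is not the actual core key scheme:

```go
package example

import (
	"encoding/binary"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/ethdb"
)

// headerKey is a hypothetical helper building a prefix + number + hash key;
// the real layout lives in core's database accessors.
func headerKey(number uint64, hash common.Hash) []byte {
	key := make([]byte, 0, 1+8+32)
	key = append(key, 'h')
	num := make([]byte, 8)
	binary.BigEndian.PutUint64(num, number)
	key = append(key, num...)
	return append(key, hash.Bytes()...)
}

// hasHeader checks for presence with Has instead of Get, so the header value
// is never read from disk or RLP-decoded for a pure existence check.
func hasHeader(db ethdb.Database, number uint64, hash common.Hash) bool {
	ok, _ := db.Has(headerKey(number, hash))
	return ok
}
```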
@@ -66,6 +66,7 @@ type Database interface {
 // DatabaseReader wraps the Get method of a backing store for the trie.
 type DatabaseReader interface {
 	Get(key []byte) (value []byte, err error)
+	Has(key []byte) (bool, error)
 }
 
 // DatabaseWriter wraps the Put method of a backing store for the trie.
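With Has added, any backing store handed to the trie sync code must answer existence queries as well as reads. A minimal sketch of an in-memory store satisfying the widened DatabaseReader interface; the memReader type and its error message are illustrative, not part of this change:

```go
package example

import "errors"

// memReader is a toy in-memory store satisfying the widened DatabaseReader
// interface: Get returns the stored value, Has answers existence without
// materialising it.
type memReader struct {
	kv map[string][]byte
}

func (m *memReader) Get(key []byte) ([]byte, error) {
	if v, ok := m.kv[string(key)]; ok {
		return v, nil
	}
	return nil, errors.New("not found")
}

func (m *memReader) Has(key []byte) (bool, error) {
	_, ok := m.kv[string(key)]
	return ok, nil
}
```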