core, eth/downloader: commit block data using batches (#15115)
* ethdb: add Putter interface and Has method
* ethdb: improve docs and add IdealBatchSize
* ethdb: remove memory batch lock. Batches are not safe for concurrent use.
* core: use ethdb.Putter for Write* functions. This covers the easy cases.
* core/state: simplify StateSync
* trie: optimize local node check
* ethdb: add ValueSize to Batch
* core: optimize HasHeader check. This avoids one random database read to get the block number. For many uses of HasHeader, the expectation is that the header is actually there; using Has avoids a load + decode of the value.
* core: write fast sync block data in batches. Collect writes into batches up to the ideal size instead of issuing many small, concurrent writes.
* eth/downloader: commit larger state batches. Collect nodes into a batch up to the ideal size instead of committing whenever a node is received.
* core: optimize HasBlock check. This avoids a random database read to get the number.
* core: use numberCache in HasHeader. numberCache has higher capacity, increasing the odds of finding the header without a database lookup.
* core: write imported block data using a batch. Restore batch writes of state, and add blocks, tx entries and receipts to the same batch. The change also simplifies the miner. This commit also removes posting of logs when a forked block is imported.
* core: fix DB write error handling
* ethdb: use RLock for Has
* core: fix HasBlock comment
Committed by: Péter Szilágyi
Parent: ac193e36ce
Commit: 10181b57a9
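The pattern behind the changes described above: instead of issuing many small, concurrent writes, callers queue data into a batch and flush it once the batch's ValueSize reaches the ideal batch size. Below is a minimal, self-contained sketch of that loop, assuming interfaces shaped like the ones in this diff; the names, key scheme and the 100 KiB threshold are illustrative, not the exact ethdb API.

package batchwrite

// Putter is the narrow write interface described in the commit message; both
// the database and a batch can satisfy it.
type Putter interface {
	Put(key, value []byte) error
}

// Batch queues writes and flushes them together. ValueSize reports how much
// data is queued, mirroring the method added in this diff.
type Batch interface {
	Putter
	ValueSize() int
	Write() error
}

// Database is the subset of the database API this sketch needs.
type Database interface {
	Putter
	NewBatch() Batch
}

// idealBatchSize is an assumed flush threshold (100 KiB, illustrative only;
// the real constant is ethdb.IdealBatchSize).
const idealBatchSize = 100 * 1024

// writeAll queues entries into a batch and flushes whenever enough data has
// accumulated, so the backend sees a few large writes instead of many small,
// concurrent ones.
func writeAll(db Database, entries map[string][]byte) error {
	batch := db.NewBatch()
	for k, v := range entries {
		if err := batch.Put([]byte(k), v); err != nil {
			return err
		}
		if batch.ValueSize() >= idealBatchSize {
			if err := batch.Write(); err != nil {
				return err
			}
			batch = db.NewBatch() // start a fresh batch after flushing
		}
	}
	return batch.Write() // flush whatever is left
}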
@@ -109,6 +109,10 @@ func (db *LDBDatabase) Put(key []byte, value []byte) error {
 	return db.db.Put(key, value, nil)
 }
 
+func (db *LDBDatabase) Has(key []byte) (bool, error) {
+	return db.db.Has(key, nil)
+}
+
 // Get returns the given key if it's present.
 func (db *LDBDatabase) Get(key []byte) ([]byte, error) {
 	// Measure the database get latency, if requested
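The Has method added above is what lets existence checks such as HasHeader and HasBlock skip loading and decoding the value: the check becomes a single key lookup. A rough before/after sketch, with a hypothetical headerKey helper standing in for the real key scheme:

package hascheck

// reader stands in for the database; headerKey is a hypothetical helper that
// builds the lookup key and exists only to make the sketch self-contained.
type reader interface {
	Has(key []byte) (bool, error)
	Get(key []byte) ([]byte, error)
}

func headerKey(hash []byte) []byte { return append([]byte("header-"), hash...) }

// Before: fetch the value just to learn whether it exists, paying for the
// read (and, in the real code, a decode) even when only presence matters.
func hasHeaderViaGet(db reader, hash []byte) bool {
	data, _ := db.Get(headerKey(hash))
	return len(data) > 0
}

// After: a plain key-existence check; no value is loaded or decoded.
func hasHeaderViaHas(db reader, hash []byte) bool {
	ok, _ := db.Has(headerKey(hash))
	return ok
}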
@@ -271,19 +275,21 @@ func (db *LDBDatabase) meter(refresh time.Duration) {
 	}
 }
 
 // TODO: remove this stuff and expose leveldb directly
 
 func (db *LDBDatabase) NewBatch() Batch {
 	return &ldbBatch{db: db.db, b: new(leveldb.Batch)}
 }
 
 type ldbBatch struct {
-	db *leveldb.DB
-	b  *leveldb.Batch
+	db   *leveldb.DB
+	b    *leveldb.Batch
+	size int
 }
 
 func (b *ldbBatch) Put(key, value []byte) error {
 	b.b.Put(key, value)
+	b.size += len(value)
 	return nil
 }
 
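Because *LDBDatabase and ldbBatch both expose Put, the core Write* functions mentioned in the commit message can be written against a narrow Putter interface and then be handed either the database (write immediately) or a batch (write when the batch is flushed). A minimal sketch of that shape; writeBody and its key prefix are assumptions for illustration:

package putter

// Putter is the minimal write interface from the commit message. The database
// and a batch both provide Put, so helpers written against Putter work with
// either one unchanged.
type Putter interface {
	Put(key, value []byte) error
}

// writeBody is an illustrative core-style Write* helper; the name and key
// scheme are assumptions, not the real go-ethereum functions.
func writeBody(db Putter, hash, encodedBody []byte) error {
	key := append([]byte("body-"), hash...)
	return db.Put(key, encodedBody)
}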
@@ -291,6 +295,10 @@ func (b *ldbBatch) Write() error {
 	return b.db.Write(b.b, nil)
 }
 
+func (b *ldbBatch) ValueSize() int {
+	return b.size
+}
+
 type table struct {
 	db     Database
 	prefix string
@@ -309,6 +317,10 @@ func (dt *table) Put(key []byte, value []byte) error {
 	return dt.db.Put(append([]byte(dt.prefix), key...), value)
 }
 
+func (dt *table) Has(key []byte) (bool, error) {
+	return dt.db.Has(append([]byte(dt.prefix), key...))
+}
+
 func (dt *table) Get(key []byte) ([]byte, error) {
 	return dt.db.Get(append([]byte(dt.prefix), key...))
 }
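The table wrapper namespaces every key with a fixed prefix before delegating, so Put, Get and the new Has all agree on where a logical key lives. A toy sketch of the same idea over a plain map, with illustrative prefixes:

package tables

// store stands in for the backing database; a map is enough to show how the
// prefix keeps each table's keys in its own namespace.
type store map[string][]byte

type prefixTable struct {
	db     store
	prefix string
}

func (t prefixTable) Put(key, value []byte) {
	t.db[t.prefix+string(key)] = value
}

func (t prefixTable) Has(key []byte) bool {
	_, ok := t.db[t.prefix+string(key)]
	return ok
}

// Two tables over the same store never observe each other's keys:
//
//	db := store{}
//	headers := prefixTable{db, "h-"}
//	bodies := prefixTable{db, "b-"}
//	headers.Put([]byte("x"), []byte{1})
//	headers.Has([]byte("x")) // true
//	bodies.Has([]byte("x"))  // false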
@@ -342,3 +354,7 @@ func (tb *tableBatch) Put(key, value []byte) error {
 func (tb *tableBatch) Write() error {
 	return tb.batch.Write()
 }
+
+func (tb *tableBatch) ValueSize() int {
+	return tb.batch.ValueSize()
+}