core, eth/downloader: commit block data using batches (#15115)
* ethdb: add Putter interface and Has method
* ethdb: improve docs and add IdealBatchSize
* ethdb: remove memory batch lock. Batches are not safe for concurrent use.
* core: use ethdb.Putter for Write* functions. This covers the easy cases (see the first sketch after this list).
* core/state: simplify StateSync
* trie: optimize local node check
* ethdb: add ValueSize to Batch
* core: optimize HasHeader check. This avoids one random database read to get the block number. For many uses of HasHeader, the expectation is that the header is actually there; using Has avoids a load and decode of the value (see the second sketch after this list).
* core: write fast sync block data in batches. Collect writes into batches up to the ideal size instead of issuing many small, concurrent writes.
* eth/downloader: commit larger state batches. Collect nodes into a batch up to the ideal size instead of committing whenever a node is received.
* core: optimize HasBlock check. This avoids a random database read to get the number.
* core: use numberCache in HasHeader. numberCache has higher capacity, increasing the odds of finding the header without a database lookup.
* core: write imported block data using a batch. Restore batch writes of state and add blocks, tx entries, and receipts to the same batch. The change also simplifies the miner. This commit also removes posting of logs when a forked block is imported.
* core: fix DB write error handling
* ethdb: use RLock for Has
* core: fix HasBlock comment
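The batching pattern behind most of these changes can be shown with a minimal sketch. It assumes the ethdb API as it stands after this change (Putter, Batch with ValueSize, the IdealBatchSize constant); the writeEntry/writeEntries helpers and the data they write are hypothetical stand-ins, not code from the repository.

    package sketch

    import "github.com/ethereum/go-ethereum/ethdb"

    // writeEntry stands in for a core.Write* style helper: by taking an
    // ethdb.Putter it can write either straight to the database or into a
    // pending batch. The key/value layout here is hypothetical.
    func writeEntry(db ethdb.Putter, key, value []byte) error {
        return db.Put(key, value)
    }

    // writeEntries collects writes into a batch and flushes it whenever the
    // accumulated data crosses ethdb.IdealBatchSize, instead of issuing many
    // small, concurrent writes. The entries argument is a hypothetical
    // stand-in for the block data being committed.
    func writeEntries(db ethdb.Database, entries map[string][]byte) error {
        batch := db.NewBatch()
        for key, value := range entries {
            if err := writeEntry(batch, []byte(key), value); err != nil {
                return err
            }
            if batch.ValueSize() >= ethdb.IdealBatchSize {
                if err := batch.Write(); err != nil {
                    return err
                }
                batch = db.NewBatch() // start a fresh batch for the next chunk
            }
        }
        return batch.Write() // flush the remainder
    }

Since batches are not safe for concurrent use, each goroutine builds and writes its own batch rather than sharing one.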
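The HasHeader/HasBlock optimization can be sketched the same way. Only ethdb's Has method is taken from this change; headerKey is a hypothetical stand-in for the real header key schema, and the numberCache lookup that the real HasHeader tries first is left out.

    package sketch

    import (
        "encoding/binary"

        "github.com/ethereum/go-ethereum/common"
        "github.com/ethereum/go-ethereum/ethdb"
    )

    // headerKey is a hypothetical stand-in for the real header key layout.
    func headerKey(hash common.Hash, number uint64) []byte {
        var enc [8]byte
        binary.BigEndian.PutUint64(enc[:], number)
        return append(append([]byte("h"), enc[:]...), hash.Bytes()...)
    }

    // hasHeader only asks whether the key exists. Compared with reading the
    // header and decoding it, Has skips loading the value entirely, which is
    // the saving the commit message describes.
    func hasHeader(db ethdb.Database, hash common.Hash, number uint64) bool {
        ok, _ := db.Has(headerKey(hash, number))
        return ok
    }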
Committed by: Péter Szilágyi
Parent: ac193e36ce
Commit: 10181b57a9
@@ -304,13 +304,8 @@ func (self *worker) wait() {
}
go self.mux.Post(core.NewMinedBlockEvent{Block: block})
} else {
work.state.CommitTo(self.chainDb, self.config.IsEIP158(block.Number()))
stat, err := self.chain.WriteBlock(block)
if err != nil {
log.Error("Failed writing block to chain", "err", err)
continue
}
// update block hash since it is now available and not when the receipt/log of individual transactions were created
// Update the block hash in all logs since it is now available and not when the
// receipt/log of individual transactions were created.
for _, r := range work.receipts {
for _, l := range r.Logs {
l.BlockHash = block.Hash()
@@ -319,15 +314,17 @@ func (self *worker) wait() {
for _, log := range work.state.Logs() {
log.BlockHash = block.Hash()
}
stat, err := self.chain.WriteBlockAndState(block, work.receipts, work.state)
if err != nil {
log.Error("Failed writing block to chain", "err", err)
continue
}

// check if canon block and write transactions
if stat == core.CanonStatTy {
// This puts transactions in a extra db for rpc
core.WriteTxLookupEntries(self.chainDb, block)
// implicit by posting ChainHeadEvent
mustCommitNewWork = false
}

// broadcast before waiting for validation
go func(block *types.Block, logs []*types.Log, receipts []*types.Receipt) {
self.mux.Post(core.NewMinedBlockEvent{Block: block})
@@ -336,16 +333,12 @@ func (self *worker) wait() {
coalescedLogs []*types.Log
)
events = append(events, core.ChainEvent{Block: block, Hash: block.Hash(), Logs: logs})

if stat == core.CanonStatTy {
events = append(events, core.ChainHeadEvent{Block: block})
coalescedLogs = logs
}
// post blockchain events
self.chain.PostChainEvents(events, coalescedLogs)

if err := core.WriteBlockReceipts(self.chainDb, block.Hash(), block.NumberU64(), receipts); err != nil {
log.Warn("Failed writing block receipts", "err", err)
}
}(block, work.state.Logs(), work.receipts)
}
// Insert the block into the set of pending ones to wait for confirmations