core: define and test chain rewind corner cases (#21409)
* core: define and test chain reparation cornercases
* core: write up a variety of set-head tests
* core, eth: unify chain rollbacks, handle all the cases
* core: make linter smile
* core: remove commented out legacy code
* core, eth/downloader: fix review comments
* core: revert a removed recovery mechanism
@@ -176,8 +176,8 @@ type LightChain interface {
 	// InsertHeaderChain inserts a batch of headers into the local chain.
 	InsertHeaderChain([]*types.Header, int) (int, error)
 
-	// Rollback removes a few recently added elements from the local chain.
-	Rollback([]common.Hash)
+	// SetHead rewinds the local chain to a new head.
+	SetHead(uint64) error
 }
 
 // BlockChain encapsulates functions required to sync a (full or fast) blockchain.
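
The interface change above swaps a hash-list contract (the caller enumerates every block to drop) for a single target number. As a minimal sketch, assuming a toy in-memory chain rather than go-ethereum's implementation, this is all SetHead asks of an implementor:

// Minimal sketch (not go-ethereum's implementation): an in-memory chain
// illustrating why SetHead(uint64) is a simpler contract than
// Rollback([]common.Hash). The caller only names the new head number; the
// chain itself decides which trailing entries to drop.
package main

import "fmt"

type memChain struct {
	headers []uint64 // header numbers 0..head, index == number
}

// SetHead rewinds the chain so that its head is at most `head`.
// It is a no-op if the chain is already at or below the target.
func (c *memChain) SetHead(head uint64) error {
	if uint64(len(c.headers)) > head+1 {
		c.headers = c.headers[:head+1]
	}
	return nil
}

func main() {
	c := &memChain{}
	for i := uint64(0); i <= 10; i++ {
		c.headers = append(c.headers, i)
	}
	_ = c.SetHead(6)
	fmt.Println("head after rewind:", c.headers[len(c.headers)-1]) // 6
}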
@@ -469,6 +469,9 @@ func (d *Downloader) syncWithPeer(p *peerConnection, hash common.Hash, td *big.Int
 			if pivot <= origin {
 				origin = pivot - 1
 			}
+			// Write out the pivot into the database so a rollback beyond it will
+			// reenable fast sync
+			rawdb.WriteLastPivotNumber(d.stateDB, pivot)
 		}
 	}
 	d.committed = 1
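
The pivot is persisted because blocks at or below it were fast-synced without executing their state; if a later rewind drops the head below the pivot, the node must fall back to fast sync. A hedged sketch of that persistence pattern, using an illustrative key name and map-backed store rather than go-ethereum's rawdb API:

// Sketch of the pivot-persistence idea the hunk above relies on. The
// "LastPivot" key and kvStore type are illustrative stand-ins, not the
// real rawdb schema.
package main

import (
	"encoding/binary"
	"fmt"
)

type kvStore map[string][]byte // stand-in for the node's key-value database

func writeLastPivot(db kvStore, pivot uint64) {
	buf := make([]byte, 8)
	binary.BigEndian.PutUint64(buf, pivot)
	db["LastPivot"] = buf
}

func readLastPivot(db kvStore) (uint64, bool) {
	buf, ok := db["LastPivot"]
	if !ok {
		return 0, false
	}
	return binary.BigEndian.Uint64(buf), true
}

func main() {
	db := kvStore{}
	writeLastPivot(db, 9_000_000)

	// On a later SetHead, anything at or below the pivot lacks executed
	// state, so rewinding past it must flip the node back to fast sync.
	if pivot, ok := readLastPivot(db); ok {
		newHead := uint64(8_999_500)
		fmt.Println("re-enable fast sync:", newHead < pivot) // true
	}
}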
@@ -496,6 +499,7 @@ func (d *Downloader) syncWithPeer(p *peerConnection, hash common.Hash, td *big.Int
 			d.ancientLimit = height - fullMaxForkAncestry - 1
 		}
 		frozen, _ := d.stateDB.Ancients() // Ignore the error here since light client can also hit here.
+
 		// If a part of blockchain data has already been written into active store,
 		// disable the ancient style insertion explicitly.
 		if origin >= frozen && frozen != 0 {
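
Here `frozen` is the count of items already moved into the append-only ancient store, so blocks 0..frozen-1 live there. A small sketch of the decision above, with illustrative names and values:

// Hedged sketch of the active-store check: if the sync origin is at or
// past the frozen boundary (and anything is frozen at all), new data must
// go to the active store instead of ancient-style insertion.
package main

import "fmt"

func useActiveStore(origin, frozen uint64) bool {
	return origin >= frozen && frozen != 0
}

func main() {
	fmt.Println(useActiveStore(600, 500)) // true: origin beyond frozen boundary
	fmt.Println(useActiveStore(100, 500)) // false: still inside ancient range
	fmt.Println(useActiveStore(100, 0))   // false: nothing frozen yet
}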
@@ -506,11 +510,9 @@ func (d *Downloader) syncWithPeer(p *peerConnection, hash common.Hash, td *big.Int
 		}
 		// Rewind the ancient store and blockchain if reorg happens.
 		if origin+1 < frozen {
-			var hashes []common.Hash
-			for i := origin + 1; i < d.lightchain.CurrentHeader().Number.Uint64(); i++ {
-				hashes = append(hashes, rawdb.ReadCanonicalHash(d.stateDB, i))
+			if err := d.lightchain.SetHead(origin + 1); err != nil {
+				return err
 			}
-			d.lightchain.Rollback(hashes)
 		}
 	}
 	// Initiate the sync using a concurrent header and content retrieval algorithm
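
The rewind condition fires when the common ancestor (origin) sits below the frozen boundary: the local chain then extends past data the reorg has invalidated, and it is truncated back to origin+1 in a single SetHead call instead of hash-by-hash collection. A worked sketch with illustrative numbers:

// Hedged sketch of the rewind decision above; values are made up.
package main

import "fmt"

func needsRewind(origin, frozen uint64) bool {
	// frozen counts ancient items, so blocks 0..frozen-1 are frozen. A sync
	// origin at or below that boundary invalidates the frozen tail.
	return origin+1 < frozen
}

func main() {
	fmt.Println(needsRewind(100, 500)) // true: rewind chain to head 101
	fmt.Println(needsRewind(499, 500)) // false: origin+1 == frozen, no rewind
}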
@@ -1382,35 +1384,32 @@ func (d *Downloader) fetchParts(deliveryCh chan dataPack, deliver func(dataPack)
 func (d *Downloader) processHeaders(origin uint64, pivot uint64, td *big.Int) error {
 	// Keep a count of uncertain headers to roll back
 	var (
-		rollback    []*types.Header
+		rollback    uint64 // Zero means no rollback (fine as you can't unroll the genesis)
 		rollbackErr error
 		mode        = d.getMode()
 	)
 	defer func() {
-		if len(rollback) > 0 {
-			// Flatten the headers and roll them back
-			hashes := make([]common.Hash, len(rollback))
-			for i, header := range rollback {
-				hashes[i] = header.Hash()
-			}
+		if rollback > 0 {
 			lastHeader, lastFastBlock, lastBlock := d.lightchain.CurrentHeader().Number, common.Big0, common.Big0
 			if mode != LightSync {
 				lastFastBlock = d.blockchain.CurrentFastBlock().Number()
 				lastBlock = d.blockchain.CurrentBlock().Number()
 			}
-			d.lightchain.Rollback(hashes)
+			if err := d.lightchain.SetHead(rollback - 1); err != nil { // -1 to target the parent of the first uncertain block
+				// We're already unwinding the stack, only print the error to make it more visible
+				log.Error("Failed to roll back chain segment", "head", rollback-1, "err", err)
+			}
 			curFastBlock, curBlock := common.Big0, common.Big0
 			if mode != LightSync {
 				curFastBlock = d.blockchain.CurrentFastBlock().Number()
 				curBlock = d.blockchain.CurrentBlock().Number()
 			}
-			log.Warn("Rolled back headers", "count", len(hashes),
+			log.Warn("Rolled back chain segment",
 				"header", fmt.Sprintf("%d->%d", lastHeader, d.lightchain.CurrentHeader().Number),
 				"fast", fmt.Sprintf("%d->%d", lastFastBlock, curFastBlock),
 				"block", fmt.Sprintf("%d->%d", lastBlock, curBlock), "reason", rollbackErr)
 		}
 	}()
 	// Wait for batches of headers to process
 	gotHeaders := false
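
The core pattern in this hunk: record only the number of the first uncertain header, and let a deferred function rewind to its parent on any exit path unless validation completed and cleared the marker. A self-contained sketch of that pattern, assuming a toy chain type rather than the downloader's real LightChain:

// Hedged sketch of the deferred-rollback pattern used above.
package main

import "fmt"

type chain struct{ head uint64 }

func (c *chain) SetHead(n uint64) error { c.head = n; return nil }

func process(c *chain, firstUncertain uint64, fail bool) (err error) {
	rollback := firstUncertain // zero means no rollback (can't unroll genesis)
	defer func() {
		if rollback > 0 {
			// -1 targets the parent of the first uncertain block
			_ = c.SetHead(rollback - 1)
		}
	}()
	if fail {
		return fmt.Errorf("invalid header in batch")
	}
	rollback = 0 // verification passed: disable the rollback
	return nil
}

func main() {
	c := &chain{head: 120}
	_ = process(c, 101, true)
	fmt.Println(c.head) // 100: parent of the first uncertain header
}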
@@ -1462,7 +1461,7 @@ func (d *Downloader) processHeaders(origin uint64, pivot uint64, td *big.Int) error {
 			}
 		}
 		// Disable any rollback and return
-		rollback = nil
+		rollback = 0
 		return nil
 	}
 	// Otherwise split the chunk of headers into batches and process them
@@ -1481,15 +1480,9 @@ func (d *Downloader) processHeaders(origin uint64, pivot uint64, td *big.Int) error {
 			limit = len(headers)
 		}
 		chunk := headers[:limit]
 
 		// In case of header only syncing, validate the chunk immediately
 		if mode == FastSync || mode == LightSync {
-			// Collect the yet unknown headers to mark them as uncertain
-			unknown := make([]*types.Header, 0, len(chunk))
-			for _, header := range chunk {
-				if !d.lightchain.HasHeader(header.Hash(), header.Number.Uint64()) {
-					unknown = append(unknown, header)
-				}
-			}
 			// If we're importing pure headers, verify based on their recentness
 			frequency := fsHeaderCheckFrequency
 			if chunk[len(chunk)-1].Number.Uint64()+uint64(fsHeaderForceVerify) > pivot {
@@ -1497,17 +1490,18 @@ func (d *Downloader) processHeaders(origin uint64, pivot uint64, td *big.Int) error {
 			}
 			if n, err := d.lightchain.InsertHeaderChain(chunk, frequency); err != nil {
 				rollbackErr = err
-				// If some headers were inserted, add them too to the rollback list
-				if n > 0 {
-					rollback = append(rollback, chunk[:n]...)
+
+				// If some headers were inserted, track them as uncertain
+				if n > 0 && rollback == 0 {
+					rollback = chunk[0].Number.Uint64()
 				}
 				log.Debug("Invalid header encountered", "number", chunk[n].Number, "hash", chunk[n].Hash(), "parent", chunk[n].ParentHash, "err", err)
 				return fmt.Errorf("%w: %v", errInvalidChain, err)
 			}
-			// All verifications passed, store newly found uncertain headers
-			rollback = append(rollback, unknown...)
-			if len(rollback) > fsHeaderSafetyNet {
-				rollback = append(rollback[:0], rollback[len(rollback)-fsHeaderSafetyNet:]...)
+			// All verifications passed, track all headers within the alloted limits
+			head := chunk[len(chunk)-1].Number.Uint64()
+			if head-rollback > uint64(fsHeaderSafetyNet) {
+				rollback = head - uint64(fsHeaderSafetyNet)
 			}
 		}
 		// Unless we're doing light chains, schedule the headers for associated content retrieval
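
Instead of trimming a header slice to the last fsHeaderSafetyNet entries, the new code drags the numeric rollback marker forward so it never trails the validated head by more than the safety-net distance. A worked sketch of that arithmetic (it assumes rollback <= head, as the downloader guarantees; the constant mirrors the downloader's 2048 but treat the value as illustrative):

// Hedged sketch of the fsHeaderSafetyNet cap applied above.
package main

import "fmt"

const fsHeaderSafetyNet = 2048

// advance slides the rollback marker forward as batches validate, so a late
// failure never rewinds more than fsHeaderSafetyNet headers.
func advance(rollback, head uint64) uint64 {
	if head-rollback > uint64(fsHeaderSafetyNet) {
		return head - uint64(fsHeaderSafetyNet)
	}
	return rollback
}

func main() {
	rollback := uint64(1_000)            // first uncertain header
	rollback = advance(rollback, 1_500)  // still within the net: unchanged
	fmt.Println(rollback)                // 1000
	rollback = advance(rollback, 10_000) // far past the net: dragged forward
	fmt.Println(rollback)                // 7952 == 10000 - 2048
}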
@@ -1613,6 +1607,7 @@ func (d *Downloader) processFastSyncContent(latest *types.Header) error {
 		}
 	}
 	go closeOnErr(sync)
+
 	// Figure out the ideal pivot block. Note, that this goalpost may move if the
 	// sync takes long enough for the chain head to move significantly.
 	pivot := uint64(0)
@@ -1654,6 +1649,10 @@ func (d *Downloader) processFastSyncContent(latest *types.Header) error {
 			if height := latest.Number.Uint64(); height > pivot+2*uint64(fsMinFullBlocks) {
 				log.Warn("Pivot became stale, moving", "old", pivot, "new", height-uint64(fsMinFullBlocks))
 				pivot = height - uint64(fsMinFullBlocks)
+
+				// Write out the pivot into the database so a rollback beyond it will
+				// reenable fast sync
+				rawdb.WriteLastPivotNumber(d.stateDB, pivot)
 			}
 		}
 		P, beforeP, afterP := splitAroundPivot(pivot, results)
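
The stale-pivot rule above: once the chain head moves more than 2*fsMinFullBlocks past the pivot, the pivot is dragged up to head - fsMinFullBlocks so the final fully-executed segment stays short, and the fresh pivot is persisted the same way as during sync startup. A sketch of the rule in isolation (fsMinFullBlocks is 64 in the downloader; treat the value as illustrative):

// Hedged sketch of the pivot-staleness check in the hunk above.
package main

import "fmt"

const fsMinFullBlocks = 64

func movePivot(pivot, height uint64) uint64 {
	if height > pivot+2*uint64(fsMinFullBlocks) {
		return height - uint64(fsMinFullBlocks) // pivot became stale, move it
	}
	return pivot
}

func main() {
	fmt.Println(movePivot(1000, 1100)) // 1000: head not far enough ahead
	fmt.Println(movePivot(1000, 1200)) // 1136: stale, moved to 1200-64
}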