core/state/snapshot, ethdb: track deletions more accurately (#22582)

* core/state/snapshot, ethdb: track deletions more accurately

* core/state/snapshot: don't reset the iterator, leveldb's screwy

* ethdb: don't mess with the insert batches for now

Author: Péter Szilágyi, 2021-03-30 19:04:22 +03:00 (committed by GitHub)
Parent: 3faae5defc
Commit: 61ff3e86b2
3 changed files with 22 additions and 3 deletions

@@ -484,8 +484,17 @@ func diffToDisk(bottom *diffLayer) *diskLayer {
			if key := it.Key(); len(key) == 65 { // TODO(karalabe): Yuck, we should move this into the iterator
				batch.Delete(key)
				base.cache.Del(key[1:])
				snapshotFlushStorageItemMeter.Mark(1)
				// Ensure we don't delete too much data blindly (contract can be
				// huge). It's ok to flush, the root will go missing in case of a
				// crash and we'll detect and regenerate the snapshot.
				if batch.ValueSize() > ethdb.IdealBatchSize {
					if err := batch.Write(); err != nil {
						log.Crit("Failed to write storage deletions", "err", err)
					}
					batch.Reset()
				}
			}
		}
		it.Release()
@@ -503,6 +512,16 @@ func diffToDisk(bottom *diffLayer) *diskLayer {
		snapshotFlushAccountItemMeter.Mark(1)
		snapshotFlushAccountSizeMeter.Mark(int64(len(data)))
		// Ensure we don't write too much data blindly. It's ok to flush, the
		// root will go missing in case of a crash and we'll detect and regen
		// the snapshot.
		if batch.ValueSize() > ethdb.IdealBatchSize {
			if err := batch.Write(); err != nil {
				log.Crit("Failed to write storage deletions", "err", err)
			}
			batch.Reset()
		}
	}
	// Push all the storage slots into the database
	for accountHash, storage := range bottom.storageData {
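
Both hunks apply the same pattern: keep appending writes or deletions to an ethdb batch, and flush it whenever batch.ValueSize() crosses ethdb.IdealBatchSize so a single huge contract cannot hold the whole deletion set in memory. The sketch below is a minimal, self-contained illustration of that pattern, not the geth code itself; the deleteRange helper and the demo keys are assumptions, while the batch/iterator calls and the IdealBatchSize threshold are the real ethdb APIs.

	// Hypothetical sketch of the bounded-batch deletion pattern shown above.
	// Only the ethdb batch/iterator APIs and IdealBatchSize come from geth;
	// the deleteRange helper itself is illustrative.
	package main

	import (
		"fmt"

		"github.com/ethereum/go-ethereum/core/rawdb"
		"github.com/ethereum/go-ethereum/ethdb"
	)

	// deleteRange removes every key under the given prefix, flushing the
	// batch whenever its pending size passes IdealBatchSize so memory stays
	// bounded even if the range is huge. A crash midway only leaves a partial
	// deletion, which a later snapshot regeneration would repair.
	func deleteRange(db ethdb.Database, prefix []byte) error {
		batch := db.NewBatch()
		it := db.NewIterator(prefix, nil)
		defer it.Release()

		for it.Next() {
			if err := batch.Delete(it.Key()); err != nil {
				return err
			}
			if batch.ValueSize() > ethdb.IdealBatchSize {
				if err := batch.Write(); err != nil {
					return err
				}
				batch.Reset()
			}
		}
		if err := it.Error(); err != nil {
			return err
		}
		return batch.Write() // flush whatever is left
	}

	func main() {
		db := rawdb.NewMemoryDatabase()
		for i := 0; i < 1000; i++ {
			db.Put([]byte(fmt.Sprintf("acct-%04d", i)), []byte{0x01})
		}
		if err := deleteRange(db, []byte("acct-")); err != nil {
			panic(err)
		}
	}

Note that the periodic flush only works if deletions actually contribute to ValueSize(), which is what the ethdb part of this commit addresses.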