core/state/snapshot: unlink snapshots from blocks, quad->linear cleanup

Péter Szilágyi
2019-11-22 13:23:49 +02:00
parent cdf3f016df
commit d754091a87
12 changed files with 203 additions and 206 deletions
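In short: snapshot layers were previously identified by a (block number, state root) pair; after this change the state root alone keys a layer, and stale-layer cleanup walks explicit parent/child links instead of rescanning every layer by number, which is the quad->linear part. A minimal before/after sketch for callers (db, journal, headNumber and headRoot are placeholders, not values from the commit):

    // Before: block numbers travelled alongside roots.
    // snaps, _ := snapshot.New(db, journal, headNumber, headRoot)
    // number, root := snaps.Snapshot(headRoot).Info()

    // After: the root alone identifies a layer.
    snaps, _ := snapshot.New(db, journal, headRoot)
    root := snaps.Snapshot(headRoot).Root()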


@@ -43,12 +43,16 @@ var (
// layer had been invalidated due to the chain progressing forward far enough
// to not maintain the layer's original state.
ErrSnapshotStale = errors.New("snapshot stale")
// errSnapshotCycle is returned if a snapshot is attempted to be inserted
// that forms a cycle in the snapshot tree.
errSnapshotCycle = errors.New("snapshot cycle")
)
// Snapshot represents the functionality supported by a snapshot storage layer.
type Snapshot interface {
// Info returns the block number and root hash for which this snapshot was made.
Info() (uint64, common.Hash)
// Root returns the root hash for which this snapshot was made.
Root() common.Hash
// Account directly retrieves the account associated with a particular hash in
// the snapshot slim data format.
@@ -77,6 +81,10 @@ type snapshot interface {
// This is meant to be used during shutdown to persist the snapshot without
// flattening everything down (bad for reorgs).
Journal() error
// Stale returns whether this layer has become stale (was flattened across) or
// if it's still live.
Stale() bool
}
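To illustrate the split between the two interfaces, a hedged sketch (tree and blockRoot are assumed fixtures, not code from the commit):

    // External callers only ever see the read-only Snapshot interface.
    s := tree.Snapshot(blockRoot)
    if s == nil {
        return fmt.Errorf("snapshot [%#x] missing", blockRoot)
    }
    _ = s.Root()

    // The tree downcasts to the internal interface for lifecycle
    // operations such as staleness checks and journalling.
    if layer, ok := s.(snapshot); ok && !layer.Stale() {
        _ = layer.Journal()
    }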
// SnapshotTree is an Ethereum state snapshot tree. It consists of one persistent
@@ -88,7 +96,7 @@ type snapshot interface {
// The goal of a state snapshot is twofold: to allow direct access to account and
// storage data to avoid expensive multi-level trie lookups; and to allow sorted,
// cheap iteration of the account/storage tries for sync aid.
type SnapshotTree struct {
type Tree struct {
layers map[common.Hash]snapshot // Collection of all known layers // TODO(karalabe): split Clique overlaps
lock sync.RWMutex
}
@@ -99,22 +107,21 @@ type SnapshotTree struct {
//
// If the snapshot is missing or inconsistent, the entirety is deleted and will
// be reconstructed from scratch based on the tries in the key-value store.
func New(db ethdb.KeyValueStore, journal string, headNumber uint64, headRoot common.Hash) (*SnapshotTree, error) {
func New(db ethdb.KeyValueStore, journal string, root common.Hash) (*Tree, error) {
// Attempt to load a previously persisted snapshot
head, err := loadSnapshot(db, journal, headNumber, headRoot)
head, err := loadSnapshot(db, journal, root)
if err != nil {
log.Warn("Failed to load snapshot, regenerating", "err", err)
if head, err = generateSnapshot(db, journal, headNumber, headRoot); err != nil {
if head, err = generateSnapshot(db, journal, root); err != nil {
return nil, err
}
}
// Existing snapshot loaded or one regenerated, seed all the layers
snap := &SnapshotTree{
snap := &Tree{
layers: make(map[common.Hash]snapshot),
}
for head != nil {
_, root := head.Info()
snap.layers[root] = head
snap.layers[head.Root()] = head
switch self := head.(type) {
case *diffLayer:
@@ -130,54 +137,57 @@ func New(db ethdb.KeyValueStore, journal string, headNumber uint64, headRoot com
// Snapshot retrieves a snapshot belonging to the given block root, or nil if no
// snapshot is maintained for that block.
func (st *SnapshotTree) Snapshot(blockRoot common.Hash) Snapshot {
st.lock.RLock()
defer st.lock.RUnlock()
func (t *Tree) Snapshot(blockRoot common.Hash) Snapshot {
t.lock.RLock()
defer t.lock.RUnlock()
return st.layers[blockRoot]
return t.layers[blockRoot]
}
// Update adds a new snapshot into the tree, if that can be linked to an existing
// old parent. It is disallowed to insert a disk layer (the origin of all).
func (st *SnapshotTree) Update(blockRoot common.Hash, parentRoot common.Hash, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte) error {
func (t *Tree) Update(blockRoot common.Hash, parentRoot common.Hash, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte) error {
// Reject noop updates to avoid self-loops in the snapshot tree. This is a
// special case that can only happen for Clique networks where empty blocks
// don't modify the state (0 block subsidy).
//
// Although we could silently ignore this internally, it should be the caller's
// responsibility to avoid even attempting to insert such a snapshot.
if blockRoot == parentRoot {
return errSnapshotCycle
}
// Generate a new snapshot on top of the parent
parent := st.Snapshot(parentRoot).(snapshot)
parent := t.Snapshot(parentRoot).(snapshot)
if parent == nil {
return fmt.Errorf("parent [%#x] snapshot missing", parentRoot)
}
snap := parent.Update(blockRoot, accounts, storage)
// Save the new snapshot for later
st.lock.Lock()
defer st.lock.Unlock()
t.lock.Lock()
defer t.lock.Unlock()
st.layers[snap.root] = snap
t.layers[snap.root] = snap
return nil
}
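To make the Clique corner case concrete, a hedged sketch written as it might appear in a package test (tree and root are assumed fixtures):

    // An empty Clique block leaves the state root untouched, so parent and
    // child would be the same node; Update refuses to build the self-loop.
    if err := tree.Update(root, root, nil, nil); err != errSnapshotCycle {
        t.Fatalf("expected errSnapshotCycle, got %v", err)
    }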
// Cap traverses downwards the snapshot tree from a head block hash until the
// number of allowed layers is crossed. All layers beyond the permitted number
// are flattened downwards.
func (st *SnapshotTree) Cap(blockRoot common.Hash, layers int, memory uint64) error {
func (t *Tree) Cap(root common.Hash, layers int, memory uint64) error {
// Retrieve the head snapshot to cap from
var snap snapshot
if s := st.Snapshot(blockRoot); s == nil {
return fmt.Errorf("snapshot [%#x] missing", blockRoot)
} else {
snap = s.(snapshot)
snap := t.Snapshot(root)
if snap == nil {
return fmt.Errorf("snapshot [%#x] missing", root)
}
diff, ok := snap.(*diffLayer)
if !ok {
return fmt.Errorf("snapshot [%#x] is disk layer", blockRoot)
return fmt.Errorf("snapshot [%#x] is disk layer", root)
}
// Run the internal capping and discard all stale layers
st.lock.Lock()
defer st.lock.Unlock()
t.lock.Lock()
defer t.lock.Unlock()
var (
diskNumber uint64
diffNumber uint64
)
// Flattening the bottom-most diff layer requires special casing since there's
// no child to rewire to the grandparent. In that case we can fake a temporary
// child for the capping and then remove it.
@@ -188,8 +198,9 @@ func (st *SnapshotTree) Cap(blockRoot common.Hash, layers int, memory uint64) er
base := diffToDisk(diff.flatten().(*diffLayer))
diff.lock.RUnlock()
st.layers[base.root] = base
diskNumber, diffNumber = base.number, base.number
// Replace the entire snapshot tree with the flat base
t.layers = map[common.Hash]snapshot{base.root: base}
return nil
case 1:
// If full flattening was requested, flatten the diffs but only merge if the
@@ -205,59 +216,74 @@
}
diff.lock.RUnlock()
// If all diff layers were removed, replace the entire snapshot tree
if base != nil {
st.layers[base.root] = base
diskNumber, diffNumber = base.number, base.number
} else {
st.layers[bottom.root] = bottom
diskNumber, diffNumber = bottom.parent.(*diskLayer).number, bottom.number
t.layers = map[common.Hash]snapshot{base.root: base}
return nil
}
// Merge the new aggregated layer into the snapshot tree, clean stales below
t.layers[bottom.root] = bottom
default:
diskNumber, diffNumber = st.cap(diff, layers, memory)
// Many layers requested to be retained, cap normally
t.cap(diff, layers, memory)
}
for root, snap := range st.layers {
if number, _ := snap.Info(); number != diskNumber && number < diffNumber {
delete(st.layers, root)
// Remove any layer that is stale or links into a stale layer
children := make(map[common.Hash][]common.Hash)
for root, snap := range t.layers {
if diff, ok := snap.(*diffLayer); ok {
parent := diff.parent.Root()
children[parent] = append(children[parent], root)
}
}
var remove func(root common.Hash)
remove = func(root common.Hash) {
delete(t.layers, root)
for _, child := range children[root] {
remove(child)
}
delete(children, root)
}
for root, snap := range t.layers {
if snap.Stale() {
remove(root)
}
}
return nil
}
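A hedged usage sketch of Cap after block import; the layer and memory limits are illustrative, not mandated by this diff:

    // Keep at most 128 diff layers above the disk layer; once the layer
    // limit is reached, also flatten whenever 4MB of diff data accumulates.
    if err := snaps.Cap(currentRoot, 128, 4*1024*1024); err != nil {
        log.Error("Failed to cap snapshot tree", "root", currentRoot, "err", err)
    }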
// cap traverses downwards the diff tree until the number of allowed layers is
// crossed. All diffs beyond the permitted number are flattened downwards. If
// the layer limit is reached, memory cap is also enforced (but not before). The
// block numbers for the disk layer and first diff layer are returned for GC.
func (st *SnapshotTree) cap(diff *diffLayer, layers int, memory uint64) (uint64, uint64) {
// crossed. All diffs beyond the permitted number are flattened downwards. If the
// layer limit is reached, memory cap is also enforced (but not before).
func (t *Tree) cap(diff *diffLayer, layers int, memory uint64) {
// Dive until we run out of layers or reach the persistent database
for ; layers > 2; layers-- {
// If we still have diff layers below, continue down
if parent, ok := diff.parent.(*diffLayer); ok {
diff = parent
} else {
// Diff stack too shallow, return block numbers without modifications
return diff.parent.(*diskLayer).number, diff.number
// Diff stack too shallow, return without modifications
return
}
}
// We're out of layers, flatten anything below, stopping if it's the disk or if
// the memory limit is not yet exceeded.
switch parent := diff.parent.(type) {
case *diskLayer:
return parent.number, diff.number
return
case *diffLayer:
// Flatten the parent into the grandparent. The flattening internally obtains a
// write lock on the grandparent.
flattened := parent.flatten().(*diffLayer)
st.layers[flattened.root] = flattened
t.layers[flattened.root] = flattened
diff.lock.Lock()
defer diff.lock.Unlock()
diff.parent = flattened
if flattened.memory < memory {
diskNumber, _ := flattened.parent.Info()
return diskNumber, flattened.number
return
}
default:
panic(fmt.Sprintf("unknown data layer: %T", parent))
@@ -269,10 +295,8 @@ func (st *SnapshotTree) cap(diff *diffLayer, layers int, memory uint64) (uint64,
base := diffToDisk(bottom)
bottom.lock.RUnlock()
st.layers[base.root] = base
t.layers[base.root] = base
diff.parent = base
return base.number, diff.number
}
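A worked trace of the dive above, as I read the loop (illustrative, not from the commit):

    // With layers == 3 on the stack  disk <- d1 <- d2 <- d3 <- d4, capped
    // from d4, the loop dives once (d4 -> d3) and stops. d3's parent d2 is
    // still a diffLayer, so d2 and d1 are flattened into one combined diff
    // and d3 is rewired onto it, leaving exactly three diffs. Only if the
    // combined layer still exceeds the memory allowance does the tail of
    // the function merge it into the disk layer via diffToDisk.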
// diffToDisk merges a bottom-most diff into the persistent disk layer underneath
@@ -284,7 +308,7 @@ func diffToDisk(bottom *diffLayer) *diskLayer {
)
// Start by temporarily deleting the current snapshot root marker. This
// ensures that in the case of a crash, the entire snapshot is invalidated.
rawdb.DeleteSnapshotBlock(batch)
rawdb.DeleteSnapshotRoot(batch)
// Mark the original base as stale as we're going to create a new wrapper
base.lock.Lock()
@@ -341,13 +365,12 @@ func diffToDisk(bottom *diffLayer) *diskLayer {
}
}
// Update the snapshot root marker and write any remainder data
rawdb.WriteSnapshotBlock(batch, bottom.number, bottom.root)
rawdb.WriteSnapshotRoot(batch, bottom.root)
if err := batch.Write(); err != nil {
log.Crit("Failed to write leftover snapshot", "err", err)
}
return &diskLayer{
root: bottom.root,
number: bottom.number,
cache: base.cache,
db: base.db,
journal: base.journal,
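The marker handling in diffToDisk is a crash-safety pattern worth distilling; a hedged sketch, simplified relative to the real code (which flushes the batch periodically during migration):

    batch := base.db.NewBatch()

    // 1. Drop the root marker and flush it, so that a crash during the
    //    migration below leaves the snapshot visibly invalid.
    rawdb.DeleteSnapshotRoot(batch)
    if err := batch.Write(); err != nil {
        log.Crit("Failed to invalidate snapshot", "err", err)
    }
    batch.Reset()

    // 2. Migrate the flattened account and storage data (elided here).

    // 3. Reinstate the marker together with the final chunk of data; only
    //    a fully migrated snapshot ever regains a valid root.
    rawdb.WriteSnapshotRoot(batch, bottom.root)
    if err := batch.Write(); err != nil {
        log.Crit("Failed to write leftover snapshot", "err", err)
    }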
@@ -357,27 +380,25 @@ func diffToDisk(bottom *diffLayer) *diskLayer {
// Journal commits an entire diff hierarchy to disk into a single journal file.
// This is meant to be used during shutdown to persist the snapshot without
// flattening everything down (bad for reorgs).
func (st *SnapshotTree) Journal(blockRoot common.Hash) error {
// Retrieve the head snapshot to journal from
var snap snapshot
if s := st.Snapshot(blockRoot); s == nil {
func (t *Tree) Journal(blockRoot common.Hash) error {
// Retrieve the head snapshot to journal from
snap := t.Snapshot(blockRoot)
if snap == nil {
return fmt.Errorf("snapshot [%#x] missing", blockRoot)
} else {
snap = s.(snapshot)
}
// Run the journaling
st.lock.Lock()
defer st.lock.Unlock()
t.lock.Lock()
defer t.lock.Unlock()
return snap.Journal()
return snap.(snapshot).Journal()
}
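A hedged sketch of the intended shutdown call (snaps and headRoot are assumed to be tracked by the caller):

    // Persist the whole diff hierarchy without flattening it, so a restart
    // can still service reorgs across the journalled layers.
    if err := snaps.Journal(headRoot); err != nil {
        log.Warn("Failed to journal snapshot tree", "err", err)
    }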
// loadSnapshot loads a pre-existing state snapshot backed by a key-value store.
func loadSnapshot(db ethdb.KeyValueStore, journal string, headNumber uint64, headRoot common.Hash) (snapshot, error) {
func loadSnapshot(db ethdb.KeyValueStore, journal string, root common.Hash) (snapshot, error) {
// Retrieve the root of the snapshot, failing if no snapshot is present in
// the database (or crashed mid-update).
number, root := rawdb.ReadSnapshotBlock(db)
if root == (common.Hash{}) {
baseRoot := rawdb.ReadSnapshotRoot(db)
if baseRoot == (common.Hash{}) {
return nil, errors.New("missing or corrupted snapshot")
}
cache, _ := bigcache.NewBigCache(bigcache.Config{ // TODO(karalabe): dedup
@@ -391,16 +412,14 @@ func loadSnapshot(db ethdb.KeyValueStore, journal string, headNumber uint64, hea
journal: journal,
db: db,
cache: cache,
number: number,
root: root,
root: baseRoot,
}
// Load all the snapshot diffs from the journal, failing if their chain is broken
// or does not lead from the disk snapshot to the specified head.
if _, err := os.Stat(journal); os.IsNotExist(err) {
// Journal doesn't exist, don't worry if it's not supposed to
if number != headNumber || root != headRoot {
return nil, fmt.Errorf("snapshot journal missing, head doesn't match snapshot: #%d [%#x] vs. #%d [%#x]",
headNumber, headRoot, number, root)
if baseRoot != root {
return nil, fmt.Errorf("snapshot journal missing, head doesn't match snapshot: have %#x, want %#x", baseRoot, root)
}
return base, nil
}
@@ -414,10 +433,8 @@ func loadSnapshot(db ethdb.KeyValueStore, journal string, headNumber uint64, hea
}
// Entire snapshot journal loaded, sanity check the head and return
number, root = snapshot.Info()
if number != headNumber || root != headRoot {
return nil, fmt.Errorf("head doesn't match snapshot: #%d [%#x] vs. #%d [%#x]",
headNumber, headRoot, number, root)
if head := snapshot.Root(); head != root {
return nil, fmt.Errorf("head doesn't match snapshot: have %#x, want %#x", head, root)
}
return snapshot, nil
}
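Restating the loader's decision table, as a summary of the code above rather than text from the commit:

    // no root marker in db                  -> error, snapshot regenerated
    // marker present, journal missing:
    //   marker == requested root            -> disk layer alone suffices
    //   marker != requested root            -> error, journal was required
    // marker present, journal loads:
    //   head layer root == requested root   -> snapshot ready
    //   otherwise                           -> error, head mismatch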