* eth/protocols: persist received state segments
* core: initial implementation
* core/state/snapshot: add tests
* core, eth: updates
* eth/protocols/snapshot: count flat state size
* core/state: add metrics
* core/state/snapshot: skip unnecessary deletion
* core/state/snapshot: rename
* core/state/snapshot: use the global batch
* core/state/snapshot: add logs and fix wiping
* core/state/snapshot: fix
* core/state/snapshot: save generation progress even if the batch is empty
* core/state/snapshot: fixes
* core/state/snapshot: fix initial account range length
* core/state/snapshot: fix initial account range
* eth/protocols/snap: store flat states during the healing
* eth/protocols/snap: print logs
* core/state/snapshot: refactor (#4)
  * core/state/snapshot: refactor
  * core/state/snapshot: tiny fix and polish
  Co-authored-by: rjl493456442 <garyrong0905@gmail.com>
* core, eth: fixes
* core, eth: fix healing writer
* core, trie, eth: fix paths
* eth/protocols/snap: fix encoding
* eth, core: add debug log
* core/state/generate: release iterator asap (#5)
  core/state/snapshot: less copy
  core/state/snapshot: revert split loop
  core/state/snapshot: handle storage becoming empty, improve test robustness
  core/state: test modified codehash
  core/state/snapshot: polish
* core/state/snapshot: optimize stats counter
* core, eth: add metric
* core/state/snapshot: update comments
* core/state/snapshot: improve tests
* core/state/snapshot: replace secure trie with standard trie
* core/state/snapshot: wrap return as the struct
* core/state/snapshot: skip wiping correct states
* core/state/snapshot: updates
* core/state/snapshot: fixes
* core/state/snapshot: fix panic due to reference flaw in closure
* core/state/snapshot: fix errors in state generation logic + fix log output
* core/state/snapshot: remove an error case
* core/state/snapshot: fix condition-check for exhausted snap state
* core/state/snapshot: use stackTrie for small tries
* core/state/snapshot: don't resolve small storage tries in vain
* core/state/snapshot: properly clean up storage of deleted accounts
* core/state/snapshot: avoid RLP-encoding in some cases + minor nitpicks
* core/state/snapshot: fix error (+testcase)
* core/state/snapshot: clean up tests a bit
* core/state/snapshot: work in progress on better tests
* core/state/snapshot: polish code
* core/state/snapshot: fix trie iteration abortion trigger
* core/state/snapshot: fix flaws
* core/state/snapshot: remove panic
* core/state/snapshot: fix abort
* core/state/snapshot: more tests (plus failing testcase)
* core/state/snapshot: more testcases + fix for failing test
* core/state/snapshot: testcase for malformed data
* core/state/snapshot: some test nitpicks
* core/state/snapshot: improvements to logging
* core/state/snapshot: testcase to demo error in abortion
* core/state/snapshot: fix abortion
* cmd/geth: make verify-state report the root
* trie: fix failing test
* core/state/snapshot: add timer metrics
* core/state/snapshot: fix metrics
* core/state/snapshot: update tests
* eth/protocols/snap: write snapshot account even if code or state is needed
* core/state/snapshot: fix diskmore check
* core/state/snapshot: review fixes
* core/state/snapshot: improve error message
* cmd/geth: rename 'error' to 'err' in logs
* core/state/snapshot: fix some review concerns
* core/state/snapshot, eth/protocols/snap: clear snapshot marker when starting/resuming snap sync
* core: add error log
* core/state/snapshot: use proper timers for metrics collection
* core/state/snapshot: address some review concerns
* eth/protocols/snap: improved log message
* eth/protocols/snap: fix heal logs to condense infos
* core/state/snapshot: wait for generator termination before restarting
* core/state/snapshot: revert timers to counters to track total time

Co-authored-by: Martin Holst Swende <martin@swende.se>
Co-authored-by: Péter Szilágyi <peterke@gmail.com>

// Copyright 2020 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package snapshot

import (
	"bytes"
	"encoding/binary"
	"errors"
	"fmt"
	"math"
	"runtime"
	"sync"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/rlp"
	"github.com/ethereum/go-ethereum/trie"
)

// trieKV represents a trie key-value pair.
type trieKV struct {
	key   common.Hash
	value []byte
}

type (
	// trieGeneratorFn is the interface of trie generation, which can
	// be implemented by different trie algorithms.
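	//
	// An implementation is expected to consume leaves from the in channel
	// until it is closed and then deliver exactly one root hash on the out
	// channel; generateTrieRoot relies on this contract when shutting down.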
	trieGeneratorFn func(db ethdb.KeyValueWriter, in chan (trieKV), out chan (common.Hash))

	// leafCallbackFn is the callback invoked at the leaves of the trie,
	// returning the subtrie root with the specified subtrie identifier.
	leafCallbackFn func(db ethdb.KeyValueWriter, accountHash, codeHash common.Hash, stat *generateStats) (common.Hash, error)
)

// GenerateAccountTrieRoot takes an account iterator and reproduces the root hash.
func GenerateAccountTrieRoot(it AccountIterator) (common.Hash, error) {
	return generateTrieRoot(nil, it, common.Hash{}, stackTrieGenerate, nil, newGenerateStats(), true)
}

// GenerateStorageTrieRoot takes a storage iterator and reproduces the root hash.
func GenerateStorageTrieRoot(account common.Hash, it StorageIterator) (common.Hash, error) {
	return generateTrieRoot(nil, it, account, stackTrieGenerate, nil, newGenerateStats(), true)
}

// GenerateTrie takes the whole snapshot tree as the input, traverses all the
// accounts as well as the corresponding storages and regenerates the whole state
// (account trie + all storage tries).
func GenerateTrie(snaptree *Tree, root common.Hash, src ethdb.Database, dst ethdb.KeyValueWriter) error {
	// Traverse all state by snapshot, re-generate the whole state trie
	acctIt, err := snaptree.AccountIterator(root, common.Hash{})
	if err != nil {
		return err // The required snapshot might not exist.
	}
	defer acctIt.Release()

	got, err := generateTrieRoot(dst, acctIt, common.Hash{}, stackTrieGenerate, func(dst ethdb.KeyValueWriter, accountHash, codeHash common.Hash, stat *generateStats) (common.Hash, error) {
		// Migrate the code first, committing the contract code into the destination db.
		if codeHash != emptyCode {
			code := rawdb.ReadCode(src, codeHash)
			if len(code) == 0 {
				return common.Hash{}, errors.New("failed to read contract code")
			}
			rawdb.WriteCode(dst, codeHash, code)
		}
		// Then migrate all storage trie nodes into the destination db.
		storageIt, err := snaptree.StorageIterator(root, accountHash, common.Hash{})
		if err != nil {
			return common.Hash{}, err
		}
		defer storageIt.Release()

		hash, err := generateTrieRoot(dst, storageIt, accountHash, stackTrieGenerate, nil, stat, false)
		if err != nil {
			return common.Hash{}, err
		}
		return hash, nil
	}, newGenerateStats(), true)

	if err != nil {
		return err
	}
	if got != root {
		return fmt.Errorf("state root hash mismatch: got %x, want %x", got, root)
	}
	return nil
}

// generateStats is a collection of statistics gathered by the trie generator
// for logging purposes.
type generateStats struct {
	head  common.Hash // Last account hash the generator crawled past
	start time.Time   // Timestamp when the generation started

	accounts uint64 // Number of accounts done (including those being crawled)
	slots    uint64 // Number of storage slots done (including those being crawled)

	slotsStart map[common.Hash]time.Time   // Start time for account slot crawling
	slotsHead  map[common.Hash]common.Hash // Slot head for accounts being crawled

	lock sync.RWMutex
}

// newGenerateStats creates a new generator stats.
func newGenerateStats() *generateStats {
	return &generateStats{
		slotsStart: make(map[common.Hash]time.Time),
		slotsHead:  make(map[common.Hash]common.Hash),
		start:      time.Now(),
	}
}

// progressAccounts updates the generator stats for the account range.
func (stat *generateStats) progressAccounts(account common.Hash, done uint64) {
	stat.lock.Lock()
	defer stat.lock.Unlock()

	stat.accounts += done
	stat.head = account
}

// finishAccounts updates the generator stats for the finished account range.
func (stat *generateStats) finishAccounts(done uint64) {
	stat.lock.Lock()
	defer stat.lock.Unlock()

	stat.accounts += done
}

// progressContract updates the generator stats for a specific in-progress contract.
func (stat *generateStats) progressContract(account common.Hash, slot common.Hash, done uint64) {
	stat.lock.Lock()
	defer stat.lock.Unlock()

	stat.slots += done
	stat.slotsHead[account] = slot
	if _, ok := stat.slotsStart[account]; !ok {
		stat.slotsStart[account] = time.Now()
	}
}

// finishContract updates the generator stats for a specific just-finished contract.
func (stat *generateStats) finishContract(account common.Hash, done uint64) {
	stat.lock.Lock()
	defer stat.lock.Unlock()

	stat.slots += done
	delete(stat.slotsHead, account)
	delete(stat.slotsStart, account)
}

// report prints the cumulative progress statistics, including an ETA
// estimate where one can be derived.
func (stat *generateStats) report() {
	stat.lock.RLock()
	defer stat.lock.RUnlock()

	ctx := []interface{}{
		"accounts", stat.accounts,
		"slots", stat.slots,
		"elapsed", common.PrettyDuration(time.Since(stat.start)),
	}
	if stat.accounts > 0 {
		// If there's progress on the account trie, estimate the time to finish crawling it
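		// The head hash is assumed to be evenly distributed across the hash
		// keyspace, so its leading 8 bytes act as a linear progress counter
		// out of math.MaxUint64.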
		if done := binary.BigEndian.Uint64(stat.head[:8]) / stat.accounts; done > 0 {
			var (
				left  = (math.MaxUint64 - binary.BigEndian.Uint64(stat.head[:8])) / stat.accounts
				speed = done/uint64(time.Since(stat.start)/time.Millisecond+1) + 1 // +1 to avoid division by zero
				eta   = time.Duration(left/speed) * time.Millisecond
			)
			// If there are large contract crawls in progress, estimate their finish time
			for acc, head := range stat.slotsHead {
				start := stat.slotsStart[acc]
				if done := binary.BigEndian.Uint64(head[:8]); done > 0 {
					var (
						left  = math.MaxUint64 - binary.BigEndian.Uint64(head[:8])
						speed = done/uint64(time.Since(start)/time.Millisecond+1) + 1 // +1 to avoid division by zero
					)
					// Override the ETA if larger than the largest until now
					if slotETA := time.Duration(left/speed) * time.Millisecond; eta < slotETA {
						eta = slotETA
					}
				}
			}
			ctx = append(ctx, []interface{}{
				"eta", common.PrettyDuration(eta),
			}...)
		}
	}
	log.Info("Iterating state snapshot", ctx...)
}

// reportDone prints the last log when the whole generation is finished.
func (stat *generateStats) reportDone() {
	stat.lock.RLock()
	defer stat.lock.RUnlock()

	var ctx []interface{}
	ctx = append(ctx, []interface{}{"accounts", stat.accounts}...)
	if stat.slots != 0 {
		ctx = append(ctx, []interface{}{"slots", stat.slots}...)
	}
	ctx = append(ctx, []interface{}{"elapsed", common.PrettyDuration(time.Since(stat.start))}...)
	log.Info("Iterated snapshot", ctx...)
}

// runReport periodically prints the progress information.
func runReport(stats *generateStats, stop chan bool) {
	timer := time.NewTimer(0)
	defer timer.Stop()

	for {
		select {
		case <-timer.C:
			stats.report()
			timer.Reset(time.Second * 8)
		case success := <-stop:
			if success {
				stats.reportDone()
			}
			return
		}
	}
}

// generateTrieRoot generates the trie hash based on the snapshot iterator.
// It can be used for generating the account trie, a storage trie, or even
// the whole state, which connects the accounts and the corresponding storages.
func generateTrieRoot(db ethdb.KeyValueWriter, it Iterator, account common.Hash, generatorFn trieGeneratorFn, leafCallback leafCallbackFn, stats *generateStats, report bool) (common.Hash, error) {
	var (
		in      = make(chan trieKV)         // chan to pass leaves
		out     = make(chan common.Hash, 1) // chan to collect result
		stoplog = make(chan bool, 1)        // size-1 buffer so stop() doesn't block when logging is not enabled
		wg      sync.WaitGroup
	)
	// Spin up a go-routine for trie hash re-generation
	wg.Add(1)
	go func() {
		defer wg.Done()
		generatorFn(db, in, out)
	}()
	// Spin up a go-routine for progress logging
	if report && stats != nil {
		wg.Add(1)
		go func() {
			defer wg.Done()
			runReport(stats, stoplog)
		}()
	}
	// Create a semaphore to assign tasks and collect results through. We'll pre-
	// fill it with nils, thus using the same channel for both limiting concurrent
	// processing and gathering results.
	threads := runtime.NumCPU()
	results := make(chan error, threads)
	for i := 0; i < threads; i++ {
		results <- nil // fill the semaphore
	}
	// stop is a helper function to shut down the background threads
	// and return the re-generated trie hash.
	stop := func(fail error) (common.Hash, error) {
		close(in)
		result := <-out
		for i := 0; i < threads; i++ {
			if err := <-results; err != nil && fail == nil {
				fail = err
			}
		}
		stoplog <- fail == nil

		wg.Wait()
		return result, fail
	}
	var (
		logged    = time.Now()
		processed = uint64(0)
		leaf      trieKV
	)
	// Start to feed leaves
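	// (An empty account hash means the iterator walks the account trie;
	// anything else identifies the storage trie of that specific account.)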
	for it.Next() {
		if account == (common.Hash{}) {
			var (
				err      error
				fullData []byte
			)
			if leafCallback == nil {
				fullData, err = FullAccountRLP(it.(AccountIterator).Account())
				if err != nil {
					return stop(err)
				}
			} else {
				// Wait until the semaphore allows us to continue, aborting if
				// a sub-task failed
				if err := <-results; err != nil {
					results <- nil // stop will drain the results, add a noop back for this error we just consumed
					return stop(err)
				}
				// Fetch the next account and process it concurrently
				account, err := FullAccount(it.(AccountIterator).Account())
				if err != nil {
					return stop(err)
				}
				go func(hash common.Hash) {
					subroot, err := leafCallback(db, hash, common.BytesToHash(account.CodeHash), stats)
					if err != nil {
						results <- err
						return
					}
					if !bytes.Equal(account.Root, subroot.Bytes()) {
						results <- fmt.Errorf("invalid subroot(path %x), want %x, have %x", hash, account.Root, subroot)
						return
					}
					results <- nil
				}(it.Hash())
				fullData, err = rlp.EncodeToBytes(account)
				if err != nil {
					return stop(err)
				}
			}
			leaf = trieKV{it.Hash(), fullData}
		} else {
			leaf = trieKV{it.Hash(), common.CopyBytes(it.(StorageIterator).Slot())}
		}
		in <- leaf

		// Accumulate the generation statistic if it's required.
		processed++
		if time.Since(logged) > 3*time.Second && stats != nil {
			if account == (common.Hash{}) {
				stats.progressAccounts(it.Hash(), processed)
			} else {
				stats.progressContract(account, it.Hash(), processed)
			}
			logged, processed = time.Now(), 0
		}
	}
	// Commit the last part statistic.
	if processed > 0 && stats != nil {
		if account == (common.Hash{}) {
			stats.finishAccounts(processed)
		} else {
			stats.finishContract(account, processed)
		}
	}
	return stop(nil)
}

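// stackTrieGenerate is a trieGeneratorFn which streams the leaves arriving on
// the in channel into a stack trie, relying on the keys coming in sorted
// order (which the snapshot iterators guarantee). If db is non-nil, the
// finished trie nodes are committed to it; the root hash is sent on out.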
func stackTrieGenerate(db ethdb.KeyValueWriter, in chan trieKV, out chan common.Hash) {
	t := trie.NewStackTrie(db)
	for leaf := range in {
		t.TryUpdate(leaf.key[:], leaf.value)
	}
	var root common.Hash
	if db == nil {
		root = t.Hash()
	} else {
		root, _ = t.Commit()
	}
	out <- root
}