core, eth, les, trie: add a prefix to contract code (#21080)

gary rong
2020-08-21 20:10:40 +08:00
committed by GitHub
parent b68929caee
commit 87c0ba9213
42 changed files with 580 additions and 287 deletions

trie/database.go

@ -27,6 +27,7 @@ import (
"github.com/VictoriaMetrics/fastcache"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/metrics"
@ -57,15 +58,6 @@ var (
memcacheCommitSizeMeter = metrics.NewRegisteredMeter("trie/memcache/commit/size", nil)
)
// secureKeyPrefix is the database key prefix used to store trie node preimages.
var secureKeyPrefix = []byte("secure-key-")
// secureKeyPrefixLength is the length of the above prefix
const secureKeyPrefixLength = 11
// secureKeyLength is the length of the above prefix + 32byte hash.
const secureKeyLength = secureKeyPrefixLength + 32
// Database is an intermediate write layer between the trie data structures and
// the disk database. The aim is to accumulate trie writes in-memory and only
// periodically flush a couple tries to disk, garbage collecting the remainder.
@ -78,7 +70,7 @@ type Database struct {
diskdb ethdb.KeyValueStore // Persistent storage for matured trie nodes
cleans *fastcache.Cache // GC friendly memory cache of clean node RLPs
dirties map[common.Hash]*cachedNode // Data and references relationships of dirty nodes
dirties map[common.Hash]*cachedNode // Data and references relationships of dirty trie nodes
oldest common.Hash // Oldest tracked node, flush-list head
newest common.Hash // Newest tracked node, flush-list tail
@ -139,8 +131,8 @@ type rawShortNode struct {
func (n rawShortNode) cache() (hashNode, bool) { panic("this should never end up in a live trie") }
func (n rawShortNode) fstring(ind string) string { panic("this should never end up in a live trie") }
// cachedNode is all the information we know about a single cached node in the
// memory database write layer.
// cachedNode is all the information we know about a single cached trie node
// in the memory database write layer.
type cachedNode struct {
node node // Cached collapsed trie node, or raw rlp data
size uint16 // Byte size of the useful cached data
@ -161,8 +153,8 @@ var cachedNodeSize = int(reflect.TypeOf(cachedNode{}).Size())
// reference map.
const cachedNodeChildrenSize = 48
// rlp returns the raw rlp encoded blob of the cached node, either directly from
// the cache, or by regenerating it from the collapsed node.
// rlp returns the raw rlp encoded blob of the cached trie node, either directly
// from the cache, or by regenerating it from the collapsed node.
func (n *cachedNode) rlp() []byte {
if node, ok := n.node.(rawNode); ok {
return node
@ -183,9 +175,9 @@ func (n *cachedNode) obj(hash common.Hash) node {
return expandNode(hash[:], n.node)
}
// forChilds invokes the callback for all the tracked children of this node,
// both the implicit ones from inside the node as well as the explicit ones
//from outside the node.
// forChilds invokes the callback for all the tracked children of this node,
// both the implicit ones from inside the node as well as the explicit ones
// from outside the node.
func (n *cachedNode) forChilds(onChild func(hash common.Hash)) {
for child := range n.children {
onChild(child)
@ -305,25 +297,14 @@ func NewDatabaseWithCache(diskdb ethdb.KeyValueStore, cache int, journal string)
}
// DiskDB retrieves the persistent storage backing the trie database.
func (db *Database) DiskDB() ethdb.KeyValueReader {
func (db *Database) DiskDB() ethdb.KeyValueStore {
return db.diskdb
}
// InsertBlob writes a new reference tracked blob to the memory database if it's
// yet unknown. This method should only be used for non-trie nodes that require
// reference counting, since trie nodes are garbage collected directly through
// their embedded children.
func (db *Database) InsertBlob(hash common.Hash, blob []byte) {
db.lock.Lock()
defer db.lock.Unlock()
db.insert(hash, len(blob), rawNode(blob))
}
// insert inserts a collapsed trie node into the memory database. This method is
// a more generic version of InsertBlob, supporting both raw blob insertions as
// well ex trie node insertions. The blob size must be specified to allow proper
// size tracking.
// insert inserts a collapsed trie node into the memory database.
// The blob size must be specified to allow proper size tracking.
// All nodes inserted by this function will be reference tracked
// and in theory should only be used for **trie node** insertion.
func (db *Database) insert(hash common.Hash, size int, node node) {
// If the node's already cached, skip
if _, ok := db.dirties[hash]; ok {
@ -430,39 +411,30 @@ func (db *Database) Node(hash common.Hash) ([]byte, error) {
memcacheDirtyMissMeter.Mark(1)
// Content unavailable in memory, attempt to retrieve from disk
enc, err := db.diskdb.Get(hash[:])
if err == nil && enc != nil {
enc := rawdb.ReadTrieNode(db.diskdb, hash)
if len(enc) != 0 {
if db.cleans != nil {
db.cleans.Set(hash[:], enc)
memcacheCleanMissMeter.Mark(1)
memcacheCleanWriteMeter.Mark(int64(len(enc)))
}
return enc, nil
}
return enc, err
return nil, errors.New("not found")
}
// preimage retrieves a cached trie node pre-image from memory. If it cannot be
// found cached, the method queries the persistent database for the content.
func (db *Database) preimage(hash common.Hash) ([]byte, error) {
func (db *Database) preimage(hash common.Hash) []byte {
// Retrieve the node from cache if available
db.lock.RLock()
preimage := db.preimages[hash]
db.lock.RUnlock()
if preimage != nil {
return preimage, nil
return preimage
}
// Content unavailable in memory, attempt to retrieve from disk
return db.diskdb.Get(secureKey(hash))
}
// secureKey returns the database key for the preimage of key (as a newly
// allocated byte-slice)
func secureKey(hash common.Hash) []byte {
buf := make([]byte, secureKeyLength)
copy(buf, secureKeyPrefix)
copy(buf[secureKeyPrefixLength:], hash[:])
return buf
return rawdb.ReadPreimage(db.diskdb, hash)
}
// Nodes retrieves the hashes of all the nodes cached within the memory database.
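
For illustration only, a minimal sketch of the rawdb accessors that now back Node and preimage above: trie nodes and preimages round-trip through ReadTrieNode/WriteTrieNode and ReadPreimage/WritePreimages instead of hand-built keys such as the removed "secure-key-" prefix. The memorydb backend and the sample payloads are assumptions made for this sketch; the helper names are the ones used in the diff.

package main

import (
	"bytes"
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/ethdb/memorydb"
)

func main() {
	db := memorydb.New()

	// Trie nodes are stored keyed by their hash via the rawdb helpers,
	// replacing direct db.Put/db.Get calls on the bare hash.
	blob := []byte{0x01, 0x02, 0x03} // stand-in for an RLP encoded node
	hash := crypto.Keccak256Hash(blob)
	rawdb.WriteTrieNode(db, hash, blob)
	fmt.Println("node roundtrip:", bytes.Equal(rawdb.ReadTrieNode(db, hash), blob))

	// Preimages go through WritePreimages/ReadPreimage, replacing the old
	// manual "secure-key-" + hash key construction.
	key := []byte("some account key")
	preimages := map[common.Hash][]byte{crypto.Keccak256Hash(key): key}
	rawdb.WritePreimages(db, preimages)
	fmt.Println("preimage roundtrip:", bytes.Equal(rawdb.ReadPreimage(db, crypto.Keccak256Hash(key)), key))
}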
@ -482,6 +454,9 @@ func (db *Database) Nodes() []common.Hash {
}
// Reference adds a new reference from a parent node to a child node.
// This function is used to add a reference between an internal trie node
// and an external node (e.g. a storage trie root); all internal trie nodes
// are referenced together by the database itself.
func (db *Database) Reference(child common.Hash, parent common.Hash) {
db.lock.Lock()
defer db.lock.Unlock()
@ -604,27 +579,16 @@ func (db *Database) Cap(limit common.StorageSize) error {
size := db.dirtiesSize + common.StorageSize((len(db.dirties)-1)*cachedNodeSize)
size += db.childrenSize - common.StorageSize(len(db.dirties[common.Hash{}].children)*(common.HashLength+2))
// We reuse an ephemeral buffer for the keys. The batch Put operation
// copies it internally, so we can reuse it.
var keyBuf [secureKeyLength]byte
copy(keyBuf[:], secureKeyPrefix)
// If the preimage cache got large enough, push to disk. If it's still small
// leave for later to deduplicate writes.
flushPreimages := db.preimagesSize > 4*1024*1024
if flushPreimages {
for hash, preimage := range db.preimages {
copy(keyBuf[secureKeyPrefixLength:], hash[:])
if err := batch.Put(keyBuf[:], preimage); err != nil {
log.Error("Failed to commit preimage from trie database", "err", err)
rawdb.WritePreimages(batch, db.preimages)
if batch.ValueSize() > ethdb.IdealBatchSize {
if err := batch.Write(); err != nil {
return err
}
if batch.ValueSize() > ethdb.IdealBatchSize {
if err := batch.Write(); err != nil {
return err
}
batch.Reset()
}
batch.Reset()
}
}
// Keep committing nodes from the flush-list until we're below allowance
@ -632,9 +596,8 @@ func (db *Database) Cap(limit common.StorageSize) error {
for size > limit && oldest != (common.Hash{}) {
// Fetch the oldest referenced node and push into the batch
node := db.dirties[oldest]
if err := batch.Put(oldest[:], node.rlp()); err != nil {
return err
}
rawdb.WriteTrieNode(batch, oldest, node.rlp())
// If we exceeded the ideal batch size, commit and reset
if batch.ValueSize() >= ethdb.IdealBatchSize {
if err := batch.Write(); err != nil {
@ -662,8 +625,7 @@ func (db *Database) Cap(limit common.StorageSize) error {
defer db.lock.Unlock()
if flushPreimages {
db.preimages = make(map[common.Hash][]byte)
db.preimagesSize = 0
db.preimages, db.preimagesSize = make(map[common.Hash][]byte), 0
}
for db.oldest != oldest {
node := db.dirties[db.oldest]
@ -706,25 +668,13 @@ func (db *Database) Commit(node common.Hash, report bool, callback func(common.H
start := time.Now()
batch := db.diskdb.NewBatch()
// We reuse an ephemeral buffer for the keys. The batch Put operation
// copies it internally, so we can reuse it.
var keyBuf [secureKeyLength]byte
copy(keyBuf[:], secureKeyPrefix)
// Move all of the accumulated preimages into a write batch
for hash, preimage := range db.preimages {
copy(keyBuf[secureKeyPrefixLength:], hash[:])
if err := batch.Put(keyBuf[:], preimage); err != nil {
log.Error("Failed to commit preimage from trie database", "err", err)
rawdb.WritePreimages(batch, db.preimages)
if batch.ValueSize() > ethdb.IdealBatchSize {
if err := batch.Write(); err != nil {
return err
}
// If the batch is too large, flush to disk
if batch.ValueSize() > ethdb.IdealBatchSize {
if err := batch.Write(); err != nil {
return err
}
batch.Reset()
}
batch.Reset()
}
// Since we're going to replay trie node writes into the clean cache, flush out
// any batched pre-images before continuing.
@ -754,8 +704,7 @@ func (db *Database) Commit(node common.Hash, report bool, callback func(common.H
batch.Reset()
// Reset the storage counters and bump the metrics
db.preimages = make(map[common.Hash][]byte)
db.preimagesSize = 0
db.preimages, db.preimagesSize = make(map[common.Hash][]byte), 0
memcacheCommitTimeTimer.Update(time.Since(start))
memcacheCommitSizeMeter.Mark(int64(storage - db.dirtiesSize))
@ -791,13 +740,11 @@ func (db *Database) commit(hash common.Hash, batch ethdb.Batch, uncacher *cleane
if err != nil {
return err
}
if err := batch.Put(hash[:], node.rlp()); err != nil {
return err
}
// If we've reached an optimal batch size, commit and start over
rawdb.WriteTrieNode(batch, hash, node.rlp())
if callback != nil {
callback(hash)
}
// If we've reached an optimal batch size, commit and start over
if batch.ValueSize() >= ethdb.IdealBatchSize {
if err := batch.Write(); err != nil {
return err

trie/secure_trie.go

@ -130,8 +130,7 @@ func (t *SecureTrie) GetKey(shaKey []byte) []byte {
if key, ok := t.getSecKeyCache()[string(shaKey)]; ok {
return key
}
key, _ := t.trie.db.preimage(common.BytesToHash(shaKey))
return key
return t.trie.db.preimage(common.BytesToHash(shaKey))
}
// Commit writes all nodes and the secure hash pre-images to the trie's database.
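
As a hedged usage sketch of the simplified GetKey path: the hashed key resolves back to its preimage either from the in-memory secure-key cache or, once committed, via rawdb.ReadPreimage. The NewSecure/Update signatures are assumed to match this era of the trie package; the key/value payloads are invented for the example.

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/ethdb/memorydb"
	"github.com/ethereum/go-ethereum/trie"
)

func main() {
	db := trie.NewDatabase(memorydb.New())
	t, _ := trie.NewSecure(common.Hash{}, db) // error ignored for brevity in this sketch

	key, value := []byte("account-key"), []byte("account-value")
	t.Update(key, value)

	// The trie internally stores Keccak256(key); GetKey recovers the
	// original key, consulting the preimage store if needed.
	fmt.Printf("%s\n", t.GetKey(crypto.Keccak256(key)))
}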

trie/sync.go

@ -22,6 +22,7 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/prque"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/ethdb"
)
@ -37,7 +38,7 @@ var ErrAlreadyProcessed = errors.New("already processed")
type request struct {
hash common.Hash // Hash of the node data content to retrieve
data []byte // Data content of the node, cached until all subtrees complete
raw bool // Whether this is a raw entry (code) or a trie node
code bool // Whether this is a code entry
parents []*request // Parent state nodes referencing this entry (notify all upon completion)
depth int // Depth level within the trie the node is located to prioritise DFS
@ -46,8 +47,7 @@ type request struct {
callback LeafCallback // Callback to invoke if a leaf node it reached on this branch
}
// SyncResult is a simple list to return missing nodes along with their request
// hashes.
// SyncResult is a response holding the requested data along with its hash.
type SyncResult struct {
Hash common.Hash // Hash of the originally unknown trie node
Data []byte // Data content of the retrieved node
@ -56,25 +56,40 @@ type SyncResult struct {
// syncMemBatch is an in-memory buffer of successfully downloaded but not yet
// persisted data items.
type syncMemBatch struct {
batch map[common.Hash][]byte // In-memory membatch of recently completed items
nodes map[common.Hash][]byte // In-memory membatch of recently completed nodes
codes map[common.Hash][]byte // In-memory membatch of recently completed codes
}
// newSyncMemBatch allocates a new memory-buffer for not-yet persisted trie nodes and contract codes.
func newSyncMemBatch() *syncMemBatch {
return &syncMemBatch{
batch: make(map[common.Hash][]byte),
nodes: make(map[common.Hash][]byte),
codes: make(map[common.Hash][]byte),
}
}
// hasNode reports whether the trie node with the given hash is already cached.
func (batch *syncMemBatch) hasNode(hash common.Hash) bool {
_, ok := batch.nodes[hash]
return ok
}
// hasCode reports whether the contract code with the given hash is already cached.
func (batch *syncMemBatch) hasCode(hash common.Hash) bool {
_, ok := batch.codes[hash]
return ok
}
// Sync is the main state trie synchronisation scheduler, which provides yet
// unknown trie hashes to retrieve, accepts node data associated with said hashes
// and reconstructs the trie step by step until all is done.
type Sync struct {
database ethdb.KeyValueReader // Persistent database to check for existing entries
membatch *syncMemBatch // Memory buffer to avoid frequent database writes
requests map[common.Hash]*request // Pending requests pertaining to a key hash
nodeReqs map[common.Hash]*request // Pending requests pertaining to a trie node hash
codeReqs map[common.Hash]*request // Pending requests pertaining to a code hash
queue *prque.Prque // Priority queue with the pending requests
bloom *SyncBloom // Bloom filter for fast node existence checks
bloom *SyncBloom // Bloom filter for fast state existence checks
}
// NewSync creates a new trie data download scheduler.
@ -82,7 +97,8 @@ func NewSync(root common.Hash, database ethdb.KeyValueReader, callback LeafCallb
ts := &Sync{
database: database,
membatch: newSyncMemBatch(),
requests: make(map[common.Hash]*request),
nodeReqs: make(map[common.Hash]*request),
codeReqs: make(map[common.Hash]*request),
queue: prque.New(nil),
bloom: bloom,
}
@ -96,13 +112,15 @@ func (s *Sync) AddSubTrie(root common.Hash, depth int, parent common.Hash, callb
if root == emptyRoot {
return
}
if _, ok := s.membatch.batch[root]; ok {
if s.membatch.hasNode(root) {
return
}
if s.bloom == nil || s.bloom.Contains(root[:]) {
// Bloom filter says this might be a duplicate, double check
blob, _ := s.database.Get(root[:])
if local, err := decodeNode(root[:], blob); local != nil && err == nil {
// Bloom filter says this might be a duplicate, double check.
// If the database says yes, then at least the trie node is present
// and we assume it is NOT legacy contract code.
blob := rawdb.ReadTrieNode(s.database, root)
if len(blob) > 0 {
return
}
// False positive, bump fault meter
@ -116,7 +134,7 @@ func (s *Sync) AddSubTrie(root common.Hash, depth int, parent common.Hash, callb
}
// If this sub-trie has a designated parent, link them together
if parent != (common.Hash{}) {
ancestor := s.requests[parent]
ancestor := s.nodeReqs[parent]
if ancestor == nil {
panic(fmt.Sprintf("sub-trie ancestor not found: %x", parent))
}
@ -126,21 +144,25 @@ func (s *Sync) AddSubTrie(root common.Hash, depth int, parent common.Hash, callb
s.schedule(req)
}
// AddRawEntry schedules the direct retrieval of a state entry that should not be
// interpreted as a trie node, but rather accepted and stored into the database
// as is. This method's goal is to support misc state metadata retrievals (e.g.
// contract code).
func (s *Sync) AddRawEntry(hash common.Hash, depth int, parent common.Hash) {
// AddCodeEntry schedules the direct retrieval of a contract code that should not
// be interpreted as a trie node, but rather accepted and stored into the database
// as is.
func (s *Sync) AddCodeEntry(hash common.Hash, depth int, parent common.Hash) {
// Short circuit if the entry is empty or already known
if hash == emptyState {
return
}
if _, ok := s.membatch.batch[hash]; ok {
if s.membatch.hasCode(hash) {
return
}
if s.bloom == nil || s.bloom.Contains(hash[:]) {
// Bloom filter says this might be a duplicate, double check
if ok, _ := s.database.Has(hash[:]); ok {
// Bloom filter says this might be a duplicate, double check.
// If the database says yes, the blob is present for sure.
// Note we only check existence under the new code scheme; fast
// sync is expected to run on a fresh node. Even if the code
// exists in the legacy format, fetch and store it under the
// new scheme anyway.
if blob := rawdb.ReadCodeWithPrefix(s.database, hash); len(blob) > 0 {
return
}
// False positive, bump fault meter
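
For reference, a minimal sketch of the on-disk scheme this check relies on: contract code is written under a prefixed key via rawdb.WriteCode, ReadCodeWithPrefix only sees code stored under that scheme, and IsCodeKey lets database iterators (as in the sync bloom further down) tell prefixed code keys apart from bare 32-byte trie node hashes. The memorydb backend, the sample bytecode, and the iterator usage are assumptions made for the example; the rawdb helper names are the ones used in this diff.

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/ethdb/memorydb"
)

func main() {
	db := memorydb.New()

	code := []byte{0x60, 0x80, 0x60, 0x40} // stand-in for contract bytecode
	hash := crypto.Keccak256Hash(code)

	// Stored under prefix+hash rather than the bare hash.
	rawdb.WriteCode(db, hash, code)
	fmt.Println("found with prefix:", len(rawdb.ReadCodeWithPrefix(db, hash)) > 0)

	// When iterating the database, IsCodeKey distinguishes prefixed code
	// keys from plain trie node hashes.
	it := db.NewIterator(nil, nil)
	for it.Next() {
		if ok, h := rawdb.IsCodeKey(it.Key()); ok {
			fmt.Printf("code entry for hash %x\n", h)
		}
	}
	it.Release()
}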
@ -149,12 +171,12 @@ func (s *Sync) AddRawEntry(hash common.Hash, depth int, parent common.Hash) {
// Assemble the new sub-trie sync request
req := &request{
hash: hash,
raw: true,
code: true,
depth: depth,
}
// If this sub-trie has a designated parent, link them together
if parent != (common.Hash{}) {
ancestor := s.requests[parent]
ancestor := s.nodeReqs[parent] // the parent of a code request can ONLY be a trie node request
if ancestor == nil {
panic(fmt.Sprintf("raw-entry ancestor not found: %x", parent))
}
@ -173,61 +195,64 @@ func (s *Sync) Missing(max int) []common.Hash {
return requests
}
// Process injects a batch of retrieved trie nodes data, returning if something
// was committed to the database and also the index of an entry if its processing
// failed.
func (s *Sync) Process(results []SyncResult) (bool, int, error) {
committed := false
for i, item := range results {
// If the item was not requested, bail out
request := s.requests[item.Hash]
if request == nil {
return committed, i, ErrNotRequested
}
if request.data != nil {
return committed, i, ErrAlreadyProcessed
}
// If the item is a raw entry request, commit directly
if request.raw {
request.data = item.Data
s.commit(request)
committed = true
continue
}
// Process injects the received data for a requested item. Note it can
// happen that a single response commits two pending requests (e.g.
// there are two requests, one for code and one for a trie node, that
// share the same hash). In this case the second response for the same
// hash will be treated as a "non-requested" or "already-processed"
// item, but there is no downside.
func (s *Sync) Process(result SyncResult) error {
// If the item was not requested either for code or node, bail out
if s.nodeReqs[result.Hash] == nil && s.codeReqs[result.Hash] == nil {
return ErrNotRequested
}
// There is a pending code request for this data, commit directly
var filled bool
if req := s.codeReqs[result.Hash]; req != nil && req.data == nil {
filled = true
req.data = result.Data
s.commit(req)
}
// There is a pending node request for this data, fill it.
if req := s.nodeReqs[result.Hash]; req != nil && req.data == nil {
filled = true
// Decode the node data content and update the request
node, err := decodeNode(item.Hash[:], item.Data)
node, err := decodeNode(result.Hash[:], result.Data)
if err != nil {
return committed, i, err
return err
}
request.data = item.Data
req.data = result.Data
// Create and schedule a request for all the children nodes
requests, err := s.children(request, node)
requests, err := s.children(req, node)
if err != nil {
return committed, i, err
return err
}
if len(requests) == 0 && request.deps == 0 {
s.commit(request)
committed = true
continue
}
request.deps += len(requests)
for _, child := range requests {
s.schedule(child)
if len(requests) == 0 && req.deps == 0 {
s.commit(req)
} else {
req.deps += len(requests)
for _, child := range requests {
s.schedule(child)
}
}
}
return committed, 0, nil
if !filled {
return ErrAlreadyProcessed
}
return nil
}
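
To show how callers adapt to the new one-result-at-a-time API (mirroring the test changes further down), here is a hedged sketch of a sync driving loop. The runSync name and the fetchNode callback standing in for the network layer are hypothetical; the scheduler calls follow the API in this file.

package example

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/trie"
)

// runSync drains the scheduler until the trie rooted at root is persisted
// into diskdb. fetchNode is a hypothetical stand-in for the network layer
// that returns the raw blob for a given hash.
func runSync(root common.Hash, diskdb ethdb.Database, fetchNode func(common.Hash) ([]byte, error)) error {
	sched := trie.NewSync(root, diskdb, nil, trie.NewSyncBloom(1, diskdb))

	for nodes := sched.Missing(128); len(nodes) > 0; nodes = sched.Missing(128) {
		for _, hash := range nodes {
			data, err := fetchNode(hash)
			if err != nil {
				return err
			}
			// Results are now fed back one at a time instead of as a batch.
			if err := sched.Process(trie.SyncResult{Hash: hash, Data: data}); err != nil {
				return err
			}
		}
		// Flush the accumulated membatch out to disk.
		batch := diskdb.NewBatch()
		if err := sched.Commit(batch); err != nil {
			return err
		}
		if err := batch.Write(); err != nil {
			return err
		}
	}
	return nil
}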
// Commit flushes the data stored in the internal membatch out to persistent
// storage, returning any error that occurred.
func (s *Sync) Commit(dbw ethdb.Batch) error {
// Dump the membatch into a database dbw
for key, value := range s.membatch.batch {
if err := dbw.Put(key[:], value); err != nil {
return err
}
for key, value := range s.membatch.nodes {
rawdb.WriteTrieNode(dbw, key, value)
s.bloom.Add(key[:])
}
for key, value := range s.membatch.codes {
rawdb.WriteCode(dbw, key, value)
s.bloom.Add(key[:])
}
// Drop the membatch data and return
@ -237,21 +262,30 @@ func (s *Sync) Commit(dbw ethdb.Batch) error {
// Pending returns the number of state entries currently pending for download.
func (s *Sync) Pending() int {
return len(s.requests)
return len(s.nodeReqs) + len(s.codeReqs)
}
// schedule inserts a new state retrieval request into the fetch queue. If there
// is already a pending request for this node, the new request will be discarded
// and only a parent reference added to the old one.
func (s *Sync) schedule(req *request) {
var reqset = s.nodeReqs
if req.code {
reqset = s.codeReqs
}
// If we're already requesting this node, add a new reference and stop
if old, ok := s.requests[req.hash]; ok {
if old, ok := reqset[req.hash]; ok {
old.parents = append(old.parents, req.parents...)
return
}
// Schedule the request for future retrieval
reqset[req.hash] = req
// Schedule the request for future retrieval. This queue is shared
// by both node requests and code requests. It can happen that a trie
// node and a contract code blob have the same hash. In that case two
// elements with the same hash (and the same or a different depth) will
// be pushed; that is fine, since in the worst case the second response
// is simply treated as a duplicate.
s.queue.Push(req.hash, int64(req.depth))
s.requests[req.hash] = req
}
// children retrieves all the missing children of a state trie entry for future
@ -297,12 +331,14 @@ func (s *Sync) children(req *request, object node) ([]*request, error) {
if node, ok := (child.node).(hashNode); ok {
// Try to resolve the node from the local database
hash := common.BytesToHash(node)
if _, ok := s.membatch.batch[hash]; ok {
if s.membatch.hasNode(hash) {
continue
}
if s.bloom == nil || s.bloom.Contains(node) {
// Bloom filter says this might be a duplicate, double check
if ok, _ := s.database.Has(node); ok {
// Bloom filter says this might be a duplicate, double check.
// If the database says yes, then at least the trie node is present
// and we assume it is NOT legacy contract code.
if blob := rawdb.ReadTrieNode(s.database, common.BytesToHash(node)); len(blob) > 0 {
continue
}
// False positive, bump fault meter
@ -325,10 +361,13 @@ func (s *Sync) children(req *request, object node) ([]*request, error) {
// committed themselves.
func (s *Sync) commit(req *request) (err error) {
// Write the node content to the membatch
s.membatch.batch[req.hash] = req.data
delete(s.requests, req.hash)
if req.code {
s.membatch.codes[req.hash] = req.data
delete(s.codeReqs, req.hash)
} else {
s.membatch.nodes[req.hash] = req.data
delete(s.nodeReqs, req.hash)
}
// Check all parents for completion
for _, parent := range req.parents {
parent.deps--

trie/sync_bloom.go

@ -25,6 +25,7 @@ import (
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/metrics"
@ -41,8 +42,8 @@ var (
)
// syncBloomHasher is a wrapper around a byte blob to satisfy the interface API
// requirements of the bloom library used. It's used to convert a trie hash into
// a 64 bit mini hash.
// requirements of the bloom library used. It's used to convert a trie hash or
// contract code hash into a 64 bit mini hash.
type syncBloomHasher []byte
func (f syncBloomHasher) Write(p []byte) (n int, err error) { panic("not implemented") }
@ -53,9 +54,9 @@ func (f syncBloomHasher) Size() int { return 8 }
func (f syncBloomHasher) Sum64() uint64 { return binary.BigEndian.Uint64(f) }
// SyncBloom is a bloom filter used during fast sync to quickly decide if a trie
// node already exists on disk or not. It self populates from the provided disk
// database on creation in a background thread and will only start returning live
// results once that's finished.
// node or contract code already exists on disk or not. It self populates from the
// provided disk database on creation in a background thread and will only start
// returning live results once that's finished.
type SyncBloom struct {
bloom *bloomfilter.Filter
inited uint32
@ -107,10 +108,16 @@ func (b *SyncBloom) init(database ethdb.Iteratee) {
)
for it.Next() && atomic.LoadUint32(&b.closed) == 0 {
// If the database entry is a trie node, add it to the bloom
if key := it.Key(); len(key) == common.HashLength {
key := it.Key()
if len(key) == common.HashLength {
b.bloom.Add(syncBloomHasher(key))
bloomLoadMeter.Mark(1)
}
// If the database entry is a contract code, add it to the bloom
if ok, hash := rawdb.IsCodeKey(key); ok {
b.bloom.Add(syncBloomHasher(hash))
bloomLoadMeter.Mark(1)
}
// If enough time elapsed since the last iterator swap, restart
if time.Since(swap) > 8*time.Second {
key := common.CopyBytes(it.Key())

trie/sync_test.go

@ -124,8 +124,10 @@ func testIterativeSync(t *testing.T, count int) {
}
results[i] = SyncResult{hash, data}
}
if _, index, err := sched.Process(results); err != nil {
t.Fatalf("failed to process result #%d: %v", index, err)
for _, result := range results {
if err := sched.Process(result); err != nil {
t.Fatalf("failed to process result %v", err)
}
}
batch := diskdb.NewBatch()
if err := sched.Commit(batch); err != nil {
@ -160,8 +162,10 @@ func TestIterativeDelayedSync(t *testing.T) {
}
results[i] = SyncResult{hash, data}
}
if _, index, err := sched.Process(results); err != nil {
t.Fatalf("failed to process result #%d: %v", index, err)
for _, result := range results {
if err := sched.Process(result); err != nil {
t.Fatalf("failed to process result %v", err)
}
}
batch := diskdb.NewBatch()
if err := sched.Commit(batch); err != nil {
@ -204,8 +208,10 @@ func testIterativeRandomSync(t *testing.T, count int) {
results = append(results, SyncResult{hash, data})
}
// Feed the retrieved results back and queue new tasks
if _, index, err := sched.Process(results); err != nil {
t.Fatalf("failed to process result #%d: %v", index, err)
for _, result := range results {
if err := sched.Process(result); err != nil {
t.Fatalf("failed to process result %v", err)
}
}
batch := diskdb.NewBatch()
if err := sched.Commit(batch); err != nil {
@ -251,8 +257,10 @@ func TestIterativeRandomDelayedSync(t *testing.T) {
}
}
// Feed the retrieved results back and queue new tasks
if _, index, err := sched.Process(results); err != nil {
t.Fatalf("failed to process result #%d: %v", index, err)
for _, result := range results {
if err := sched.Process(result); err != nil {
t.Fatalf("failed to process result %v", err)
}
}
batch := diskdb.NewBatch()
if err := sched.Commit(batch); err != nil {
@ -298,8 +306,10 @@ func TestDuplicateAvoidanceSync(t *testing.T) {
results[i] = SyncResult{hash, data}
}
if _, index, err := sched.Process(results); err != nil {
t.Fatalf("failed to process result #%d: %v", index, err)
for _, result := range results {
if err := sched.Process(result); err != nil {
t.Fatalf("failed to process result %v", err)
}
}
batch := diskdb.NewBatch()
if err := sched.Commit(batch); err != nil {
@ -336,8 +346,10 @@ func TestIncompleteSync(t *testing.T) {
results[i] = SyncResult{hash, data}
}
// Process each of the trie nodes
if _, index, err := sched.Process(results); err != nil {
t.Fatalf("failed to process result #%d: %v", index, err)
for _, result := range results {
if err := sched.Process(result); err != nil {
t.Fatalf("failed to process result %v", err)
}
}
batch := diskdb.NewBatch()
if err := sched.Commit(batch); err != nil {

trie/trie.go

@ -473,3 +473,9 @@ func (t *Trie) hashRoot(db *Database) (node, node, error) {
t.unhashed = 0
return hashed, cached, nil
}
// Reset drops the referenced root node and cleans all internal state.
func (t *Trie) Reset() {
t.root = nil
t.unhashed = 0
}
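
A brief, hedged sketch of how the new Reset helper can be used to recycle a Trie instance between passes instead of allocating a fresh one. The rebuild function and its inputs are invented for illustration; the surrounding New/NewDatabase/Update/Hash calls are assumed from the public trie API of this era.

package example

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/ethdb/memorydb"
	"github.com/ethereum/go-ethereum/trie"
)

// rebuild inserts a batch of key/value pairs, records the resulting root,
// then resets the trie so the same object can be reused for another pass.
func rebuild(items [][2][]byte) common.Hash {
	db := trie.NewDatabase(memorydb.New())
	t, _ := trie.New(common.Hash{}, db) // error ignored for brevity in this sketch

	for _, kv := range items {
		t.Update(kv[0], kv[1])
	}
	root := t.Hash()

	// Drop the built state; the next pass starts from an empty trie.
	t.Reset()
	fmt.Println("after reset, empty root:", t.Hash())
	return root
}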