core, eth: split eth package, implement snap protocol (#21482)
This commit splits the eth package, separating the handling of eth and snap protocols. It also includes the capability to run snap sync (https://github.com/ethereum/devp2p/blob/master/caps/snap.md), but does not enable it by default.

Co-authored-by: Marius van der Wijden <m.vanderwijden@live.de>
Co-authored-by: Martin Holst Swende <martin@swende.se>
@@ -29,6 +29,7 @@ import (
     "github.com/ethereum/go-ethereum/common"
     "github.com/ethereum/go-ethereum/core/rawdb"
     "github.com/ethereum/go-ethereum/core/types"
+    "github.com/ethereum/go-ethereum/eth/protocols/snap"
     "github.com/ethereum/go-ethereum/ethdb"
     "github.com/ethereum/go-ethereum/event"
     "github.com/ethereum/go-ethereum/log"
@@ -38,7 +39,6 @@ import (
 )

 var (
-    MaxHashFetch    = 512 // Amount of hashes to be fetched per retrieval request
     MaxBlockFetch   = 128 // Amount of blocks to be fetched per retrieval request
     MaxHeaderFetch  = 192 // Amount of block headers to be fetched per retrieval request
     MaxSkeletonSize = 128 // Number of header fetches to need for a skeleton assembly
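These constants bound how much data one retrieval request may ask of a single peer. As a rough, self-contained illustration of how such a cap is applied (not code from this commit; `remaining` is hypothetical):

package main

import "fmt"

const MaxHeaderFetch = 192 // mirrors the constant above

func main() {
    remaining := 500 // hypothetical: headers still needed
    count := remaining
    if count > MaxHeaderFetch {
        count = MaxHeaderFetch // one peer round-trip stays bounded
    }
    fmt.Printf("requesting %d of %d headers\n", count, remaining)
}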
@@ -89,7 +89,7 @@ var (
     errCancelContentProcessing = errors.New("content processing canceled (requested)")
     errCanceled                = errors.New("syncing canceled (requested)")
     errNoSyncActive            = errors.New("no sync active")
-    errTooOld                  = errors.New("peer doesn't speak recent enough protocol version (need version >= 63)")
+    errTooOld                  = errors.New("peer doesn't speak recent enough protocol version (need version >= 64)")
 )

 type Downloader struct {
@@ -131,20 +131,22 @@ type Downloader struct {
     ancientLimit uint64 // The maximum block number which can be regarded as ancient data.

     // Channels
-    headerCh      chan dataPack        // [eth/62] Channel receiving inbound block headers
-    bodyCh        chan dataPack        // [eth/62] Channel receiving inbound block bodies
-    receiptCh     chan dataPack        // [eth/63] Channel receiving inbound receipts
-    bodyWakeCh    chan bool            // [eth/62] Channel to signal the block body fetcher of new tasks
-    receiptWakeCh chan bool            // [eth/63] Channel to signal the receipt fetcher of new tasks
-    headerProcCh  chan []*types.Header // [eth/62] Channel to feed the header processor new tasks
+    headerCh      chan dataPack        // Channel receiving inbound block headers
+    bodyCh        chan dataPack        // Channel receiving inbound block bodies
+    receiptCh     chan dataPack        // Channel receiving inbound receipts
+    bodyWakeCh    chan bool            // Channel to signal the block body fetcher of new tasks
+    receiptWakeCh chan bool            // Channel to signal the receipt fetcher of new tasks
+    headerProcCh  chan []*types.Header // Channel to feed the header processor new tasks

     // State sync
     pivotHeader *types.Header // Pivot block header to dynamically push the syncing state root
     pivotLock   sync.RWMutex  // Lock protecting pivot header reads from updates

+    snapSync       bool         // Whether to run state sync over the snap protocol
+    SnapSyncer     *snap.Syncer // TODO(karalabe): make private! hack for now
     stateSyncStart chan *stateSync
     trackStateReq  chan *stateReq
-    stateCh        chan dataPack // [eth/63] Channel receiving inbound node state data
+    stateCh        chan dataPack // Channel receiving inbound node state data

     // Cancellation and termination
     cancelPeer string // Identifier of the peer currently being used as the master (cancel on drop)
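For context on how the wake channels declared above are consumed: each fetcher loop blocks until the queue signals more tasks or the sync shuts down. A minimal, self-contained sketch of that pattern, with hypothetical wakeCh and quit channels standing in for the struct fields:

package main

func fetchLoop(wakeCh <-chan bool, quit <-chan struct{}) {
    for {
        select {
        case wake, ok := <-wakeCh:
            if !ok || !wake {
                return // closed, or the queue signalled "no more tasks"
            }
            // ... reserve and fetch the next batch here ...
        case <-quit:
            return
        }
    }
}

func main() {
    wake := make(chan bool, 1)
    quit := make(chan struct{})
    wake <- false // tell the loop no further work is coming
    fetchLoop(wake, quit)
}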
@@ -237,6 +239,7 @@ func New(checkpoint uint64, stateDb ethdb.Database, stateBloom *trie.SyncBloom,
         headerProcCh:   make(chan []*types.Header, 1),
         quitCh:         make(chan struct{}),
         stateCh:        make(chan dataPack),
+        SnapSyncer:     snap.NewSyncer(stateDb, stateBloom),
         stateSyncStart: make(chan *stateSync),
         syncStatsState: stateSyncStats{
             processed: rawdb.ReadFastTrieProgress(stateDb),
@@ -286,19 +289,16 @@ func (d *Downloader) Synchronising() bool {
     return atomic.LoadInt32(&d.synchronising) > 0
 }

-// SyncBloomContains tests if the syncbloom filter contains the given hash:
-// - false: the bloom definitely does not contain hash
-// - true: the bloom maybe contains hash
-//
-// While the bloom is being initialized (or is closed), all queries will return true.
-func (d *Downloader) SyncBloomContains(hash []byte) bool {
-    return d.stateBloom == nil || d.stateBloom.Contains(hash)
-}
-
 // RegisterPeer injects a new download peer into the set of block source to be
 // used for fetching hashes and blocks from.
-func (d *Downloader) RegisterPeer(id string, version int, peer Peer) error {
-    logger := log.New("peer", id)
+func (d *Downloader) RegisterPeer(id string, version uint, peer Peer) error {
+    var logger log.Logger
+    if len(id) < 16 {
+        // Tests use short IDs, don't choke on them
+        logger = log.New("peer", id)
+    } else {
+        logger = log.New("peer", id[:16])
+    }
     logger.Trace("Registering sync peer")
     if err := d.peers.Register(newPeerConnection(id, version, peer, logger)); err != nil {
         logger.Error("Failed to register sync peer", "err", err)
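The new branch exists because real enode IDs are long hex strings while the test harness uses short synthetic IDs; truncating those blindly with id[:16] would panic. The rule in isolation (shortID is our name, for illustration only):

package main

import "fmt"

// shortID mirrors the truncation logic from RegisterPeer above.
func shortID(id string) string {
    if len(id) < 16 {
        return id // tests use short IDs, don't choke on them
    }
    return id[:16]
}

func main() {
    fmt.Println(shortID("peer-1"))                            // kept as-is
    fmt.Println(shortID("f4a9c2e07b13d8865e21a0c4b97d3f10")) // first 16 chars only
}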
@@ -310,7 +310,7 @@ func (d *Downloader) RegisterPeer(id string, version int, peer Peer) error {
 }

 // RegisterLightPeer injects a light client peer, wrapping it so it appears as a regular peer.
-func (d *Downloader) RegisterLightPeer(id string, version int, peer LightPeer) error {
+func (d *Downloader) RegisterLightPeer(id string, version uint, peer LightPeer) error {
     return d.RegisterPeer(id, version, &lightPeerWrapper{peer})
 }

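lightPeerWrapper adapts a header-only light peer to the full Peer interface the downloader expects: supported calls are forwarded, unsupported ones are refused. A self-contained sketch of that adapter pattern with simplified stand-in types (the real wrapper lives elsewhere in this package; names and the error-vs-panic behavior here are illustrative only):

package main

import (
    "fmt"
    "math/big"
)

// Simplified stand-in for this package's light peer interface.
type LightPeer interface {
    Head() (string, *big.Int)
}

type lightPeerWrapper struct{ peer LightPeer }

// Supported calls are forwarded to the underlying light peer.
func (w *lightPeerWrapper) Head() (string, *big.Int) { return w.peer.Head() }

// Unsupported calls fail loudly: a light peer has no block bodies to serve.
func (w *lightPeerWrapper) RequestBodies(hashes []string) error {
    return fmt.Errorf("RequestBodies not supported in light client mode sync")
}

type fakeLight struct{}

func (fakeLight) Head() (string, *big.Int) { return "0xabc", big.NewInt(1) }

func main() {
    w := &lightPeerWrapper{peer: fakeLight{}}
    head, td := w.Head()
    fmt.Println(head, td)
    fmt.Println(w.RequestBodies(nil))
}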
@@ -319,7 +319,13 @@ func (d *Downloader) RegisterLightPeer(id string, version int, peer LightPeer) e
 // the queue.
 func (d *Downloader) UnregisterPeer(id string) error {
     // Unregister the peer from the active peer set and revoke any fetch tasks
-    logger := log.New("peer", id)
+    var logger log.Logger
+    if len(id) < 16 {
+        // Tests use short IDs, don't choke on them
+        logger = log.New("peer", id)
+    } else {
+        logger = log.New("peer", id[:16])
+    }
     logger.Trace("Unregistering sync peer")
     if err := d.peers.Unregister(id); err != nil {
         logger.Error("Failed to unregister sync peer", "err", err)
@@ -381,6 +387,16 @@ func (d *Downloader) synchronise(id string, hash common.Hash, td *big.Int, mode
     if mode == FullSync && d.stateBloom != nil {
         d.stateBloom.Close()
     }
+    // If snap sync was requested, create the snap scheduler and switch to fast
+    // sync mode. Long term we could drop fast sync or merge the two together,
+    // but until snap becomes prevalent, we should support both. TODO(karalabe).
+    if mode == SnapSync {
+        if !d.snapSync {
+            log.Warn("Enabling snapshot sync prototype")
+            d.snapSync = true
+        }
+        mode = FastSync
+    }
     // Reset the queue, peer set and wake channels to clean any internal leftover state
     d.queue.Reset(blockCacheMaxItems, blockCacheInitialItems)
     d.peers.Reset()
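This mapping is the heart of the prototype wiring: SnapSync is accepted as a distinct sync mode, but for now it only switches on the snap state syncer and falls through to the fast-sync pipeline. A self-contained sketch of that decision (the SyncMode values below are local stand-ins mirroring the package's constants):

package main

import "fmt"

type SyncMode int

const (
    FullSync SyncMode = iota
    FastSync
    SnapSync
)

// normalizeMode mirrors the mapping above: SnapSync is requested as its own
// mode but, for now, runs on the fast-sync pipeline with the snap state
// syncer enabled (second return value).
func normalizeMode(mode SyncMode) (SyncMode, bool) {
    if mode == SnapSync {
        return FastSync, true
    }
    return mode, false
}

func main() {
    mode, snap := normalizeMode(SnapSync)
    fmt.Println(mode == FastSync, snap) // true true
}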
@@ -443,8 +459,8 @@ func (d *Downloader) syncWithPeer(p *peerConnection, hash common.Hash, td *big.I
             d.mux.Post(DoneEvent{latest})
         }
     }()
-    if p.version < 63 {
-        return errTooOld
+    if p.version < 64 {
+        return fmt.Errorf("%w, peer version: %d", errTooOld, p.version)
     }
     mode := d.getMode()

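Returning fmt.Errorf("%w, ...", errTooOld, ...) instead of the bare sentinel keeps the offending peer version in the message while callers can still match the sentinel with errors.Is. A minimal demonstration:

package main

import (
    "errors"
    "fmt"
)

var errTooOld = errors.New("peer doesn't speak recent enough protocol version (need version >= 64)")

func main() {
    err := fmt.Errorf("%w, peer version: %d", errTooOld, 63)
    fmt.Println(err)                       // full message, including the version
    fmt.Println(errors.Is(err, errTooOld)) // true: the sentinel is still matchable
}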
@@ -1910,27 +1926,53 @@ func (d *Downloader) commitPivotBlock(result *fetchResult) error {

 // DeliverHeaders injects a new batch of block headers received from a remote
 // node into the download schedule.
-func (d *Downloader) DeliverHeaders(id string, headers []*types.Header) (err error) {
-    return d.deliver(id, d.headerCh, &headerPack{id, headers}, headerInMeter, headerDropMeter)
+func (d *Downloader) DeliverHeaders(id string, headers []*types.Header) error {
+    return d.deliver(d.headerCh, &headerPack{id, headers}, headerInMeter, headerDropMeter)
 }

 // DeliverBodies injects a new batch of block bodies received from a remote node.
-func (d *Downloader) DeliverBodies(id string, transactions [][]*types.Transaction, uncles [][]*types.Header) (err error) {
-    return d.deliver(id, d.bodyCh, &bodyPack{id, transactions, uncles}, bodyInMeter, bodyDropMeter)
+func (d *Downloader) DeliverBodies(id string, transactions [][]*types.Transaction, uncles [][]*types.Header) error {
+    return d.deliver(d.bodyCh, &bodyPack{id, transactions, uncles}, bodyInMeter, bodyDropMeter)
 }

 // DeliverReceipts injects a new batch of receipts received from a remote node.
-func (d *Downloader) DeliverReceipts(id string, receipts [][]*types.Receipt) (err error) {
-    return d.deliver(id, d.receiptCh, &receiptPack{id, receipts}, receiptInMeter, receiptDropMeter)
+func (d *Downloader) DeliverReceipts(id string, receipts [][]*types.Receipt) error {
+    return d.deliver(d.receiptCh, &receiptPack{id, receipts}, receiptInMeter, receiptDropMeter)
 }

 // DeliverNodeData injects a new batch of node state data received from a remote node.
-func (d *Downloader) DeliverNodeData(id string, data [][]byte) (err error) {
-    return d.deliver(id, d.stateCh, &statePack{id, data}, stateInMeter, stateDropMeter)
+func (d *Downloader) DeliverNodeData(id string, data [][]byte) error {
+    return d.deliver(d.stateCh, &statePack{id, data}, stateInMeter, stateDropMeter)
 }

+// DeliverSnapPacket is invoked from a peer's message handler when it transmits a
+// data packet for the local node to consume.
+func (d *Downloader) DeliverSnapPacket(peer *snap.Peer, packet snap.Packet) error {
+    switch packet := packet.(type) {
+    case *snap.AccountRangePacket:
+        hashes, accounts, err := packet.Unpack()
+        if err != nil {
+            return err
+        }
+        return d.SnapSyncer.OnAccounts(peer, packet.ID, hashes, accounts, packet.Proof)
+
+    case *snap.StorageRangesPacket:
+        hashset, slotset := packet.Unpack()
+        return d.SnapSyncer.OnStorage(peer, packet.ID, hashset, slotset, packet.Proof)
+
+    case *snap.ByteCodesPacket:
+        return d.SnapSyncer.OnByteCodes(peer, packet.ID, packet.Codes)
+
+    case *snap.TrieNodesPacket:
+        return d.SnapSyncer.OnTrieNodes(peer, packet.ID, packet.Nodes)
+
+    default:
+        return fmt.Errorf("unexpected snap packet type: %T", packet)
+    }
+}
+
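A snap-capable peer's message handler decodes each response and hands it to DeliverSnapPacket, which fans out by packet type. The dispatch shape in isolation (the packet types below are local stand-ins, not the real snap package):

package main

import "fmt"

// Local stand-ins for the snap packet types, just to show the dispatch shape.
type Packet interface{ Kind() string }

type AccountRangePacket struct{ ID uint64 }

func (*AccountRangePacket) Kind() string { return "AccountRange" }

type ByteCodesPacket struct{ ID uint64 }

func (*ByteCodesPacket) Kind() string { return "ByteCodes" }

// dispatch mirrors DeliverSnapPacket: a type switch routes each response to
// the matching syncer callback; unknown packet types are an error.
func dispatch(packet Packet) error {
    switch packet := packet.(type) {
    case *AccountRangePacket:
        fmt.Println("-> OnAccounts, request", packet.ID)
        return nil
    case *ByteCodesPacket:
        fmt.Println("-> OnByteCodes, request", packet.ID)
        return nil
    default:
        return fmt.Errorf("unexpected snap packet type: %T", packet)
    }
}

func main() {
    _ = dispatch(&AccountRangePacket{ID: 1})
    _ = dispatch(&ByteCodesPacket{ID: 2})
}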
 // deliver injects a new batch of data received from a remote node.
-func (d *Downloader) deliver(id string, destCh chan dataPack, packet dataPack, inMeter, dropMeter metrics.Meter) (err error) {
+func (d *Downloader) deliver(destCh chan dataPack, packet dataPack, inMeter, dropMeter metrics.Meter) (err error) {
     // Update the delivery metrics for both good and failed deliveries
     inMeter.Mark(int64(packet.Items()))
     defer func() {
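The diff is cut off mid-function here, but the visible signature and metrics code indicate the usual downloader delivery pattern: count the packet, then try to hand it to the destination channel unless the sync is cancelled first. A self-contained sketch of that select (channels here are stand-ins for the struct fields, not the literal remainder of the function):

package main

import (
    "errors"
    "fmt"
)

var errNoSyncActive = errors.New("no sync active")

// push sketches the tail of deliver: hand the packet to the processing
// channel, or give up if the sync is cancelled before a consumer takes it.
func push(destCh chan<- string, cancel <-chan struct{}, packet string) error {
    select {
    case destCh <- packet:
        return nil
    case <-cancel:
        return errNoSyncActive
    }
}

func main() {
    dest := make(chan string, 1)
    cancel := make(chan struct{})
    fmt.Println(push(dest, cancel, "headers#1")) // <nil>: queued for processing
    close(cancel)                                // sync torn down
    fmt.Println(push(dest, cancel, "headers#2")) // no sync active: buffer full, cancel fired
}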