core, eth: split eth package, implement snap protocol (#21482)
This commit splits the eth package, separating the handling of the eth and snap protocols. It also includes the capability to run snap sync (https://github.com/ethereum/devp2p/blob/master/caps/snap.md), but does not enable it by default. Co-authored-by: Marius van der Wijden <m.vanderwijden@live.de> Co-authored-by: Martin Holst Swende <martin@swende.se>
This commit is contained in:
32
eth/protocols/snap/discovery.go
Normal file
32
eth/protocols/snap/discovery.go
Normal file
@ -0,0 +1,32 @@
|
||||
// Copyright 2020 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package snap
|
||||
|
||||
import (
|
||||
"github.com/ethereum/go-ethereum/rlp"
|
||||
)
|
||||
|
||||
// enrEntry is the ENR entry which advertises `snap` protocol on the discovery.
type enrEntry struct {
    // Ignore additional fields (for forward compatibility).
    Rest []rlp.RawValue `rlp:"tail"`
}

// ENRKey implements enr.Entry, returning the key under which this entry is
// registered in a node record.
func (e enrEntry) ENRKey() string {
    return "snap"
}
|
490
eth/protocols/snap/handler.go
Normal file
490
eth/protocols/snap/handler.go
Normal file
@ -0,0 +1,490 @@
|
||||
// Copyright 2020 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package snap
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/core"
|
||||
"github.com/ethereum/go-ethereum/core/state"
|
||||
"github.com/ethereum/go-ethereum/light"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"github.com/ethereum/go-ethereum/p2p"
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
"github.com/ethereum/go-ethereum/p2p/enr"
|
||||
"github.com/ethereum/go-ethereum/rlp"
|
||||
"github.com/ethereum/go-ethereum/trie"
|
||||
)
|
||||
|
||||
const (
    // softResponseLimit is the target maximum size of replies to data retrievals.
    softResponseLimit = 2 * 1024 * 1024

    // maxCodeLookups is the maximum number of bytecodes to serve. This number is
    // there to limit the number of disk lookups.
    maxCodeLookups = 1024

    // stateLookupSlack defines the ratio by how much a state response can exceed
    // the requested limit in order to try and avoid breaking up contracts into
    // multiple packages and proving them.
    stateLookupSlack = 0.1

    // maxTrieNodeLookups is the maximum number of state trie nodes to serve. This
    // number is there to limit the number of disk lookups.
    maxTrieNodeLookups = 1024
)
|
||||
|
||||
// Handler is a callback to invoke from an outside runner after the boilerplate
// exchanges have passed.
type Handler func(peer *Peer) error
|
||||
|
||||
// Backend defines the data retrieval methods to serve remote requests and the
// callback methods to invoke on remote deliveries.
type Backend interface {
    // Chain retrieves the blockchain object to serve data.
    Chain() *core.BlockChain

    // RunPeer is invoked when a peer joins on the `snap` protocol. The handler
    // should do any peer maintenance work, handshakes and validations. If all
    // is passed, control should be given back to the `handler` to process the
    // inbound messages going forward.
    RunPeer(peer *Peer, handler Handler) error

    // PeerInfo retrieves all known `snap` information about a peer.
    PeerInfo(id enode.ID) interface{}

    // Handle is a callback to be invoked when a data packet is received from
    // the remote peer. Only packets not consumed by the protocol handler will
    // be forwarded to the backend.
    Handle(peer *Peer, packet Packet) error
}
|
||||
|
||||
// MakeProtocols constructs the P2P protocol definitions for `snap`.
|
||||
func MakeProtocols(backend Backend, dnsdisc enode.Iterator) []p2p.Protocol {
|
||||
protocols := make([]p2p.Protocol, len(protocolVersions))
|
||||
for i, version := range protocolVersions {
|
||||
version := version // Closure
|
||||
|
||||
protocols[i] = p2p.Protocol{
|
||||
Name: protocolName,
|
||||
Version: version,
|
||||
Length: protocolLengths[version],
|
||||
Run: func(p *p2p.Peer, rw p2p.MsgReadWriter) error {
|
||||
return backend.RunPeer(newPeer(version, p, rw), func(peer *Peer) error {
|
||||
return handle(backend, peer)
|
||||
})
|
||||
},
|
||||
NodeInfo: func() interface{} {
|
||||
return nodeInfo(backend.Chain())
|
||||
},
|
||||
PeerInfo: func(id enode.ID) interface{} {
|
||||
return backend.PeerInfo(id)
|
||||
},
|
||||
Attributes: []enr.Entry{&enrEntry{}},
|
||||
DialCandidates: dnsdisc,
|
||||
}
|
||||
}
|
||||
return protocols
|
||||
}
|
||||
|
||||
// handle is the callback invoked to manage the life cycle of a `snap` peer.
|
||||
// When this function terminates, the peer is disconnected.
|
||||
func handle(backend Backend, peer *Peer) error {
|
||||
for {
|
||||
if err := handleMessage(backend, peer); err != nil {
|
||||
peer.Log().Debug("Message handling failed in `snap`", "err", err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// handleMessage is invoked whenever an inbound message is received from a
// remote peer on the `snap` protocol. The remote connection is torn down upon
// returning any error.
func handleMessage(backend Backend, peer *Peer) error {
    // Read the next message from the remote peer, and ensure it's fully consumed
    msg, err := peer.rw.ReadMsg()
    if err != nil {
        return err
    }
    if msg.Size > maxMessageSize {
        return fmt.Errorf("%w: %v > %v", errMsgTooLarge, msg.Size, maxMessageSize)
    }
    defer msg.Discard()

    // Handle the message depending on its contents
    switch {
    case msg.Code == GetAccountRangeMsg:
        // Decode the account retrieval request
        var req GetAccountRangePacket
        if err := msg.Decode(&req); err != nil {
            return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
        }
        // Clamp the requested data volume to our own soft cap
        if req.Bytes > softResponseLimit {
            req.Bytes = softResponseLimit
        }
        // Retrieve the requested state and bail out if non existent; an empty
        // reply (just the request ID) signals unavailability to the requester
        tr, err := trie.New(req.Root, backend.Chain().StateCache().TrieDB())
        if err != nil {
            return p2p.Send(peer.rw, AccountRangeMsg, &AccountRangePacket{ID: req.ID})
        }
        it, err := backend.Chain().Snapshots().AccountIterator(req.Root, req.Origin)
        if err != nil {
            return p2p.Send(peer.rw, AccountRangeMsg, &AccountRangePacket{ID: req.ID})
        }
        // Iterate over the requested range and pile accounts up
        var (
            accounts []*AccountData
            size     uint64
            last     common.Hash
        )
        for it.Next() && size < req.Bytes {
            hash, account := it.Hash(), common.CopyBytes(it.Account())

            // Track the returned interval for the Merkle proofs
            last = hash

            // Assemble the reply item
            size += uint64(common.HashLength + len(account))
            accounts = append(accounts, &AccountData{
                Hash: hash,
                Body: account,
            })
            // If we've exceeded the request threshold, abort
            if bytes.Compare(hash[:], req.Limit[:]) >= 0 {
                break
            }
        }
        it.Release()

        // Generate the Merkle proofs for the first and last account
        proof := light.NewNodeSet()
        if err := tr.Prove(req.Origin[:], 0, proof); err != nil {
            log.Warn("Failed to prove account range", "origin", req.Origin, "err", err)
            return p2p.Send(peer.rw, AccountRangeMsg, &AccountRangePacket{ID: req.ID})
        }
        // Only prove the last account if anything was actually returned
        if last != (common.Hash{}) {
            if err := tr.Prove(last[:], 0, proof); err != nil {
                log.Warn("Failed to prove account range", "last", last, "err", err)
                return p2p.Send(peer.rw, AccountRangeMsg, &AccountRangePacket{ID: req.ID})
            }
        }
        var proofs [][]byte
        for _, blob := range proof.NodeList() {
            proofs = append(proofs, blob)
        }
        // Send back anything accumulated
        return p2p.Send(peer.rw, AccountRangeMsg, &AccountRangePacket{
            ID:       req.ID,
            Accounts: accounts,
            Proof:    proofs,
        })

    case msg.Code == AccountRangeMsg:
        // A range of accounts arrived to one of our previous requests
        res := new(AccountRangePacket)
        if err := msg.Decode(res); err != nil {
            return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
        }
        // Ensure the range is monotonically increasing
        for i := 1; i < len(res.Accounts); i++ {
            if bytes.Compare(res.Accounts[i-1].Hash[:], res.Accounts[i].Hash[:]) >= 0 {
                return fmt.Errorf("accounts not monotonically increasing: #%d [%x] vs #%d [%x]", i-1, res.Accounts[i-1].Hash[:], i, res.Accounts[i].Hash[:])
            }
        }
        return backend.Handle(peer, res)

    case msg.Code == GetStorageRangesMsg:
        // Decode the storage retrieval request
        var req GetStorageRangesPacket
        if err := msg.Decode(&req); err != nil {
            return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
        }
        if req.Bytes > softResponseLimit {
            req.Bytes = softResponseLimit
        }
        // TODO(karalabe): Do we want to enforce > 0 accounts and 1 account if origin is set?
        // TODO(karalabe): - Logging locally is not ideal as remote faults annoy the local user
        // TODO(karalabe): - Dropping the remote peer is less flexible wrt client bugs (slow is better than non-functional)

        // Calculate the hard limit at which to abort, even if mid storage trie
        hardLimit := uint64(float64(req.Bytes) * (1 + stateLookupSlack))

        // Retrieve storage ranges until the packet limit is reached
        var (
            slots  [][]*StorageData
            proofs [][]byte
            size   uint64
        )
        for _, account := range req.Accounts {
            // If we've exceeded the requested data limit, abort without opening
            // a new storage range (that we'd need to prove due to exceeded size)
            if size >= req.Bytes {
                break
            }
            // The first account might start from a different origin and end sooner.
            // Note: req.Origin/req.Limit are consumed (nil'ed) here so that only
            // the first iterated account honors them.
            var origin common.Hash
            if len(req.Origin) > 0 {
                origin, req.Origin = common.BytesToHash(req.Origin), nil
            }
            var limit = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
            if len(req.Limit) > 0 {
                limit, req.Limit = common.BytesToHash(req.Limit), nil
            }
            // Retrieve the requested state and bail out if non existent
            it, err := backend.Chain().Snapshots().StorageIterator(req.Root, account, origin)
            if err != nil {
                return p2p.Send(peer.rw, StorageRangesMsg, &StorageRangesPacket{ID: req.ID})
            }
            // Iterate over the requested range and pile slots up
            var (
                storage []*StorageData
                last    common.Hash
            )
            for it.Next() && size < hardLimit {
                hash, slot := it.Hash(), common.CopyBytes(it.Slot())

                // Track the returned interval for the Merkle proofs
                last = hash

                // Assemble the reply item
                size += uint64(common.HashLength + len(slot))
                storage = append(storage, &StorageData{
                    Hash: hash,
                    Body: slot,
                })
                // If we've exceeded the request threshold, abort
                if bytes.Compare(hash[:], limit[:]) >= 0 {
                    break
                }
            }
            slots = append(slots, storage)
            it.Release()

            // Generate the Merkle proofs for the first and last storage slot, but
            // only if the response was capped. If the entire storage trie included
            // in the response, no need for any proofs.
            if origin != (common.Hash{}) || size >= hardLimit {
                // Request started at a non-zero hash or was capped prematurely, add
                // the endpoint Merkle proofs
                accTrie, err := trie.New(req.Root, backend.Chain().StateCache().TrieDB())
                if err != nil {
                    return p2p.Send(peer.rw, StorageRangesMsg, &StorageRangesPacket{ID: req.ID})
                }
                var acc state.Account
                if err := rlp.DecodeBytes(accTrie.Get(account[:]), &acc); err != nil {
                    return p2p.Send(peer.rw, StorageRangesMsg, &StorageRangesPacket{ID: req.ID})
                }
                stTrie, err := trie.New(acc.Root, backend.Chain().StateCache().TrieDB())
                if err != nil {
                    return p2p.Send(peer.rw, StorageRangesMsg, &StorageRangesPacket{ID: req.ID})
                }
                proof := light.NewNodeSet()
                if err := stTrie.Prove(origin[:], 0, proof); err != nil {
                    // NOTE(review): req.Origin was nil'ed above, so this logs the
                    // consumed (possibly nil) value rather than `origin` — verify.
                    log.Warn("Failed to prove storage range", "origin", req.Origin, "err", err)
                    return p2p.Send(peer.rw, StorageRangesMsg, &StorageRangesPacket{ID: req.ID})
                }
                if last != (common.Hash{}) {
                    if err := stTrie.Prove(last[:], 0, proof); err != nil {
                        log.Warn("Failed to prove storage range", "last", last, "err", err)
                        return p2p.Send(peer.rw, StorageRangesMsg, &StorageRangesPacket{ID: req.ID})
                    }
                }
                for _, blob := range proof.NodeList() {
                    proofs = append(proofs, blob)
                }
                // Proof terminates the reply as proofs are only added if a node
                // refuses to serve more data (exception when a contract fetch is
                // finishing, but that's that).
                break
            }
        }
        // Send back anything accumulated
        return p2p.Send(peer.rw, StorageRangesMsg, &StorageRangesPacket{
            ID:    req.ID,
            Slots: slots,
            Proof: proofs,
        })

    case msg.Code == StorageRangesMsg:
        // A range of storage slots arrived to one of our previous requests
        res := new(StorageRangesPacket)
        if err := msg.Decode(res); err != nil {
            return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
        }
        // Ensure the ranges are monotonically increasing
        for i, slots := range res.Slots {
            for j := 1; j < len(slots); j++ {
                if bytes.Compare(slots[j-1].Hash[:], slots[j].Hash[:]) >= 0 {
                    return fmt.Errorf("storage slots not monotonically increasing for account #%d: #%d [%x] vs #%d [%x]", i, j-1, slots[j-1].Hash[:], j, slots[j].Hash[:])
                }
            }
        }
        return backend.Handle(peer, res)

    case msg.Code == GetByteCodesMsg:
        // Decode bytecode retrieval request
        var req GetByteCodesPacket
        if err := msg.Decode(&req); err != nil {
            return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
        }
        if req.Bytes > softResponseLimit {
            req.Bytes = softResponseLimit
        }
        // Cap the number of lookups to bound disk access
        if len(req.Hashes) > maxCodeLookups {
            req.Hashes = req.Hashes[:maxCodeLookups]
        }
        // Retrieve bytecodes until the packet size limit is reached
        var (
            codes [][]byte
            bytes uint64
        )
        for _, hash := range req.Hashes {
            if hash == emptyCode {
                // Peers should not request the empty code, but if they do, at
                // least sent them back a correct response without db lookups
                codes = append(codes, []byte{})
            } else if blob, err := backend.Chain().ContractCode(hash); err == nil {
                codes = append(codes, blob)
                bytes += uint64(len(blob))
            }
            if bytes > req.Bytes {
                break
            }
        }
        // Send back anything accumulated
        return p2p.Send(peer.rw, ByteCodesMsg, &ByteCodesPacket{
            ID:    req.ID,
            Codes: codes,
        })

    case msg.Code == ByteCodesMsg:
        // A batch of byte codes arrived to one of our previous requests
        res := new(ByteCodesPacket)
        if err := msg.Decode(res); err != nil {
            return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
        }
        return backend.Handle(peer, res)

    case msg.Code == GetTrieNodesMsg:
        // Decode trie node retrieval request
        var req GetTrieNodesPacket
        if err := msg.Decode(&req); err != nil {
            return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
        }
        if req.Bytes > softResponseLimit {
            req.Bytes = softResponseLimit
        }
        // Make sure we have the state associated with the request
        triedb := backend.Chain().StateCache().TrieDB()

        accTrie, err := trie.NewSecure(req.Root, triedb)
        if err != nil {
            // We don't have the requested state available, bail out
            return p2p.Send(peer.rw, TrieNodesMsg, &TrieNodesPacket{ID: req.ID})
        }
        snap := backend.Chain().Snapshots().Snapshot(req.Root)
        if snap == nil {
            // We don't have the requested state snapshotted yet, bail out.
            // In reality we could still serve using the account and storage
            // tries only, but let's protect the node a bit while it's doing
            // snapshot generation.
            return p2p.Send(peer.rw, TrieNodesMsg, &TrieNodesPacket{ID: req.ID})
        }
        // Retrieve trie nodes until the packet size limit is reached
        var (
            nodes [][]byte
            bytes uint64
            loads int // Trie hash expansions to count database reads
        )
        for _, pathset := range req.Paths {
            switch len(pathset) {
            case 0:
                // Ensure we penalize invalid requests
                return fmt.Errorf("%w: zero-item pathset requested", errBadRequest)

            case 1:
                // If we're only retrieving an account trie node, fetch it directly
                blob, resolved, err := accTrie.TryGetNode(pathset[0])
                loads += resolved // always account database reads, even for failures
                if err != nil {
                    break
                }
                nodes = append(nodes, blob)
                bytes += uint64(len(blob))

            default:
                // Storage slots requested, open the storage trie and retrieve from there
                account, err := snap.Account(common.BytesToHash(pathset[0]))
                loads++ // always account database reads, even for failures
                if err != nil {
                    break
                }
                stTrie, err := trie.NewSecure(common.BytesToHash(account.Root), triedb)
                loads++ // always account database reads, even for failures
                if err != nil {
                    break
                }
                for _, path := range pathset[1:] {
                    blob, resolved, err := stTrie.TryGetNode(path)
                    loads += resolved // always account database reads, even for failures
                    if err != nil {
                        break
                    }
                    nodes = append(nodes, blob)
                    bytes += uint64(len(blob))

                    // Sanity check limits to avoid DoS on the store trie loads
                    if bytes > req.Bytes || loads > maxTrieNodeLookups {
                        break
                    }
                }
            }
            // Abort request processing if we've exceeded our limits
            if bytes > req.Bytes || loads > maxTrieNodeLookups {
                break
            }
        }
        // Send back anything accumulated
        return p2p.Send(peer.rw, TrieNodesMsg, &TrieNodesPacket{
            ID:    req.ID,
            Nodes: nodes,
        })

    case msg.Code == TrieNodesMsg:
        // A batch of trie nodes arrived to one of our previous requests
        res := new(TrieNodesPacket)
        if err := msg.Decode(res); err != nil {
            return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
        }
        return backend.Handle(peer, res)

    default:
        return fmt.Errorf("%w: %v", errInvalidMsgCode, msg.Code)
    }
}
|
||||
|
||||
// NodeInfo represents a short summary of the `snap` sub-protocol metadata
// known about the host peer. Currently empty — the protocol carries no
// node-level metadata.
type NodeInfo struct{}

// nodeInfo retrieves some `snap` protocol metadata about the running host node.
// The chain argument is currently unused.
func nodeInfo(chain *core.BlockChain) *NodeInfo {
    return &NodeInfo{}
}
|
111
eth/protocols/snap/peer.go
Normal file
111
eth/protocols/snap/peer.go
Normal file
@ -0,0 +1,111 @@
|
||||
// Copyright 2020 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package snap
|
||||
|
||||
import (
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"github.com/ethereum/go-ethereum/p2p"
|
||||
)
|
||||
|
||||
// Peer is a collection of relevant information we have about a `snap` peer.
type Peer struct {
    id string // Unique ID for the peer, cached

    *p2p.Peer                   // The embedded P2P package peer
    rw        p2p.MsgReadWriter // Input/output streams for snap
    version   uint              // Protocol version negotiated

    logger log.Logger // Contextual logger with the peer id injected
}
|
||||
|
||||
// newPeer create a wrapper for a network connection and negotiated protocol
|
||||
// version.
|
||||
func newPeer(version uint, p *p2p.Peer, rw p2p.MsgReadWriter) *Peer {
|
||||
id := p.ID().String()
|
||||
return &Peer{
|
||||
id: id,
|
||||
Peer: p,
|
||||
rw: rw,
|
||||
version: version,
|
||||
logger: log.New("peer", id[:8]),
|
||||
}
|
||||
}
|
||||
|
||||
// ID retrieves the peer's unique identifier.
func (p *Peer) ID() string {
    return p.id
}
|
||||
|
||||
// Version retrieves the peer's negotiated `snap` protocol version.
func (p *Peer) Version() uint {
    return p.version
}
|
||||
|
||||
// RequestAccountRange fetches a batch of accounts rooted in a specific account
|
||||
// trie, starting with the origin.
|
||||
func (p *Peer) RequestAccountRange(id uint64, root common.Hash, origin, limit common.Hash, bytes uint64) error {
|
||||
p.logger.Trace("Fetching range of accounts", "reqid", id, "root", root, "origin", origin, "limit", limit, "bytes", common.StorageSize(bytes))
|
||||
return p2p.Send(p.rw, GetAccountRangeMsg, &GetAccountRangePacket{
|
||||
ID: id,
|
||||
Root: root,
|
||||
Origin: origin,
|
||||
Limit: limit,
|
||||
Bytes: bytes,
|
||||
})
|
||||
}
|
||||
|
||||
// RequestStorageRange fetches a batch of storage slots belonging to one or more
|
||||
// accounts. If slots from only one accout is requested, an origin marker may also
|
||||
// be used to retrieve from there.
|
||||
func (p *Peer) RequestStorageRanges(id uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, bytes uint64) error {
|
||||
if len(accounts) == 1 && origin != nil {
|
||||
p.logger.Trace("Fetching range of large storage slots", "reqid", id, "root", root, "account", accounts[0], "origin", common.BytesToHash(origin), "limit", common.BytesToHash(limit), "bytes", common.StorageSize(bytes))
|
||||
} else {
|
||||
p.logger.Trace("Fetching ranges of small storage slots", "reqid", id, "root", root, "accounts", len(accounts), "first", accounts[0], "bytes", common.StorageSize(bytes))
|
||||
}
|
||||
return p2p.Send(p.rw, GetStorageRangesMsg, &GetStorageRangesPacket{
|
||||
ID: id,
|
||||
Root: root,
|
||||
Accounts: accounts,
|
||||
Origin: origin,
|
||||
Limit: limit,
|
||||
Bytes: bytes,
|
||||
})
|
||||
}
|
||||
|
||||
// RequestByteCodes fetches a batch of bytecodes by hash.
|
||||
func (p *Peer) RequestByteCodes(id uint64, hashes []common.Hash, bytes uint64) error {
|
||||
p.logger.Trace("Fetching set of byte codes", "reqid", id, "hashes", len(hashes), "bytes", common.StorageSize(bytes))
|
||||
return p2p.Send(p.rw, GetByteCodesMsg, &GetByteCodesPacket{
|
||||
ID: id,
|
||||
Hashes: hashes,
|
||||
Bytes: bytes,
|
||||
})
|
||||
}
|
||||
|
||||
// RequestTrieNodes fetches a batch of account or storage trie nodes rooted in
|
||||
// a specificstate trie.
|
||||
func (p *Peer) RequestTrieNodes(id uint64, root common.Hash, paths []TrieNodePathSet, bytes uint64) error {
|
||||
p.logger.Trace("Fetching set of trie nodes", "reqid", id, "root", root, "pathsets", len(paths), "bytes", common.StorageSize(bytes))
|
||||
return p2p.Send(p.rw, GetTrieNodesMsg, &GetTrieNodesPacket{
|
||||
ID: id,
|
||||
Root: root,
|
||||
Paths: paths,
|
||||
Bytes: bytes,
|
||||
})
|
||||
}
|
218
eth/protocols/snap/protocol.go
Normal file
218
eth/protocols/snap/protocol.go
Normal file
@ -0,0 +1,218 @@
|
||||
// Copyright 2020 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package snap
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/core/state/snapshot"
|
||||
"github.com/ethereum/go-ethereum/rlp"
|
||||
)
|
||||
|
||||
// Constants to match up protocol versions and messages
const (
    snap1 = 1
)

// protocolName is the official short name of the `snap` protocol used during
// devp2p capability negotiation.
const protocolName = "snap"

// protocolVersions are the supported versions of the `snap` protocol (first
// is primary).
var protocolVersions = []uint{snap1}

// protocolLengths are the number of implemented messages corresponding to
// different protocol versions.
var protocolLengths = map[uint]uint64{snap1: 8}

// maxMessageSize is the maximum cap on the size of a protocol message.
const maxMessageSize = 10 * 1024 * 1024

// Message codes for the `snap` protocol; each request is immediately followed
// by its response code.
const (
    GetAccountRangeMsg  = 0x00
    AccountRangeMsg     = 0x01
    GetStorageRangesMsg = 0x02
    StorageRangesMsg    = 0x03
    GetByteCodesMsg     = 0x04
    ByteCodesMsg        = 0x05
    GetTrieNodesMsg     = 0x06
    TrieNodesMsg        = 0x07
)

// Protocol-level sentinel errors; returning any of these from a handler tears
// down the remote connection.
var (
    errMsgTooLarge    = errors.New("message too long")
    errDecode         = errors.New("invalid message")
    errInvalidMsgCode = errors.New("invalid message code")
    errBadRequest     = errors.New("bad request")
)
|
||||
|
||||
// Packet represents a p2p message in the `snap` protocol.
type Packet interface {
    Name() string // Name returns a string corresponding to the message type.
    Kind() byte   // Kind returns the message type.
}
|
||||
|
||||
// GetAccountRangePacket represents an account query.
type GetAccountRangePacket struct {
    ID     uint64      // Request ID to match up responses with
    Root   common.Hash // Root hash of the account trie to serve
    Origin common.Hash // Hash of the first account to retrieve
    Limit  common.Hash // Hash of the last account to retrieve
    Bytes  uint64      // Soft limit at which to stop returning data
}
|
||||
|
||||
// AccountRangePacket represents an account query response.
type AccountRangePacket struct {
    ID       uint64         // ID of the request this is a response for
    Accounts []*AccountData // List of consecutive accounts from the trie
    Proof    [][]byte       // List of trie nodes proving the account range
}
|
||||
|
||||
// AccountData represents a single account in a query response.
type AccountData struct {
    Hash common.Hash  // Hash of the account
    Body rlp.RawValue // Account body in slim format
}
|
||||
|
||||
// Unpack retrieves the accounts from the range packet and converts from slim
|
||||
// wire representation to consensus format. The returned data is RLP encoded
|
||||
// since it's expected to be serialized to disk without further interpretation.
|
||||
//
|
||||
// Note, this method does a round of RLP decoding and reencoding, so only use it
|
||||
// once and cache the results if need be. Ideally discard the packet afterwards
|
||||
// to not double the memory use.
|
||||
func (p *AccountRangePacket) Unpack() ([]common.Hash, [][]byte, error) {
|
||||
var (
|
||||
hashes = make([]common.Hash, len(p.Accounts))
|
||||
accounts = make([][]byte, len(p.Accounts))
|
||||
)
|
||||
for i, acc := range p.Accounts {
|
||||
val, err := snapshot.FullAccountRLP(acc.Body)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("invalid account %x: %v", acc.Body, err)
|
||||
}
|
||||
hashes[i], accounts[i] = acc.Hash, val
|
||||
}
|
||||
return hashes, accounts, nil
|
||||
}
|
||||
|
||||
// GetStorageRangesPacket represents a storage slot query.
type GetStorageRangesPacket struct {
    ID       uint64        // Request ID to match up responses with
    Root     common.Hash   // Root hash of the account trie to serve
    Accounts []common.Hash // Account hashes of the storage tries to serve
    Origin   []byte        // Hash of the first storage slot to retrieve (large contract mode)
    Limit    []byte        // Hash of the last storage slot to retrieve (large contract mode)
    Bytes    uint64        // Soft limit at which to stop returning data
}
|
||||
|
||||
// StorageRangesPacket represents a storage slot query response.
type StorageRangesPacket struct {
    ID    uint64           // ID of the request this is a response for
    Slots [][]*StorageData // Lists of consecutive storage slots for the requested accounts
    Proof [][]byte         // Merkle proofs for the *last* slot range, if it's incomplete
}
|
||||
|
||||
// StorageData represents a single storage slot in a query response.
type StorageData struct {
	Hash common.Hash // Hash of the storage slot
	Body []byte      // Data content of the slot
}
|
||||
// Unpack retrieves the storage slots from the range packet and returns them in
|
||||
// a split flat format that's more consistent with the internal data structures.
|
||||
func (p *StorageRangesPacket) Unpack() ([][]common.Hash, [][][]byte) {
|
||||
var (
|
||||
hashset = make([][]common.Hash, len(p.Slots))
|
||||
slotset = make([][][]byte, len(p.Slots))
|
||||
)
|
||||
for i, slots := range p.Slots {
|
||||
hashset[i] = make([]common.Hash, len(slots))
|
||||
slotset[i] = make([][]byte, len(slots))
|
||||
for j, slot := range slots {
|
||||
hashset[i][j] = slot.Hash
|
||||
slotset[i][j] = slot.Body
|
||||
}
|
||||
}
|
||||
return hashset, slotset
|
||||
}
|
||||
|
||||
// GetByteCodesPacket represents a contract bytecode query.
type GetByteCodesPacket struct {
	ID     uint64        // Request ID to match up responses with
	Hashes []common.Hash // Code hashes to retrieve the code for
	Bytes  uint64        // Soft limit at which to stop returning data
}
|
||||
// ByteCodesPacket represents a contract bytecode query response.
type ByteCodesPacket struct {
	ID    uint64   // ID of the request this is a response for
	Codes [][]byte // Requested contract bytecodes
}
|
||||
// GetTrieNodesPacket represents a state trie node query.
//
// The requested nodes are addressed via TrieNodePathSet values; see that type
// for the path encoding.
type GetTrieNodesPacket struct {
	ID    uint64            // Request ID to match up responses with
	Root  common.Hash       // Root hash of the account trie to serve
	Paths []TrieNodePathSet // Trie node hashes to retrieve the nodes for
	Bytes uint64            // Soft limit at which to stop returning data
}
|
||||
// TrieNodePathSet is a list of trie node paths to retrieve. A naive way to
// represent trie nodes would be a simple list of `account || storage` path
// segments concatenated, but that would be very wasteful on the network.
//
// Instead, this array special cases the first element as the path in the
// account trie and the remaining elements as paths in the storage trie. To
// address an account node, the slice should have a length of 1 consisting
// of only the account path. There's no need to be able to address both an
// account node and a storage node in the same request as it cannot happen
// that a slot is accessed before the account path is fully expanded.
type TrieNodePathSet [][]byte // layout: [account-path] or [account-path, slot-path, slot-path, ...]
|
||||
// TrieNodesPacket represents a state trie node query response.
type TrieNodesPacket struct {
	ID    uint64   // ID of the request this is a response for
	Nodes [][]byte // Requested state trie nodes
}
|
||||
// The methods below make each packet type self-identifying: Name returns the
// packet's human-readable name (for logging) and Kind returns the matching
// message code constant of the `snap` wire protocol.

func (*GetAccountRangePacket) Name() string { return "GetAccountRange" }
func (*GetAccountRangePacket) Kind() byte   { return GetAccountRangeMsg }

func (*AccountRangePacket) Name() string { return "AccountRange" }
func (*AccountRangePacket) Kind() byte   { return AccountRangeMsg }

func (*GetStorageRangesPacket) Name() string { return "GetStorageRanges" }
func (*GetStorageRangesPacket) Kind() byte   { return GetStorageRangesMsg }

func (*StorageRangesPacket) Name() string { return "StorageRanges" }
func (*StorageRangesPacket) Kind() byte   { return StorageRangesMsg }

func (*GetByteCodesPacket) Name() string { return "GetByteCodes" }
func (*GetByteCodesPacket) Kind() byte   { return GetByteCodesMsg }

func (*ByteCodesPacket) Name() string { return "ByteCodes" }
func (*ByteCodesPacket) Kind() byte   { return ByteCodesMsg }

func (*GetTrieNodesPacket) Name() string { return "GetTrieNodes" }
func (*GetTrieNodesPacket) Kind() byte   { return GetTrieNodesMsg }

func (*TrieNodesPacket) Name() string { return "TrieNodes" }
func (*TrieNodesPacket) Kind() byte   { return TrieNodesMsg }
2481
eth/protocols/snap/sync.go
Normal file
2481
eth/protocols/snap/sync.go
Normal file
File diff suppressed because it is too large
Load Diff
Reference in New Issue
Block a user