core, eth: split eth package, implement snap protocol (#21482)
This commit splits the eth package, separating the handling of eth and snap protocols. It also includes the capability to run snap sync (https://github.com/ethereum/devp2p/blob/master/caps/snap.md) , but does not enable it by default. Co-authored-by: Marius van der Wijden <m.vanderwijden@live.de> Co-authored-by: Martin Holst Swende <martin@swende.se>
This commit is contained in:
195
eth/protocols/eth/broadcast.go
Normal file
195
eth/protocols/eth/broadcast.go
Normal file
@ -0,0 +1,195 @@
|
||||
// Copyright 2019 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package eth
|
||||
|
||||
import (
|
||||
"math/big"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
)
|
||||
|
||||
const (
|
||||
// This is the target size for the packs of transactions or announcements. A
|
||||
// pack can get larger than this if a single transactions exceeds this size.
|
||||
maxTxPacketSize = 100 * 1024
|
||||
)
|
||||
|
||||
// blockPropagation is a block propagation event, waiting for its turn in the
|
||||
// broadcast queue.
|
||||
type blockPropagation struct {
|
||||
block *types.Block
|
||||
td *big.Int
|
||||
}
|
||||
|
||||
// broadcastBlocks is a write loop that multiplexes blocks and block accouncements
|
||||
// to the remote peer. The goal is to have an async writer that does not lock up
|
||||
// node internals and at the same time rate limits queued data.
|
||||
func (p *Peer) broadcastBlocks() {
|
||||
for {
|
||||
select {
|
||||
case prop := <-p.queuedBlocks:
|
||||
if err := p.SendNewBlock(prop.block, prop.td); err != nil {
|
||||
return
|
||||
}
|
||||
p.Log().Trace("Propagated block", "number", prop.block.Number(), "hash", prop.block.Hash(), "td", prop.td)
|
||||
|
||||
case block := <-p.queuedBlockAnns:
|
||||
if err := p.SendNewBlockHashes([]common.Hash{block.Hash()}, []uint64{block.NumberU64()}); err != nil {
|
||||
return
|
||||
}
|
||||
p.Log().Trace("Announced block", "number", block.Number(), "hash", block.Hash())
|
||||
|
||||
case <-p.term:
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// broadcastTransactions is a write loop that schedules transaction broadcasts
|
||||
// to the remote peer. The goal is to have an async writer that does not lock up
|
||||
// node internals and at the same time rate limits queued data.
|
||||
func (p *Peer) broadcastTransactions() {
|
||||
var (
|
||||
queue []common.Hash // Queue of hashes to broadcast as full transactions
|
||||
done chan struct{} // Non-nil if background broadcaster is running
|
||||
fail = make(chan error, 1) // Channel used to receive network error
|
||||
failed bool // Flag whether a send failed, discard everything onward
|
||||
)
|
||||
for {
|
||||
// If there's no in-flight broadcast running, check if a new one is needed
|
||||
if done == nil && len(queue) > 0 {
|
||||
// Pile transaction until we reach our allowed network limit
|
||||
var (
|
||||
hashes []common.Hash
|
||||
txs []*types.Transaction
|
||||
size common.StorageSize
|
||||
)
|
||||
for i := 0; i < len(queue) && size < maxTxPacketSize; i++ {
|
||||
if tx := p.txpool.Get(queue[i]); tx != nil {
|
||||
txs = append(txs, tx)
|
||||
size += tx.Size()
|
||||
}
|
||||
hashes = append(hashes, queue[i])
|
||||
}
|
||||
queue = queue[:copy(queue, queue[len(hashes):])]
|
||||
|
||||
// If there's anything available to transfer, fire up an async writer
|
||||
if len(txs) > 0 {
|
||||
done = make(chan struct{})
|
||||
go func() {
|
||||
if err := p.SendTransactions(txs); err != nil {
|
||||
fail <- err
|
||||
return
|
||||
}
|
||||
close(done)
|
||||
p.Log().Trace("Sent transactions", "count", len(txs))
|
||||
}()
|
||||
}
|
||||
}
|
||||
// Transfer goroutine may or may not have been started, listen for events
|
||||
select {
|
||||
case hashes := <-p.txBroadcast:
|
||||
// If the connection failed, discard all transaction events
|
||||
if failed {
|
||||
continue
|
||||
}
|
||||
// New batch of transactions to be broadcast, queue them (with cap)
|
||||
queue = append(queue, hashes...)
|
||||
if len(queue) > maxQueuedTxs {
|
||||
// Fancy copy and resize to ensure buffer doesn't grow indefinitely
|
||||
queue = queue[:copy(queue, queue[len(queue)-maxQueuedTxs:])]
|
||||
}
|
||||
|
||||
case <-done:
|
||||
done = nil
|
||||
|
||||
case <-fail:
|
||||
failed = true
|
||||
|
||||
case <-p.term:
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// announceTransactions is a write loop that schedules transaction broadcasts
|
||||
// to the remote peer. The goal is to have an async writer that does not lock up
|
||||
// node internals and at the same time rate limits queued data.
|
||||
func (p *Peer) announceTransactions() {
|
||||
var (
|
||||
queue []common.Hash // Queue of hashes to announce as transaction stubs
|
||||
done chan struct{} // Non-nil if background announcer is running
|
||||
fail = make(chan error, 1) // Channel used to receive network error
|
||||
failed bool // Flag whether a send failed, discard everything onward
|
||||
)
|
||||
for {
|
||||
// If there's no in-flight announce running, check if a new one is needed
|
||||
if done == nil && len(queue) > 0 {
|
||||
// Pile transaction hashes until we reach our allowed network limit
|
||||
var (
|
||||
hashes []common.Hash
|
||||
pending []common.Hash
|
||||
size common.StorageSize
|
||||
)
|
||||
for i := 0; i < len(queue) && size < maxTxPacketSize; i++ {
|
||||
if p.txpool.Get(queue[i]) != nil {
|
||||
pending = append(pending, queue[i])
|
||||
size += common.HashLength
|
||||
}
|
||||
hashes = append(hashes, queue[i])
|
||||
}
|
||||
queue = queue[:copy(queue, queue[len(hashes):])]
|
||||
|
||||
// If there's anything available to transfer, fire up an async writer
|
||||
if len(pending) > 0 {
|
||||
done = make(chan struct{})
|
||||
go func() {
|
||||
if err := p.sendPooledTransactionHashes(pending); err != nil {
|
||||
fail <- err
|
||||
return
|
||||
}
|
||||
close(done)
|
||||
p.Log().Trace("Sent transaction announcements", "count", len(pending))
|
||||
}()
|
||||
}
|
||||
}
|
||||
// Transfer goroutine may or may not have been started, listen for events
|
||||
select {
|
||||
case hashes := <-p.txAnnounce:
|
||||
// If the connection failed, discard all transaction events
|
||||
if failed {
|
||||
continue
|
||||
}
|
||||
// New batch of transactions to be broadcast, queue them (with cap)
|
||||
queue = append(queue, hashes...)
|
||||
if len(queue) > maxQueuedTxAnns {
|
||||
// Fancy copy and resize to ensure buffer doesn't grow indefinitely
|
||||
queue = queue[:copy(queue, queue[len(queue)-maxQueuedTxs:])]
|
||||
}
|
||||
|
||||
case <-done:
|
||||
done = nil
|
||||
|
||||
case <-fail:
|
||||
failed = true
|
||||
|
||||
case <-p.term:
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
65
eth/protocols/eth/discovery.go
Normal file
65
eth/protocols/eth/discovery.go
Normal file
@ -0,0 +1,65 @@
|
||||
// Copyright 2019 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package eth
|
||||
|
||||
import (
|
||||
"github.com/ethereum/go-ethereum/core"
|
||||
"github.com/ethereum/go-ethereum/core/forkid"
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
"github.com/ethereum/go-ethereum/rlp"
|
||||
)
|
||||
|
||||
// enrEntry is the ENR entry which advertises `eth` protocol on the discovery.
|
||||
type enrEntry struct {
|
||||
ForkID forkid.ID // Fork identifier per EIP-2124
|
||||
|
||||
// Ignore additional fields (for forward compatibility).
|
||||
Rest []rlp.RawValue `rlp:"tail"`
|
||||
}
|
||||
|
||||
// ENRKey implements enr.Entry.
|
||||
func (e enrEntry) ENRKey() string {
|
||||
return "eth"
|
||||
}
|
||||
|
||||
// StartENRUpdater starts the `eth` ENR updater loop, which listens for chain
|
||||
// head events and updates the requested node record whenever a fork is passed.
|
||||
func StartENRUpdater(chain *core.BlockChain, ln *enode.LocalNode) {
|
||||
var newHead = make(chan core.ChainHeadEvent, 10)
|
||||
sub := chain.SubscribeChainHeadEvent(newHead)
|
||||
|
||||
go func() {
|
||||
defer sub.Unsubscribe()
|
||||
for {
|
||||
select {
|
||||
case <-newHead:
|
||||
ln.Set(currentENREntry(chain))
|
||||
case <-sub.Err():
|
||||
// Would be nice to sync with Stop, but there is no
|
||||
// good way to do that.
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
// currentENREntry constructs an `eth` ENR entry based on the current state of the chain.
|
||||
func currentENREntry(chain *core.BlockChain) *enrEntry {
|
||||
return &enrEntry{
|
||||
ForkID: forkid.NewID(chain.Config(), chain.Genesis().Hash(), chain.CurrentHeader().Number.Uint64()),
|
||||
}
|
||||
}
|
512
eth/protocols/eth/handler.go
Normal file
512
eth/protocols/eth/handler.go
Normal file
@ -0,0 +1,512 @@
|
||||
// Copyright 2020 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package eth
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"math/big"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/core"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"github.com/ethereum/go-ethereum/p2p"
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
"github.com/ethereum/go-ethereum/p2p/enr"
|
||||
"github.com/ethereum/go-ethereum/params"
|
||||
"github.com/ethereum/go-ethereum/rlp"
|
||||
"github.com/ethereum/go-ethereum/trie"
|
||||
)
|
||||
|
||||
const (
|
||||
// softResponseLimit is the target maximum size of replies to data retrievals.
|
||||
softResponseLimit = 2 * 1024 * 1024
|
||||
|
||||
// estHeaderSize is the approximate size of an RLP encoded block header.
|
||||
estHeaderSize = 500
|
||||
|
||||
// maxHeadersServe is the maximum number of block headers to serve. This number
|
||||
// is there to limit the number of disk lookups.
|
||||
maxHeadersServe = 1024
|
||||
|
||||
// maxBodiesServe is the maximum number of block bodies to serve. This number
|
||||
// is mostly there to limit the number of disk lookups. With 24KB block sizes
|
||||
// nowadays, the practical limit will always be softResponseLimit.
|
||||
maxBodiesServe = 1024
|
||||
|
||||
// maxNodeDataServe is the maximum number of state trie nodes to serve. This
|
||||
// number is there to limit the number of disk lookups.
|
||||
maxNodeDataServe = 1024
|
||||
|
||||
// maxReceiptsServe is the maximum number of block receipts to serve. This
|
||||
// number is mostly there to limit the number of disk lookups. With block
|
||||
// containing 200+ transactions nowadays, the practical limit will always
|
||||
// be softResponseLimit.
|
||||
maxReceiptsServe = 1024
|
||||
)
|
||||
|
||||
// Handler is a callback to invoke from an outside runner after the boilerplate
|
||||
// exchanges have passed.
|
||||
type Handler func(peer *Peer) error
|
||||
|
||||
// Backend defines the data retrieval methods to serve remote requests and the
|
||||
// callback methods to invoke on remote deliveries.
|
||||
type Backend interface {
|
||||
// Chain retrieves the blockchain object to serve data.
|
||||
Chain() *core.BlockChain
|
||||
|
||||
// StateBloom retrieves the bloom filter - if any - for state trie nodes.
|
||||
StateBloom() *trie.SyncBloom
|
||||
|
||||
// TxPool retrieves the transaction pool object to serve data.
|
||||
TxPool() TxPool
|
||||
|
||||
// AcceptTxs retrieves whether transaction processing is enabled on the node
|
||||
// or if inbound transactions should simply be dropped.
|
||||
AcceptTxs() bool
|
||||
|
||||
// RunPeer is invoked when a peer joins on the `eth` protocol. The handler
|
||||
// should do any peer maintenance work, handshakes and validations. If all
|
||||
// is passed, control should be given back to the `handler` to process the
|
||||
// inbound messages going forward.
|
||||
RunPeer(peer *Peer, handler Handler) error
|
||||
|
||||
// PeerInfo retrieves all known `eth` information about a peer.
|
||||
PeerInfo(id enode.ID) interface{}
|
||||
|
||||
// Handle is a callback to be invoked when a data packet is received from
|
||||
// the remote peer. Only packets not consumed by the protocol handler will
|
||||
// be forwarded to the backend.
|
||||
Handle(peer *Peer, packet Packet) error
|
||||
}
|
||||
|
||||
// TxPool defines the methods needed by the protocol handler to serve transactions.
|
||||
type TxPool interface {
|
||||
// Get retrieves the the transaction from the local txpool with the given hash.
|
||||
Get(hash common.Hash) *types.Transaction
|
||||
}
|
||||
|
||||
// MakeProtocols constructs the P2P protocol definitions for `eth`.
|
||||
func MakeProtocols(backend Backend, network uint64, dnsdisc enode.Iterator) []p2p.Protocol {
|
||||
protocols := make([]p2p.Protocol, len(protocolVersions))
|
||||
for i, version := range protocolVersions {
|
||||
version := version // Closure
|
||||
|
||||
protocols[i] = p2p.Protocol{
|
||||
Name: protocolName,
|
||||
Version: version,
|
||||
Length: protocolLengths[version],
|
||||
Run: func(p *p2p.Peer, rw p2p.MsgReadWriter) error {
|
||||
peer := NewPeer(version, p, rw, backend.TxPool())
|
||||
defer peer.Close()
|
||||
|
||||
return backend.RunPeer(peer, func(peer *Peer) error {
|
||||
return Handle(backend, peer)
|
||||
})
|
||||
},
|
||||
NodeInfo: func() interface{} {
|
||||
return nodeInfo(backend.Chain(), network)
|
||||
},
|
||||
PeerInfo: func(id enode.ID) interface{} {
|
||||
return backend.PeerInfo(id)
|
||||
},
|
||||
Attributes: []enr.Entry{currentENREntry(backend.Chain())},
|
||||
DialCandidates: dnsdisc,
|
||||
}
|
||||
}
|
||||
return protocols
|
||||
}
|
||||
|
||||
// NodeInfo represents a short summary of the `eth` sub-protocol metadata
|
||||
// known about the host peer.
|
||||
type NodeInfo struct {
|
||||
Network uint64 `json:"network"` // Ethereum network ID (1=Frontier, 2=Morden, Ropsten=3, Rinkeby=4)
|
||||
Difficulty *big.Int `json:"difficulty"` // Total difficulty of the host's blockchain
|
||||
Genesis common.Hash `json:"genesis"` // SHA3 hash of the host's genesis block
|
||||
Config *params.ChainConfig `json:"config"` // Chain configuration for the fork rules
|
||||
Head common.Hash `json:"head"` // Hex hash of the host's best owned block
|
||||
}
|
||||
|
||||
// nodeInfo retrieves some `eth` protocol metadata about the running host node.
|
||||
func nodeInfo(chain *core.BlockChain, network uint64) *NodeInfo {
|
||||
head := chain.CurrentBlock()
|
||||
return &NodeInfo{
|
||||
Network: network,
|
||||
Difficulty: chain.GetTd(head.Hash(), head.NumberU64()),
|
||||
Genesis: chain.Genesis().Hash(),
|
||||
Config: chain.Config(),
|
||||
Head: head.Hash(),
|
||||
}
|
||||
}
|
||||
|
||||
// Handle is invoked whenever an `eth` connection is made that successfully passes
|
||||
// the protocol handshake. This method will keep processing messages until the
|
||||
// connection is torn down.
|
||||
func Handle(backend Backend, peer *Peer) error {
|
||||
for {
|
||||
if err := handleMessage(backend, peer); err != nil {
|
||||
peer.Log().Debug("Message handling failed in `eth`", "err", err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// handleMessage is invoked whenever an inbound message is received from a remote
|
||||
// peer. The remote connection is torn down upon returning any error.
|
||||
func handleMessage(backend Backend, peer *Peer) error {
|
||||
// Read the next message from the remote peer, and ensure it's fully consumed
|
||||
msg, err := peer.rw.ReadMsg()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if msg.Size > maxMessageSize {
|
||||
return fmt.Errorf("%w: %v > %v", errMsgTooLarge, msg.Size, maxMessageSize)
|
||||
}
|
||||
defer msg.Discard()
|
||||
|
||||
// Handle the message depending on its contents
|
||||
switch {
|
||||
case msg.Code == StatusMsg:
|
||||
// Status messages should never arrive after the handshake
|
||||
return fmt.Errorf("%w: uncontrolled status message", errExtraStatusMsg)
|
||||
|
||||
// Block header query, collect the requested headers and reply
|
||||
case msg.Code == GetBlockHeadersMsg:
|
||||
// Decode the complex header query
|
||||
var query GetBlockHeadersPacket
|
||||
if err := msg.Decode(&query); err != nil {
|
||||
return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
|
||||
}
|
||||
hashMode := query.Origin.Hash != (common.Hash{})
|
||||
first := true
|
||||
maxNonCanonical := uint64(100)
|
||||
|
||||
// Gather headers until the fetch or network limits is reached
|
||||
var (
|
||||
bytes common.StorageSize
|
||||
headers []*types.Header
|
||||
unknown bool
|
||||
lookups int
|
||||
)
|
||||
for !unknown && len(headers) < int(query.Amount) && bytes < softResponseLimit &&
|
||||
len(headers) < maxHeadersServe && lookups < 2*maxHeadersServe {
|
||||
lookups++
|
||||
// Retrieve the next header satisfying the query
|
||||
var origin *types.Header
|
||||
if hashMode {
|
||||
if first {
|
||||
first = false
|
||||
origin = backend.Chain().GetHeaderByHash(query.Origin.Hash)
|
||||
if origin != nil {
|
||||
query.Origin.Number = origin.Number.Uint64()
|
||||
}
|
||||
} else {
|
||||
origin = backend.Chain().GetHeader(query.Origin.Hash, query.Origin.Number)
|
||||
}
|
||||
} else {
|
||||
origin = backend.Chain().GetHeaderByNumber(query.Origin.Number)
|
||||
}
|
||||
if origin == nil {
|
||||
break
|
||||
}
|
||||
headers = append(headers, origin)
|
||||
bytes += estHeaderSize
|
||||
|
||||
// Advance to the next header of the query
|
||||
switch {
|
||||
case hashMode && query.Reverse:
|
||||
// Hash based traversal towards the genesis block
|
||||
ancestor := query.Skip + 1
|
||||
if ancestor == 0 {
|
||||
unknown = true
|
||||
} else {
|
||||
query.Origin.Hash, query.Origin.Number = backend.Chain().GetAncestor(query.Origin.Hash, query.Origin.Number, ancestor, &maxNonCanonical)
|
||||
unknown = (query.Origin.Hash == common.Hash{})
|
||||
}
|
||||
case hashMode && !query.Reverse:
|
||||
// Hash based traversal towards the leaf block
|
||||
var (
|
||||
current = origin.Number.Uint64()
|
||||
next = current + query.Skip + 1
|
||||
)
|
||||
if next <= current {
|
||||
infos, _ := json.MarshalIndent(peer.Peer.Info(), "", " ")
|
||||
peer.Log().Warn("GetBlockHeaders skip overflow attack", "current", current, "skip", query.Skip, "next", next, "attacker", infos)
|
||||
unknown = true
|
||||
} else {
|
||||
if header := backend.Chain().GetHeaderByNumber(next); header != nil {
|
||||
nextHash := header.Hash()
|
||||
expOldHash, _ := backend.Chain().GetAncestor(nextHash, next, query.Skip+1, &maxNonCanonical)
|
||||
if expOldHash == query.Origin.Hash {
|
||||
query.Origin.Hash, query.Origin.Number = nextHash, next
|
||||
} else {
|
||||
unknown = true
|
||||
}
|
||||
} else {
|
||||
unknown = true
|
||||
}
|
||||
}
|
||||
case query.Reverse:
|
||||
// Number based traversal towards the genesis block
|
||||
if query.Origin.Number >= query.Skip+1 {
|
||||
query.Origin.Number -= query.Skip + 1
|
||||
} else {
|
||||
unknown = true
|
||||
}
|
||||
|
||||
case !query.Reverse:
|
||||
// Number based traversal towards the leaf block
|
||||
query.Origin.Number += query.Skip + 1
|
||||
}
|
||||
}
|
||||
return peer.SendBlockHeaders(headers)
|
||||
|
||||
case msg.Code == BlockHeadersMsg:
|
||||
// A batch of headers arrived to one of our previous requests
|
||||
res := new(BlockHeadersPacket)
|
||||
if err := msg.Decode(res); err != nil {
|
||||
return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
|
||||
}
|
||||
return backend.Handle(peer, res)
|
||||
|
||||
case msg.Code == GetBlockBodiesMsg:
|
||||
// Decode the block body retrieval message
|
||||
var query GetBlockBodiesPacket
|
||||
if err := msg.Decode(&query); err != nil {
|
||||
return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
|
||||
}
|
||||
// Gather blocks until the fetch or network limits is reached
|
||||
var (
|
||||
bytes int
|
||||
bodies []rlp.RawValue
|
||||
)
|
||||
for lookups, hash := range query {
|
||||
if bytes >= softResponseLimit || len(bodies) >= maxBodiesServe ||
|
||||
lookups >= 2*maxBodiesServe {
|
||||
break
|
||||
}
|
||||
if data := backend.Chain().GetBodyRLP(hash); len(data) != 0 {
|
||||
bodies = append(bodies, data)
|
||||
bytes += len(data)
|
||||
}
|
||||
}
|
||||
return peer.SendBlockBodiesRLP(bodies)
|
||||
|
||||
case msg.Code == BlockBodiesMsg:
|
||||
// A batch of block bodies arrived to one of our previous requests
|
||||
res := new(BlockBodiesPacket)
|
||||
if err := msg.Decode(res); err != nil {
|
||||
return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
|
||||
}
|
||||
return backend.Handle(peer, res)
|
||||
|
||||
case msg.Code == GetNodeDataMsg:
|
||||
// Decode the trie node data retrieval message
|
||||
var query GetNodeDataPacket
|
||||
if err := msg.Decode(&query); err != nil {
|
||||
return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
|
||||
}
|
||||
// Gather state data until the fetch or network limits is reached
|
||||
var (
|
||||
bytes int
|
||||
nodes [][]byte
|
||||
)
|
||||
for lookups, hash := range query {
|
||||
if bytes >= softResponseLimit || len(nodes) >= maxNodeDataServe ||
|
||||
lookups >= 2*maxNodeDataServe {
|
||||
break
|
||||
}
|
||||
// Retrieve the requested state entry
|
||||
if bloom := backend.StateBloom(); bloom != nil && !bloom.Contains(hash[:]) {
|
||||
// Only lookup the trie node if there's chance that we actually have it
|
||||
continue
|
||||
}
|
||||
entry, err := backend.Chain().TrieNode(hash)
|
||||
if len(entry) == 0 || err != nil {
|
||||
// Read the contract code with prefix only to save unnecessary lookups.
|
||||
entry, err = backend.Chain().ContractCodeWithPrefix(hash)
|
||||
}
|
||||
if err == nil && len(entry) > 0 {
|
||||
nodes = append(nodes, entry)
|
||||
bytes += len(entry)
|
||||
}
|
||||
}
|
||||
return peer.SendNodeData(nodes)
|
||||
|
||||
case msg.Code == NodeDataMsg:
|
||||
// A batch of node state data arrived to one of our previous requests
|
||||
res := new(NodeDataPacket)
|
||||
if err := msg.Decode(res); err != nil {
|
||||
return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
|
||||
}
|
||||
return backend.Handle(peer, res)
|
||||
|
||||
case msg.Code == GetReceiptsMsg:
|
||||
// Decode the block receipts retrieval message
|
||||
var query GetReceiptsPacket
|
||||
if err := msg.Decode(&query); err != nil {
|
||||
return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
|
||||
}
|
||||
// Gather state data until the fetch or network limits is reached
|
||||
var (
|
||||
bytes int
|
||||
receipts []rlp.RawValue
|
||||
)
|
||||
for lookups, hash := range query {
|
||||
if bytes >= softResponseLimit || len(receipts) >= maxReceiptsServe ||
|
||||
lookups >= 2*maxReceiptsServe {
|
||||
break
|
||||
}
|
||||
// Retrieve the requested block's receipts
|
||||
results := backend.Chain().GetReceiptsByHash(hash)
|
||||
if results == nil {
|
||||
if header := backend.Chain().GetHeaderByHash(hash); header == nil || header.ReceiptHash != types.EmptyRootHash {
|
||||
continue
|
||||
}
|
||||
}
|
||||
// If known, encode and queue for response packet
|
||||
if encoded, err := rlp.EncodeToBytes(results); err != nil {
|
||||
log.Error("Failed to encode receipt", "err", err)
|
||||
} else {
|
||||
receipts = append(receipts, encoded)
|
||||
bytes += len(encoded)
|
||||
}
|
||||
}
|
||||
return peer.SendReceiptsRLP(receipts)
|
||||
|
||||
case msg.Code == ReceiptsMsg:
|
||||
// A batch of receipts arrived to one of our previous requests
|
||||
res := new(ReceiptsPacket)
|
||||
if err := msg.Decode(res); err != nil {
|
||||
return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
|
||||
}
|
||||
return backend.Handle(peer, res)
|
||||
|
||||
case msg.Code == NewBlockHashesMsg:
|
||||
// A batch of new block announcements just arrived
|
||||
ann := new(NewBlockHashesPacket)
|
||||
if err := msg.Decode(ann); err != nil {
|
||||
return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
|
||||
}
|
||||
// Mark the hashes as present at the remote node
|
||||
for _, block := range *ann {
|
||||
peer.markBlock(block.Hash)
|
||||
}
|
||||
// Deliver them all to the backend for queuing
|
||||
return backend.Handle(peer, ann)
|
||||
|
||||
case msg.Code == NewBlockMsg:
|
||||
// Retrieve and decode the propagated block
|
||||
ann := new(NewBlockPacket)
|
||||
if err := msg.Decode(ann); err != nil {
|
||||
return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
|
||||
}
|
||||
if hash := types.CalcUncleHash(ann.Block.Uncles()); hash != ann.Block.UncleHash() {
|
||||
log.Warn("Propagated block has invalid uncles", "have", hash, "exp", ann.Block.UncleHash())
|
||||
break // TODO(karalabe): return error eventually, but wait a few releases
|
||||
}
|
||||
if hash := types.DeriveSha(ann.Block.Transactions(), trie.NewStackTrie(nil)); hash != ann.Block.TxHash() {
|
||||
log.Warn("Propagated block has invalid body", "have", hash, "exp", ann.Block.TxHash())
|
||||
break // TODO(karalabe): return error eventually, but wait a few releases
|
||||
}
|
||||
if err := ann.sanityCheck(); err != nil {
|
||||
return err
|
||||
}
|
||||
ann.Block.ReceivedAt = msg.ReceivedAt
|
||||
ann.Block.ReceivedFrom = peer
|
||||
|
||||
// Mark the peer as owning the block
|
||||
peer.markBlock(ann.Block.Hash())
|
||||
|
||||
return backend.Handle(peer, ann)
|
||||
|
||||
case msg.Code == NewPooledTransactionHashesMsg && peer.version >= ETH65:
|
||||
// New transaction announcement arrived, make sure we have
|
||||
// a valid and fresh chain to handle them
|
||||
if !backend.AcceptTxs() {
|
||||
break
|
||||
}
|
||||
ann := new(NewPooledTransactionHashesPacket)
|
||||
if err := msg.Decode(ann); err != nil {
|
||||
return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
|
||||
}
|
||||
// Schedule all the unknown hashes for retrieval
|
||||
for _, hash := range *ann {
|
||||
peer.markTransaction(hash)
|
||||
}
|
||||
return backend.Handle(peer, ann)
|
||||
|
||||
case msg.Code == GetPooledTransactionsMsg && peer.version >= ETH65:
|
||||
// Decode the pooled transactions retrieval message
|
||||
var query GetPooledTransactionsPacket
|
||||
if err := msg.Decode(&query); err != nil {
|
||||
return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
|
||||
}
|
||||
// Gather transactions until the fetch or network limits is reached
|
||||
var (
|
||||
bytes int
|
||||
hashes []common.Hash
|
||||
txs []rlp.RawValue
|
||||
)
|
||||
for _, hash := range query {
|
||||
if bytes >= softResponseLimit {
|
||||
break
|
||||
}
|
||||
// Retrieve the requested transaction, skipping if unknown to us
|
||||
tx := backend.TxPool().Get(hash)
|
||||
if tx == nil {
|
||||
continue
|
||||
}
|
||||
// If known, encode and queue for response packet
|
||||
if encoded, err := rlp.EncodeToBytes(tx); err != nil {
|
||||
log.Error("Failed to encode transaction", "err", err)
|
||||
} else {
|
||||
hashes = append(hashes, hash)
|
||||
txs = append(txs, encoded)
|
||||
bytes += len(encoded)
|
||||
}
|
||||
}
|
||||
return peer.SendPooledTransactionsRLP(hashes, txs)
|
||||
|
||||
case msg.Code == TransactionsMsg || (msg.Code == PooledTransactionsMsg && peer.version >= ETH65):
|
||||
// Transactions arrived, make sure we have a valid and fresh chain to handle them
|
||||
if !backend.AcceptTxs() {
|
||||
break
|
||||
}
|
||||
// Transactions can be processed, parse all of them and deliver to the pool
|
||||
var txs []*types.Transaction
|
||||
if err := msg.Decode(&txs); err != nil {
|
||||
return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
|
||||
}
|
||||
for i, tx := range txs {
|
||||
// Validate and mark the remote transaction
|
||||
if tx == nil {
|
||||
return fmt.Errorf("%w: transaction %d is nil", errDecode, i)
|
||||
}
|
||||
peer.markTransaction(tx.Hash())
|
||||
}
|
||||
if msg.Code == PooledTransactionsMsg {
|
||||
return backend.Handle(peer, (*PooledTransactionsPacket)(&txs))
|
||||
}
|
||||
return backend.Handle(peer, (*TransactionsPacket)(&txs))
|
||||
|
||||
default:
|
||||
return fmt.Errorf("%w: %v", errInvalidMsgCode, msg.Code)
|
||||
}
|
||||
return nil
|
||||
}
|
519
eth/protocols/eth/handler_test.go
Normal file
519
eth/protocols/eth/handler_test.go
Normal file
@ -0,0 +1,519 @@
|
||||
// Copyright 2015 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package eth
|
||||
|
||||
import (
|
||||
"math"
|
||||
"math/big"
|
||||
"math/rand"
|
||||
"testing"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/consensus/ethash"
|
||||
"github.com/ethereum/go-ethereum/core"
|
||||
"github.com/ethereum/go-ethereum/core/rawdb"
|
||||
"github.com/ethereum/go-ethereum/core/state"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/core/vm"
|
||||
"github.com/ethereum/go-ethereum/crypto"
|
||||
"github.com/ethereum/go-ethereum/ethdb"
|
||||
"github.com/ethereum/go-ethereum/p2p"
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
"github.com/ethereum/go-ethereum/params"
|
||||
"github.com/ethereum/go-ethereum/trie"
|
||||
)
|
||||
|
||||
var (
|
||||
// testKey is a private key to use for funding a tester account.
|
||||
testKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
|
||||
|
||||
// testAddr is the Ethereum address of the tester account.
|
||||
testAddr = crypto.PubkeyToAddress(testKey.PublicKey)
|
||||
)
|
||||
|
||||
// testBackend is a mock implementation of the live Ethereum message handler. Its
|
||||
// purpose is to allow testing the request/reply workflows and wire serialization
|
||||
// in the `eth` protocol without actually doing any data processing.
|
||||
type testBackend struct {
|
||||
db ethdb.Database
|
||||
chain *core.BlockChain
|
||||
txpool *core.TxPool
|
||||
}
|
||||
|
||||
// newTestBackend creates an empty chain and wraps it into a mock backend.
|
||||
func newTestBackend(blocks int) *testBackend {
|
||||
return newTestBackendWithGenerator(blocks, nil)
|
||||
}
|
||||
|
||||
// newTestBackend creates a chain with a number of explicitly defined blocks and
|
||||
// wraps it into a mock backend.
|
||||
func newTestBackendWithGenerator(blocks int, generator func(int, *core.BlockGen)) *testBackend {
|
||||
// Create a database pre-initialize with a genesis block
|
||||
db := rawdb.NewMemoryDatabase()
|
||||
(&core.Genesis{
|
||||
Config: params.TestChainConfig,
|
||||
Alloc: core.GenesisAlloc{testAddr: {Balance: big.NewInt(1000000)}},
|
||||
}).MustCommit(db)
|
||||
|
||||
chain, _ := core.NewBlockChain(db, nil, params.TestChainConfig, ethash.NewFaker(), vm.Config{}, nil, nil)
|
||||
|
||||
bs, _ := core.GenerateChain(params.TestChainConfig, chain.Genesis(), ethash.NewFaker(), db, blocks, generator)
|
||||
if _, err := chain.InsertChain(bs); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
txconfig := core.DefaultTxPoolConfig
|
||||
txconfig.Journal = "" // Don't litter the disk with test journals
|
||||
|
||||
return &testBackend{
|
||||
db: db,
|
||||
chain: chain,
|
||||
txpool: core.NewTxPool(txconfig, params.TestChainConfig, chain),
|
||||
}
|
||||
}
|
||||
|
||||
// close tears down the transaction pool and chain behind the mock backend.
|
||||
func (b *testBackend) close() {
|
||||
b.txpool.Stop()
|
||||
b.chain.Stop()
|
||||
}
|
||||
|
||||
func (b *testBackend) Chain() *core.BlockChain { return b.chain }
|
||||
func (b *testBackend) StateBloom() *trie.SyncBloom { return nil }
|
||||
func (b *testBackend) TxPool() TxPool { return b.txpool }
|
||||
|
||||
func (b *testBackend) RunPeer(peer *Peer, handler Handler) error {
|
||||
// Normally the backend would do peer mainentance and handshakes. All that
|
||||
// is omitted and we will just give control back to the handler.
|
||||
return handler(peer)
|
||||
}
|
||||
func (b *testBackend) PeerInfo(enode.ID) interface{} { panic("not implemented") }
|
||||
|
||||
func (b *testBackend) AcceptTxs() bool {
|
||||
panic("data processing tests should be done in the handler package")
|
||||
}
|
||||
func (b *testBackend) Handle(*Peer, Packet) error {
|
||||
panic("data processing tests should be done in the handler package")
|
||||
}
|
||||
|
||||
// Tests that block headers can be retrieved from a remote chain based on user queries.
|
||||
func TestGetBlockHeaders64(t *testing.T) { testGetBlockHeaders(t, 64) }
|
||||
func TestGetBlockHeaders65(t *testing.T) { testGetBlockHeaders(t, 65) }
|
||||
|
||||
func testGetBlockHeaders(t *testing.T, protocol uint) {
|
||||
t.Parallel()
|
||||
|
||||
backend := newTestBackend(maxHeadersServe + 15)
|
||||
defer backend.close()
|
||||
|
||||
peer, _ := newTestPeer("peer", protocol, backend)
|
||||
defer peer.close()
|
||||
|
||||
// Create a "random" unknown hash for testing
|
||||
var unknown common.Hash
|
||||
for i := range unknown {
|
||||
unknown[i] = byte(i)
|
||||
}
|
||||
// Create a batch of tests for various scenarios
|
||||
limit := uint64(maxHeadersServe)
|
||||
tests := []struct {
|
||||
query *GetBlockHeadersPacket // The query to execute for header retrieval
|
||||
expect []common.Hash // The hashes of the block whose headers are expected
|
||||
}{
|
||||
// A single random block should be retrievable by hash and number too
|
||||
{
|
||||
&GetBlockHeadersPacket{Origin: HashOrNumber{Hash: backend.chain.GetBlockByNumber(limit / 2).Hash()}, Amount: 1},
|
||||
[]common.Hash{backend.chain.GetBlockByNumber(limit / 2).Hash()},
|
||||
}, {
|
||||
&GetBlockHeadersPacket{Origin: HashOrNumber{Number: limit / 2}, Amount: 1},
|
||||
[]common.Hash{backend.chain.GetBlockByNumber(limit / 2).Hash()},
|
||||
},
|
||||
// Multiple headers should be retrievable in both directions
|
||||
{
|
||||
&GetBlockHeadersPacket{Origin: HashOrNumber{Number: limit / 2}, Amount: 3},
|
||||
[]common.Hash{
|
||||
backend.chain.GetBlockByNumber(limit / 2).Hash(),
|
||||
backend.chain.GetBlockByNumber(limit/2 + 1).Hash(),
|
||||
backend.chain.GetBlockByNumber(limit/2 + 2).Hash(),
|
||||
},
|
||||
}, {
|
||||
&GetBlockHeadersPacket{Origin: HashOrNumber{Number: limit / 2}, Amount: 3, Reverse: true},
|
||||
[]common.Hash{
|
||||
backend.chain.GetBlockByNumber(limit / 2).Hash(),
|
||||
backend.chain.GetBlockByNumber(limit/2 - 1).Hash(),
|
||||
backend.chain.GetBlockByNumber(limit/2 - 2).Hash(),
|
||||
},
|
||||
},
|
||||
// Multiple headers with skip lists should be retrievable
|
||||
{
|
||||
&GetBlockHeadersPacket{Origin: HashOrNumber{Number: limit / 2}, Skip: 3, Amount: 3},
|
||||
[]common.Hash{
|
||||
backend.chain.GetBlockByNumber(limit / 2).Hash(),
|
||||
backend.chain.GetBlockByNumber(limit/2 + 4).Hash(),
|
||||
backend.chain.GetBlockByNumber(limit/2 + 8).Hash(),
|
||||
},
|
||||
}, {
|
||||
&GetBlockHeadersPacket{Origin: HashOrNumber{Number: limit / 2}, Skip: 3, Amount: 3, Reverse: true},
|
||||
[]common.Hash{
|
||||
backend.chain.GetBlockByNumber(limit / 2).Hash(),
|
||||
backend.chain.GetBlockByNumber(limit/2 - 4).Hash(),
|
||||
backend.chain.GetBlockByNumber(limit/2 - 8).Hash(),
|
||||
},
|
||||
},
|
||||
// The chain endpoints should be retrievable
|
||||
{
|
||||
&GetBlockHeadersPacket{Origin: HashOrNumber{Number: 0}, Amount: 1},
|
||||
[]common.Hash{backend.chain.GetBlockByNumber(0).Hash()},
|
||||
}, {
|
||||
&GetBlockHeadersPacket{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().NumberU64()}, Amount: 1},
|
||||
[]common.Hash{backend.chain.CurrentBlock().Hash()},
|
||||
},
|
||||
// Ensure protocol limits are honored
|
||||
{
|
||||
&GetBlockHeadersPacket{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().NumberU64() - 1}, Amount: limit + 10, Reverse: true},
|
||||
backend.chain.GetBlockHashesFromHash(backend.chain.CurrentBlock().Hash(), limit),
|
||||
},
|
||||
// Check that requesting more than available is handled gracefully
|
||||
{
|
||||
&GetBlockHeadersPacket{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().NumberU64() - 4}, Skip: 3, Amount: 3},
|
||||
[]common.Hash{
|
||||
backend.chain.GetBlockByNumber(backend.chain.CurrentBlock().NumberU64() - 4).Hash(),
|
||||
backend.chain.GetBlockByNumber(backend.chain.CurrentBlock().NumberU64()).Hash(),
|
||||
},
|
||||
}, {
|
||||
&GetBlockHeadersPacket{Origin: HashOrNumber{Number: 4}, Skip: 3, Amount: 3, Reverse: true},
|
||||
[]common.Hash{
|
||||
backend.chain.GetBlockByNumber(4).Hash(),
|
||||
backend.chain.GetBlockByNumber(0).Hash(),
|
||||
},
|
||||
},
|
||||
// Check that requesting more than available is handled gracefully, even if mid skip
|
||||
{
|
||||
&GetBlockHeadersPacket{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().NumberU64() - 4}, Skip: 2, Amount: 3},
|
||||
[]common.Hash{
|
||||
backend.chain.GetBlockByNumber(backend.chain.CurrentBlock().NumberU64() - 4).Hash(),
|
||||
backend.chain.GetBlockByNumber(backend.chain.CurrentBlock().NumberU64() - 1).Hash(),
|
||||
},
|
||||
}, {
|
||||
&GetBlockHeadersPacket{Origin: HashOrNumber{Number: 4}, Skip: 2, Amount: 3, Reverse: true},
|
||||
[]common.Hash{
|
||||
backend.chain.GetBlockByNumber(4).Hash(),
|
||||
backend.chain.GetBlockByNumber(1).Hash(),
|
||||
},
|
||||
},
|
||||
// Check a corner case where requesting more can iterate past the endpoints
|
||||
{
|
||||
&GetBlockHeadersPacket{Origin: HashOrNumber{Number: 2}, Amount: 5, Reverse: true},
|
||||
[]common.Hash{
|
||||
backend.chain.GetBlockByNumber(2).Hash(),
|
||||
backend.chain.GetBlockByNumber(1).Hash(),
|
||||
backend.chain.GetBlockByNumber(0).Hash(),
|
||||
},
|
||||
},
|
||||
// Check a corner case where skipping overflow loops back into the chain start
|
||||
{
|
||||
&GetBlockHeadersPacket{Origin: HashOrNumber{Hash: backend.chain.GetBlockByNumber(3).Hash()}, Amount: 2, Reverse: false, Skip: math.MaxUint64 - 1},
|
||||
[]common.Hash{
|
||||
backend.chain.GetBlockByNumber(3).Hash(),
|
||||
},
|
||||
},
|
||||
// Check a corner case where skipping overflow loops back to the same header
|
||||
{
|
||||
&GetBlockHeadersPacket{Origin: HashOrNumber{Hash: backend.chain.GetBlockByNumber(1).Hash()}, Amount: 2, Reverse: false, Skip: math.MaxUint64},
|
||||
[]common.Hash{
|
||||
backend.chain.GetBlockByNumber(1).Hash(),
|
||||
},
|
||||
},
|
||||
// Check that non existing headers aren't returned
|
||||
{
|
||||
&GetBlockHeadersPacket{Origin: HashOrNumber{Hash: unknown}, Amount: 1},
|
||||
[]common.Hash{},
|
||||
}, {
|
||||
&GetBlockHeadersPacket{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().NumberU64() + 1}, Amount: 1},
|
||||
[]common.Hash{},
|
||||
},
|
||||
}
|
||||
// Run each of the tests and verify the results against the chain
|
||||
for i, tt := range tests {
|
||||
// Collect the headers to expect in the response
|
||||
var headers []*types.Header
|
||||
for _, hash := range tt.expect {
|
||||
headers = append(headers, backend.chain.GetBlockByHash(hash).Header())
|
||||
}
|
||||
// Send the hash request and verify the response
|
||||
p2p.Send(peer.app, 0x03, tt.query)
|
||||
if err := p2p.ExpectMsg(peer.app, 0x04, headers); err != nil {
|
||||
t.Errorf("test %d: headers mismatch: %v", i, err)
|
||||
}
|
||||
// If the test used number origins, repeat with hashes as the too
|
||||
if tt.query.Origin.Hash == (common.Hash{}) {
|
||||
if origin := backend.chain.GetBlockByNumber(tt.query.Origin.Number); origin != nil {
|
||||
tt.query.Origin.Hash, tt.query.Origin.Number = origin.Hash(), 0
|
||||
|
||||
p2p.Send(peer.app, 0x03, tt.query)
|
||||
if err := p2p.ExpectMsg(peer.app, 0x04, headers); err != nil {
|
||||
t.Errorf("test %d: headers mismatch: %v", i, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Tests that block contents can be retrieved from a remote chain based on their hashes.
|
||||
func TestGetBlockBodies64(t *testing.T) { testGetBlockBodies(t, 64) }
|
||||
func TestGetBlockBodies65(t *testing.T) { testGetBlockBodies(t, 65) }
|
||||
|
||||
func testGetBlockBodies(t *testing.T, protocol uint) {
|
||||
t.Parallel()
|
||||
|
||||
backend := newTestBackend(maxBodiesServe + 15)
|
||||
defer backend.close()
|
||||
|
||||
peer, _ := newTestPeer("peer", protocol, backend)
|
||||
defer peer.close()
|
||||
|
||||
// Create a batch of tests for various scenarios
|
||||
limit := maxBodiesServe
|
||||
tests := []struct {
|
||||
random int // Number of blocks to fetch randomly from the chain
|
||||
explicit []common.Hash // Explicitly requested blocks
|
||||
available []bool // Availability of explicitly requested blocks
|
||||
expected int // Total number of existing blocks to expect
|
||||
}{
|
||||
{1, nil, nil, 1}, // A single random block should be retrievable
|
||||
{10, nil, nil, 10}, // Multiple random blocks should be retrievable
|
||||
{limit, nil, nil, limit}, // The maximum possible blocks should be retrievable
|
||||
{limit + 1, nil, nil, limit}, // No more than the possible block count should be returned
|
||||
{0, []common.Hash{backend.chain.Genesis().Hash()}, []bool{true}, 1}, // The genesis block should be retrievable
|
||||
{0, []common.Hash{backend.chain.CurrentBlock().Hash()}, []bool{true}, 1}, // The chains head block should be retrievable
|
||||
{0, []common.Hash{{}}, []bool{false}, 0}, // A non existent block should not be returned
|
||||
|
||||
// Existing and non-existing blocks interleaved should not cause problems
|
||||
{0, []common.Hash{
|
||||
{},
|
||||
backend.chain.GetBlockByNumber(1).Hash(),
|
||||
{},
|
||||
backend.chain.GetBlockByNumber(10).Hash(),
|
||||
{},
|
||||
backend.chain.GetBlockByNumber(100).Hash(),
|
||||
{},
|
||||
}, []bool{false, true, false, true, false, true, false}, 3},
|
||||
}
|
||||
// Run each of the tests and verify the results against the chain
|
||||
for i, tt := range tests {
|
||||
// Collect the hashes to request, and the response to expectva
|
||||
var (
|
||||
hashes []common.Hash
|
||||
bodies []*BlockBody
|
||||
seen = make(map[int64]bool)
|
||||
)
|
||||
for j := 0; j < tt.random; j++ {
|
||||
for {
|
||||
num := rand.Int63n(int64(backend.chain.CurrentBlock().NumberU64()))
|
||||
if !seen[num] {
|
||||
seen[num] = true
|
||||
|
||||
block := backend.chain.GetBlockByNumber(uint64(num))
|
||||
hashes = append(hashes, block.Hash())
|
||||
if len(bodies) < tt.expected {
|
||||
bodies = append(bodies, &BlockBody{Transactions: block.Transactions(), Uncles: block.Uncles()})
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
for j, hash := range tt.explicit {
|
||||
hashes = append(hashes, hash)
|
||||
if tt.available[j] && len(bodies) < tt.expected {
|
||||
block := backend.chain.GetBlockByHash(hash)
|
||||
bodies = append(bodies, &BlockBody{Transactions: block.Transactions(), Uncles: block.Uncles()})
|
||||
}
|
||||
}
|
||||
// Send the hash request and verify the response
|
||||
p2p.Send(peer.app, 0x05, hashes)
|
||||
if err := p2p.ExpectMsg(peer.app, 0x06, bodies); err != nil {
|
||||
t.Errorf("test %d: bodies mismatch: %v", i, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Tests that the state trie nodes can be retrieved based on hashes.
|
||||
func TestGetNodeData64(t *testing.T) { testGetNodeData(t, 64) }
|
||||
func TestGetNodeData65(t *testing.T) { testGetNodeData(t, 65) }
|
||||
|
||||
func testGetNodeData(t *testing.T, protocol uint) {
|
||||
t.Parallel()
|
||||
|
||||
// Define three accounts to simulate transactions with
|
||||
acc1Key, _ := crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a")
|
||||
acc2Key, _ := crypto.HexToECDSA("49a7b37aa6f6645917e7b807e9d1c00d4fa71f18343b0d4122a4d2df64dd6fee")
|
||||
acc1Addr := crypto.PubkeyToAddress(acc1Key.PublicKey)
|
||||
acc2Addr := crypto.PubkeyToAddress(acc2Key.PublicKey)
|
||||
|
||||
signer := types.HomesteadSigner{}
|
||||
// Create a chain generator with some simple transactions (blatantly stolen from @fjl/chain_markets_test)
|
||||
generator := func(i int, block *core.BlockGen) {
|
||||
switch i {
|
||||
case 0:
|
||||
// In block 1, the test bank sends account #1 some ether.
|
||||
tx, _ := types.SignTx(types.NewTransaction(block.TxNonce(testAddr), acc1Addr, big.NewInt(10000), params.TxGas, nil, nil), signer, testKey)
|
||||
block.AddTx(tx)
|
||||
case 1:
|
||||
// In block 2, the test bank sends some more ether to account #1.
|
||||
// acc1Addr passes it on to account #2.
|
||||
tx1, _ := types.SignTx(types.NewTransaction(block.TxNonce(testAddr), acc1Addr, big.NewInt(1000), params.TxGas, nil, nil), signer, testKey)
|
||||
tx2, _ := types.SignTx(types.NewTransaction(block.TxNonce(acc1Addr), acc2Addr, big.NewInt(1000), params.TxGas, nil, nil), signer, acc1Key)
|
||||
block.AddTx(tx1)
|
||||
block.AddTx(tx2)
|
||||
case 2:
|
||||
// Block 3 is empty but was mined by account #2.
|
||||
block.SetCoinbase(acc2Addr)
|
||||
block.SetExtra([]byte("yeehaw"))
|
||||
case 3:
|
||||
// Block 4 includes blocks 2 and 3 as uncle headers (with modified extra data).
|
||||
b2 := block.PrevBlock(1).Header()
|
||||
b2.Extra = []byte("foo")
|
||||
block.AddUncle(b2)
|
||||
b3 := block.PrevBlock(2).Header()
|
||||
b3.Extra = []byte("foo")
|
||||
block.AddUncle(b3)
|
||||
}
|
||||
}
|
||||
// Assemble the test environment
|
||||
backend := newTestBackendWithGenerator(4, generator)
|
||||
defer backend.close()
|
||||
|
||||
peer, _ := newTestPeer("peer", protocol, backend)
|
||||
defer peer.close()
|
||||
|
||||
// Fetch for now the entire chain db
|
||||
var hashes []common.Hash
|
||||
|
||||
it := backend.db.NewIterator(nil, nil)
|
||||
for it.Next() {
|
||||
if key := it.Key(); len(key) == common.HashLength {
|
||||
hashes = append(hashes, common.BytesToHash(key))
|
||||
}
|
||||
}
|
||||
it.Release()
|
||||
|
||||
p2p.Send(peer.app, 0x0d, hashes)
|
||||
msg, err := peer.app.ReadMsg()
|
||||
if err != nil {
|
||||
t.Fatalf("failed to read node data response: %v", err)
|
||||
}
|
||||
if msg.Code != 0x0e {
|
||||
t.Fatalf("response packet code mismatch: have %x, want %x", msg.Code, 0x0c)
|
||||
}
|
||||
var data [][]byte
|
||||
if err := msg.Decode(&data); err != nil {
|
||||
t.Fatalf("failed to decode response node data: %v", err)
|
||||
}
|
||||
// Verify that all hashes correspond to the requested data, and reconstruct a state tree
|
||||
for i, want := range hashes {
|
||||
if hash := crypto.Keccak256Hash(data[i]); hash != want {
|
||||
t.Errorf("data hash mismatch: have %x, want %x", hash, want)
|
||||
}
|
||||
}
|
||||
statedb := rawdb.NewMemoryDatabase()
|
||||
for i := 0; i < len(data); i++ {
|
||||
statedb.Put(hashes[i].Bytes(), data[i])
|
||||
}
|
||||
accounts := []common.Address{testAddr, acc1Addr, acc2Addr}
|
||||
for i := uint64(0); i <= backend.chain.CurrentBlock().NumberU64(); i++ {
|
||||
trie, _ := state.New(backend.chain.GetBlockByNumber(i).Root(), state.NewDatabase(statedb), nil)
|
||||
|
||||
for j, acc := range accounts {
|
||||
state, _ := backend.chain.State()
|
||||
bw := state.GetBalance(acc)
|
||||
bh := trie.GetBalance(acc)
|
||||
|
||||
if (bw != nil && bh == nil) || (bw == nil && bh != nil) {
|
||||
t.Errorf("test %d, account %d: balance mismatch: have %v, want %v", i, j, bh, bw)
|
||||
}
|
||||
if bw != nil && bh != nil && bw.Cmp(bw) != 0 {
|
||||
t.Errorf("test %d, account %d: balance mismatch: have %v, want %v", i, j, bh, bw)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Tests that the transaction receipts can be retrieved based on hashes.
|
||||
func TestGetBlockReceipts64(t *testing.T) { testGetBlockReceipts(t, 64) }
|
||||
func TestGetBlockReceipts65(t *testing.T) { testGetBlockReceipts(t, 65) }
|
||||
|
||||
func testGetBlockReceipts(t *testing.T, protocol uint) {
|
||||
t.Parallel()
|
||||
|
||||
// Define three accounts to simulate transactions with
|
||||
acc1Key, _ := crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a")
|
||||
acc2Key, _ := crypto.HexToECDSA("49a7b37aa6f6645917e7b807e9d1c00d4fa71f18343b0d4122a4d2df64dd6fee")
|
||||
acc1Addr := crypto.PubkeyToAddress(acc1Key.PublicKey)
|
||||
acc2Addr := crypto.PubkeyToAddress(acc2Key.PublicKey)
|
||||
|
||||
signer := types.HomesteadSigner{}
|
||||
// Create a chain generator with some simple transactions (blatantly stolen from @fjl/chain_markets_test)
|
||||
generator := func(i int, block *core.BlockGen) {
|
||||
switch i {
|
||||
case 0:
|
||||
// In block 1, the test bank sends account #1 some ether.
|
||||
tx, _ := types.SignTx(types.NewTransaction(block.TxNonce(testAddr), acc1Addr, big.NewInt(10000), params.TxGas, nil, nil), signer, testKey)
|
||||
block.AddTx(tx)
|
||||
case 1:
|
||||
// In block 2, the test bank sends some more ether to account #1.
|
||||
// acc1Addr passes it on to account #2.
|
||||
tx1, _ := types.SignTx(types.NewTransaction(block.TxNonce(testAddr), acc1Addr, big.NewInt(1000), params.TxGas, nil, nil), signer, testKey)
|
||||
tx2, _ := types.SignTx(types.NewTransaction(block.TxNonce(acc1Addr), acc2Addr, big.NewInt(1000), params.TxGas, nil, nil), signer, acc1Key)
|
||||
block.AddTx(tx1)
|
||||
block.AddTx(tx2)
|
||||
case 2:
|
||||
// Block 3 is empty but was mined by account #2.
|
||||
block.SetCoinbase(acc2Addr)
|
||||
block.SetExtra([]byte("yeehaw"))
|
||||
case 3:
|
||||
// Block 4 includes blocks 2 and 3 as uncle headers (with modified extra data).
|
||||
b2 := block.PrevBlock(1).Header()
|
||||
b2.Extra = []byte("foo")
|
||||
block.AddUncle(b2)
|
||||
b3 := block.PrevBlock(2).Header()
|
||||
b3.Extra = []byte("foo")
|
||||
block.AddUncle(b3)
|
||||
}
|
||||
}
|
||||
// Assemble the test environment
|
||||
backend := newTestBackendWithGenerator(4, generator)
|
||||
defer backend.close()
|
||||
|
||||
peer, _ := newTestPeer("peer", protocol, backend)
|
||||
defer peer.close()
|
||||
|
||||
// Collect the hashes to request, and the response to expect
|
||||
var (
|
||||
hashes []common.Hash
|
||||
receipts []types.Receipts
|
||||
)
|
||||
for i := uint64(0); i <= backend.chain.CurrentBlock().NumberU64(); i++ {
|
||||
block := backend.chain.GetBlockByNumber(i)
|
||||
|
||||
hashes = append(hashes, block.Hash())
|
||||
receipts = append(receipts, backend.chain.GetReceiptsByHash(block.Hash()))
|
||||
}
|
||||
// Send the hash request and verify the response
|
||||
p2p.Send(peer.app, 0x0f, hashes)
|
||||
if err := p2p.ExpectMsg(peer.app, 0x10, receipts); err != nil {
|
||||
t.Errorf("receipts mismatch: %v", err)
|
||||
}
|
||||
}
|
107
eth/protocols/eth/handshake.go
Normal file
107
eth/protocols/eth/handshake.go
Normal file
@ -0,0 +1,107 @@
|
||||
// Copyright 2015 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package eth
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math/big"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/core/forkid"
|
||||
"github.com/ethereum/go-ethereum/p2p"
|
||||
)
|
||||
|
||||
const (
|
||||
// handshakeTimeout is the maximum allowed time for the `eth` handshake to
|
||||
// complete before dropping the connection.= as malicious.
|
||||
handshakeTimeout = 5 * time.Second
|
||||
)
|
||||
|
||||
// Handshake executes the eth protocol handshake, negotiating version number,
|
||||
// network IDs, difficulties, head and genesis blocks.
|
||||
func (p *Peer) Handshake(network uint64, td *big.Int, head common.Hash, genesis common.Hash, forkID forkid.ID, forkFilter forkid.Filter) error {
|
||||
// Send out own handshake in a new thread
|
||||
errc := make(chan error, 2)
|
||||
|
||||
var status StatusPacket // safe to read after two values have been received from errc
|
||||
|
||||
go func() {
|
||||
errc <- p2p.Send(p.rw, StatusMsg, &StatusPacket{
|
||||
ProtocolVersion: uint32(p.version),
|
||||
NetworkID: network,
|
||||
TD: td,
|
||||
Head: head,
|
||||
Genesis: genesis,
|
||||
ForkID: forkID,
|
||||
})
|
||||
}()
|
||||
go func() {
|
||||
errc <- p.readStatus(network, &status, genesis, forkFilter)
|
||||
}()
|
||||
timeout := time.NewTimer(handshakeTimeout)
|
||||
defer timeout.Stop()
|
||||
for i := 0; i < 2; i++ {
|
||||
select {
|
||||
case err := <-errc:
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
case <-timeout.C:
|
||||
return p2p.DiscReadTimeout
|
||||
}
|
||||
}
|
||||
p.td, p.head = status.TD, status.Head
|
||||
|
||||
// TD at mainnet block #7753254 is 76 bits. If it becomes 100 million times
|
||||
// larger, it will still fit within 100 bits
|
||||
if tdlen := p.td.BitLen(); tdlen > 100 {
|
||||
return fmt.Errorf("too large total difficulty: bitlen %d", tdlen)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// readStatus reads the remote handshake message.
|
||||
func (p *Peer) readStatus(network uint64, status *StatusPacket, genesis common.Hash, forkFilter forkid.Filter) error {
|
||||
msg, err := p.rw.ReadMsg()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if msg.Code != StatusMsg {
|
||||
return fmt.Errorf("%w: first msg has code %x (!= %x)", errNoStatusMsg, msg.Code, StatusMsg)
|
||||
}
|
||||
if msg.Size > maxMessageSize {
|
||||
return fmt.Errorf("%w: %v > %v", errMsgTooLarge, msg.Size, maxMessageSize)
|
||||
}
|
||||
// Decode the handshake and make sure everything matches
|
||||
if err := msg.Decode(&status); err != nil {
|
||||
return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
|
||||
}
|
||||
if status.NetworkID != network {
|
||||
return fmt.Errorf("%w: %d (!= %d)", errNetworkIDMismatch, status.NetworkID, network)
|
||||
}
|
||||
if uint(status.ProtocolVersion) != p.version {
|
||||
return fmt.Errorf("%w: %d (!= %d)", errProtocolVersionMismatch, status.ProtocolVersion, p.version)
|
||||
}
|
||||
if status.Genesis != genesis {
|
||||
return fmt.Errorf("%w: %x (!= %x)", errGenesisMismatch, status.Genesis, genesis)
|
||||
}
|
||||
if err := forkFilter(status.ForkID); err != nil {
|
||||
return fmt.Errorf("%w: %v", errForkIDRejected, err)
|
||||
}
|
||||
return nil
|
||||
}
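The 100-bit cap on total difficulty enforced by Handshake above leaves ample headroom over realistic chain values; a standard-library sketch with illustrative numbers (not taken from this change):

package main

import (
	"fmt"
	"math/big"
)

func main() {
	// A mainnet-scale TD (~76-77 bits) passes the check, while anything
	// beyond 100 bits would be rejected by Handshake as implausible.
	td := new(big.Int).Lsh(big.NewInt(1), 76)    // illustrative, ~mainnet scale
	huge := new(big.Int).Lsh(big.NewInt(1), 120) // illustrative, absurdly large
	fmt.Println(td.BitLen() > 100)   // false -> accepted
	fmt.Println(huge.BitLen() > 100) // true  -> rejected
}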
|
91
eth/protocols/eth/handshake_test.go
Normal file
@ -0,0 +1,91 @@
|
||||
// Copyright 2014 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package eth
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"testing"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/core/forkid"
|
||||
"github.com/ethereum/go-ethereum/p2p"
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
)
|
||||
|
||||
// Tests that handshake failures are detected and reported correctly.
|
||||
func TestHandshake64(t *testing.T) { testHandshake(t, 64) }
|
||||
func TestHandshake65(t *testing.T) { testHandshake(t, 65) }
|
||||
|
||||
func testHandshake(t *testing.T, protocol uint) {
|
||||
t.Parallel()
|
||||
|
||||
// Create a test backend only to have some valid genesis chain
|
||||
backend := newTestBackend(3)
|
||||
defer backend.close()
|
||||
|
||||
var (
|
||||
genesis = backend.chain.Genesis()
|
||||
head = backend.chain.CurrentBlock()
|
||||
td = backend.chain.GetTd(head.Hash(), head.NumberU64())
|
||||
forkID = forkid.NewID(backend.chain.Config(), backend.chain.Genesis().Hash(), backend.chain.CurrentHeader().Number.Uint64())
|
||||
)
|
||||
tests := []struct {
|
||||
code uint64
|
||||
data interface{}
|
||||
want error
|
||||
}{
|
||||
{
|
||||
code: TransactionsMsg, data: []interface{}{},
|
||||
want: errNoStatusMsg,
|
||||
},
|
||||
{
|
||||
code: StatusMsg, data: StatusPacket{10, 1, td, head.Hash(), genesis.Hash(), forkID},
|
||||
want: errProtocolVersionMismatch,
|
||||
},
|
||||
{
|
||||
code: StatusMsg, data: StatusPacket{uint32(protocol), 999, td, head.Hash(), genesis.Hash(), forkID},
|
||||
want: errNetworkIDMismatch,
|
||||
},
|
||||
{
|
||||
code: StatusMsg, data: StatusPacket{uint32(protocol), 1, td, head.Hash(), common.Hash{3}, forkID},
|
||||
want: errGenesisMismatch,
|
||||
},
|
||||
{
|
||||
code: StatusMsg, data: StatusPacket{uint32(protocol), 1, td, head.Hash(), genesis.Hash(), forkid.ID{Hash: [4]byte{0x00, 0x01, 0x02, 0x03}}},
|
||||
want: errForkIDRejected,
|
||||
},
|
||||
}
|
||||
for i, test := range tests {
|
||||
// Create the two peers to shake with each other
|
||||
app, net := p2p.MsgPipe()
|
||||
defer app.Close()
|
||||
defer net.Close()
|
||||
|
||||
peer := NewPeer(protocol, p2p.NewPeer(enode.ID{}, "peer", nil), net, nil)
|
||||
defer peer.Close()
|
||||
|
||||
// Send the junk test with one peer, check the handshake failure
|
||||
go p2p.Send(app, test.code, test.data)
|
||||
|
||||
err := peer.Handshake(1, td, head.Hash(), genesis.Hash(), forkID, forkid.NewFilter(backend.chain))
|
||||
if err == nil {
|
||||
t.Errorf("test %d: protocol returned nil error, want %q", i, test.want)
|
||||
} else if !errors.Is(err, test.want) {
|
||||
t.Errorf("test %d: wrong error: got %q, want %q", i, err, test.want)
|
||||
}
|
||||
}
|
||||
}
|
429
eth/protocols/eth/peer.go
Normal file
@ -0,0 +1,429 @@
|
||||
// Copyright 2020 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package eth
|
||||
|
||||
import (
|
||||
"math/big"
|
||||
"sync"
|
||||
|
||||
mapset "github.com/deckarep/golang-set"
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/p2p"
|
||||
"github.com/ethereum/go-ethereum/rlp"
|
||||
)
|
||||
|
||||
const (
|
||||
// maxKnownTxs is the maximum transactions hashes to keep in the known list
|
||||
// before starting to randomly evict them.
|
||||
maxKnownTxs = 32768
|
||||
|
||||
// maxKnownBlocks is the maximum block hashes to keep in the known list
|
||||
// before starting to randomly evict them.
|
||||
maxKnownBlocks = 1024
|
||||
|
||||
// maxQueuedTxs is the maximum number of transactions to queue up before dropping
|
||||
// older broadcasts.
|
||||
maxQueuedTxs = 4096
|
||||
|
||||
// maxQueuedTxAnns is the maximum number of transaction announcements to queue up
|
||||
// before dropping older announcements.
|
||||
maxQueuedTxAnns = 4096
|
||||
|
||||
// maxQueuedBlocks is the maximum number of block propagations to queue up before
|
||||
// dropping broadcasts. There's not much point in queueing stale blocks, so a few
|
||||
// that might cover uncles should be enough.
|
||||
maxQueuedBlocks = 4
|
||||
|
||||
// maxQueuedBlockAnns is the maximum number of block announcements to queue up before
|
||||
// dropping broadcasts. Similarly to block propagations, there's no point to queue
|
||||
// above some healthy uncle limit, so use that.
|
||||
maxQueuedBlockAnns = 4
|
||||
)
|
||||
|
||||
// max is a helper function which returns the larger of the two given integers.
|
||||
func max(a, b int) int {
|
||||
if a > b {
|
||||
return a
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
// Peer is a collection of relevant information we have about an `eth` peer.
|
||||
type Peer struct {
|
||||
id string // Unique ID for the peer, cached
|
||||
|
||||
*p2p.Peer // The embedded P2P package peer
|
||||
rw p2p.MsgReadWriter // Input/output streams for eth
|
||||
version uint // Protocol version negotiated
|
||||
|
||||
head common.Hash // Latest advertised head block hash
|
||||
td *big.Int // Latest advertised head block total difficulty
|
||||
|
||||
knownBlocks mapset.Set // Set of block hashes known to be known by this peer
|
||||
queuedBlocks chan *blockPropagation // Queue of blocks to broadcast to the peer
|
||||
queuedBlockAnns chan *types.Block // Queue of blocks to announce to the peer
|
||||
|
||||
txpool TxPool // Transaction pool used by the broadcasters for liveness checks
|
||||
knownTxs mapset.Set // Set of transaction hashes known to be known by this peer
|
||||
txBroadcast chan []common.Hash // Channel used to queue transaction propagation requests
|
||||
txAnnounce chan []common.Hash // Channel used to queue transaction announcement requests
|
||||
|
||||
term chan struct{} // Termination channel to stop the broadcasters
|
||||
lock sync.RWMutex // Mutex protecting the internal fields
|
||||
}
|
||||
|
||||
// NewPeer creates a wrapper for a network connection and negotiated protocol
|
||||
// version.
|
||||
func NewPeer(version uint, p *p2p.Peer, rw p2p.MsgReadWriter, txpool TxPool) *Peer {
|
||||
peer := &Peer{
|
||||
id: p.ID().String(),
|
||||
Peer: p,
|
||||
rw: rw,
|
||||
version: version,
|
||||
knownTxs: mapset.NewSet(),
|
||||
knownBlocks: mapset.NewSet(),
|
||||
queuedBlocks: make(chan *blockPropagation, maxQueuedBlocks),
|
||||
queuedBlockAnns: make(chan *types.Block, maxQueuedBlockAnns),
|
||||
txBroadcast: make(chan []common.Hash),
|
||||
txAnnounce: make(chan []common.Hash),
|
||||
txpool: txpool,
|
||||
term: make(chan struct{}),
|
||||
}
|
||||
// Start up all the broadcasters
|
||||
go peer.broadcastBlocks()
|
||||
go peer.broadcastTransactions()
|
||||
if version >= ETH65 {
|
||||
go peer.announceTransactions()
|
||||
}
|
||||
return peer
|
||||
}
|
||||
|
||||
// Close signals the broadcast goroutine to terminate. Only ever call this if
|
||||
// you created the peer yourself via NewPeer. Otherwise let whoever created it
|
||||
// clean it up!
|
||||
func (p *Peer) Close() {
|
||||
close(p.term)
|
||||
}
|
||||
|
||||
// ID retrieves the peer's unique identifier.
|
||||
func (p *Peer) ID() string {
|
||||
return p.id
|
||||
}
|
||||
|
||||
// Version retrieves the peer's negotiated `eth` protocol version.
|
||||
func (p *Peer) Version() uint {
|
||||
return p.version
|
||||
}
|
||||
|
||||
// Head retrieves the current head hash and total difficulty of the peer.
|
||||
func (p *Peer) Head() (hash common.Hash, td *big.Int) {
|
||||
p.lock.RLock()
|
||||
defer p.lock.RUnlock()
|
||||
|
||||
copy(hash[:], p.head[:])
|
||||
return hash, new(big.Int).Set(p.td)
|
||||
}
|
||||
|
||||
// SetHead updates the head hash and total difficulty of the peer.
|
||||
func (p *Peer) SetHead(hash common.Hash, td *big.Int) {
|
||||
p.lock.Lock()
|
||||
defer p.lock.Unlock()
|
||||
|
||||
copy(p.head[:], hash[:])
|
||||
p.td.Set(td)
|
||||
}
|
||||
|
||||
// KnownBlock returns whether peer is known to already have a block.
|
||||
func (p *Peer) KnownBlock(hash common.Hash) bool {
|
||||
return p.knownBlocks.Contains(hash)
|
||||
}
|
||||
|
||||
// KnownTransaction returns whether peer is known to already have a transaction.
|
||||
func (p *Peer) KnownTransaction(hash common.Hash) bool {
|
||||
return p.knownTxs.Contains(hash)
|
||||
}
|
||||
|
||||
// markBlock marks a block as known for the peer, ensuring that the block will
|
||||
// never be propagated to this particular peer.
|
||||
func (p *Peer) markBlock(hash common.Hash) {
|
||||
// If we reached the memory allowance, drop a previously known block hash
|
||||
for p.knownBlocks.Cardinality() >= maxKnownBlocks {
|
||||
p.knownBlocks.Pop()
|
||||
}
|
||||
p.knownBlocks.Add(hash)
|
||||
}
|
||||
|
||||
// markTransaction marks a transaction as known for the peer, ensuring that it
|
||||
// will never be propagated to this particular peer.
|
||||
func (p *Peer) markTransaction(hash common.Hash) {
|
||||
// If we reached the memory allowance, drop a previously known transaction hash
|
||||
for p.knownTxs.Cardinality() >= maxKnownTxs {
|
||||
p.knownTxs.Pop()
|
||||
}
|
||||
p.knownTxs.Add(hash)
|
||||
}
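markBlock and markTransaction keep the known sets bounded by popping arbitrary entries before each insert. A self-contained sketch of just that eviction pattern, assuming only the mapset dependency this file already imports (the limit of 3 is illustrative):

package main

import (
	"fmt"

	mapset "github.com/deckarep/golang-set"
)

func main() {
	const maxKnown = 3 // illustrative stand-in for maxKnownTxs / maxKnownBlocks
	known := mapset.NewSet()
	for i := 0; i < 10; i++ {
		// Drop previously known entries once the allowance is reached.
		for known.Cardinality() >= maxKnown {
			known.Pop()
		}
		known.Add(i)
	}
	fmt.Println(known.Cardinality()) // never exceeds maxKnown
}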
|
||||
|
||||
// SendTransactions sends transactions to the peer and includes the hashes
|
||||
// in its transaction hash set for future reference.
|
||||
//
|
||||
// This method is a helper used by the async transaction sender. Don't call it
|
||||
// directly as the queueing (memory) and transmission (bandwidth) costs should
|
||||
// not be managed directly.
|
||||
//
|
||||
// The reason this is public is to allow packages using this protocol to write
|
||||
// tests that directly send messages without having to do the async queueing.
|
||||
func (p *Peer) SendTransactions(txs types.Transactions) error {
|
||||
// Mark all the transactions as known, but ensure we don't overflow our limits
|
||||
for p.knownTxs.Cardinality() > max(0, maxKnownTxs-len(txs)) {
|
||||
p.knownTxs.Pop()
|
||||
}
|
||||
for _, tx := range txs {
|
||||
p.knownTxs.Add(tx.Hash())
|
||||
}
|
||||
return p2p.Send(p.rw, TransactionsMsg, txs)
|
||||
}
|
||||
|
||||
// AsyncSendTransactions queues a list of transactions (by hash) to eventually
|
||||
// propagate to a remote peer. The number of pending sends is capped (new ones
|
||||
// will force old sends to be dropped).
|
||||
func (p *Peer) AsyncSendTransactions(hashes []common.Hash) {
|
||||
select {
|
||||
case p.txBroadcast <- hashes:
|
||||
// Mark all the transactions as known, but ensure we don't overflow our limits
|
||||
for p.knownTxs.Cardinality() > max(0, maxKnownTxs-len(hashes)) {
|
||||
p.knownTxs.Pop()
|
||||
}
|
||||
for _, hash := range hashes {
|
||||
p.knownTxs.Add(hash)
|
||||
}
|
||||
case <-p.term:
|
||||
p.Log().Debug("Dropping transaction propagation", "count", len(hashes))
|
||||
}
|
||||
}
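The async senders either hand work to the broadcaster goroutine or give up once the peer has been torn down. The same select shape in miniature, with illustrative names only (this is a sketch, not code from the change):

package main

import "fmt"

func main() {
	work := make(chan []string) // unbuffered, like txBroadcast / txAnnounce
	term := make(chan struct{}) // like Peer.term
	close(term)                 // simulate a peer whose broadcaster already exited

	select {
	case work <- []string{"0xabc..."}: // would block: nobody is reading
		fmt.Println("queued for broadcast")
	case <-term:
		fmt.Println("dropped: peer terminated") // this branch runs
	}
}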
|
||||
|
||||
// sendPooledTransactionHashes sends transaction hashes to the peer and includes
|
||||
// them in its transaction hash set for future reference.
|
||||
//
|
||||
// This method is a helper used by the async transaction announcer. Don't call it
|
||||
// directly as the queueing (memory) and transmission (bandwidth) costs should
|
||||
// not be managed directly.
|
||||
func (p *Peer) sendPooledTransactionHashes(hashes []common.Hash) error {
|
||||
// Mark all the transactions as known, but ensure we don't overflow our limits
|
||||
for p.knownTxs.Cardinality() > max(0, maxKnownTxs-len(hashes)) {
|
||||
p.knownTxs.Pop()
|
||||
}
|
||||
for _, hash := range hashes {
|
||||
p.knownTxs.Add(hash)
|
||||
}
|
||||
return p2p.Send(p.rw, NewPooledTransactionHashesMsg, NewPooledTransactionHashesPacket(hashes))
|
||||
}
|
||||
|
||||
// AsyncSendPooledTransactionHashes queues a list of transaction hashes to eventually
|
||||
// announce to a remote peer. The number of pending sends is capped (new ones
|
||||
// will force old sends to be dropped).
|
||||
func (p *Peer) AsyncSendPooledTransactionHashes(hashes []common.Hash) {
|
||||
select {
|
||||
case p.txAnnounce <- hashes:
|
||||
// Mark all the transactions as known, but ensure we don't overflow our limits
|
||||
for p.knownTxs.Cardinality() > max(0, maxKnownTxs-len(hashes)) {
|
||||
p.knownTxs.Pop()
|
||||
}
|
||||
for _, hash := range hashes {
|
||||
p.knownTxs.Add(hash)
|
||||
}
|
||||
case <-p.term:
|
||||
p.Log().Debug("Dropping transaction announcement", "count", len(hashes))
|
||||
}
|
||||
}
|
||||
|
||||
// SendPooledTransactionsRLP sends requested transactions to the peer and adds the
|
||||
// hashes in its transaction hash set for future reference.
|
||||
//
|
||||
// Note, the method assumes the hashes are correct and correspond to the list of
|
||||
// transactions being sent.
|
||||
func (p *Peer) SendPooledTransactionsRLP(hashes []common.Hash, txs []rlp.RawValue) error {
|
||||
// Mark all the transactions as known, but ensure we don't overflow our limits
|
||||
for p.knownTxs.Cardinality() > max(0, maxKnownTxs-len(hashes)) {
|
||||
p.knownTxs.Pop()
|
||||
}
|
||||
for _, hash := range hashes {
|
||||
p.knownTxs.Add(hash)
|
||||
}
|
||||
return p2p.Send(p.rw, PooledTransactionsMsg, txs) // Not packed into PooledTransactionsPacket to avoid RLP decoding
|
||||
}
|
||||
|
||||
// SendNewBlockHashes announces the availability of a number of blocks through
|
||||
// a hash notification.
|
||||
func (p *Peer) SendNewBlockHashes(hashes []common.Hash, numbers []uint64) error {
|
||||
// Mark all the block hashes as known, but ensure we don't overflow our limits
|
||||
for p.knownBlocks.Cardinality() > max(0, maxKnownBlocks-len(hashes)) {
|
||||
p.knownBlocks.Pop()
|
||||
}
|
||||
for _, hash := range hashes {
|
||||
p.knownBlocks.Add(hash)
|
||||
}
|
||||
request := make(NewBlockHashesPacket, len(hashes))
|
||||
for i := 0; i < len(hashes); i++ {
|
||||
request[i].Hash = hashes[i]
|
||||
request[i].Number = numbers[i]
|
||||
}
|
||||
return p2p.Send(p.rw, NewBlockHashesMsg, request)
|
||||
}
|
||||
|
||||
// AsyncSendNewBlockHash queues the availability of a block for propagation to a
|
||||
// remote peer. If the peer's broadcast queue is full, the event is silently
|
||||
// dropped.
|
||||
func (p *Peer) AsyncSendNewBlockHash(block *types.Block) {
|
||||
select {
|
||||
case p.queuedBlockAnns <- block:
|
||||
// Mark the block hash as known, but ensure we don't overflow our limits
|
||||
for p.knownBlocks.Cardinality() >= maxKnownBlocks {
|
||||
p.knownBlocks.Pop()
|
||||
}
|
||||
p.knownBlocks.Add(block.Hash())
|
||||
default:
|
||||
p.Log().Debug("Dropping block announcement", "number", block.NumberU64(), "hash", block.Hash())
|
||||
}
|
||||
}
|
||||
|
||||
// SendNewBlock propagates an entire block to a remote peer.
|
||||
func (p *Peer) SendNewBlock(block *types.Block, td *big.Int) error {
|
||||
// Mark the block hash as known, but ensure we don't overflow our limits
|
||||
for p.knownBlocks.Cardinality() >= maxKnownBlocks {
|
||||
p.knownBlocks.Pop()
|
||||
}
|
||||
p.knownBlocks.Add(block.Hash())
|
||||
return p2p.Send(p.rw, NewBlockMsg, &NewBlockPacket{block, td})
|
||||
}
|
||||
|
||||
// AsyncSendNewBlock queues an entire block for propagation to a remote peer. If
|
||||
// the peer's broadcast queue is full, the event is silently dropped.
|
||||
func (p *Peer) AsyncSendNewBlock(block *types.Block, td *big.Int) {
|
||||
select {
|
||||
case p.queuedBlocks <- &blockPropagation{block: block, td: td}:
|
||||
// Mark the block hash as known, but ensure we don't overflow our limits
|
||||
for p.knownBlocks.Cardinality() >= maxKnownBlocks {
|
||||
p.knownBlocks.Pop()
|
||||
}
|
||||
p.knownBlocks.Add(block.Hash())
|
||||
default:
|
||||
p.Log().Debug("Dropping block propagation", "number", block.NumberU64(), "hash", block.Hash())
|
||||
}
|
||||
}
|
||||
|
||||
// SendBlockHeaders sends a batch of block headers to the remote peer.
|
||||
func (p *Peer) SendBlockHeaders(headers []*types.Header) error {
|
||||
return p2p.Send(p.rw, BlockHeadersMsg, BlockHeadersPacket(headers))
|
||||
}
|
||||
|
||||
// SendBlockBodies sends a batch of block contents to the remote peer.
|
||||
func (p *Peer) SendBlockBodies(bodies []*BlockBody) error {
|
||||
return p2p.Send(p.rw, BlockBodiesMsg, BlockBodiesPacket(bodies))
|
||||
}
|
||||
|
||||
// SendBlockBodiesRLP sends a batch of block contents to the remote peer from
|
||||
// an already RLP encoded format.
|
||||
func (p *Peer) SendBlockBodiesRLP(bodies []rlp.RawValue) error {
|
||||
return p2p.Send(p.rw, BlockBodiesMsg, bodies) // Not packed into BlockBodiesPacket to avoid RLP decoding
|
||||
}
|
||||
|
||||
// SendNodeData sends a batch of arbitrary internal data, corresponding to the
|
||||
// hashes requested.
|
||||
func (p *Peer) SendNodeData(data [][]byte) error {
|
||||
return p2p.Send(p.rw, NodeDataMsg, NodeDataPacket(data))
|
||||
}
|
||||
|
||||
// SendReceiptsRLP sends a batch of transaction receipts, corresponding to the
|
||||
// ones requested from an already RLP encoded format.
|
||||
func (p *Peer) SendReceiptsRLP(receipts []rlp.RawValue) error {
|
||||
return p2p.Send(p.rw, ReceiptsMsg, receipts) // Not packed into ReceiptsPacket to avoid RLP decoding
|
||||
}
|
||||
|
||||
// RequestOneHeader is a wrapper around the header query functions to fetch a
|
||||
// single header. It is used solely by the fetcher.
|
||||
func (p *Peer) RequestOneHeader(hash common.Hash) error {
|
||||
p.Log().Debug("Fetching single header", "hash", hash)
|
||||
return p2p.Send(p.rw, GetBlockHeadersMsg, &GetBlockHeadersPacket{
|
||||
Origin: HashOrNumber{Hash: hash},
|
||||
Amount: uint64(1),
|
||||
Skip: uint64(0),
|
||||
Reverse: false,
|
||||
})
|
||||
}
|
||||
|
||||
// RequestHeadersByHash fetches a batch of blocks' headers corresponding to the
|
||||
// specified header query, based on the hash of an origin block.
|
||||
func (p *Peer) RequestHeadersByHash(origin common.Hash, amount int, skip int, reverse bool) error {
|
||||
p.Log().Debug("Fetching batch of headers", "count", amount, "fromhash", origin, "skip", skip, "reverse", reverse)
|
||||
return p2p.Send(p.rw, GetBlockHeadersMsg, &GetBlockHeadersPacket{
|
||||
Origin: HashOrNumber{Hash: origin},
|
||||
Amount: uint64(amount),
|
||||
Skip: uint64(skip),
|
||||
Reverse: reverse,
|
||||
})
|
||||
}
|
||||
|
||||
// RequestHeadersByNumber fetches a batch of blocks' headers corresponding to the
|
||||
// specified header query, based on the number of an origin block.
|
||||
func (p *Peer) RequestHeadersByNumber(origin uint64, amount int, skip int, reverse bool) error {
|
||||
p.Log().Debug("Fetching batch of headers", "count", amount, "fromnum", origin, "skip", skip, "reverse", reverse)
|
||||
return p2p.Send(p.rw, GetBlockHeadersMsg, &GetBlockHeadersPacket{
|
||||
Origin: HashOrNumber{Number: origin},
|
||||
Amount: uint64(amount),
|
||||
Skip: uint64(skip),
|
||||
Reverse: reverse,
|
||||
})
|
||||
}
|
||||
|
||||
// ExpectRequestHeadersByNumber is a testing method to mirror the recipient side
|
||||
// of the RequestHeadersByNumber operation.
|
||||
func (p *Peer) ExpectRequestHeadersByNumber(origin uint64, amount int, skip int, reverse bool) error {
|
||||
req := &GetBlockHeadersPacket{
|
||||
Origin: HashOrNumber{Number: origin},
|
||||
Amount: uint64(amount),
|
||||
Skip: uint64(skip),
|
||||
Reverse: reverse,
|
||||
}
|
||||
return p2p.ExpectMsg(p.rw, GetBlockHeadersMsg, req)
|
||||
}
|
||||
|
||||
// RequestBodies fetches a batch of blocks' bodies corresponding to the hashes
|
||||
// specified.
|
||||
func (p *Peer) RequestBodies(hashes []common.Hash) error {
|
||||
p.Log().Debug("Fetching batch of block bodies", "count", len(hashes))
|
||||
return p2p.Send(p.rw, GetBlockBodiesMsg, GetBlockBodiesPacket(hashes))
|
||||
}
|
||||
|
||||
// RequestNodeData fetches a batch of arbitrary data from a node's known state
|
||||
// data, corresponding to the specified hashes.
|
||||
func (p *Peer) RequestNodeData(hashes []common.Hash) error {
|
||||
p.Log().Debug("Fetching batch of state data", "count", len(hashes))
|
||||
return p2p.Send(p.rw, GetNodeDataMsg, GetNodeDataPacket(hashes))
|
||||
}
|
||||
|
||||
// RequestReceipts fetches a batch of transaction receipts from a remote node.
|
||||
func (p *Peer) RequestReceipts(hashes []common.Hash) error {
|
||||
p.Log().Debug("Fetching batch of receipts", "count", len(hashes))
|
||||
return p2p.Send(p.rw, GetReceiptsMsg, GetReceiptsPacket(hashes))
|
||||
}
|
||||
|
||||
// RequestTxs fetches a batch of transactions from a remote node.
|
||||
func (p *Peer) RequestTxs(hashes []common.Hash) error {
|
||||
p.Log().Debug("Fetching batch of transactions", "count", len(hashes))
|
||||
return p2p.Send(p.rw, GetPooledTransactionsMsg, GetPooledTransactionsPacket(hashes))
|
||||
}
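A hedged usage sketch, in the spirit of the tests that follow, pairing one of the request helpers above with its mirrored expectation over an in-memory pipe. It assumes it lives inside this package (so ETH65, GetBlockHeadersMsg and the packet types are in scope) and that the enode import used by the test files is available; error handling is elided:

func sketchHeaderRequest() error {
	app, net := p2p.MsgPipe()
	defer app.Close()
	defer net.Close()

	peer := NewPeer(ETH65, p2p.NewPeer(enode.ID{}, "sketch", nil), net, nil)
	defer peer.Close()

	// The request helper writes a GetBlockHeaders query to the pipe...
	go peer.RequestHeadersByNumber(1, 16, 0, false)

	// ...which the other end can assert on, field by field.
	return p2p.ExpectMsg(app, GetBlockHeadersMsg, &GetBlockHeadersPacket{
		Origin: HashOrNumber{Number: 1}, Amount: 16, Skip: 0, Reverse: false,
	})
}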
|
61
eth/protocols/eth/peer_test.go
Normal file
@ -0,0 +1,61 @@
|
||||
// Copyright 2015 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
// This file contains some shared testing functionality, common to multiple
|
||||
// different files and modules being tested.
|
||||
|
||||
package eth
|
||||
|
||||
import (
|
||||
"crypto/rand"
|
||||
|
||||
"github.com/ethereum/go-ethereum/p2p"
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
)
|
||||
|
||||
// testPeer is a simulated peer to allow testing direct network calls.
|
||||
type testPeer struct {
|
||||
*Peer
|
||||
|
||||
net p2p.MsgReadWriter // Network layer reader/writer to simulate remote messaging
|
||||
app *p2p.MsgPipeRW // Application layer reader/writer to simulate the local side
|
||||
}
|
||||
|
||||
// newTestPeer creates a new peer registered at the given data backend.
|
||||
func newTestPeer(name string, version uint, backend Backend) (*testPeer, <-chan error) {
|
||||
// Create a message pipe to communicate through
|
||||
app, net := p2p.MsgPipe()
|
||||
|
||||
// Start the peer on a new thread
|
||||
var id enode.ID
|
||||
rand.Read(id[:])
|
||||
|
||||
peer := NewPeer(version, p2p.NewPeer(id, name, nil), net, backend.TxPool())
|
||||
errc := make(chan error, 1)
|
||||
go func() {
|
||||
errc <- backend.RunPeer(peer, func(peer *Peer) error {
|
||||
return Handle(backend, peer)
|
||||
})
|
||||
}()
|
||||
return &testPeer{app: app, net: net, Peer: peer}, errc
|
||||
}
|
||||
|
||||
// close terminates the local side of the peer, notifying the remote protocol
|
||||
// manager of termination.
|
||||
func (p *testPeer) close() {
|
||||
p.Peer.Close()
|
||||
p.app.Close()
|
||||
}
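Typical use of these helpers in the protocol tests looks roughly as follows (a hedged sketch; newTestBackend is the chain-backed fixture assumed to exist elsewhere in this package's tests):

func sketchTestSetup(t *testing.T) {
	backend := newTestBackend(0) // assumed helper: a short test chain
	defer backend.close()

	peer, _ := newTestPeer("peer", ETH65, backend)
	defer peer.close()

	// Drive protocol traffic through peer.app and assert on the replies,
	// e.g. with p2p.Send / p2p.ExpectMsg as in the handler tests.
}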
|
279
eth/protocols/eth/protocol.go
Normal file
@ -0,0 +1,279 @@
|
||||
// Copyright 2014 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package eth
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"math/big"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/core/forkid"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/rlp"
|
||||
)
|
||||
|
||||
// Constants to match up protocol versions and messages
|
||||
const (
|
||||
ETH64 = 64
|
||||
ETH65 = 65
|
||||
)
|
||||
|
||||
// protocolName is the official short name of the `eth` protocol used during
|
||||
// devp2p capability negotiation.
|
||||
const protocolName = "eth"
|
||||
|
||||
// protocolVersions are the supported versions of the `eth` protocol (first
|
||||
// is primary).
|
||||
var protocolVersions = []uint{ETH65, ETH64}
|
||||
|
||||
// protocolLengths are the number of implemented messages corresponding to
|
||||
// different protocol versions.
|
||||
var protocolLengths = map[uint]uint64{ETH65: 17, ETH64: 17}
|
||||
|
||||
// maxMessageSize is the maximum cap on the size of a protocol message.
|
||||
const maxMessageSize = 10 * 1024 * 1024
|
||||
|
||||
const (
|
||||
// Protocol messages in eth/64
|
||||
StatusMsg = 0x00
|
||||
NewBlockHashesMsg = 0x01
|
||||
TransactionsMsg = 0x02
|
||||
GetBlockHeadersMsg = 0x03
|
||||
BlockHeadersMsg = 0x04
|
||||
GetBlockBodiesMsg = 0x05
|
||||
BlockBodiesMsg = 0x06
|
||||
NewBlockMsg = 0x07
|
||||
GetNodeDataMsg = 0x0d
|
||||
NodeDataMsg = 0x0e
|
||||
GetReceiptsMsg = 0x0f
|
||||
ReceiptsMsg = 0x10
|
||||
|
||||
// Protocol messages overloaded in eth/65
|
||||
NewPooledTransactionHashesMsg = 0x08
|
||||
GetPooledTransactionsMsg = 0x09
|
||||
PooledTransactionsMsg = 0x0a
|
||||
)
|
||||
|
||||
var (
|
||||
errNoStatusMsg = errors.New("no status message")
|
||||
errMsgTooLarge = errors.New("message too long")
|
||||
errDecode = errors.New("invalid message")
|
||||
errInvalidMsgCode = errors.New("invalid message code")
|
||||
errProtocolVersionMismatch = errors.New("protocol version mismatch")
|
||||
errNetworkIDMismatch = errors.New("network ID mismatch")
|
||||
errGenesisMismatch = errors.New("genesis mismatch")
|
||||
errForkIDRejected = errors.New("fork ID rejected")
|
||||
errExtraStatusMsg = errors.New("extra status message")
|
||||
)
|
||||
|
||||
// Packet represents a p2p message in the `eth` protocol.
|
||||
type Packet interface {
|
||||
Name() string // Name returns a string corresponding to the message type.
|
||||
Kind() byte // Kind returns the message type.
|
||||
}
|
||||
|
||||
// StatusPacket is the network packet for the status message for eth/64 and later.
|
||||
type StatusPacket struct {
|
||||
ProtocolVersion uint32
|
||||
NetworkID uint64
|
||||
TD *big.Int
|
||||
Head common.Hash
|
||||
Genesis common.Hash
|
||||
ForkID forkid.ID
|
||||
}
|
||||
|
||||
// NewBlockHashesPacket is the network packet for the block announcements.
|
||||
type NewBlockHashesPacket []struct {
|
||||
Hash common.Hash // Hash of one particular block being announced
|
||||
Number uint64 // Number of one particular block being announced
|
||||
}
|
||||
|
||||
// Unpack retrieves the block hashes and numbers from the announcement packet
|
||||
// and returns them in a split flat format that's more consistent with the
|
||||
// internal data structures.
|
||||
func (p *NewBlockHashesPacket) Unpack() ([]common.Hash, []uint64) {
|
||||
var (
|
||||
hashes = make([]common.Hash, len(*p))
|
||||
numbers = make([]uint64, len(*p))
|
||||
)
|
||||
for i, body := range *p {
|
||||
hashes[i], numbers[i] = body.Hash, body.Number
|
||||
}
|
||||
return hashes, numbers
|
||||
}
|
||||
|
||||
// TransactionsPacket is the network packet for broadcasting new transactions.
|
||||
type TransactionsPacket []*types.Transaction
|
||||
|
||||
// GetBlockHeadersPacket represents a block header query.
|
||||
type GetBlockHeadersPacket struct {
|
||||
Origin HashOrNumber // Block from which to retrieve headers
|
||||
Amount uint64 // Maximum number of headers to retrieve
|
||||
Skip uint64 // Blocks to skip between consecutive headers
|
||||
Reverse bool // Query direction (false = rising towards latest, true = falling towards genesis)
|
||||
}
|
||||
|
||||
// HashOrNumber is a combined field for specifying an origin block.
|
||||
type HashOrNumber struct {
|
||||
Hash common.Hash // Block hash from which to retrieve headers (excludes Number)
|
||||
Number uint64 // Block number from which to retrieve headers (excludes Hash)
|
||||
}
|
||||
|
||||
// EncodeRLP is a specialized encoder for HashOrNumber to encode only one of the
|
||||
// two contained union fields.
|
||||
func (hn *HashOrNumber) EncodeRLP(w io.Writer) error {
|
||||
if hn.Hash == (common.Hash{}) {
|
||||
return rlp.Encode(w, hn.Number)
|
||||
}
|
||||
if hn.Number != 0 {
|
||||
return fmt.Errorf("both origin hash (%x) and number (%d) provided", hn.Hash, hn.Number)
|
||||
}
|
||||
return rlp.Encode(w, hn.Hash)
|
||||
}
|
||||
|
||||
// DecodeRLP is a specialized decoder for HashOrNumber to decode the contents
|
||||
// into either a block hash or a block number.
|
||||
func (hn *HashOrNumber) DecodeRLP(s *rlp.Stream) error {
|
||||
_, size, _ := s.Kind()
|
||||
origin, err := s.Raw()
|
||||
if err == nil {
|
||||
switch {
|
||||
case size == 32:
|
||||
err = rlp.DecodeBytes(origin, &hn.Hash)
|
||||
case size <= 8:
|
||||
err = rlp.DecodeBytes(origin, &hn.Number)
|
||||
default:
|
||||
err = fmt.Errorf("invalid input size %d for origin", size)
|
||||
}
|
||||
}
|
||||
return err
|
||||
}
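The decoder above distinguishes the two union fields purely by RLP payload size: a 32-byte string must be a hash, anything up to 8 bytes is a block number. A self-contained sketch of the underlying encodings (values are illustrative):

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/rlp"
)

func main() {
	num, _ := rlp.EncodeToBytes(uint64(314))                     // small integer payload
	hash, _ := rlp.EncodeToBytes(common.HexToHash("0xdeadbeef")) // 32-byte string payload
	fmt.Println(len(num), len(hash))                             // 3 33 (1-byte header + payload)
}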
|
||||
|
||||
// BlockHeadersPacket represents a block header response.
|
||||
type BlockHeadersPacket []*types.Header
|
||||
|
||||
// NewBlockPacket is the network packet for the block propagation message.
|
||||
type NewBlockPacket struct {
|
||||
Block *types.Block
|
||||
TD *big.Int
|
||||
}
|
||||
|
||||
// sanityCheck verifies that the values are reasonable, as a DoS protection
|
||||
func (request *NewBlockPacket) sanityCheck() error {
|
||||
if err := request.Block.SanityCheck(); err != nil {
|
||||
return err
|
||||
}
|
||||
// TD at mainnet block #7753254 is 76 bits. If it becomes 100 million times
|
||||
// larger, it will still fit within 100 bits
|
||||
if tdlen := request.TD.BitLen(); tdlen > 100 {
|
||||
return fmt.Errorf("too large block TD: bitlen %d", tdlen)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetBlockBodiesPacket represents a block body query.
|
||||
type GetBlockBodiesPacket []common.Hash
|
||||
|
||||
// BlockBodiesPacket is the network packet for block content distribution.
|
||||
type BlockBodiesPacket []*BlockBody
|
||||
|
||||
// BlockBody represents the data content of a single block.
|
||||
type BlockBody struct {
|
||||
Transactions []*types.Transaction // Transactions contained within a block
|
||||
Uncles []*types.Header // Uncles contained within a block
|
||||
}
|
||||
|
||||
// Unpack retrieves the transactions and uncles from the range packet and returns
|
||||
// them in a split flat format that's more consistent with the internal data structures.
|
||||
func (p *BlockBodiesPacket) Unpack() ([][]*types.Transaction, [][]*types.Header) {
|
||||
var (
|
||||
txset = make([][]*types.Transaction, len(*p))
|
||||
uncleset = make([][]*types.Header, len(*p))
|
||||
)
|
||||
for i, body := range *p {
|
||||
txset[i], uncleset[i] = body.Transactions, body.Uncles
|
||||
}
|
||||
return txset, uncleset
|
||||
}
|
||||
|
||||
// GetNodeDataPacket represents a trie node data query.
|
||||
type GetNodeDataPacket []common.Hash
|
||||
|
||||
// NodeDataPacket is the network packet for trie node data distribution.
|
||||
type NodeDataPacket [][]byte
|
||||
|
||||
// GetReceiptsPacket represents a block receipts query.
|
||||
type GetReceiptsPacket []common.Hash
|
||||
|
||||
// ReceiptsPacket is the network packet for block receipts distribution.
|
||||
type ReceiptsPacket [][]*types.Receipt
|
||||
|
||||
// NewPooledTransactionHashesPacket represents a transaction announcement packet.
|
||||
type NewPooledTransactionHashesPacket []common.Hash
|
||||
|
||||
// GetPooledTransactionsPacket represents a transaction query.
|
||||
type GetPooledTransactionsPacket []common.Hash
|
||||
|
||||
// PooledTransactionsPacket is the network packet for transaction distribution.
|
||||
type PooledTransactionsPacket []*types.Transaction
|
||||
|
||||
func (*StatusPacket) Name() string { return "Status" }
|
||||
func (*StatusPacket) Kind() byte { return StatusMsg }
|
||||
|
||||
func (*NewBlockHashesPacket) Name() string { return "NewBlockHashes" }
|
||||
func (*NewBlockHashesPacket) Kind() byte { return NewBlockHashesMsg }
|
||||
|
||||
func (*TransactionsPacket) Name() string { return "Transactions" }
|
||||
func (*TransactionsPacket) Kind() byte { return TransactionsMsg }
|
||||
|
||||
func (*GetBlockHeadersPacket) Name() string { return "GetBlockHeaders" }
|
||||
func (*GetBlockHeadersPacket) Kind() byte { return GetBlockHeadersMsg }
|
||||
|
||||
func (*BlockHeadersPacket) Name() string { return "BlockHeaders" }
|
||||
func (*BlockHeadersPacket) Kind() byte { return BlockHeadersMsg }
|
||||
|
||||
func (*GetBlockBodiesPacket) Name() string { return "GetBlockBodies" }
|
||||
func (*GetBlockBodiesPacket) Kind() byte { return GetBlockBodiesMsg }
|
||||
|
||||
func (*BlockBodiesPacket) Name() string { return "BlockBodies" }
|
||||
func (*BlockBodiesPacket) Kind() byte { return BlockBodiesMsg }
|
||||
|
||||
func (*NewBlockPacket) Name() string { return "NewBlock" }
|
||||
func (*NewBlockPacket) Kind() byte { return NewBlockMsg }
|
||||
|
||||
func (*GetNodeDataPacket) Name() string { return "GetNodeData" }
|
||||
func (*GetNodeDataPacket) Kind() byte { return GetNodeDataMsg }
|
||||
|
||||
func (*NodeDataPacket) Name() string { return "NodeData" }
|
||||
func (*NodeDataPacket) Kind() byte { return NodeDataMsg }
|
||||
|
||||
func (*GetReceiptsPacket) Name() string { return "GetReceipts" }
|
||||
func (*GetReceiptsPacket) Kind() byte { return GetReceiptsMsg }
|
||||
|
||||
func (*ReceiptsPacket) Name() string { return "Receipts" }
|
||||
func (*ReceiptsPacket) Kind() byte { return ReceiptsMsg }
|
||||
|
||||
func (*NewPooledTransactionHashesPacket) Name() string { return "NewPooledTransactionHashes" }
|
||||
func (*NewPooledTransactionHashesPacket) Kind() byte { return NewPooledTransactionHashesMsg }
|
||||
|
||||
func (*GetPooledTransactionsPacket) Name() string { return "GetPooledTransactions" }
|
||||
func (*GetPooledTransactionsPacket) Kind() byte { return GetPooledTransactionsMsg }
|
||||
|
||||
func (*PooledTransactionsPacket) Name() string { return "PooledTransactions" }
|
||||
func (*PooledTransactionsPacket) Kind() byte { return PooledTransactionsMsg }
|
68
eth/protocols/eth/protocol_test.go
Normal file
@ -0,0 +1,68 @@
|
||||
// Copyright 2014 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package eth
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/rlp"
|
||||
)
|
||||
|
||||
// Tests that the custom union field encoder and decoder works correctly.
|
||||
func TestGetBlockHeadersDataEncodeDecode(t *testing.T) {
|
||||
// Create a "random" hash for testing
|
||||
var hash common.Hash
|
||||
for i := range hash {
|
||||
hash[i] = byte(i)
|
||||
}
|
||||
// Assemble some table driven tests
|
||||
tests := []struct {
|
||||
packet *GetBlockHeadersPacket
|
||||
fail bool
|
||||
}{
|
||||
// Providing the origin as either a hash or a number should both work
|
||||
{fail: false, packet: &GetBlockHeadersPacket{Origin: HashOrNumber{Number: 314}}},
|
||||
{fail: false, packet: &GetBlockHeadersPacket{Origin: HashOrNumber{Hash: hash}}},
|
||||
|
||||
// Providing arbitrary query field should also work
|
||||
{fail: false, packet: &GetBlockHeadersPacket{Origin: HashOrNumber{Number: 314}, Amount: 314, Skip: 1, Reverse: true}},
|
||||
{fail: false, packet: &GetBlockHeadersPacket{Origin: HashOrNumber{Hash: hash}, Amount: 314, Skip: 1, Reverse: true}},
|
||||
|
||||
// Providing both the origin hash and origin number must fail
|
||||
{fail: true, packet: &GetBlockHeadersPacket{Origin: HashOrNumber{Hash: hash, Number: 314}}},
|
||||
}
|
||||
// Iterate over each of the tests and try to encode and then decode
|
||||
for i, tt := range tests {
|
||||
bytes, err := rlp.EncodeToBytes(tt.packet)
|
||||
if err != nil && !tt.fail {
|
||||
t.Fatalf("test %d: failed to encode packet: %v", i, err)
|
||||
} else if err == nil && tt.fail {
|
||||
t.Fatalf("test %d: encode should have failed", i)
|
||||
}
|
||||
if !tt.fail {
|
||||
packet := new(GetBlockHeadersPacket)
|
||||
if err := rlp.DecodeBytes(bytes, packet); err != nil {
|
||||
t.Fatalf("test %d: failed to decode packet: %v", i, err)
|
||||
}
|
||||
if packet.Origin.Hash != tt.packet.Origin.Hash || packet.Origin.Number != tt.packet.Origin.Number || packet.Amount != tt.packet.Amount ||
|
||||
packet.Skip != tt.packet.Skip || packet.Reverse != tt.packet.Reverse {
|
||||
t.Fatalf("test %d: encode decode mismatch: have %+v, want %+v", i, packet, tt.packet)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
32
eth/protocols/snap/discovery.go
Normal file
@ -0,0 +1,32 @@
|
||||
// Copyright 2020 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package snap
|
||||
|
||||
import (
|
||||
"github.com/ethereum/go-ethereum/rlp"
|
||||
)
|
||||
|
||||
// enrEntry is the ENR entry which advertises `snap` protocol on the discovery.
|
||||
type enrEntry struct {
|
||||
// Ignore additional fields (for forward compatibility).
|
||||
Rest []rlp.RawValue `rlp:"tail"`
|
||||
}
|
||||
|
||||
// ENRKey implements enr.Entry.
|
||||
func (e enrEntry) ENRKey() string {
|
||||
return "snap"
|
||||
}
|
490
eth/protocols/snap/handler.go
Normal file
@ -0,0 +1,490 @@
|
||||
// Copyright 2020 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package snap
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/core"
|
||||
"github.com/ethereum/go-ethereum/core/state"
|
||||
"github.com/ethereum/go-ethereum/light"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"github.com/ethereum/go-ethereum/p2p"
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
"github.com/ethereum/go-ethereum/p2p/enr"
|
||||
"github.com/ethereum/go-ethereum/rlp"
|
||||
"github.com/ethereum/go-ethereum/trie"
|
||||
)
|
||||
|
||||
const (
|
||||
// softResponseLimit is the target maximum size of replies to data retrievals.
|
||||
softResponseLimit = 2 * 1024 * 1024
|
||||
|
||||
// maxCodeLookups is the maximum number of bytecodes to serve. This number is
|
||||
// there to limit the number of disk lookups.
|
||||
maxCodeLookups = 1024
|
||||
|
||||
// stateLookupSlack defines the ratio by how much a state response can exceed
|
||||
// the requested limit in order to try and avoid breaking up contracts into
|
||||
// multiple packets and proving them.
|
||||
stateLookupSlack = 0.1
|
||||
|
||||
// maxTrieNodeLookups is the maximum number of state trie nodes to serve. This
|
||||
// number is there to limit the number of disk lookups.
|
||||
maxTrieNodeLookups = 1024
|
||||
)
|
||||
|
||||
// Handler is a callback to invoke from an outside runner after the boilerplate
|
||||
// exchanges have passed.
|
||||
type Handler func(peer *Peer) error
|
||||
|
||||
// Backend defines the data retrieval methods to serve remote requests and the
|
||||
// callback methods to invoke on remote deliveries.
|
||||
type Backend interface {
|
||||
// Chain retrieves the blockchain object to serve data.
|
||||
Chain() *core.BlockChain
|
||||
|
||||
// RunPeer is invoked when a peer joins on the `snap` protocol. The handler
|
||||
// should do any peer maintenance work, handshakes and validations. If all
|
||||
// is passed, control should be given back to the `handler` to process the
|
||||
// inbound messages going forward.
|
||||
RunPeer(peer *Peer, handler Handler) error
|
||||
|
||||
// PeerInfo retrieves all known `snap` information about a peer.
|
||||
PeerInfo(id enode.ID) interface{}
|
||||
|
||||
// Handle is a callback to be invoked when a data packet is received from
|
||||
// the remote peer. Only packets not consumed by the protocol handler will
|
||||
// be forwarded to the backend.
|
||||
Handle(peer *Peer, packet Packet) error
|
||||
}
|
||||
|
||||
// MakeProtocols constructs the P2P protocol definitions for `snap`.
|
||||
func MakeProtocols(backend Backend, dnsdisc enode.Iterator) []p2p.Protocol {
|
||||
protocols := make([]p2p.Protocol, len(protocolVersions))
|
||||
for i, version := range protocolVersions {
|
||||
version := version // Closure
|
||||
|
||||
protocols[i] = p2p.Protocol{
|
||||
Name: protocolName,
|
||||
Version: version,
|
||||
Length: protocolLengths[version],
|
||||
Run: func(p *p2p.Peer, rw p2p.MsgReadWriter) error {
|
||||
return backend.RunPeer(newPeer(version, p, rw), func(peer *Peer) error {
|
||||
return handle(backend, peer)
|
||||
})
|
||||
},
|
||||
NodeInfo: func() interface{} {
|
||||
return nodeInfo(backend.Chain())
|
||||
},
|
||||
PeerInfo: func(id enode.ID) interface{} {
|
||||
return backend.PeerInfo(id)
|
||||
},
|
||||
Attributes: []enr.Entry{&enrEntry{}},
|
||||
DialCandidates: dnsdisc,
|
||||
}
|
||||
}
|
||||
return protocols
|
||||
}
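The protocol descriptors returned here are meant to be appended to the node's devp2p server configuration by the enclosing eth service. A hedged wiring sketch; the backend and dnsdisc values are assumed to be supplied by that service, and the rest of the server Config (private key, listen address, ...) is omitted:

func sketchServer(backend Backend, dnsdisc enode.Iterator) (*p2p.Server, error) {
	srv := &p2p.Server{Config: p2p.Config{
		MaxPeers:  50, // illustrative
		Protocols: MakeProtocols(backend, dnsdisc),
		// PrivateKey, ListenAddr and friends omitted for brevity.
	}}
	if err := srv.Start(); err != nil {
		return nil, err
	}
	return srv, nil
}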
|
||||
|
||||
// handle is the callback invoked to manage the life cycle of a `snap` peer.
|
||||
// When this function terminates, the peer is disconnected.
|
||||
func handle(backend Backend, peer *Peer) error {
|
||||
for {
|
||||
if err := handleMessage(backend, peer); err != nil {
|
||||
peer.Log().Debug("Message handling failed in `snap`", "err", err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// handleMessage is invoked whenever an inbound message is received from a
|
||||
// remote peer on the `snap` protocol. The remote connection is torn down upon
|
||||
// returning any error.
|
||||
func handleMessage(backend Backend, peer *Peer) error {
|
||||
// Read the next message from the remote peer, and ensure it's fully consumed
|
||||
msg, err := peer.rw.ReadMsg()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if msg.Size > maxMessageSize {
|
||||
return fmt.Errorf("%w: %v > %v", errMsgTooLarge, msg.Size, maxMessageSize)
|
||||
}
|
||||
defer msg.Discard()
|
||||
|
||||
// Handle the message depending on its contents
|
||||
switch {
|
||||
case msg.Code == GetAccountRangeMsg:
|
||||
// Decode the account retrieval request
|
||||
var req GetAccountRangePacket
|
||||
if err := msg.Decode(&req); err != nil {
|
||||
return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
|
||||
}
|
||||
if req.Bytes > softResponseLimit {
|
||||
req.Bytes = softResponseLimit
|
||||
}
|
||||
// Retrieve the requested state and bail out if non existent
|
||||
tr, err := trie.New(req.Root, backend.Chain().StateCache().TrieDB())
|
||||
if err != nil {
|
||||
return p2p.Send(peer.rw, AccountRangeMsg, &AccountRangePacket{ID: req.ID})
|
||||
}
|
||||
it, err := backend.Chain().Snapshots().AccountIterator(req.Root, req.Origin)
|
||||
if err != nil {
|
||||
return p2p.Send(peer.rw, AccountRangeMsg, &AccountRangePacket{ID: req.ID})
|
||||
}
|
||||
// Iterate over the requested range and pile accounts up
|
||||
var (
|
||||
accounts []*AccountData
|
||||
size uint64
|
||||
last common.Hash
|
||||
)
|
||||
for it.Next() && size < req.Bytes {
|
||||
hash, account := it.Hash(), common.CopyBytes(it.Account())
|
||||
|
||||
// Track the returned interval for the Merkle proofs
|
||||
last = hash
|
||||
|
||||
// Assemble the reply item
|
||||
size += uint64(common.HashLength + len(account))
|
||||
accounts = append(accounts, &AccountData{
|
||||
Hash: hash,
|
||||
Body: account,
|
||||
})
|
||||
// If we've exceeded the request threshold, abort
|
||||
if bytes.Compare(hash[:], req.Limit[:]) >= 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
it.Release()
|
||||
|
||||
// Generate the Merkle proofs for the first and last account
|
||||
proof := light.NewNodeSet()
|
||||
if err := tr.Prove(req.Origin[:], 0, proof); err != nil {
|
||||
log.Warn("Failed to prove account range", "origin", req.Origin, "err", err)
|
||||
return p2p.Send(peer.rw, AccountRangeMsg, &AccountRangePacket{ID: req.ID})
|
||||
}
|
||||
if last != (common.Hash{}) {
|
||||
if err := tr.Prove(last[:], 0, proof); err != nil {
|
||||
log.Warn("Failed to prove account range", "last", last, "err", err)
|
||||
return p2p.Send(peer.rw, AccountRangeMsg, &AccountRangePacket{ID: req.ID})
|
||||
}
|
||||
}
|
||||
var proofs [][]byte
|
||||
for _, blob := range proof.NodeList() {
|
||||
proofs = append(proofs, blob)
|
||||
}
|
||||
// Send back anything accumulated
|
||||
return p2p.Send(peer.rw, AccountRangeMsg, &AccountRangePacket{
|
||||
ID: req.ID,
|
||||
Accounts: accounts,
|
||||
Proof: proofs,
|
||||
})
|
||||
|
||||
case msg.Code == AccountRangeMsg:
|
||||
// A range of accounts arrived to one of our previous requests
|
||||
res := new(AccountRangePacket)
|
||||
if err := msg.Decode(res); err != nil {
|
||||
return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
|
||||
}
|
||||
// Ensure the range is monotonically increasing
|
||||
for i := 1; i < len(res.Accounts); i++ {
|
||||
if bytes.Compare(res.Accounts[i-1].Hash[:], res.Accounts[i].Hash[:]) >= 0 {
|
||||
return fmt.Errorf("accounts not monotonically increasing: #%d [%x] vs #%d [%x]", i-1, res.Accounts[i-1].Hash[:], i, res.Accounts[i].Hash[:])
|
||||
}
|
||||
}
|
||||
return backend.Handle(peer, res)
|
||||
|
||||
case msg.Code == GetStorageRangesMsg:
|
||||
// Decode the storage retrieval request
|
||||
var req GetStorageRangesPacket
|
||||
if err := msg.Decode(&req); err != nil {
|
||||
return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
|
||||
}
|
||||
if req.Bytes > softResponseLimit {
|
||||
req.Bytes = softResponseLimit
|
||||
}
|
||||
// TODO(karalabe): Do we want to enforce > 0 accounts and 1 account if origin is set?
|
||||
// TODO(karalabe): - Logging locally is not ideal as remote faults annoy the local user
|
||||
// TODO(karalabe): - Dropping the remote peer is less flexible wrt client bugs (slow is better than non-functional)
|
||||
|
||||
// Calculate the hard limit at which to abort, even if mid storage trie
|
||||
hardLimit := uint64(float64(req.Bytes) * (1 + stateLookupSlack))
|
||||
|
||||
// Retrieve storage ranges until the packet limit is reached
|
||||
var (
|
||||
slots [][]*StorageData
|
||||
proofs [][]byte
|
||||
size uint64
|
||||
)
|
||||
for _, account := range req.Accounts {
|
||||
// If we've exceeded the requested data limit, abort without opening
|
||||
// a new storage range (that we'd need to prove due to exceeded size)
|
||||
if size >= req.Bytes {
|
||||
break
|
||||
}
|
||||
// The first account might start from a different origin and end sooner
|
||||
var origin common.Hash
|
||||
if len(req.Origin) > 0 {
|
||||
origin, req.Origin = common.BytesToHash(req.Origin), nil
|
||||
}
|
||||
var limit = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
|
||||
if len(req.Limit) > 0 {
|
||||
limit, req.Limit = common.BytesToHash(req.Limit), nil
|
||||
}
|
||||
// Retrieve the requested state and bail out if non existent
|
||||
it, err := backend.Chain().Snapshots().StorageIterator(req.Root, account, origin)
|
||||
if err != nil {
|
||||
return p2p.Send(peer.rw, StorageRangesMsg, &StorageRangesPacket{ID: req.ID})
|
||||
}
|
||||
// Iterate over the requested range and pile slots up
|
||||
var (
|
||||
storage []*StorageData
|
||||
last common.Hash
|
||||
)
|
||||
for it.Next() && size < hardLimit {
|
||||
hash, slot := it.Hash(), common.CopyBytes(it.Slot())
|
||||
|
||||
// Track the returned interval for the Merkle proofs
|
||||
last = hash
|
||||
|
||||
// Assemble the reply item
|
||||
size += uint64(common.HashLength + len(slot))
|
||||
storage = append(storage, &StorageData{
|
||||
Hash: hash,
|
||||
Body: slot,
|
||||
})
|
||||
// If we've exceeded the request threshold, abort
|
||||
if bytes.Compare(hash[:], limit[:]) >= 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
slots = append(slots, storage)
|
||||
it.Release()
|
||||
|
||||
// Generate the Merkle proofs for the first and last storage slot, but
|
||||
// only if the response was capped. If the entire storage trie is included
|
||||
// in the response, no need for any proofs.
|
||||
if origin != (common.Hash{}) || size >= hardLimit {
|
||||
// Request started at a non-zero hash or was capped prematurely, add
|
||||
// the endpoint Merkle proofs
|
||||
accTrie, err := trie.New(req.Root, backend.Chain().StateCache().TrieDB())
|
||||
if err != nil {
|
||||
return p2p.Send(peer.rw, StorageRangesMsg, &StorageRangesPacket{ID: req.ID})
|
||||
}
|
||||
var acc state.Account
|
||||
if err := rlp.DecodeBytes(accTrie.Get(account[:]), &acc); err != nil {
|
||||
return p2p.Send(peer.rw, StorageRangesMsg, &StorageRangesPacket{ID: req.ID})
|
||||
}
|
||||
stTrie, err := trie.New(acc.Root, backend.Chain().StateCache().TrieDB())
|
||||
if err != nil {
|
||||
return p2p.Send(peer.rw, StorageRangesMsg, &StorageRangesPacket{ID: req.ID})
|
||||
}
|
||||
proof := light.NewNodeSet()
|
||||
if err := stTrie.Prove(origin[:], 0, proof); err != nil {
|
||||
log.Warn("Failed to prove storage range", "origin", req.Origin, "err", err)
|
||||
return p2p.Send(peer.rw, StorageRangesMsg, &StorageRangesPacket{ID: req.ID})
|
||||
}
|
||||
if last != (common.Hash{}) {
|
||||
if err := stTrie.Prove(last[:], 0, proof); err != nil {
|
||||
log.Warn("Failed to prove storage range", "last", last, "err", err)
|
||||
return p2p.Send(peer.rw, StorageRangesMsg, &StorageRangesPacket{ID: req.ID})
|
||||
}
|
||||
}
|
||||
for _, blob := range proof.NodeList() {
|
||||
proofs = append(proofs, blob)
|
||||
}
|
||||
// Proof terminates the reply as proofs are only added if a node
|
||||
// refuses to serve more data (exception when a contract fetch is
|
||||
// finishing, but that's that).
|
||||
break
|
||||
}
|
||||
}
|
||||
// Send back anything accumulated
|
||||
return p2p.Send(peer.rw, StorageRangesMsg, &StorageRangesPacket{
|
||||
ID: req.ID,
|
||||
Slots: slots,
|
||||
Proof: proofs,
|
||||
})
|
||||
|
||||
case msg.Code == StorageRangesMsg:
|
||||
// A range of storage slots arrived to one of our previous requests
|
||||
res := new(StorageRangesPacket)
|
||||
if err := msg.Decode(res); err != nil {
|
||||
return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
|
||||
}
|
||||
// Ensure the ranges are monotonically increasing
|
||||
for i, slots := range res.Slots {
|
||||
for j := 1; j < len(slots); j++ {
|
||||
if bytes.Compare(slots[j-1].Hash[:], slots[j].Hash[:]) >= 0 {
|
||||
return fmt.Errorf("storage slots not monotonically increasing for account #%d: #%d [%x] vs #%d [%x]", i, j-1, slots[j-1].Hash[:], j, slots[j].Hash[:])
|
||||
}
|
||||
}
|
||||
}
|
||||
return backend.Handle(peer, res)
|
||||
|
||||
case msg.Code == GetByteCodesMsg:
|
||||
// Decode bytecode retrieval request
|
||||
var req GetByteCodesPacket
|
||||
if err := msg.Decode(&req); err != nil {
|
||||
return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
|
||||
}
|
||||
if req.Bytes > softResponseLimit {
|
||||
req.Bytes = softResponseLimit
|
||||
}
|
||||
if len(req.Hashes) > maxCodeLookups {
|
||||
req.Hashes = req.Hashes[:maxCodeLookups]
|
||||
}
|
||||
// Retrieve bytecodes until the packet size limit is reached
|
||||
var (
|
||||
codes [][]byte
|
||||
bytes uint64
|
||||
)
|
||||
for _, hash := range req.Hashes {
|
||||
if hash == emptyCode {
|
||||
// Peers should not request the empty code, but if they do, at
|
||||
// least sent them back a correct response without db lookups
|
||||
codes = append(codes, []byte{})
|
||||
} else if blob, err := backend.Chain().ContractCode(hash); err == nil {
|
||||
codes = append(codes, blob)
|
||||
bytes += uint64(len(blob))
|
||||
}
|
||||
if bytes > req.Bytes {
|
||||
break
|
||||
}
|
||||
}
|
||||
// Send back anything accumulated
|
||||
return p2p.Send(peer.rw, ByteCodesMsg, &ByteCodesPacket{
|
||||
ID: req.ID,
|
||||
Codes: codes,
|
||||
})
|
||||
|
||||
case msg.Code == ByteCodesMsg:
// A batch of byte codes arrived in response to one of our previous requests
res := new(ByteCodesPacket)
if err := msg.Decode(res); err != nil {
return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
}
return backend.Handle(peer, res)

case msg.Code == GetTrieNodesMsg:
// Decode trie node retrieval request
var req GetTrieNodesPacket
if err := msg.Decode(&req); err != nil {
return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
}
if req.Bytes > softResponseLimit {
req.Bytes = softResponseLimit
}
// Make sure we have the state associated with the request
triedb := backend.Chain().StateCache().TrieDB()

accTrie, err := trie.NewSecure(req.Root, triedb)
if err != nil {
// We don't have the requested state available, bail out
return p2p.Send(peer.rw, TrieNodesMsg, &TrieNodesPacket{ID: req.ID})
}
snap := backend.Chain().Snapshots().Snapshot(req.Root)
if snap == nil {
// We don't have the requested state snapshotted yet, bail out.
// In reality we could still serve using the account and storage
// tries only, but let's protect the node a bit while it's doing
// snapshot generation.
return p2p.Send(peer.rw, TrieNodesMsg, &TrieNodesPacket{ID: req.ID})
}
// Retrieve trie nodes until the packet size limit is reached
var (
nodes [][]byte
bytes uint64
loads int // Trie hash expansions to count database reads
)
for _, pathset := range req.Paths {
switch len(pathset) {
case 0:
// Ensure we penalize invalid requests
return fmt.Errorf("%w: zero-item pathset requested", errBadRequest)

case 1:
// If we're only retrieving an account trie node, fetch it directly
blob, resolved, err := accTrie.TryGetNode(pathset[0])
loads += resolved // always account database reads, even for failures
if err != nil {
break
}
nodes = append(nodes, blob)
bytes += uint64(len(blob))

default:
// Storage slots requested, open the storage trie and retrieve from there
account, err := snap.Account(common.BytesToHash(pathset[0]))
loads++ // always account database reads, even for failures
if err != nil {
break
}
stTrie, err := trie.NewSecure(common.BytesToHash(account.Root), triedb)
loads++ // always account database reads, even for failures
if err != nil {
break
}
for _, path := range pathset[1:] {
blob, resolved, err := stTrie.TryGetNode(path)
loads += resolved // always account database reads, even for failures
if err != nil {
break
}
nodes = append(nodes, blob)
bytes += uint64(len(blob))

// Sanity check limits to avoid DoS on the storage trie loads
if bytes > req.Bytes || loads > maxTrieNodeLookups {
break
}
}
}
// Abort request processing if we've exceeded our limits
if bytes > req.Bytes || loads > maxTrieNodeLookups {
break
}
}
// Send back anything accumulated
return p2p.Send(peer.rw, TrieNodesMsg, &TrieNodesPacket{
ID: req.ID,
Nodes: nodes,
})

case msg.Code == TrieNodesMsg:
// A batch of trie nodes arrived in response to one of our previous requests
res := new(TrieNodesPacket)
if err := msg.Decode(res); err != nil {
return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
}
return backend.Handle(peer, res)

default:
return fmt.Errorf("%w: %v", errInvalidMsgCode, msg.Code)
}
}

// NodeInfo represents a short summary of the `snap` sub-protocol metadata
// known about the host peer.
type NodeInfo struct{}

// nodeInfo retrieves some `snap` protocol metadata about the running host node.
func nodeInfo(chain *core.BlockChain) *NodeInfo {
return &NodeInfo{}
}
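The GetTrieNodesMsg handler above resolves each TrieNodePathSet by treating a single-element set as a path in the account trie and a multi-element set as an account path followed by storage trie paths. As a rough requester-side illustration (not part of this change: the package name, request ID, byte budget and raw path bytes below are placeholder assumptions), such a request could be shaped like this:

package snapexample

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/eth/protocols/snap"
)

// buildTrieNodeRequest sketches a GetTrieNodesPacket asking for one account
// trie node and, optionally, a few storage trie nodes of that same account.
func buildTrieNodeRequest(root common.Hash, accountPath []byte, slotPaths [][]byte) *snap.GetTrieNodesPacket {
	paths := []snap.TrieNodePathSet{
		{accountPath}, // length 1: addresses a node in the account trie
	}
	if len(slotPaths) > 0 {
		// length > 1: the first element picks the account, the rest its storage trie nodes
		paths = append(paths, append(snap.TrieNodePathSet{accountPath}, slotPaths...))
	}
	return &snap.GetTrieNodesPacket{
		ID:    1,          // placeholder request ID
		Root:  root,       // state root the nodes must be resolved against
		Paths: paths,
		Bytes: 512 * 1024, // soft byte budget; the server additionally caps it
	}
}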
111
eth/protocols/snap/peer.go
Normal file
@ -0,0 +1,111 @@
// Copyright 2020 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package snap

import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/p2p"
)

// Peer is a collection of relevant information we have about a `snap` peer.
type Peer struct {
id string // Unique ID for the peer, cached

*p2p.Peer // The embedded P2P package peer
rw p2p.MsgReadWriter // Input/output streams for snap
version uint // Protocol version negotiated

logger log.Logger // Contextual logger with the peer id injected
}

// newPeer creates a wrapper for a network connection and negotiated protocol
// version.
func newPeer(version uint, p *p2p.Peer, rw p2p.MsgReadWriter) *Peer {
id := p.ID().String()
return &Peer{
id: id,
Peer: p,
rw: rw,
version: version,
logger: log.New("peer", id[:8]),
}
}

// ID retrieves the peer's unique identifier.
func (p *Peer) ID() string {
return p.id
}

// Version retrieves the peer's negotiated `snap` protocol version.
func (p *Peer) Version() uint {
return p.version
}

// RequestAccountRange fetches a batch of accounts rooted in a specific account
// trie, starting with the origin.
func (p *Peer) RequestAccountRange(id uint64, root common.Hash, origin, limit common.Hash, bytes uint64) error {
p.logger.Trace("Fetching range of accounts", "reqid", id, "root", root, "origin", origin, "limit", limit, "bytes", common.StorageSize(bytes))
return p2p.Send(p.rw, GetAccountRangeMsg, &GetAccountRangePacket{
ID: id,
Root: root,
Origin: origin,
Limit: limit,
Bytes: bytes,
})
}

// RequestStorageRanges fetches a batch of storage slots belonging to one or more
// accounts. If slots for only one account are requested, an origin marker may also
// be used to retrieve from there.
func (p *Peer) RequestStorageRanges(id uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, bytes uint64) error {
if len(accounts) == 1 && origin != nil {
p.logger.Trace("Fetching range of large storage slots", "reqid", id, "root", root, "account", accounts[0], "origin", common.BytesToHash(origin), "limit", common.BytesToHash(limit), "bytes", common.StorageSize(bytes))
} else {
p.logger.Trace("Fetching ranges of small storage slots", "reqid", id, "root", root, "accounts", len(accounts), "first", accounts[0], "bytes", common.StorageSize(bytes))
}
return p2p.Send(p.rw, GetStorageRangesMsg, &GetStorageRangesPacket{
ID: id,
Root: root,
Accounts: accounts,
Origin: origin,
Limit: limit,
Bytes: bytes,
})
}

// RequestByteCodes fetches a batch of bytecodes by hash.
func (p *Peer) RequestByteCodes(id uint64, hashes []common.Hash, bytes uint64) error {
p.logger.Trace("Fetching set of byte codes", "reqid", id, "hashes", len(hashes), "bytes", common.StorageSize(bytes))
return p2p.Send(p.rw, GetByteCodesMsg, &GetByteCodesPacket{
ID: id,
Hashes: hashes,
Bytes: bytes,
})
}

// RequestTrieNodes fetches a batch of account or storage trie nodes rooted in
// a specific state trie.
func (p *Peer) RequestTrieNodes(id uint64, root common.Hash, paths []TrieNodePathSet, bytes uint64) error {
p.logger.Trace("Fetching set of trie nodes", "reqid", id, "root", root, "pathsets", len(paths), "bytes", common.StorageSize(bytes))
return p2p.Send(p.rw, GetTrieNodesMsg, &GetTrieNodesPacket{
ID: id,
Root: root,
Paths: paths,
Bytes: bytes,
})
}
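For orientation, the request helpers above are fire-and-forget: they only encode and send the query, while the matching AccountRangeMsg, StorageRangesMsg, ByteCodesMsg and TrieNodesMsg replies come back through the message handler and are routed by request ID. A minimal requester-side sketch (assuming the eth/protocols/snap import path introduced here; the IDs, byte budget and all-ones limit hash are illustrative):

package snapexample

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/eth/protocols/snap"
)

// requestInitialRanges asks a peer for the full account range under root and
// for one contract bytecode. The replies arrive asynchronously and must be
// matched back to the request IDs used here.
func requestInitialRanges(p *snap.Peer, root, codeHash common.Hash) error {
	var limit common.Hash
	for i := range limit {
		limit[i] = 0xff // request the whole hash space; the response is soft-capped anyway
	}
	if err := p.RequestAccountRange(1, root, common.Hash{}, limit, 512*1024); err != nil {
		return err
	}
	return p.RequestByteCodes(2, []common.Hash{codeHash}, 512*1024)
}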
218
eth/protocols/snap/protocol.go
Normal file
@ -0,0 +1,218 @@
// Copyright 2020 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package snap

import (
"errors"
"fmt"

"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/state/snapshot"
"github.com/ethereum/go-ethereum/rlp"
)

// Constants to match up protocol versions and messages
const (
snap1 = 1
)

// protocolName is the official short name of the `snap` protocol used during
// devp2p capability negotiation.
const protocolName = "snap"

// protocolVersions are the supported versions of the `snap` protocol (first
// is primary).
var protocolVersions = []uint{snap1}

// protocolLengths are the number of implemented messages corresponding to
// different protocol versions.
var protocolLengths = map[uint]uint64{snap1: 8}

// maxMessageSize is the maximum cap on the size of a protocol message.
const maxMessageSize = 10 * 1024 * 1024

const (
GetAccountRangeMsg = 0x00
AccountRangeMsg = 0x01
GetStorageRangesMsg = 0x02
StorageRangesMsg = 0x03
GetByteCodesMsg = 0x04
ByteCodesMsg = 0x05
GetTrieNodesMsg = 0x06
TrieNodesMsg = 0x07
)

var (
errMsgTooLarge = errors.New("message too long")
errDecode = errors.New("invalid message")
errInvalidMsgCode = errors.New("invalid message code")
errBadRequest = errors.New("bad request")
)

// Packet represents a p2p message in the `snap` protocol.
type Packet interface {
Name() string // Name returns a string corresponding to the message type.
Kind() byte // Kind returns the message type.
}

// GetAccountRangePacket represents an account query.
type GetAccountRangePacket struct {
ID uint64 // Request ID to match up responses with
Root common.Hash // Root hash of the account trie to serve
Origin common.Hash // Hash of the first account to retrieve
Limit common.Hash // Hash of the last account to retrieve
Bytes uint64 // Soft limit at which to stop returning data
}

// AccountRangePacket represents an account query response.
type AccountRangePacket struct {
ID uint64 // ID of the request this is a response for
Accounts []*AccountData // List of consecutive accounts from the trie
Proof [][]byte // List of trie nodes proving the account range
}

// AccountData represents a single account in a query response.
type AccountData struct {
Hash common.Hash // Hash of the account
Body rlp.RawValue // Account body in slim format
}

// Unpack retrieves the accounts from the range packet and converts from slim
// wire representation to consensus format. The returned data is RLP encoded
// since it's expected to be serialized to disk without further interpretation.
//
// Note, this method does a round of RLP decoding and re-encoding, so only use it
// once and cache the results if need be. Ideally discard the packet afterwards
// to not double the memory use.
func (p *AccountRangePacket) Unpack() ([]common.Hash, [][]byte, error) {
var (
hashes = make([]common.Hash, len(p.Accounts))
accounts = make([][]byte, len(p.Accounts))
)
for i, acc := range p.Accounts {
val, err := snapshot.FullAccountRLP(acc.Body)
if err != nil {
return nil, nil, fmt.Errorf("invalid account %x: %v", acc.Body, err)
}
hashes[i], accounts[i] = acc.Hash, val
}
return hashes, accounts, nil
}

// GetStorageRangesPacket represents a storage slot query.
type GetStorageRangesPacket struct {
ID uint64 // Request ID to match up responses with
Root common.Hash // Root hash of the account trie to serve
Accounts []common.Hash // Account hashes of the storage tries to serve
Origin []byte // Hash of the first storage slot to retrieve (large contract mode)
Limit []byte // Hash of the last storage slot to retrieve (large contract mode)
Bytes uint64 // Soft limit at which to stop returning data
}

// StorageRangesPacket represents a storage slot query response.
type StorageRangesPacket struct {
ID uint64 // ID of the request this is a response for
Slots [][]*StorageData // Lists of consecutive storage slots for the requested accounts
Proof [][]byte // Merkle proofs for the *last* slot range, if it's incomplete
}

// StorageData represents a single storage slot in a query response.
type StorageData struct {
Hash common.Hash // Hash of the storage slot
Body []byte // Data content of the slot
}

// Unpack retrieves the storage slots from the range packet and returns them in
// a split flat format that's more consistent with the internal data structures.
func (p *StorageRangesPacket) Unpack() ([][]common.Hash, [][][]byte) {
var (
hashset = make([][]common.Hash, len(p.Slots))
slotset = make([][][]byte, len(p.Slots))
)
for i, slots := range p.Slots {
hashset[i] = make([]common.Hash, len(slots))
slotset[i] = make([][]byte, len(slots))
for j, slot := range slots {
hashset[i][j] = slot.Hash
slotset[i][j] = slot.Body
}
}
return hashset, slotset
}

// GetByteCodesPacket represents a contract bytecode query.
type GetByteCodesPacket struct {
ID uint64 // Request ID to match up responses with
Hashes []common.Hash // Code hashes to retrieve the code for
Bytes uint64 // Soft limit at which to stop returning data
}

// ByteCodesPacket represents a contract bytecode query response.
type ByteCodesPacket struct {
ID uint64 // ID of the request this is a response for
Codes [][]byte // Requested contract bytecodes
}

// GetTrieNodesPacket represents a state trie node query.
type GetTrieNodesPacket struct {
ID uint64 // Request ID to match up responses with
Root common.Hash // Root hash of the account trie to serve
Paths []TrieNodePathSet // Trie node hashes to retrieve the nodes for
Bytes uint64 // Soft limit at which to stop returning data
}

// TrieNodePathSet is a list of trie node paths to retrieve. A naive way to
// represent trie nodes would be a simple list of `account || storage` path
// segments concatenated, but that would be very wasteful on the network.
//
// Instead, this array special-cases the first element as the path in the
// account trie and the remaining elements as paths in the storage trie. To
// address an account node, the slice should have a length of 1 consisting
// of only the account path. There's no need to be able to address both an
// account node and a storage node in the same request as it cannot happen
// that a slot is accessed before the account path is fully expanded.
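//
// An illustrative example (the path values below are hypothetical placeholders,
// not part of the protocol definition):
//
//	TrieNodePathSet{accountPath}                       // one account trie node
//	TrieNodePathSet{accountPath, slotPathA, slotPathB} // two storage trie nodes of that account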
type TrieNodePathSet [][]byte

// TrieNodesPacket represents a state trie node query response.
type TrieNodesPacket struct {
ID uint64 // ID of the request this is a response for
Nodes [][]byte // Requested state trie nodes
}

func (*GetAccountRangePacket) Name() string { return "GetAccountRange" }
func (*GetAccountRangePacket) Kind() byte { return GetAccountRangeMsg }

func (*AccountRangePacket) Name() string { return "AccountRange" }
func (*AccountRangePacket) Kind() byte { return AccountRangeMsg }

func (*GetStorageRangesPacket) Name() string { return "GetStorageRanges" }
func (*GetStorageRangesPacket) Kind() byte { return GetStorageRangesMsg }

func (*StorageRangesPacket) Name() string { return "StorageRanges" }
func (*StorageRangesPacket) Kind() byte { return StorageRangesMsg }

func (*GetByteCodesPacket) Name() string { return "GetByteCodes" }
func (*GetByteCodesPacket) Kind() byte { return GetByteCodesMsg }

func (*ByteCodesPacket) Name() string { return "ByteCodes" }
func (*ByteCodesPacket) Kind() byte { return ByteCodesMsg }

func (*GetTrieNodesPacket) Name() string { return "GetTrieNodes" }
func (*GetTrieNodesPacket) Kind() byte { return GetTrieNodesMsg }

func (*TrieNodesPacket) Name() string { return "TrieNodes" }
func (*TrieNodesPacket) Kind() byte { return TrieNodesMsg }
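On the receiving side, the Unpack helpers above convert the wire encoding into the flat shapes the sync code works with. A small consumer sketch (not part of this change; the function and its logging are illustrative only):

package snapexample

import (
	"fmt"

	"github.com/ethereum/go-ethereum/eth/protocols/snap"
)

// logAccountRange expands the slim-encoded account bodies of a received
// AccountRangePacket back into full consensus RLP and reports their sizes.
func logAccountRange(res *snap.AccountRangePacket) error {
	hashes, accounts, err := res.Unpack()
	if err != nil {
		return err
	}
	for i, hash := range hashes {
		fmt.Printf("account %x: %d bytes of RLP\n", hash, len(accounts[i]))
	}
	return nil
}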
2481
eth/protocols/snap/sync.go
Normal file
File diff suppressed because it is too large