p2p/discover: split out discv4 code

This change restructures the internals of p2p/discover to make room for
the discv5 code, which will soon be added to this package.

- Packet type names now have a "V4" suffix.
- ListenUDP returns *UDPv4 instead of *Table. This technically breaks
  the API, but the only caller in go-ethereum is package p2p, which uses
  a compatible interface and doesn't need changes.
- The internal transport interface is changed to make Table reusable for v5
  (see the interface sketch below).
- The 'lookup' code moves from table to transport. This required
  updating the lookup unit test to use udpTest instead of a custom transport.
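
For orientation, the reworked internal interface (as it appears in table.go
in the diff below) is:

    // transport is implemented by UDP transports.
    type transport interface {
        Self() *enode.Node
        lookupRandom() []*enode.Node
        lookupSelf() []*enode.Node
        ping(*enode.Node) error
    }

Table now drives self and random lookups through this interface during
refresh instead of running the lookup algorithm itself.
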
Author: Felix Lange
Date:   2019-04-30 13:13:22 +02:00
Parent: a43ec8bba5
Commit: dba1750eda
7 changed files with 700 additions and 700 deletions

p2p/discover/table.go

@@ -23,7 +23,6 @@
package discover
import (
"crypto/ecdsa"
crand "crypto/rand"
"encoding/binary"
"fmt"
@@ -34,7 +33,6 @@ import (
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/p2p/netutil"
@@ -71,26 +69,23 @@ type Table struct {
rand *mrand.Rand // source of randomness, periodically reseeded
ips netutil.DistinctNetSet
log log.Logger
db *enode.DB // database of known nodes
net transport
refreshReq chan chan struct{}
initDone chan struct{}
closeOnce sync.Once
closeReq chan struct{}
closed chan struct{}
closeReq chan struct{}
closed chan struct{}
nodeAddedHook func(*node) // for testing
}
// transport is implemented by the UDP transport.
// it is an interface so we can test without opening lots of UDP
// sockets and without generating a private key.
// transport is implemented by UDP transports.
type transport interface {
self() *enode.Node
ping(enode.ID, *net.UDPAddr) error
findnode(toid enode.ID, addr *net.UDPAddr, target encPubkey) ([]*node, error)
close()
Self() *enode.Node
lookupRandom() []*enode.Node
lookupSelf() []*enode.Node
ping(*enode.Node) error
}
// bucket contains nodes, ordered by their last activity. the entry
@@ -101,7 +96,7 @@ type bucket struct {
ips netutil.DistinctNetSet
}
func newTable(t transport, db *enode.DB, bootnodes []*enode.Node) (*Table, error) {
func newTable(t transport, db *enode.DB, bootnodes []*enode.Node, log log.Logger) (*Table, error) {
tab := &Table{
net: t,
db: db,
@@ -111,6 +106,7 @@ func newTable(t transport, db *enode.DB, bootnodes []*enode.Node) (*Table, error
closed: make(chan struct{}),
rand: mrand.New(mrand.NewSource(0)),
ips: netutil.DistinctNetSet{Subnet: tableSubnet, Limit: tableIPLimit},
log: log,
}
if err := tab.setFallbackNodes(bootnodes); err != nil {
return nil, err
@@ -128,7 +124,7 @@ func newTable(t transport, db *enode.DB, bootnodes []*enode.Node) (*Table, error
}
func (tab *Table) self() *enode.Node {
return tab.net.self()
return tab.net.Self()
}
func (tab *Table) seedRand() {
@@ -180,16 +176,22 @@ func (tab *Table) ReadRandomNodes(buf []*enode.Node) (n int) {
return i + 1
}
// Close terminates the network listener and flushes the node database.
func (tab *Table) Close() {
tab.closeOnce.Do(func() {
if tab.net != nil {
tab.net.close()
}
// Wait for loop to end.
close(tab.closeReq)
<-tab.closed
})
// Resolve searches for a specific node with the given ID.
// It returns nil if the node could not be found.
func (tab *Table) Resolve(n *enode.Node) *enode.Node {
tab.mutex.Lock()
cl := tab.closest(n.ID(), 1, false)
tab.mutex.Unlock()
if len(cl.entries) > 0 && cl.entries[0].ID() == n.ID() {
return unwrapNode(cl.entries[0])
}
return nil
}
// close terminates the network listener and flushes the node database.
func (tab *Table) close() {
close(tab.closeReq)
<-tab.closed
}
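// Note: the exported Close (with its sync.Once guard) is removed from Table;
// shutdown presumably moves to the UDPv4 transport returned by ListenUDP,
// which would call tab.close() once its own loops have stopped. A minimal
// sketch of that side, with hypothetical field names (closeOnce, closing,
// conn, wg):
//
//	func (t *UDPv4) Close() {
//		t.closeOnce.Do(func() {
//			close(t.closing) // stop the packet-handling loops
//			t.conn.Close()   // shut the UDP socket
//			t.wg.Wait()
//			t.tab.close() // terminate the table loop, flush the node DB
//		})
//	}
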
// setFallbackNodes sets the initial points of contact. These nodes
@@ -215,124 +217,6 @@ func (tab *Table) isInitDone() bool {
}
}
// Resolve searches for a specific node with the given ID.
// It returns nil if the node could not be found.
func (tab *Table) Resolve(n *enode.Node) *enode.Node {
// If the node is present in the local table, no
// network interaction is required.
hash := n.ID()
tab.mutex.Lock()
cl := tab.closest(hash, 1)
tab.mutex.Unlock()
if len(cl.entries) > 0 && cl.entries[0].ID() == hash {
return unwrapNode(cl.entries[0])
}
// Otherwise, do a network lookup.
result := tab.lookup(encodePubkey(n.Pubkey()), true)
for _, n := range result {
if n.ID() == hash {
return unwrapNode(n)
}
}
return nil
}
// LookupRandom finds random nodes in the network.
func (tab *Table) LookupRandom() []*enode.Node {
var target encPubkey
crand.Read(target[:])
return unwrapNodes(tab.lookup(target, true))
}
// lookup performs a network search for nodes close to the given target. It approaches the
// target by querying nodes that are closer to it on each iteration. The given target does
// not need to be an actual node identifier.
func (tab *Table) lookup(targetKey encPubkey, refreshIfEmpty bool) []*node {
var (
target = enode.ID(crypto.Keccak256Hash(targetKey[:]))
asked = make(map[enode.ID]bool)
seen = make(map[enode.ID]bool)
reply = make(chan []*node, alpha)
pendingQueries = 0
result *nodesByDistance
)
// don't query further if we hit ourself.
// unlikely to happen often in practice.
asked[tab.self().ID()] = true
for {
tab.mutex.Lock()
// generate initial result set
result = tab.closest(target, bucketSize)
tab.mutex.Unlock()
if len(result.entries) > 0 || !refreshIfEmpty {
break
}
// The result set is empty, all nodes were dropped, refresh.
// We actually wait for the refresh to complete here. The very
// first query will hit this case and run the bootstrapping
// logic.
<-tab.refresh()
refreshIfEmpty = false
}
for {
// ask the alpha closest nodes that we haven't asked yet
for i := 0; i < len(result.entries) && pendingQueries < alpha; i++ {
n := result.entries[i]
if !asked[n.ID()] {
asked[n.ID()] = true
pendingQueries++
go tab.findnode(n, targetKey, reply)
}
}
if pendingQueries == 0 {
// we have asked all closest nodes, stop the search
break
}
select {
case nodes := <-reply:
for _, n := range nodes {
if n != nil && !seen[n.ID()] {
seen[n.ID()] = true
result.push(n, bucketSize)
}
}
case <-tab.closeReq:
return nil // shutdown, no need to continue.
}
pendingQueries--
}
return result.entries
}
func (tab *Table) findnode(n *node, targetKey encPubkey, reply chan<- []*node) {
fails := tab.db.FindFails(n.ID(), n.IP())
r, err := tab.net.findnode(n.ID(), n.addr(), targetKey)
if err == errClosed {
// Avoid recording failures on shutdown.
reply <- nil
return
} else if len(r) == 0 {
fails++
tab.db.UpdateFindFails(n.ID(), n.IP(), fails)
log.Trace("Findnode failed", "id", n.ID(), "failcount", fails, "err", err)
if fails >= maxFindnodeFailures {
log.Trace("Too many findnode failures, dropping", "id", n.ID(), "failcount", fails)
tab.delete(n)
}
} else if fails > 0 {
tab.db.UpdateFindFails(n.ID(), n.IP(), fails-1)
}
// Grab as many nodes as possible. Some of them might not be alive anymore, but we'll
// just remove those again during revalidation.
for _, n := range r {
tab.addSeenNode(n)
}
reply <- r
}
func (tab *Table) refresh() <-chan struct{} {
done := make(chan struct{})
select {
@@ -417,11 +301,7 @@ func (tab *Table) doRefresh(done chan struct{}) {
tab.loadSeedNodes()
// Run self lookup to discover new neighbor nodes.
// We can only do this if we have a secp256k1 identity.
var key ecdsa.PublicKey
if err := tab.self().Load((*enode.Secp256k1)(&key)); err == nil {
tab.lookup(encodePubkey(&key), false)
}
tab.net.lookupSelf()
// The Kademlia paper specifies that the bucket refresh should
// perform a lookup in the least recently used bucket. We cannot
@@ -430,9 +310,7 @@ func (tab *Table) doRefresh(done chan struct{}) {
// sha3 preimage that falls into a chosen bucket.
// We perform a few lookups with a random target instead.
for i := 0; i < 3; i++ {
var target encPubkey
crand.Read(target[:])
tab.lookup(target, false)
tab.net.lookupRandom()
}
}
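// The lookup machinery removed above reappears on the transport side:
// doRefresh now simply calls tab.net.lookupSelf() and tab.net.lookupRandom().
// A rough sketch of what the v4 implementations could look like, reusing the
// pieces deleted from this file (priv is a hypothetical field holding the
// node's private key; t.lookup stands in for the relocated lookup routine):
//
//	func (t *UDPv4) lookupSelf() []*enode.Node {
//		return t.lookup(encodePubkey(&t.priv.PublicKey))
//	}
//
//	func (t *UDPv4) lookupRandom() []*enode.Node {
//		var target encPubkey
//		crand.Read(target[:])
//		return t.lookup(target)
//	}
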
@@ -442,7 +320,7 @@ func (tab *Table) loadSeedNodes() {
for i := range seeds {
seed := seeds[i]
age := log.Lazy{Fn: func() interface{} { return time.Since(tab.db.LastPongReceived(seed.ID(), seed.IP())) }}
log.Trace("Found seed node in database", "id", seed.ID(), "addr", seed.addr(), "age", age)
tab.log.Trace("Found seed node in database", "id", seed.ID(), "addr", seed.addr(), "age", age)
tab.addSeenNode(seed)
}
}
@@ -459,7 +337,7 @@ func (tab *Table) doRevalidate(done chan<- struct{}) {
}
// Ping the selected node and wait for a pong.
err := tab.net.ping(last.ID(), last.addr())
err := tab.net.ping(unwrapNode(last))
tab.mutex.Lock()
defer tab.mutex.Unlock()
@@ -467,16 +345,16 @@ func (tab *Table) doRevalidate(done chan<- struct{}) {
if err == nil {
// The node responded, move it to the front.
last.livenessChecks++
log.Debug("Revalidated node", "b", bi, "id", last.ID(), "checks", last.livenessChecks)
tab.log.Debug("Revalidated node", "b", bi, "id", last.ID(), "checks", last.livenessChecks)
tab.bumpInBucket(b, last)
return
}
// No reply received, pick a replacement or delete the node if there aren't
// any replacements.
if r := tab.replace(b, last); r != nil {
log.Debug("Replaced dead node", "b", bi, "id", last.ID(), "ip", last.IP(), "checks", last.livenessChecks, "r", r.ID(), "rip", r.IP())
tab.log.Debug("Replaced dead node", "b", bi, "id", last.ID(), "ip", last.IP(), "checks", last.livenessChecks, "r", r.ID(), "rip", r.IP())
} else {
log.Debug("Removed dead node", "b", bi, "id", last.ID(), "ip", last.IP(), "checks", last.livenessChecks)
tab.log.Debug("Removed dead node", "b", bi, "id", last.ID(), "ip", last.IP(), "checks", last.livenessChecks)
}
}
@@ -520,22 +398,27 @@ func (tab *Table) copyLiveNodes() {
// closest returns the n nodes in the table that are closest to the
// given id. The caller must hold tab.mutex.
func (tab *Table) closest(target enode.ID, nresults int) *nodesByDistance {
func (tab *Table) closest(target enode.ID, nresults int, checklive bool) *nodesByDistance {
// This is a very wasteful way to find the closest nodes but
// obviously correct. I believe that tree-based buckets would make
// this easier to implement efficiently.
close := &nodesByDistance{target: target}
for _, b := range &tab.buckets {
for _, n := range b.entries {
if n.livenessChecks > 0 {
close.push(n, nresults)
if checklive && n.livenessChecks == 0 {
continue
}
close.push(n, nresults)
}
}
return close
}
// len returns the number of nodes in the table.
func (tab *Table) len() (n int) {
tab.mutex.Lock()
defer tab.mutex.Unlock()
for _, b := range &tab.buckets {
n += len(b.entries)
}
@@ -641,11 +524,11 @@ func (tab *Table) addIP(b *bucket, ip net.IP) bool {
return true
}
if !tab.ips.Add(ip) {
log.Debug("IP exceeds table limit", "ip", ip)
tab.log.Debug("IP exceeds table limit", "ip", ip)
return false
}
if !b.ips.Add(ip) {
log.Debug("IP exceeds bucket limit", "ip", ip)
tab.log.Debug("IP exceeds bucket limit", "ip", ip)
tab.ips.Remove(ip)
return false
}
@@ -754,8 +637,7 @@ func deleteNode(list []*node, n *node) []*node {
return list
}
// nodesByDistance is a list of nodes, ordered by
// distance to target.
// nodesByDistance is a list of nodes, ordered by distance to target.
type nodesByDistance struct {
entries []*node
target enode.ID