les: fix UDP connection query (#22451)

This PR fixes multiple issues with the UDP connection pre-negotiation feature:

- the enable condition was wrong (it checked for the existence of the DiscV5 struct at a point where it was not yet initialized, so the feature stayed disabled even when discv5 was enabled)
- the server pool queried nodes that were already connected whenever the discovery iterators returned them again
- servers replied positively before they were synced and actually willing to accept connections

Server-side metrics are also added that count the positive and negative replies to served connection queries.
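
For context, here is a minimal, self-contained sketch of the gating pattern this change introduces: the pool receives a synced callback at construction time and answers capacity queries with an all-zero (negative) reply until the callback reports true. All names and the placeholder capacity value are illustrative, not the actual les types:

package main

import "fmt"

// capacityPool mirrors the gating pattern from this commit: a synced()
// callback is injected at construction time, and capacity queries get an
// all-zero (negative) reply until it reports true.
type capacityPool struct {
	synced func() bool
}

// serveCapQuery answers one capacity query per requested token amount.
func (p *capacityPool) serveCapQuery(addTokens []uint64) []uint64 {
	result := make([]uint64, len(addTokens))
	if !p.synced() {
		return result // all zeroes: not willing to accept connections yet
	}
	for i := range result {
		result[i] = 1 // placeholder for the capacity actually offered
	}
	return result
}

func main() {
	isSynced := false
	pool := &capacityPool{synced: func() bool { return isSynced }}
	fmt.Println(pool.serveCapQuery([]uint64{100})) // [0] while still syncing
	isSynced = true
	fmt.Println(pool.serveCapQuery([]uint64{100})) // [1] once synced
}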
Author: Felföldi Zsolt
Date:   2021-03-16 12:53:54 +01:00 (committed by GitHub)
parent 94ab4ea341
commit 62d8022b51
8 changed files with 112 additions and 78 deletions

@@ -72,6 +72,7 @@ type clientPool struct {
 	clock      mclock.Clock
 	closed     bool
 	removePeer func(enode.ID)
+	synced     func() bool
 	ns         *nodestate.NodeStateMachine
 	pp         *vfs.PriorityPool
 	bt         *vfs.BalanceTracker
@@ -107,7 +108,7 @@ type clientInfo struct {
 }
 
 // newClientPool creates a new client pool
-func newClientPool(ns *nodestate.NodeStateMachine, lesDb ethdb.Database, minCap uint64, connectedBias time.Duration, clock mclock.Clock, removePeer func(enode.ID)) *clientPool {
+func newClientPool(ns *nodestate.NodeStateMachine, lesDb ethdb.Database, minCap uint64, connectedBias time.Duration, clock mclock.Clock, removePeer func(enode.ID), synced func() bool) *clientPool {
 	pool := &clientPool{
 		ns:                  ns,
 		BalanceTrackerSetup: balanceTrackerSetup,
@@ -116,6 +117,7 @@ func newClientPool(ns *nodestate.NodeStateMachine, lesDb ethdb.Database, minCap
 		minCap:        minCap,
 		connectedBias: connectedBias,
 		removePeer:    removePeer,
+		synced:        synced,
 	}
 	pool.bt = vfs.NewBalanceTracker(ns, balanceTrackerSetup, lesDb, clock, &utils.Expirer{}, &utils.Expirer{})
 	pool.pp = vfs.NewPriorityPool(ns, priorityPoolSetup, clock, minCap, connectedBias, 4)
@@ -396,6 +398,13 @@ func (f *clientPool) serveCapQuery(id enode.ID, freeID string, data []byte) []by
 	if l := len(req.AddTokens); l == 0 || l > vflux.CapacityQueryMaxLen {
 		return nil
 	}
+	result := make(vflux.CapacityQueryReply, len(req.AddTokens))
+	if !f.synced() {
+		capacityQueryZeroMeter.Mark(1)
+		reply, _ := rlp.EncodeToBytes(&result)
+		return reply
+	}
+
 	node := f.ns.GetNode(id)
 	if node == nil {
 		node = enode.SignNull(&enr.Record{}, id)
@@ -416,7 +425,6 @@ func (f *clientPool) serveCapQuery(id enode.ID, freeID string, data []byte) []by
 	}
 	// use vfs.CapacityCurve to answer request for multiple newly bought token amounts
 	curve := f.pp.GetCapacityCurve().Exclude(id)
-	result := make(vflux.CapacityQueryReply, len(req.AddTokens))
 	bias := time.Second * time.Duration(req.Bias)
 	if f.connectedBias > bias {
 		bias = f.connectedBias
@@ -434,6 +442,12 @@ func (f *clientPool) serveCapQuery(id enode.ID, freeID string, data []byte) []by
 			result[i] = 0
 		}
 	}
+	// add first result to metrics (don't care about priority client multi-queries yet)
+	if result[0] == 0 {
+		capacityQueryZeroMeter.Mark(1)
+	} else {
+		capacityQueryNonZeroMeter.Mark(1)
+	}
 	reply, _ := rlp.EncodeToBytes(&result)
 	return reply
 }
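
The capacityQueryZeroMeter and capacityQueryNonZeroMeter meters marked above are defined elsewhere in the les package (not shown in this diff). A plausible registration using go-ethereum's metrics package is sketched below; the metric name strings are assumptions for illustration, not taken from this commit:

import "github.com/ethereum/go-ethereum/metrics"

var (
	// server-side counters for replies to served capacity (connection) queries;
	// the name strings are illustrative guesses
	capacityQueryZeroMeter    = metrics.NewRegisteredMeter("les/server/capacityQueryZero", nil)
	capacityQueryNonZeroMeter = metrics.NewRegisteredMeter("les/server/capacityQueryNonZero", nil)
)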