les, les/lespay/client: add service value statistics and API (#20837)
This PR adds service value measurement statistics to the light client. It also adds a private API that makes these statistics accessible. A follow-up PR will add the new server pool, which uses these statistics to select servers with good performance. This document describes the function of the new components: https://gist.github.com/zsfelfoldi/3c7ace895234b7b345ab4f71dab102d4 Co-authored-by: rjl493456442 <garyrong0905@gmail.com>
This commit is contained in:
122
les/peer.go
122
les/peer.go
@ -32,6 +32,7 @@ import (
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/eth"
|
||||
"github.com/ethereum/go-ethereum/les/flowcontrol"
|
||||
lpc "github.com/ethereum/go-ethereum/les/lespay/client"
|
||||
"github.com/ethereum/go-ethereum/les/utils"
|
||||
"github.com/ethereum/go-ethereum/light"
|
||||
"github.com/ethereum/go-ethereum/p2p"
|
||||
@ -356,8 +357,12 @@ type serverPeer struct {
|
||||
checkpointNumber uint64 // The block height which the checkpoint is registered.
|
||||
checkpoint params.TrustedCheckpoint // The advertised checkpoint sent by server.
|
||||
|
||||
poolEntry *poolEntry // Statistic for server peer.
|
||||
fcServer *flowcontrol.ServerNode // Client side mirror token bucket.
|
||||
poolEntry *poolEntry // Statistic for server peer.
|
||||
fcServer *flowcontrol.ServerNode // Client side mirror token bucket.
|
||||
vtLock sync.Mutex
|
||||
valueTracker *lpc.ValueTracker
|
||||
nodeValueTracker *lpc.NodeValueTracker
|
||||
sentReqs map[uint64]sentReqEntry
|
||||
|
||||
// Statistics
|
||||
errCount int // Counter the invalid responses server has replied
|
||||
@ -428,62 +433,71 @@ func sendRequest(w p2p.MsgWriter, msgcode, reqID uint64, data interface{}) error
|
||||
return p2p.Send(w, msgcode, req{reqID, data})
|
||||
}
|
||||
|
||||
func (p *serverPeer) sendRequest(msgcode, reqID uint64, data interface{}, amount int) error {
|
||||
p.sentRequest(reqID, uint32(msgcode), uint32(amount))
|
||||
return sendRequest(p.rw, msgcode, reqID, data)
|
||||
}
|
||||
|
||||
// requestHeadersByHash fetches a batch of blocks' headers corresponding to the
|
||||
// specified header query, based on the hash of an origin block.
|
||||
func (p *serverPeer) requestHeadersByHash(reqID uint64, origin common.Hash, amount int, skip int, reverse bool) error {
|
||||
p.Log().Debug("Fetching batch of headers", "count", amount, "fromhash", origin, "skip", skip, "reverse", reverse)
|
||||
return sendRequest(p.rw, GetBlockHeadersMsg, reqID, &getBlockHeadersData{Origin: hashOrNumber{Hash: origin}, Amount: uint64(amount), Skip: uint64(skip), Reverse: reverse})
|
||||
return p.sendRequest(GetBlockHeadersMsg, reqID, &getBlockHeadersData{Origin: hashOrNumber{Hash: origin}, Amount: uint64(amount), Skip: uint64(skip), Reverse: reverse}, amount)
|
||||
}
|
||||
|
||||
// requestHeadersByNumber fetches a batch of blocks' headers corresponding to the
|
||||
// specified header query, based on the number of an origin block.
|
||||
func (p *serverPeer) requestHeadersByNumber(reqID, origin uint64, amount int, skip int, reverse bool) error {
|
||||
p.Log().Debug("Fetching batch of headers", "count", amount, "fromnum", origin, "skip", skip, "reverse", reverse)
|
||||
return sendRequest(p.rw, GetBlockHeadersMsg, reqID, &getBlockHeadersData{Origin: hashOrNumber{Number: origin}, Amount: uint64(amount), Skip: uint64(skip), Reverse: reverse})
|
||||
return p.sendRequest(GetBlockHeadersMsg, reqID, &getBlockHeadersData{Origin: hashOrNumber{Number: origin}, Amount: uint64(amount), Skip: uint64(skip), Reverse: reverse}, amount)
|
||||
}
|
||||
|
||||
// requestBodies fetches a batch of blocks' bodies corresponding to the hashes
|
||||
// specified.
|
||||
func (p *serverPeer) requestBodies(reqID uint64, hashes []common.Hash) error {
|
||||
p.Log().Debug("Fetching batch of block bodies", "count", len(hashes))
|
||||
return sendRequest(p.rw, GetBlockBodiesMsg, reqID, hashes)
|
||||
return p.sendRequest(GetBlockBodiesMsg, reqID, hashes, len(hashes))
|
||||
}
|
||||
|
||||
// requestCode fetches a batch of arbitrary data from a node's known state
|
||||
// data, corresponding to the specified hashes.
|
||||
func (p *serverPeer) requestCode(reqID uint64, reqs []CodeReq) error {
|
||||
p.Log().Debug("Fetching batch of codes", "count", len(reqs))
|
||||
return sendRequest(p.rw, GetCodeMsg, reqID, reqs)
|
||||
return p.sendRequest(GetCodeMsg, reqID, reqs, len(reqs))
|
||||
}
|
||||
|
||||
// requestReceipts fetches a batch of transaction receipts from a remote node.
|
||||
func (p *serverPeer) requestReceipts(reqID uint64, hashes []common.Hash) error {
|
||||
p.Log().Debug("Fetching batch of receipts", "count", len(hashes))
|
||||
return sendRequest(p.rw, GetReceiptsMsg, reqID, hashes)
|
||||
return p.sendRequest(GetReceiptsMsg, reqID, hashes, len(hashes))
|
||||
}
|
||||
|
||||
// requestProofs fetches a batch of merkle proofs from a remote node.
|
||||
func (p *serverPeer) requestProofs(reqID uint64, reqs []ProofReq) error {
|
||||
p.Log().Debug("Fetching batch of proofs", "count", len(reqs))
|
||||
return sendRequest(p.rw, GetProofsV2Msg, reqID, reqs)
|
||||
return p.sendRequest(GetProofsV2Msg, reqID, reqs, len(reqs))
|
||||
}
|
||||
|
||||
// requestHelperTrieProofs fetches a batch of HelperTrie merkle proofs from a remote node.
|
||||
func (p *serverPeer) requestHelperTrieProofs(reqID uint64, reqs []HelperTrieReq) error {
|
||||
p.Log().Debug("Fetching batch of HelperTrie proofs", "count", len(reqs))
|
||||
return sendRequest(p.rw, GetHelperTrieProofsMsg, reqID, reqs)
|
||||
return p.sendRequest(GetHelperTrieProofsMsg, reqID, reqs, len(reqs))
|
||||
}
|
||||
|
||||
// requestTxStatus fetches a batch of transaction status records from a remote node.
|
||||
func (p *serverPeer) requestTxStatus(reqID uint64, txHashes []common.Hash) error {
|
||||
p.Log().Debug("Requesting transaction status", "count", len(txHashes))
|
||||
return sendRequest(p.rw, GetTxStatusMsg, reqID, txHashes)
|
||||
return p.sendRequest(GetTxStatusMsg, reqID, txHashes, len(txHashes))
|
||||
}
|
||||
|
||||
// SendTxStatus creates a reply with a batch of transactions to be added to the remote transaction pool.
|
||||
func (p *serverPeer) sendTxs(reqID uint64, txs rlp.RawValue) error {
|
||||
p.Log().Debug("Sending batch of transactions", "size", len(txs))
|
||||
return sendRequest(p.rw, SendTxV2Msg, reqID, txs)
|
||||
func (p *serverPeer) sendTxs(reqID uint64, amount int, txs rlp.RawValue) error {
|
||||
p.Log().Debug("Sending batch of transactions", "amount", amount, "size", len(txs))
|
||||
sizeFactor := (len(txs) + txSizeCostLimit/2) / txSizeCostLimit
|
||||
if sizeFactor > amount {
|
||||
amount = sizeFactor
|
||||
}
|
||||
return p.sendRequest(SendTxV2Msg, reqID, txs, amount)
|
||||
}
|
||||
|
||||
// waitBefore implements distPeer interface
|
||||
@ -532,6 +546,7 @@ func (p *serverPeer) getTxRelayCost(amount, size int) uint64 {
|
||||
// HasBlock checks if the peer has a given block
|
||||
func (p *serverPeer) HasBlock(hash common.Hash, number uint64, hasState bool) bool {
|
||||
p.lock.RLock()
|
||||
|
||||
head := p.headInfo.Number
|
||||
var since, recent uint64
|
||||
if hasState {
|
||||
@ -630,6 +645,87 @@ func (p *serverPeer) Handshake(td *big.Int, head common.Hash, headNum uint64, ge
|
||||
})
|
||||
}
|
||||
|
||||
// setValueTracker sets the value tracker references for connected servers. Note that the
|
||||
// references should be removed upon disconnection by setValueTracker(nil, nil).
|
||||
func (p *serverPeer) setValueTracker(vt *lpc.ValueTracker, nvt *lpc.NodeValueTracker) {
|
||||
p.vtLock.Lock()
|
||||
p.valueTracker = vt
|
||||
p.nodeValueTracker = nvt
|
||||
if nvt != nil {
|
||||
p.sentReqs = make(map[uint64]sentReqEntry)
|
||||
} else {
|
||||
p.sentReqs = nil
|
||||
}
|
||||
p.vtLock.Unlock()
|
||||
}
|
||||
|
||||
// updateVtParams updates the server's price table in the value tracker.
|
||||
func (p *serverPeer) updateVtParams() {
|
||||
p.vtLock.Lock()
|
||||
defer p.vtLock.Unlock()
|
||||
|
||||
if p.nodeValueTracker == nil {
|
||||
return
|
||||
}
|
||||
reqCosts := make([]uint64, len(requestList))
|
||||
for code, costs := range p.fcCosts {
|
||||
if m, ok := requestMapping[uint32(code)]; ok {
|
||||
reqCosts[m.first] = costs.baseCost + costs.reqCost
|
||||
if m.rest != -1 {
|
||||
reqCosts[m.rest] = costs.reqCost
|
||||
}
|
||||
}
|
||||
}
|
||||
p.valueTracker.UpdateCosts(p.nodeValueTracker, reqCosts)
|
||||
}
|
||||
|
||||
// sentReqEntry remembers sent requests and their sending times
|
||||
type sentReqEntry struct {
|
||||
reqType, amount uint32
|
||||
at mclock.AbsTime
|
||||
}
|
||||
|
||||
// sentRequest marks a request sent at the current moment to this server.
|
||||
func (p *serverPeer) sentRequest(id uint64, reqType, amount uint32) {
|
||||
p.vtLock.Lock()
|
||||
if p.sentReqs != nil {
|
||||
p.sentReqs[id] = sentReqEntry{reqType, amount, mclock.Now()}
|
||||
}
|
||||
p.vtLock.Unlock()
|
||||
}
|
||||
|
||||
// answeredRequest marks a request answered at the current moment by this server.
|
||||
func (p *serverPeer) answeredRequest(id uint64) {
|
||||
p.vtLock.Lock()
|
||||
if p.sentReqs == nil {
|
||||
p.vtLock.Unlock()
|
||||
return
|
||||
}
|
||||
e, ok := p.sentReqs[id]
|
||||
delete(p.sentReqs, id)
|
||||
vt := p.valueTracker
|
||||
nvt := p.nodeValueTracker
|
||||
p.vtLock.Unlock()
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
var (
|
||||
vtReqs [2]lpc.ServedRequest
|
||||
reqCount int
|
||||
)
|
||||
m := requestMapping[e.reqType]
|
||||
if m.rest == -1 || e.amount <= 1 {
|
||||
reqCount = 1
|
||||
vtReqs[0] = lpc.ServedRequest{ReqType: uint32(m.first), Amount: e.amount}
|
||||
} else {
|
||||
reqCount = 2
|
||||
vtReqs[0] = lpc.ServedRequest{ReqType: uint32(m.first), Amount: 1}
|
||||
vtReqs[1] = lpc.ServedRequest{ReqType: uint32(m.rest), Amount: e.amount - 1}
|
||||
}
|
||||
dt := time.Duration(mclock.Now() - e.at)
|
||||
vt.Served(nvt, vtReqs[:reqCount], dt)
|
||||
}
|
||||
|
||||
// clientPeer represents each node to which the les server is connected.
|
||||
// The node here refers to the light client.
|
||||
type clientPeer struct {
|
||||
|
Reference in New Issue
Block a user