les: move client pool to les/vflux/server (#22495)
* les: move client pool to les/vflux/server
* les/vflux/server: un-expose NodeBalance, remove unused fn, fix bugs
* tests/fuzzers/vflux: add ClientPool fuzzer
* les/vflux/server: fixed balance tests
* les: rebase fix
* les/vflux/server: fixed more bugs
* les/vflux/server: unexported NodeStateMachine fields and flags
* les/vflux/server: unexport all internal components and functions
* les/vflux/server: fixed priorityPool test
* les/vflux/server: polish balance
* les/vflux/server: fixed mutex locking error
* les/vflux/server: priorityPool bug fixed
* common/prque: make Prque wrap-around priority handling optional
* les/vflux/server: rename funcs, small optimizations
* les/vflux/server: fixed timeUntil
* les/vflux/server: separated balance.posValue and negValue
* les/vflux/server: polish setup
* les/vflux/server: enforce capacity curve monotonicity
* les/vflux/server: simplified requestCapacity
* les/vflux/server: requestCapacity with target range, no iterations in SetCapacity
* les/vflux/server: minor changes
* les/vflux/server: moved default factors to balanceTracker
* les/vflux/server: set inactiveFlag in priorityPool
* les/vflux/server: moved related metrics to vfs package
* les/vflux/client: make priorityPool temp state logic cleaner
* les/vflux/server: changed log.Crit to log.Error
* add vflux fuzzer to oss-fuzz

Co-authored-by: rjl493456442 <garyrong0905@gmail.com>
les/peer.go | 268 +++++++++++++++++++++++++++++++++++++++++++++++----------
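The methods renamed and exported in this diff (FreeClientId, InactiveAllowance, UpdateCapacity, Disconnect) are the callbacks the moved client pool drives on a peer. A minimal sketch of the interface they satisfy, reconstructed from the signatures visible below — the interface name and the Node() accessor are assumptions, not confirmed by this file:

package vfsdemo

import (
	"time"

	"github.com/ethereum/go-ethereum/p2p/enode"
)

// Sketch of the peer interface expected by the vflux server-side client pool.
// Method set reconstructed from this diff; the name and Node() are assumed.
type clientPoolPeer interface {
	Node() *enode.Node                            // assumed: identity used as the pool key
	FreeClientId() string                         // free-mode identifier (remote IP or peer id)
	InactiveAllowance() time.Duration             // grace period in inactive state (0 for les/4)
	UpdateCapacity(newCap uint64, requested bool) // non-blocking capacity change callback
	Disconnect()                                  // pool-initiated disconnection
}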
@@ -17,6 +17,7 @@
 package les
 
 import (
+	"crypto/ecdsa"
 	"errors"
 	"fmt"
 	"math/big"
@@ -37,6 +38,7 @@ import (
+	vfs "github.com/ethereum/go-ethereum/les/vflux/server"
 	"github.com/ethereum/go-ethereum/light"
 	"github.com/ethereum/go-ethereum/p2p"
 	"github.com/ethereum/go-ethereum/p2p/enode"
 	"github.com/ethereum/go-ethereum/params"
 	"github.com/ethereum/go-ethereum/rlp"
 )
@@ -762,15 +764,22 @@ type clientPeer struct {
 	responseLock  sync.Mutex
 	responseCount uint64 // Counter to generate a unique id for request processing.
 
-	balance *vfs.NodeBalance
+	balance vfs.ConnectedBalance
 
 	// invalidLock is used for protecting invalidCount.
 	invalidLock  sync.RWMutex
 	invalidCount utils.LinearExpiredValue // Counts the invalid requests the client peer has made.
 
-	server   bool
-	errCh    chan error
-	fcClient *flowcontrol.ClientNode // Server side mirror token bucket.
+	capacity uint64
+	// lastAnnounce is the last broadcast created by the server; may be newer than the last head
+	// sent to the specific client (stored in headInfo) if capacity is zero. In this case the
+	// latest head is sent when the client gains non-zero capacity.
+	lastAnnounce announceData
 
 	connectedAt mclock.AbsTime
+	server      bool
+	errCh       chan error
+	fcClient    *flowcontrol.ClientNode // Server side mirror token bucket.
 }
 
 func newClientPeer(version int, network uint64, p *p2p.Peer, rw p2p.MsgReadWriter) *clientPeer {
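The balance field narrows from the concrete *vfs.NodeBalance to an interface. Based on the operations a les server needs from it, the interface plausibly looks like the sketch below — every method name here is an assumption, not something this hunk confirms:

package vfsdemo

// Assumed shape of vfs.ConnectedBalance; reconstructed, not quoted from this commit.
// PriceFactors is likewise assumed to be the vfs pricing type.
type PriceFactors struct {
	TimeFactor, CapacityFactor, RequestFactor float64 // assumed field names
}

type ConnectedBalance interface {
	GetBalance() (pos, neg uint64)                     // current positive/negative service balance
	SetPriceFactors(posFactor, negFactor PriceFactors) // how connection time and requests are priced
	RequestServed(cost uint64) uint64                  // charge one served request, return new balance
}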
@@ -789,9 +798,9 @@ func newClientPeer(version int, network uint64, p *p2p.Peer, rw p2p.MsgReadWrite
 	}
 }
 
-// freeClientId returns a string identifier for the peer. Multiple peers with
+// FreeClientId returns a string identifier for the peer. Multiple peers with
 // the same identifier can not be connected in free mode simultaneously.
-func (p *clientPeer) freeClientId() string {
+func (p *clientPeer) FreeClientId() string {
 	if addr, ok := p.RemoteAddr().(*net.TCPAddr); ok {
 		if addr.IP.IsLoopback() {
 			// using peer id instead of loopback ip address allows multiple free
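The hunk is cut off by the diff context. A plausible completion of FreeClientId under the rule its comment states (free-mode identity is the remote IP, or the peer id for loopback) — the tail below is an assumption, not part of this commit's visible hunk:

func (p *clientPeer) FreeClientId() string {
	if addr, ok := p.RemoteAddr().(*net.TCPAddr); ok {
		if addr.IP.IsLoopback() {
			// using peer id instead of loopback ip address allows multiple free
			// connections from the same host
			return p.id
		}
		return addr.IP.String() // assumed: one free-mode slot per remote IP
	}
	return p.id // assumed fallback for non-TCP transports
}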
@@ -921,25 +930,69 @@ func (p *clientPeer) sendAnnounce(request announceData) error {
 	return p2p.Send(p.rw, AnnounceMsg, request)
 }
 
-// allowInactive implements clientPoolPeer
-func (p *clientPeer) allowInactive() bool {
-	return false
+// InactiveAllowance implements vfs.clientPeer
+func (p *clientPeer) InactiveAllowance() time.Duration {
+	return 0 // will return more than zero for les/5 clients
 }
 
-// updateCapacity updates the request serving capacity assigned to a given client
-// and also sends an announcement about the updated flow control parameters
-func (p *clientPeer) updateCapacity(cap uint64) {
+// getCapacity returns the current capacity of the peer
+func (p *clientPeer) getCapacity() uint64 {
+	p.lock.RLock()
+	defer p.lock.RUnlock()
+
+	return p.capacity
+}
+
+// UpdateCapacity updates the request serving capacity assigned to a given client
+// and also sends an announcement about the updated flow control parameters.
+// Note: UpdateCapacity implements vfs.clientPeer and should not block. The requested
+// parameter is true if the callback was initiated by ClientPool.SetCapacity on the given peer.
+func (p *clientPeer) UpdateCapacity(newCap uint64, requested bool) {
 	p.lock.Lock()
 	defer p.lock.Unlock()
 
-	if cap != p.fcParams.MinRecharge {
-		p.fcParams = flowcontrol.ServerParams{MinRecharge: cap, BufLimit: cap * bufLimitRatio}
+	if newCap != p.fcParams.MinRecharge {
+		p.fcParams = flowcontrol.ServerParams{MinRecharge: newCap, BufLimit: newCap * bufLimitRatio}
 		p.fcClient.UpdateParams(p.fcParams)
 		var kvList keyValueList
-		kvList = kvList.add("flowControl/MRR", cap)
-		kvList = kvList.add("flowControl/BL", cap*bufLimitRatio)
+		kvList = kvList.add("flowControl/MRR", newCap)
+		kvList = kvList.add("flowControl/BL", newCap*bufLimitRatio)
 		p.queueSend(func() { p.sendAnnounce(announceData{Update: kvList}) })
 	}
+
+	if p.capacity == 0 && newCap != 0 {
+		p.sendLastAnnounce()
+	}
+	p.capacity = newCap
 }
+
+// announceOrStore sends the given head announcement to the client if the client is
+// active (capacity != 0) and the same announcement hasn't been sent before. If the
+// client is inactive the announcement is stored and sent later if the client is
+// activated again.
+func (p *clientPeer) announceOrStore(announce announceData) {
+	p.lock.Lock()
+	defer p.lock.Unlock()
+
+	p.lastAnnounce = announce
+	if p.capacity != 0 {
+		p.sendLastAnnounce()
+	}
+}
+
+// sendLastAnnounce sends the stored head announcement to the client if it hasn't
+// been sent before
+func (p *clientPeer) sendLastAnnounce() {
+	if p.lastAnnounce.Td == nil {
+		return
+	}
+	if p.headInfo.Td == nil || p.lastAnnounce.Td.Cmp(p.headInfo.Td) > 0 {
+		if !p.queueSend(func() { p.sendAnnounce(p.lastAnnounce) }) {
+			p.Log().Debug("Dropped announcement because queue is full", "number", p.lastAnnounce.Number, "hash", p.lastAnnounce.Hash)
+		} else {
+			p.Log().Debug("Sent announcement", "number", p.lastAnnounce.Number, "hash", p.lastAnnounce.Hash)
+		}
+		p.headInfo = blockInfo{Hash: p.lastAnnounce.Hash, Number: p.lastAnnounce.Number, Td: p.lastAnnounce.Td}
+	}
+}
 
 // freezeClient temporarily puts the client in a frozen state which means all
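The store-and-replay logic above (UpdateCapacity plus announceOrStore plus sendLastAnnounce) is a small pattern worth isolating: announcements to an inactive (zero-capacity) client are buffered, and only the newest one is flushed when capacity returns. A self-contained, runnable sketch with hypothetical types — not the les types themselves:

package main

import (
	"fmt"
	"sync"
)

// head is a stand-in for announceData: a higher seq means a newer head.
type head struct{ seq uint64 }

// announcer buffers the newest head while inactive and flushes it on activation.
type announcer struct {
	mu       sync.Mutex
	capacity uint64
	last     *head // newest head produced by the server
	sent     *head // newest head actually delivered
}

// announceOrStore records the head and delivers it only if the peer is active.
func (a *announcer) announceOrStore(h head) {
	a.mu.Lock()
	defer a.mu.Unlock()
	a.last = &h
	if a.capacity != 0 {
		a.sendLast()
	}
}

// setCapacity mirrors UpdateCapacity: a 0 -> non-zero transition replays the stored head.
func (a *announcer) setCapacity(c uint64) {
	a.mu.Lock()
	defer a.mu.Unlock()
	if a.capacity == 0 && c != 0 {
		a.sendLast()
	}
	a.capacity = c
}

// sendLast delivers the stored head if it is newer than the last delivered one.
func (a *announcer) sendLast() {
	if a.last == nil {
		return
	}
	if a.sent == nil || a.last.seq > a.sent.seq {
		fmt.Println("announce", a.last.seq)
		a.sent = a.last
	}
}

func main() {
	a := &announcer{}
	a.announceOrStore(head{seq: 1}) // inactive: stored, not sent
	a.announceOrStore(head{seq: 2}) // newer head replaces the stored one
	a.setCapacity(100)              // activation flushes only seq 2
}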
@@ -1064,6 +1117,11 @@ func (p *clientPeer) getInvalid() uint64 {
 	return p.invalidCount.Value(mclock.Now())
 }
 
+// Disconnect implements vfs.clientPeer
+func (p *clientPeer) Disconnect() {
+	p.Peer.Disconnect(p2p.DiscRequested)
+}
+
 // serverPeerSubscriber is an interface to notify services about added or
 // removed server peers
 type serverPeerSubscriber interface {
@@ -1221,3 +1279,181 @@ func (ps *serverPeerSet) close() {
 	}
 	ps.closed = true
 }
+
+// clientPeerSet represents the set of active client peers currently
+// participating in the Light Ethereum sub-protocol.
+type clientPeerSet struct {
+	peers  map[enode.ID]*clientPeer
+	lock   sync.RWMutex
+	closed bool
+
+	privateKey                   *ecdsa.PrivateKey
+	lastAnnounce, signedAnnounce announceData
+}
+
+// newClientPeerSet creates a new peer set to track the client peers.
+func newClientPeerSet() *clientPeerSet {
+	return &clientPeerSet{peers: make(map[enode.ID]*clientPeer)}
+}
+
+// register adds a new peer into the peer set, or returns an error if the
+// peer is already known.
+func (ps *clientPeerSet) register(peer *clientPeer) error {
+	ps.lock.Lock()
+	defer ps.lock.Unlock()
+
+	if ps.closed {
+		return errClosed
+	}
+	if _, exist := ps.peers[peer.ID()]; exist {
+		return errAlreadyRegistered
+	}
+	ps.peers[peer.ID()] = peer
+	ps.announceOrStore(peer)
+	return nil
+}
+
+// unregister removes a remote peer from the peer set, disabling any further
+// actions to/from that particular entity. It also initiates disconnection
+// at the networking layer.
+func (ps *clientPeerSet) unregister(id enode.ID) error {
+	ps.lock.Lock()
+	defer ps.lock.Unlock()
+
+	p, ok := ps.peers[id]
+	if !ok {
+		return errNotRegistered
+	}
+	delete(ps.peers, id)
+	p.Peer.Disconnect(p2p.DiscRequested)
+	return nil
+}
+
+// ids returns a list of all registered peer IDs
+func (ps *clientPeerSet) ids() []enode.ID {
+	ps.lock.RLock()
+	defer ps.lock.RUnlock()
+
+	var ids []enode.ID
+	for id := range ps.peers {
+		ids = append(ids, id)
+	}
+	return ids
+}
+
+// peer retrieves the registered peer with the given id.
+func (ps *clientPeerSet) peer(id enode.ID) *clientPeer {
+	ps.lock.RLock()
+	defer ps.lock.RUnlock()
+
+	return ps.peers[id]
+}
+
+// len returns the current number of peers in the set.
+func (ps *clientPeerSet) len() int {
+	ps.lock.RLock()
+	defer ps.lock.RUnlock()
+
+	return len(ps.peers)
+}
+
+// setSignerKey sets the signer key for signed announcements. Should be called before
+// starting the protocol handler.
+func (ps *clientPeerSet) setSignerKey(privateKey *ecdsa.PrivateKey) {
+	ps.privateKey = privateKey
+}
+
+// broadcast sends the given announcements to all active peers
+func (ps *clientPeerSet) broadcast(announce announceData) {
+	ps.lock.Lock()
+	defer ps.lock.Unlock()
+
+	ps.lastAnnounce = announce
+	for _, peer := range ps.peers {
+		ps.announceOrStore(peer)
+	}
+}
+
+// announceOrStore sends the requested type of announcement to the given peer or stores
+// it for later if the peer is inactive (capacity == 0).
+func (ps *clientPeerSet) announceOrStore(p *clientPeer) {
+	if ps.lastAnnounce.Td == nil {
+		return
+	}
+	switch p.announceType {
+	case announceTypeSimple:
+		p.announceOrStore(ps.lastAnnounce)
+	case announceTypeSigned:
+		if ps.signedAnnounce.Hash != ps.lastAnnounce.Hash {
+			ps.signedAnnounce = ps.lastAnnounce
+			ps.signedAnnounce.sign(ps.privateKey)
+		}
+		p.announceOrStore(ps.signedAnnounce)
+	}
+}
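Worth noting in announceOrStore above: the signed announcement is memoized by head hash, so the ECDSA signature is produced once per new head no matter how many signed-announce clients are registered. The same idea in isolation — signedCache and get are hypothetical names; announceData and its in-place sign method are the les types used in the hunk above:

package vfsdemo

import (
	"crypto/ecdsa"

	"github.com/ethereum/go-ethereum/common"
)

// Hypothetical memoized signer: recompute the signature only when the head changes.
type signedCache struct {
	lastHash common.Hash  // head hash we last signed
	signed   announceData // cached signed copy
}

func (c *signedCache) get(latest announceData, key *ecdsa.PrivateKey) announceData {
	if c.lastHash != latest.Hash {
		c.signed = latest
		c.signed.sign(key) // assumed: sign attaches the signature in place, as in the diff
		c.lastHash = latest.Hash
	}
	return c.signed
}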
+
+// close disconnects all peers. No new peers can be registered
+// after close has returned.
+func (ps *clientPeerSet) close() {
+	ps.lock.Lock()
+	defer ps.lock.Unlock()
+
+	for _, p := range ps.peers {
+		p.Peer.Disconnect(p2p.DiscQuitting)
+	}
+	ps.closed = true
+}
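A hypothetical wiring sketch for the set added above: install the signer key before the protocol handler starts, register peers as they connect, broadcast new heads, close on shutdown. The method names come from this diff; the surrounding harness and the duplicate-peer reaction are invented:

// Hypothetical usage; serverKey, peer and announce come from the caller.
func runClientPeerSet(serverKey *ecdsa.PrivateKey, peer *clientPeer, announce announceData) {
	ps := newClientPeerSet()
	ps.setSignerKey(serverKey) // must happen before the protocol handler starts
	defer ps.close()           // disconnects all peers; no registration afterwards

	if err := ps.register(peer); err != nil {
		peer.Peer.Disconnect(p2p.DiscAlreadyConnected) // assumed reaction to a duplicate
	}
	ps.broadcast(announce) // delivered now to active peers, stored for inactive ones
}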
+
+// serverSet is a special set which contains all connected les servers.
+// Les servers will also be discovered by the discovery protocol because they
+// also run the LES protocol. We can't drop them: although they are useless
+// to us (the server), they may be useful to other protocols (e.g. eth)
+// running on top of devp2p.
+type serverSet struct {
+	lock   sync.Mutex
+	set    map[string]*clientPeer
+	closed bool
+}
+
+func newServerSet() *serverSet {
+	return &serverSet{set: make(map[string]*clientPeer)}
+}
+
+func (s *serverSet) register(peer *clientPeer) error {
+	s.lock.Lock()
+	defer s.lock.Unlock()
+
+	if s.closed {
+		return errClosed
+	}
+	if _, exist := s.set[peer.id]; exist {
+		return errAlreadyRegistered
+	}
+	s.set[peer.id] = peer
+	return nil
+}
+
+func (s *serverSet) unregister(peer *clientPeer) error {
+	s.lock.Lock()
+	defer s.lock.Unlock()
+
+	if s.closed {
+		return errClosed
+	}
+	if _, exist := s.set[peer.id]; !exist {
+		return errNotRegistered
+	}
+	delete(s.set, peer.id)
+	peer.Peer.Disconnect(p2p.DiscQuitting)
+	return nil
+}
+
+func (s *serverSet) close() {
+	s.lock.Lock()
+	defer s.lock.Unlock()
+
+	for _, p := range s.set {
+		p.Peer.Disconnect(p2p.DiscQuitting)
+	}
+	s.closed = true
+}