les: wait for all task goroutines before dropping the peer (#20010)
* les: wait all task routines before drop the peer
* les: address comments
* les: fix issue
Committed by: Péter Szilágyi
Parent: a978adfd7c
Commit: 68502595f6
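The title names the behavioural change on the serving side: the task goroutines spawned for a peer are now waited for before the peer is dropped. The client-pool diff below is only part of that change; as a rough illustration of the pattern in the title, a handler can track its per-peer task goroutines with a sync.WaitGroup and drop the peer only once they have all returned. A minimal sketch, with hypothetical handler/serve/dropPeer names that are not taken from this commit:

```go
// Minimal sketch of "wait for all task goroutines before dropping the peer";
// the handler type and dropPeer function are illustrative stand-ins.
package main

import "sync"

type handler struct {
	tasks sync.WaitGroup // counts in-flight task goroutines for one peer
}

// serve launches one goroutine per task and, on exit, waits for all of them
// before the peer is finally dropped.
func (h *handler) serve(peer string, taskFns []func()) {
	defer func() {
		h.tasks.Wait() // wait for every task goroutine to finish...
		dropPeer(peer) // ...and only then drop the peer
	}()
	for _, fn := range taskFns {
		h.tasks.Add(1)
		go func(fn func()) {
			defer h.tasks.Done()
			fn()
		}(fn)
	}
}

func dropPeer(peer string) { /* unregister from the peer set, close the connection */ }

func main() {
	h := new(handler)
	h.serve("peer-1", []func(){func() {}, func() {}})
}
```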
@@ -181,52 +181,53 @@ func (f *clientPool) stop() {
 	f.lock.Unlock()
 }
 
-// registerPeer implements peerSetNotify
-func (f *clientPool) registerPeer(p *peer) {
-	c := f.connect(p, 0)
-	if c != nil {
-		p.balanceTracker = &c.balanceTracker
-	}
-}
-
 // connect should be called after a successful handshake. If the connection was
 // rejected, there is no need to call disconnect.
-func (f *clientPool) connect(peer clientPeer, capacity uint64) *clientInfo {
+func (f *clientPool) connect(peer clientPeer, capacity uint64) bool {
 	f.lock.Lock()
 	defer f.lock.Unlock()
 
 	// Short circuit is clientPool is already closed.
 	if f.closed {
-		return nil
+		return false
 	}
-	address := peer.freeClientId()
-	id := peer.ID()
-	idStr := peerIdToString(id)
+	// Dedup connected peers.
+	id, freeID := peer.ID(), peer.freeClientId()
 	if _, ok := f.connectedMap[id]; ok {
 		clientRejectedMeter.Mark(1)
-		log.Debug("Client already connected", "address", address, "id", idStr)
-		return nil
+		log.Debug("Client already connected", "address", freeID, "id", peerIdToString(id))
+		return false
 	}
+	// Create a clientInfo but do not add it yet
 	now := f.clock.Now()
-	// create a clientInfo but do not add it yet
-	e := &clientInfo{pool: f, peer: peer, address: address, queueIndex: -1, id: id}
 	posBalance := f.getPosBalance(id).value
-	e.priority = posBalance != 0
+	e := &clientInfo{pool: f, peer: peer, address: freeID, queueIndex: -1, id: id, priority: posBalance != 0}
+
 	var negBalance uint64
-	nb := f.negBalanceMap[address]
+	nb := f.negBalanceMap[freeID]
 	if nb != nil {
 		negBalance = uint64(math.Exp(float64(nb.logValue-f.logOffset(now)) / fixedPointMultiplier))
 	}
+	// If the client is a free client, assign with a low free capacity,
+	// Otherwise assign with the given value(priority client)
 	if !e.priority {
 		capacity = f.freeClientCap
 	}
-	// check whether it fits into connectedQueue
+	// Ensure the capacity will never lower than the free capacity.
 	if capacity < f.freeClientCap {
 		capacity = f.freeClientCap
 	}
 	e.capacity = capacity
+
 	e.balanceTracker.init(f.clock, capacity)
 	e.balanceTracker.setBalance(posBalance, negBalance)
 	f.setClientPriceFactors(e)
+
+	// If the number of clients already connected in the clientpool exceeds its
+	// capacity, evict some clients with lowest priority.
+	//
+	// If the priority of the newly added client is lower than the priority of
+	// all connected clients, the client is rejected.
 	newCapacity := f.connectedCapacity + capacity
 	newCount := f.connectedQueue.Size() + 1
 	if newCapacity > f.capacityLimit || newCount > f.countLimit {
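connect now reports acceptance as a plain bool instead of returning a *clientInfo, so a caller no longer pulls state out of the return value (the old registerPeer copied the balance tracker from it); the pool keeps that state internally. A hedged sketch of a caller of the new shape, with stand-in types rather than the ones in this diff:

```go
// Stand-in sketch of a caller of the bool-returning connect; the pool type
// and its fields here are hypothetical, not the ones changed in this diff.
package main

import "fmt"

type pool struct{ closed bool }

// connect mirrors the new shape of the API: it reports acceptance with a
// bool rather than handing back per-client state.
func (f *pool) connect(id string, capacity uint64) bool {
	if f.closed {
		return false // pool already closed: reject
	}
	// ... deduplication, capacity and priority checks elided ...
	return true
}

func main() {
	f := &pool{}
	if !f.connect("peer-1", 0) {
		// Rejected: nothing was registered, so disconnect must not be called.
		fmt.Println("peer rejected")
		return
	}
	fmt.Println("peer accepted")
}
```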
@@ -248,8 +249,8 @@ func (f *clientPool) connect(peer clientPeer, capacity uint64) *clientInfo {
 				f.connectedQueue.Push(c)
 			}
 			clientRejectedMeter.Mark(1)
-			log.Debug("Client rejected", "address", address, "id", idStr)
-			return nil
+			log.Debug("Client rejected", "address", freeID, "id", peerIdToString(id))
+			return false
 		}
 		// accept new client, drop old ones
 		for _, c := range kickList {
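The rejection path above pushes the tentatively evicted clients back into connectedQueue when the newcomer cannot be admitted. The admission rule spelled out by the comments added in the first hunk (evict lowest-priority clients while over the limit, reject if the newcomer still does not fit) can be sketched roughly as follows; the prioQueue and client types are stand-ins, not the pool's actual data structures:

```go
// Rough sketch of "evict lowest-priority clients, or push them back and
// reject", using only a capacity limit; illustrative types and names.
package main

import (
	"fmt"
	"sort"
)

type client struct {
	id       string
	capacity uint64
	priority int64
}

type prioQueue struct{ clients []client } // kept sorted, lowest priority first

func (q *prioQueue) popLowest() client {
	c := q.clients[0]
	q.clients = q.clients[1:]
	return c
}

// admit reports whether newc fits under capacityLimit, evicting clients with
// lower priority if needed; evicted clients are returned for dropping.
func admit(q *prioQueue, used, capacityLimit uint64, newc client) (bool, []client) {
	var kicked []client
	for used+newc.capacity > capacityLimit && len(q.clients) > 0 && q.clients[0].priority < newc.priority {
		c := q.popLowest()
		kicked = append(kicked, c)
		used -= c.capacity
	}
	if used+newc.capacity > capacityLimit {
		// Still does not fit: restore the queue and reject the newcomer.
		q.clients = append(kicked, q.clients...)
		sort.Slice(q.clients, func(i, j int) bool { return q.clients[i].priority < q.clients[j].priority })
		return false, nil
	}
	return true, kicked
}

func main() {
	q := &prioQueue{clients: []client{{"a", 100, 1}, {"b", 100, 5}}}
	ok, kicked := admit(q, 200, 250, client{"c", 100, 3})
	fmt.Println(ok, kicked) // true, client "a" is evicted to make room
}
```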
@@ -258,7 +259,7 @@ func (f *clientPool) connect(peer clientPeer, capacity uint64) *clientInfo {
 	}
 	// client accepted, finish setting it up
 	if nb != nil {
-		delete(f.negBalanceMap, address)
+		delete(f.negBalanceMap, freeID)
 		f.negBalanceQueue.Remove(nb.queueIndex)
 	}
 	if e.priority {
@@ -272,13 +273,8 @@ func (f *clientPool) connect(peer clientPeer, capacity uint64) *clientInfo {
 		e.peer.updateCapacity(e.capacity)
 	}
 	clientConnectedMeter.Mark(1)
-	log.Debug("Client accepted", "address", address)
-	return e
-}
-
-// unregisterPeer implements peerSetNotify
-func (f *clientPool) unregisterPeer(p *peer) {
-	f.disconnect(p)
+	log.Debug("Client accepted", "address", freeID)
+	return true
 }
 
 // disconnect should be called when a connection is terminated. If the disconnection
@@ -378,6 +374,18 @@ func (f *clientPool) setLimits(count int, totalCap uint64) {
 	})
 }
 
+// requestCost feeds request cost after serving a request from the given peer.
+func (f *clientPool) requestCost(p *peer, cost uint64) {
+	f.lock.Lock()
+	defer f.lock.Unlock()
+
+	info, exist := f.connectedMap[p.ID()]
+	if !exist || f.closed {
+		return
+	}
+	info.balanceTracker.requestCost(cost)
+}
+
 // logOffset calculates the time-dependent offset for the logarithmic
 // representation of negative balance
 func (f *clientPool) logOffset(now mclock.AbsTime) int64 {
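The negative balance recovered in connect above, negBalance = exp((nb.logValue - f.logOffset(now)) / fixedPointMultiplier), relies on the logarithmic representation that logOffset maintains: balances are stored as fixed-point logarithms plus a time-dependent offset, so the recovered linear value decays exponentially as the offset advances. A small self-contained illustration, with an assumed fixed-point scale and decay time constant rather than the pool's actual constants:

```go
// Illustration of a logarithmic, time-offset balance encoding:
// store(v, t) = log(v)*M + logOffset(t), recover = exp((stored - logOffset(now))/M).
// The constants below are assumed values for the example only.
package main

import (
	"fmt"
	"math"
)

const (
	fixedPointMultiplier = 0x1000000 // assumed fixed-point scale
	decayTimeConstant    = 3600.0    // assumed decay time constant, in seconds
)

// logOffset grows linearly with time, so recovered balances shrink by a
// factor of e every decayTimeConstant seconds.
func logOffset(seconds float64) int64 {
	return int64(seconds / decayTimeConstant * fixedPointMultiplier)
}

func store(value, at float64) int64 {
	return int64(math.Log(value)*fixedPointMultiplier) + logOffset(at)
}

func recoverBalance(logValue int64, now float64) float64 {
	return math.Exp(float64(logValue-logOffset(now)) / fixedPointMultiplier)
}

func main() {
	lv := store(1000, 0) // store a balance of 1000 at t = 0
	fmt.Printf("t=0s:    %.0f\n", recoverBalance(lv, 0))    // ≈ 1000
	fmt.Printf("t=3600s: %.0f\n", recoverBalance(lv, 3600)) // ≈ 1000/e ≈ 368
	fmt.Printf("t=7200s: %.0f\n", recoverBalance(lv, 7200)) // ≈ 1000/e² ≈ 135
}
```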