les, les/flowcontrol: implement LES/3 (#19329)
@@ -56,11 +56,12 @@ type scheduledUpdate struct {
// (used in server mode only)
type ClientNode struct {
    params         ServerParams
    bufValue       uint64
    bufValue       int64
    lastTime       mclock.AbsTime
    updateSchedule []scheduledUpdate
    sumCost        uint64            // sum of req costs received from this client
    accepted       map[uint64]uint64 // value = sumCost after accepting the given req
    connected      bool
    lock           sync.Mutex
    cm             *ClientManager
    log            *logger
@@ -70,11 +71,12 @@ type ClientNode struct {
// NewClientNode returns a new ClientNode
func NewClientNode(cm *ClientManager, params ServerParams) *ClientNode {
    node := &ClientNode{
        cm:       cm,
        params:   params,
        bufValue: params.BufLimit,
        lastTime: cm.clock.Now(),
        accepted: make(map[uint64]uint64),
        cm:        cm,
        params:    params,
        bufValue:  int64(params.BufLimit),
        lastTime:  cm.clock.Now(),
        accepted:  make(map[uint64]uint64),
        connected: true,
    }
    if keepLogs > 0 {
        node.log = newLogger(keepLogs)
@@ -85,9 +87,55 @@ func NewClientNode(cm *ClientManager, params ServerParams) *ClientNode {

// Disconnect should be called when a client is disconnected
func (node *ClientNode) Disconnect() {
    node.lock.Lock()
    defer node.lock.Unlock()

    node.connected = false
    node.cm.disconnect(node)
}

// BufferStatus returns the current buffer value and limit
func (node *ClientNode) BufferStatus() (uint64, uint64) {
    node.lock.Lock()
    defer node.lock.Unlock()

    if !node.connected {
        return 0, 0
    }
    now := node.cm.clock.Now()
    node.update(now)
    node.cm.updateBuffer(node, 0, now)
    bv := node.bufValue
    if bv < 0 {
        bv = 0
    }
    return uint64(bv), node.params.BufLimit
}

// OneTimeCost subtracts the given amount from the node's buffer.
//
// Note: this call can take the buffer into the negative region internally.
// In this case zero buffer value is returned by exported calls and no requests
// are accepted.
func (node *ClientNode) OneTimeCost(cost uint64) {
    node.lock.Lock()
    defer node.lock.Unlock()

    now := node.cm.clock.Now()
    node.update(now)
    node.bufValue -= int64(cost)
    node.cm.updateBuffer(node, -int64(cost), now)
}

// Freeze notifies the client manager about a client freeze event in which case
// the total capacity allowance is slightly reduced.
func (node *ClientNode) Freeze() {
    node.lock.Lock()
    frozenCap := node.params.MinRecharge
    node.lock.Unlock()
    node.cm.reduceTotalCapacity(frozenCap)
}

// update recalculates the buffer value at a specified time while also performing
// scheduled flow control parameter updates if necessary
func (node *ClientNode) update(now mclock.AbsTime) {
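The new BufferStatus, OneTimeCost and Freeze methods rely on the buffer now being signed: it may dip below zero internally, but exported calls always report it clamped to zero, and no further requests are accepted until it recharges. A minimal standalone sketch of that clamping rule (names and values are illustrative, not part of the package):

package main

import "fmt"

// reportedBuffer mirrors the clamping performed by BufferStatus and
// RequestProcessed above: the internal signed value may be negative,
// the reported value never is.
func reportedBuffer(bufValue int64) uint64 {
    if bufValue < 0 {
        return 0
    }
    return uint64(bufValue)
}

func main() {
    internal := int64(5000)
    internal -= 8000 // a one-time cost larger than the current buffer
    fmt.Println(reportedBuffer(internal)) // prints 0; requests are rejected until recharge
}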
@@ -105,9 +153,9 @@ func (node *ClientNode) recalcBV(now mclock.AbsTime) {
    if now < node.lastTime {
        dt = 0
    }
    node.bufValue += node.params.MinRecharge * dt / uint64(fcTimeConst)
    if node.bufValue > node.params.BufLimit {
        node.bufValue = node.params.BufLimit
    node.bufValue += int64(node.params.MinRecharge * dt / uint64(fcTimeConst))
    if node.bufValue > int64(node.params.BufLimit) {
        node.bufValue = int64(node.params.BufLimit)
    }
    if node.log != nil {
        node.log.add(now, fmt.Sprintf("updated bv=%d MRR=%d BufLimit=%d", node.bufValue, node.params.MinRecharge, node.params.BufLimit))
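The recharge step itself is unchanged in spirit: the buffer grows by MinRecharge units per fcTimeConst of elapsed time and is capped at BufLimit; only the arithmetic now happens on a signed value. A standalone sketch of that step with made-up numbers (the helper below is illustrative, not the package code):

package main

import (
    "fmt"
    "time"
)

// recharge applies the same "MinRecharge per time constant, capped at
// BufLimit" rule as recalcBV above, on a signed buffer value.
func recharge(bufValue int64, bufLimit, minRecharge uint64, dt, timeConst time.Duration) int64 {
    bufValue += int64(minRecharge * uint64(dt) / uint64(timeConst))
    if bufValue > int64(bufLimit) {
        bufValue = int64(bufLimit)
    }
    return bufValue
}

func main() {
    // 300ms of recharging at 1000 units per millisecond, starting from a
    // buffer that went negative
    fmt.Println(recharge(-50000, 1000000, 1000, 300*time.Millisecond, time.Millisecond)) // 250000
}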
@@ -139,11 +187,11 @@ func (node *ClientNode) UpdateParams(params ServerParams) {

// updateParams updates the flow control parameters of the node
func (node *ClientNode) updateParams(params ServerParams, now mclock.AbsTime) {
    diff := params.BufLimit - node.params.BufLimit
    if int64(diff) > 0 {
    diff := int64(params.BufLimit - node.params.BufLimit)
    if diff > 0 {
        node.bufValue += diff
    } else if node.bufValue > params.BufLimit {
        node.bufValue = params.BufLimit
    } else if node.bufValue > int64(params.BufLimit) {
        node.bufValue = int64(params.BufLimit)
    }
    node.cm.updateParams(node, params, now)
}
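When the server renegotiates flow control parameters, a raised BufLimit is credited to the client's buffer immediately, while a lowered one only clamps the buffer to the new limit. A standalone sketch of that rule with illustrative values (note how the unsigned subtraction deliberately wraps and converts to a negative signed diff, exactly as in the code above):

package main

import "fmt"

// applyNewLimit mirrors updateParams: raising the limit credits the
// difference, lowering it clamps the current buffer.
func applyNewLimit(bufValue int64, oldLimit, newLimit uint64) int64 {
    if diff := int64(newLimit - oldLimit); diff > 0 {
        bufValue += diff
    } else if bufValue > int64(newLimit) {
        bufValue = int64(newLimit)
    }
    return bufValue
}

func main() {
    fmt.Println(applyNewLimit(400000, 500000, 800000)) // 700000: limit raised, buffer credited
    fmt.Println(applyNewLimit(400000, 500000, 300000)) // 300000: limit lowered, buffer clamped
}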
@@ -157,14 +205,14 @@ func (node *ClientNode) AcceptRequest(reqID, index, maxCost uint64) (accepted bo

    now := node.cm.clock.Now()
    node.update(now)
    if maxCost > node.bufValue {
    if int64(maxCost) > node.bufValue {
        if node.log != nil {
            node.log.add(now, fmt.Sprintf("rejected reqID=%d bv=%d maxCost=%d", reqID, node.bufValue, maxCost))
            node.log.dump(now)
        }
        return false, maxCost - node.bufValue, 0
        return false, maxCost - uint64(node.bufValue), 0
    }
    node.bufValue -= maxCost
    node.bufValue -= int64(maxCost)
    node.sumCost += maxCost
    if node.log != nil {
        node.log.add(now, fmt.Sprintf("accepted reqID=%d bv=%d maxCost=%d sumCost=%d", reqID, node.bufValue, maxCost, node.sumCost))
@@ -174,19 +222,22 @@ func (node *ClientNode) AcceptRequest(reqID, index, maxCost uint64) (accepted bo
}

// RequestProcessed should be called when the request has been processed
func (node *ClientNode) RequestProcessed(reqID, index, maxCost, realCost uint64) (bv uint64) {
func (node *ClientNode) RequestProcessed(reqID, index, maxCost, realCost uint64) uint64 {
    node.lock.Lock()
    defer node.lock.Unlock()

    now := node.cm.clock.Now()
    node.update(now)
    node.cm.processed(node, maxCost, realCost, now)
    bv = node.bufValue + node.sumCost - node.accepted[index]
    bv := node.bufValue + int64(node.sumCost-node.accepted[index])
    if node.log != nil {
        node.log.add(now, fmt.Sprintf("processed reqID=%d bv=%d maxCost=%d realCost=%d sumCost=%d oldSumCost=%d reportedBV=%d", reqID, node.bufValue, maxCost, realCost, node.sumCost, node.accepted[index], bv))
    }
    delete(node.accepted, index)
    return
    if bv < 0 {
        return 0
    }
    return uint64(bv)
}

// ServerNode is the flow control system's representation of a server
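Taken together, AcceptRequest and RequestProcessed implement the charge-then-refund cycle: the announced maximum cost is deducted up front and the difference to the real cost is credited back, with the return value being the buffer estimate to report to the client. A hedged server-side usage sketch against the go-ethereum module at a revision containing this change (IDs, costs and parameters are made up; peer wiring and error handling omitted):

package main

import (
    "fmt"

    "github.com/ethereum/go-ethereum/common/mclock"
    "github.com/ethereum/go-ethereum/les/flowcontrol"
)

func main() {
    cm := flowcontrol.NewClientManager(flowcontrol.PieceWiseLinear{{0, 100000}}, mclock.System{})
    defer cm.Stop()

    client := flowcontrol.NewClientNode(cm, flowcontrol.ServerParams{BufLimit: 1000000, MinRecharge: 1000})
    defer client.Disconnect()

    // Charge the announced maximum cost when the request arrives...
    accepted, _, _ := client.AcceptRequest(1, 1, 50000) // reqID, index, maxCost
    if !accepted {
        return
    }
    // ...and settle with the real cost once it is known; the return value is
    // the buffer estimate to report back in the reply.
    bv := client.RequestProcessed(1, 1, 50000, 20000)
    fmt.Println("reported buffer value:", bv)
}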
@@ -345,6 +396,28 @@ func (node *ServerNode) ReceivedReply(reqID, bv uint64) {
    }
}

// ResumeFreeze cleans all pending requests and sets the buffer estimate to the
// reported value after resuming from a frozen state
func (node *ServerNode) ResumeFreeze(bv uint64) {
    node.lock.Lock()
    defer node.lock.Unlock()

    for reqID := range node.pending {
        delete(node.pending, reqID)
    }
    now := node.clock.Now()
    node.recalcBLE(now)
    if bv > node.params.BufLimit {
        bv = node.params.BufLimit
    }
    node.bufEstimate = bv
    node.bufRecharge = node.bufEstimate < node.params.BufLimit
    node.lastTime = now
    if node.log != nil {
        node.log.add(now, fmt.Sprintf("unfreeze bv=%d sumCost=%d", bv, node.sumCost))
    }
}

// DumpLogs dumps the event log if logging is used
func (node *ServerNode) DumpLogs() {
    node.lock.Lock()
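On the client side, ResumeFreeze drops every request that was in flight when the server froze and re-seeds the local buffer estimate from the value the server reports, clamped to BufLimit; recharging resumes only while the estimate is below the limit. A standalone sketch of that reset (illustrative only, not the package code):

package main

import "fmt"

// resume mirrors the clamp-and-recharge-flag logic of ResumeFreeze above.
func resume(reportedBV, bufLimit uint64) (bufEstimate uint64, bufRecharge bool) {
    if reportedBV > bufLimit {
        reportedBV = bufLimit
    }
    return reportedBV, reportedBV < bufLimit
}

func main() {
    fmt.Println(resume(1200000, 1000000)) // 1000000 false: estimate clamped, nothing to recharge
    fmt.Println(resume(300000, 1000000))  // 300000 true: recharging resumes
}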
@@ -47,9 +47,9 @@ type cmNodeFields struct {
const FixedPointMultiplier = 1000000

var (
    capFactorDropTC             = 1 / float64(time.Second*10) // time constant for dropping the capacity factor
    capFactorRaiseTC            = 1 / float64(time.Hour)      // time constant for raising the capacity factor
    capFactorRaiseThreshold     = 0.75                        // connected / total capacity ratio threshold for raising the capacity factor
    capacityDropFactor          = 0.1
    capacityRaiseTC             = 1 / (3 * float64(time.Hour)) // time constant for raising the capacity factor
    capacityRaiseThresholdRatio = 1.125                        // total/connected capacity ratio threshold for raising the capacity factor
)

// ClientManager controls the capacity assigned to the clients of a server.
@@ -61,10 +61,14 @@ type ClientManager struct {
    clock     mclock.Clock
    lock      sync.Mutex
    enabledCh chan struct{}
    stop      chan chan struct{}

    curve                                      PieceWiseLinear
    sumRecharge, totalRecharge, totalConnected uint64
    capLogFactor, totalCapacity                float64
    logTotalCap, totalCapacity                 float64
    logTotalCapRaiseLimit                      float64
    minLogTotalCap, maxLogTotalCap             float64
    capacityRaiseThreshold                     uint64
    capLastUpdate                              mclock.AbsTime
    totalCapacityCh                            chan uint64
@@ -106,13 +110,35 @@ func NewClientManager(curve PieceWiseLinear, clock mclock.Clock) *ClientManager
        clock:         clock,
        rcQueue:       prque.New(func(a interface{}, i int) { a.(*ClientNode).queueIndex = i }),
        capLastUpdate: clock.Now(),
        stop:          make(chan chan struct{}),
    }
    if curve != nil {
        cm.SetRechargeCurve(curve)
    }
    go func() {
        // regularly recalculate and update total capacity
        for {
            select {
            case <-time.After(time.Minute):
                cm.lock.Lock()
                cm.updateTotalCapacity(cm.clock.Now(), true)
                cm.lock.Unlock()
            case stop := <-cm.stop:
                close(stop)
                return
            }
        }
    }()
    return cm
}

// Stop stops the client manager
func (cm *ClientManager) Stop() {
    stop := make(chan struct{})
    cm.stop <- stop
    <-stop
}

// SetRechargeCurve updates the recharge curve
func (cm *ClientManager) SetRechargeCurve(curve PieceWiseLinear) {
    cm.lock.Lock()
@@ -120,13 +146,29 @@ func (cm *ClientManager) SetRechargeCurve(curve PieceWiseLinear) {

    now := cm.clock.Now()
    cm.updateRecharge(now)
    cm.updateCapFactor(now, false)
    cm.curve = curve
    if len(curve) > 0 {
        cm.totalRecharge = curve[len(curve)-1].Y
    } else {
        cm.totalRecharge = 0
    }
}

// SetCapacityRaiseThreshold sets a threshold value used for raising capFactor.
// Either if the difference between total allowed and connected capacity is less
// than this threshold or if their ratio is less than capacityRaiseThresholdRatio
// then capFactor is allowed to slowly rise.
func (cm *ClientManager) SetCapacityLimits(min, max, raiseThreshold uint64) {
    if min < 1 {
        min = 1
    }
    cm.minLogTotalCap = math.Log(float64(min))
    if max < 1 {
        max = 1
    }
    cm.maxLogTotalCap = math.Log(float64(max))
    cm.logTotalCap = cm.maxLogTotalCap
    cm.capacityRaiseThreshold = raiseThreshold
    cm.refreshCapacity()
}
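A hedged configuration sketch showing the manager setup calls above used together; the numbers are made up, while the calls and their parameter order are the ones defined in this change:

package main

import (
    "github.com/ethereum/go-ethereum/common/mclock"
    "github.com/ethereum/go-ethereum/les/flowcontrol"
)

func main() {
    // total recharge allowance of 100000 cost units, taken from a
    // one-segment recharge curve
    cm := flowcontrol.NewClientManager(flowcontrol.PieceWiseLinear{{0, 100000}}, mclock.System{})
    defer cm.Stop()

    // totalCapacity may float between min and max; it is only allowed to rise
    // while connected capacity stays within raiseThreshold (or within the
    // capacityRaiseThresholdRatio) of the current limit.
    cm.SetCapacityLimits(10000, 1000000, 50000) // min, max, raiseThreshold
}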
@@ -141,8 +183,9 @@ func (cm *ClientManager) connect(node *ClientNode) {
    node.corrBufValue = int64(node.params.BufLimit)
    node.rcLastIntValue = cm.rcLastIntValue
    node.queueIndex = -1
    cm.updateCapFactor(now, true)
    cm.updateTotalCapacity(now, true)
    cm.totalConnected += node.params.MinRecharge
    cm.updateRaiseLimit()
}

// disconnect should be called when a client is disconnected
@@ -152,8 +195,9 @@ func (cm *ClientManager) disconnect(node *ClientNode) {

    now := cm.clock.Now()
    cm.updateRecharge(cm.clock.Now())
    cm.updateCapFactor(now, true)
    cm.updateTotalCapacity(now, true)
    cm.totalConnected -= node.params.MinRecharge
    cm.updateRaiseLimit()
}

// accepted is called when a request with given maximum cost is accepted.
@@ -174,18 +218,24 @@ func (cm *ClientManager) accepted(node *ClientNode, maxCost uint64, now mclock.A
//
// Note: processed should always be called for all accepted requests
func (cm *ClientManager) processed(node *ClientNode, maxCost, realCost uint64, now mclock.AbsTime) {
    cm.lock.Lock()
    defer cm.lock.Unlock()

    if realCost > maxCost {
        realCost = maxCost
    }
    cm.updateNodeRc(node, int64(maxCost-realCost), &node.params, now)
    if uint64(node.corrBufValue) > node.bufValue {
    cm.updateBuffer(node, int64(maxCost-realCost), now)
}

// updateBuffer recalculates the corrected buffer value, adds the given value to it
// and updates the node's actual buffer value if possible
func (cm *ClientManager) updateBuffer(node *ClientNode, add int64, now mclock.AbsTime) {
    cm.lock.Lock()
    defer cm.lock.Unlock()

    cm.updateNodeRc(node, add, &node.params, now)
    if node.corrBufValue > node.bufValue {
        if node.log != nil {
            node.log.add(now, fmt.Sprintf("corrected bv=%d oldBv=%d", node.corrBufValue, node.bufValue))
        }
        node.bufValue = uint64(node.corrBufValue)
        node.bufValue = node.corrBufValue
    }
}
@@ -195,11 +245,30 @@ func (cm *ClientManager) updateParams(node *ClientNode, params ServerParams, now
    defer cm.lock.Unlock()

    cm.updateRecharge(now)
    cm.updateCapFactor(now, true)
    cm.updateTotalCapacity(now, true)
    cm.totalConnected += params.MinRecharge - node.params.MinRecharge
    cm.updateRaiseLimit()
    cm.updateNodeRc(node, 0, &params, now)
}

// updateRaiseLimit recalculates the limiting value until which logTotalCap
// can be raised when no client freeze events occur
func (cm *ClientManager) updateRaiseLimit() {
    if cm.capacityRaiseThreshold == 0 {
        cm.logTotalCapRaiseLimit = 0
        return
    }
    limit := float64(cm.totalConnected + cm.capacityRaiseThreshold)
    limit2 := float64(cm.totalConnected) * capacityRaiseThresholdRatio
    if limit2 > limit {
        limit = limit2
    }
    if limit < 1 {
        limit = 1
    }
    cm.logTotalCapRaiseLimit = math.Log(limit)
}

// updateRecharge updates the recharge integrator and checks the recharge queue
// for nodes with recently filled buffers
func (cm *ClientManager) updateRecharge(now mclock.AbsTime) {
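updateRaiseLimit caps how far logTotalCap may drift upwards: the limit is the larger of totalConnected + capacityRaiseThreshold and totalConnected * capacityRaiseThresholdRatio, taken in log space. A standalone sketch of that formula with illustrative numbers:

package main

import (
    "fmt"
    "math"
)

// raiseLimit mirrors the formula in updateRaiseLimit above;
// 1.125 stands in for capacityRaiseThresholdRatio.
func raiseLimit(totalConnected, raiseThreshold uint64) float64 {
    limit := float64(totalConnected + raiseThreshold)
    if limit2 := float64(totalConnected) * 1.125; limit2 > limit {
        limit = limit2
    }
    if limit < 1 {
        limit = 1
    }
    return math.Log(limit)
}

func main() {
    fmt.Printf("%.0f\n", math.Exp(raiseLimit(300000, 50000))) // 350000: the absolute threshold wins
    fmt.Printf("%.0f\n", math.Exp(raiseLimit(800000, 50000))) // 900000: the 1.125 ratio wins
}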
@@ -208,9 +277,15 @@ func (cm *ClientManager) updateRecharge(now mclock.AbsTime) {
    // updating is done in multiple steps if node buffers are filled and sumRecharge
    // is decreased before the given target time
    for cm.sumRecharge > 0 {
        bonusRatio := cm.curve.ValueAt(cm.sumRecharge) / float64(cm.sumRecharge)
        if bonusRatio < 1 {
            bonusRatio = 1
        sumRecharge := cm.sumRecharge
        if sumRecharge > cm.totalRecharge {
            sumRecharge = cm.totalRecharge
        }
        bonusRatio := float64(1)
        v := cm.curve.ValueAt(sumRecharge)
        s := float64(sumRecharge)
        if v > s && s > 0 {
            bonusRatio = v / s
        }
        dt := now - lastUpdate
        // fetch the client that finishes first
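The bonus ratio lets a lightly loaded server recharge client buffers faster than MinRecharge: if the recharge curve allows more total recharge than the momentary sumRecharge demands, every currently recharging client gets the surplus proportionally. A standalone sketch using a flat stand-in curve instead of cm.curve.ValueAt (illustrative only):

package main

import "fmt"

// curveValueAt is a one-segment stand-in for cm.curve.ValueAt: the full
// allowance is available regardless of load.
func curveValueAt(sumRecharge, totalRecharge uint64) float64 {
    return float64(totalRecharge)
}

// bonusRatio mirrors the capped-ratio computation in updateRecharge above.
func bonusRatio(sumRecharge, totalRecharge uint64) float64 {
    if sumRecharge > totalRecharge {
        sumRecharge = totalRecharge
    }
    ratio := float64(1)
    v := curveValueAt(sumRecharge, totalRecharge)
    if s := float64(sumRecharge); v > s && s > 0 {
        ratio = v / s
    }
    return ratio
}

func main() {
    fmt.Println(bonusRatio(25000, 100000))  // 4: a quarter of the allowance is in use
    fmt.Println(bonusRatio(150000, 100000)) // 1: recharge demand above the allowance
}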
@@ -228,7 +303,6 @@ func (cm *ClientManager) updateRecharge(now mclock.AbsTime) {
        // finished recharging, update corrBufValue and sumRecharge if necessary and do next step
        if rcqNode.corrBufValue < int64(rcqNode.params.BufLimit) {
            rcqNode.corrBufValue = int64(rcqNode.params.BufLimit)
            cm.updateCapFactor(lastUpdate, true)
            cm.sumRecharge -= rcqNode.params.MinRecharge
        }
        cm.rcLastIntValue = rcqNode.rcFullIntValue
@@ -249,9 +323,6 @@ func (cm *ClientManager) updateNodeRc(node *ClientNode, bvc int64, params *Serve
        node.rcLastIntValue = cm.rcLastIntValue
    }
    node.corrBufValue += bvc
    if node.corrBufValue < 0 {
        node.corrBufValue = 0
    }
    diff := int64(params.BufLimit - node.params.BufLimit)
    if diff > 0 {
        node.corrBufValue += diff
@@ -261,15 +332,14 @@ func (cm *ClientManager) updateNodeRc(node *ClientNode, bvc int64, params *Serve
        node.corrBufValue = int64(params.BufLimit)
        isFull = true
    }
    sumRecharge := cm.sumRecharge
    if !wasFull {
        sumRecharge -= node.params.MinRecharge
        cm.sumRecharge -= node.params.MinRecharge
    }
    if params != &node.params {
        node.params = *params
    }
    if !isFull {
        sumRecharge += node.params.MinRecharge
        cm.sumRecharge += node.params.MinRecharge
        if node.queueIndex != -1 {
            cm.rcQueue.Remove(node.queueIndex)
        }
@@ -277,63 +347,54 @@ func (cm *ClientManager) updateNodeRc(node *ClientNode, bvc int64, params *Serve
        node.rcFullIntValue = cm.rcLastIntValue + (int64(node.params.BufLimit)-node.corrBufValue)*FixedPointMultiplier/int64(node.params.MinRecharge)
        cm.rcQueue.Push(node, -node.rcFullIntValue)
    }
    if sumRecharge != cm.sumRecharge {
        cm.updateCapFactor(now, true)
        cm.sumRecharge = sumRecharge
    }

}

// updateCapFactor updates the total capacity factor. The capacity factor allows
// the total capacity of the system to go over the allowed total recharge value
// if the sum of momentarily recharging clients only exceeds the total recharge
// allowance in a very small fraction of time.
// The capacity factor is dropped quickly (with a small time constant) if sumRecharge
// exceeds totalRecharge. It is raised slowly (with a large time constant) if most
// of the total capacity is used by connected clients (totalConnected is larger than
// totalCapacity*capFactorRaiseThreshold) and sumRecharge stays under
// totalRecharge*totalConnected/totalCapacity.
func (cm *ClientManager) updateCapFactor(now mclock.AbsTime, refresh bool) {
    if cm.totalRecharge == 0 {
        return
// reduceTotalCapacity reduces the total capacity allowance in case of a client freeze event
func (cm *ClientManager) reduceTotalCapacity(frozenCap uint64) {
    cm.lock.Lock()
    defer cm.lock.Unlock()

    ratio := float64(1)
    if frozenCap < cm.totalConnected {
        ratio = float64(frozenCap) / float64(cm.totalConnected)
    }
    now := cm.clock.Now()
    cm.updateTotalCapacity(now, false)
    cm.logTotalCap -= capacityDropFactor * ratio
    if cm.logTotalCap < cm.minLogTotalCap {
        cm.logTotalCap = cm.minLogTotalCap
    }
    cm.updateTotalCapacity(now, true)
}

// updateTotalCapacity updates the total capacity factor. The capacity factor allows
// the total capacity of the system to go over the allowed total recharge value
// if clients go to frozen state sufficiently rarely.
// The capacity factor is dropped instantly by a small amount if a client is frozen.
// It is raised slowly (with a large time constant) if the total connected capacity
// is close to the total allowed amount and no clients are frozen.
func (cm *ClientManager) updateTotalCapacity(now mclock.AbsTime, refresh bool) {
    dt := now - cm.capLastUpdate
    cm.capLastUpdate = now

    var d float64
    if cm.sumRecharge > cm.totalRecharge {
        d = (1 - float64(cm.sumRecharge)/float64(cm.totalRecharge)) * capFactorDropTC
    } else {
        totalConnected := float64(cm.totalConnected)
        var connRatio float64
        if totalConnected < cm.totalCapacity {
            connRatio = totalConnected / cm.totalCapacity
        } else {
            connRatio = 1
        }
        if connRatio > capFactorRaiseThreshold {
            sumRecharge := float64(cm.sumRecharge)
            limit := float64(cm.totalRecharge) * connRatio
            if sumRecharge < limit {
                d = (1 - sumRecharge/limit) * (connRatio - capFactorRaiseThreshold) * (1 / (1 - capFactorRaiseThreshold)) * capFactorRaiseTC
            }
    if cm.logTotalCap < cm.logTotalCapRaiseLimit {
        cm.logTotalCap += capacityRaiseTC * float64(dt)
        if cm.logTotalCap > cm.logTotalCapRaiseLimit {
            cm.logTotalCap = cm.logTotalCapRaiseLimit
        }
    }
    if d != 0 {
        cm.capLogFactor += d * float64(dt)
        if cm.capLogFactor < 0 {
            cm.capLogFactor = 0
        }
        if refresh {
            cm.refreshCapacity()
        }
    if cm.logTotalCap > cm.maxLogTotalCap {
        cm.logTotalCap = cm.maxLogTotalCap
    }
    if refresh {
        cm.refreshCapacity()
    }
}

// refreshCapacity recalculates the total capacity value and sends an update to the subscription
// channel if the relative change of the value since the last update is more than 0.1 percent
func (cm *ClientManager) refreshCapacity() {
    totalCapacity := float64(cm.totalRecharge) * math.Exp(cm.capLogFactor)
    totalCapacity := math.Exp(cm.logTotalCap)
    if totalCapacity >= cm.totalCapacity*0.999 && totalCapacity <= cm.totalCapacity*1.001 {
        return
    }
@@ -63,7 +63,7 @@ func testConstantTotalCapacity(t *testing.T, nodeCount, maxCapacityNodes, random
    }
    m := NewClientManager(PieceWiseLinear{{0, totalCapacity}}, clock)
    for _, n := range nodes {
        n.bufLimit = n.capacity * 6000 //uint64(2000+rand.Intn(10000))
        n.bufLimit = n.capacity * 6000
        n.node = NewClientNode(m, ServerParams{BufLimit: n.bufLimit, MinRecharge: n.capacity})
    }
    maxNodes := make([]int, maxCapacityNodes)
@@ -73,6 +73,7 @@ func testConstantTotalCapacity(t *testing.T, nodeCount, maxCapacityNodes, random
        maxNodes[i] = rand.Intn(nodeCount)
    }

    var sendCount int
    for i := 0; i < testLength; i++ {
        now := clock.Now()
        for _, idx := range maxNodes {
@@ -83,13 +84,15 @@ func testConstantTotalCapacity(t *testing.T, nodeCount, maxCapacityNodes, random
            maxNodes[rand.Intn(maxCapacityNodes)] = rand.Intn(nodeCount)
        }

        sendCount := randomSend
        for sendCount > 0 {
        sendCount += randomSend
        failCount := randomSend * 10
        for sendCount > 0 && failCount > 0 {
            if nodes[rand.Intn(nodeCount)].send(t, now) {
                sendCount--
            } else {
                failCount--
            }
        }

        clock.Run(time.Millisecond)
    }
@@ -117,7 +120,6 @@ func (n *testNode) send(t *testing.T, now mclock.AbsTime) bool {
    if bv < testMaxCost {
        n.waitUntil = now + mclock.AbsTime((testMaxCost-bv)*1001000/n.capacity)
    }
    //n.waitUntil = now + mclock.AbsTime(float64(testMaxCost)*1001000/float64(n.capacity)*(1-float64(bv)/float64(n.bufLimit)))
    n.totalCost += rcost
    return true
}