les: renamed lespay to vflux (#22347)

Felföldi Zsolt
2021-02-19 14:44:16 +01:00
committed by GitHub
parent d36276d85e
commit c027507e03
30 changed files with 80 additions and 80 deletions

les/vflux/server/balance.go

@@ -0,0 +1,608 @@
// Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package server
import (
"errors"
"math"
"sync"
"time"
"github.com/ethereum/go-ethereum/common/mclock"
"github.com/ethereum/go-ethereum/les/utils"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/p2p/nodestate"
)
var errBalanceOverflow = errors.New("balance overflow")
const maxBalance = math.MaxInt64 // maximum allowed balance value
const (
balanceCallbackUpdate = iota // called when priority drops below the last minimum estimate
balanceCallbackZero // called when priority drops to zero (positive balance exhausted)
balanceCallbackCount // total number of balance callbacks
)
// PriceFactors determine the pricing policy (may apply to either positive or
// negative balances, which may have different factors).
// - TimeFactor is cost unit per nanosecond of connection time
// - CapacityFactor is cost unit per nanosecond of connection time per 1000000 capacity
// - RequestFactor is cost unit per request "realCost" unit
type PriceFactors struct {
TimeFactor, CapacityFactor, RequestFactor float64
}
// timePrice returns the price of connection per nanosecond at the given capacity
func (p PriceFactors) timePrice(cap uint64) float64 {
return p.TimeFactor + float64(cap)*p.CapacityFactor/1000000
}
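// timePriceExample is an editor's sketch (not part of the original commit)
// showing how the three factors combine into a total cost for a served
// period: the time component scales with connection time and capacity while
// the request component scales with the accumulated "realCost" units.
func timePriceExample(p PriceFactors, dt time.Duration, cap uint64, realCost float64) float64 {
	return p.timePrice(cap)*float64(dt) + p.RequestFactor*realCost
}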
// NodeBalance keeps track of the positive and negative balances of a connected
// client and calculates actual and projected future priority values.
// Implements nodePriority interface.
type NodeBalance struct {
bt *BalanceTracker
lock sync.RWMutex
node *enode.Node
connAddress string
active bool
priority bool
capacity uint64
balance balance
posFactor, negFactor PriceFactors
sumReqCost uint64
lastUpdate, nextUpdate, initTime mclock.AbsTime
updateEvent mclock.Timer
// since only a limited and fixed number of callbacks are needed, they are
// stored in a fixed size array ordered by priority threshold.
callbacks [balanceCallbackCount]balanceCallback
// callbackIndex maps balanceCallback constants to callbacks array indexes (-1 if not active)
callbackIndex [balanceCallbackCount]int
callbackCount int // number of active callbacks
}
// balance represents a pair of positive and negative balances
type balance struct {
pos, neg utils.ExpiredValue
}
// balanceCallback represents a single callback that is activated when client priority
// reaches the given threshold
type balanceCallback struct {
id int
threshold int64
callback func()
}
// GetBalance returns the current positive and negative balance.
func (n *NodeBalance) GetBalance() (uint64, uint64) {
n.lock.Lock()
defer n.lock.Unlock()
now := n.bt.clock.Now()
n.updateBalance(now)
return n.balance.pos.Value(n.bt.posExp.LogOffset(now)), n.balance.neg.Value(n.bt.negExp.LogOffset(now))
}
// GetRawBalance returns the current positive and negative balance
// but in the raw (expired value) format.
func (n *NodeBalance) GetRawBalance() (utils.ExpiredValue, utils.ExpiredValue) {
n.lock.Lock()
defer n.lock.Unlock()
now := n.bt.clock.Now()
n.updateBalance(now)
return n.balance.pos, n.balance.neg
}
// AddBalance adds the given amount to the positive balance and returns the balance
// before and after the operation. Exceeding maxBalance results in an error (balance is
// unchanged) while adding a negative amount higher than the current balance results in
// zero balance.
func (n *NodeBalance) AddBalance(amount int64) (uint64, uint64, error) {
var (
err error
old, new uint64
)
n.bt.ns.Operation(func() {
var (
callbacks []func()
setPriority bool
)
n.bt.updateTotalBalance(n, func() bool {
now := n.bt.clock.Now()
n.updateBalance(now)
// Ensure the given amount is valid to apply.
offset := n.bt.posExp.LogOffset(now)
old = n.balance.pos.Value(offset)
if amount > 0 && (amount > maxBalance || old > maxBalance-uint64(amount)) {
err = errBalanceOverflow
return false
}
// Update the total positive balance counter.
n.balance.pos.Add(amount, offset)
callbacks = n.checkCallbacks(now)
setPriority = n.checkPriorityStatus()
new = n.balance.pos.Value(offset)
n.storeBalance(true, false)
return true
})
for _, cb := range callbacks {
cb()
}
if setPriority {
n.bt.ns.SetStateSub(n.node, n.bt.PriorityFlag, nodestate.Flags{}, 0)
}
n.signalPriorityUpdate()
})
if err != nil {
return old, old, err
}
return old, new, nil
}
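// Editor's note (not part of the original commit): subtracting more than the
// current balance clamps the result at zero instead of failing, while an
// addition that would push the balance above maxBalance returns
// errBalanceOverflow and leaves the balance untouched. Both behaviors are
// exercised by TestAddBalance in the accompanying test file.
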
// SetBalance sets the positive and negative balance to the given values
func (n *NodeBalance) SetBalance(pos, neg uint64) error {
if pos > maxBalance || neg > maxBalance {
return errBalanceOverflow
}
n.bt.ns.Operation(func() {
var (
callbacks []func()
setPriority bool
)
n.bt.updateTotalBalance(n, func() bool {
now := n.bt.clock.Now()
n.updateBalance(now)
var pb, nb utils.ExpiredValue
pb.Add(int64(pos), n.bt.posExp.LogOffset(now))
nb.Add(int64(neg), n.bt.negExp.LogOffset(now))
n.balance.pos = pb
n.balance.neg = nb
callbacks = n.checkCallbacks(now)
setPriority = n.checkPriorityStatus()
n.storeBalance(true, true)
return true
})
for _, cb := range callbacks {
cb()
}
if setPriority {
n.bt.ns.SetStateSub(n.node, n.bt.PriorityFlag, nodestate.Flags{}, 0)
}
n.signalPriorityUpdate()
})
return nil
}
// RequestServed should be called after serving a request for the given peer
func (n *NodeBalance) RequestServed(cost uint64) uint64 {
n.lock.Lock()
var callbacks []func()
defer func() {
n.lock.Unlock()
if callbacks != nil {
n.bt.ns.Operation(func() {
for _, cb := range callbacks {
cb()
}
})
}
}()
now := n.bt.clock.Now()
n.updateBalance(now)
fcost := float64(cost)
posExp := n.bt.posExp.LogOffset(now)
var check bool
if !n.balance.pos.IsZero() {
if n.posFactor.RequestFactor != 0 {
c := -int64(fcost * n.posFactor.RequestFactor)
cc := n.balance.pos.Add(c, posExp)
if c == cc {
fcost = 0
} else {
fcost *= 1 - float64(cc)/float64(c)
}
check = true
} else {
fcost = 0
}
}
if fcost > 0 {
if n.negFactor.RequestFactor != 0 {
n.balance.neg.Add(int64(fcost*n.negFactor.RequestFactor), n.bt.negExp.LogOffset(now))
check = true
}
}
if check {
callbacks = n.checkCallbacks(now)
}
n.sumReqCost += cost
return n.balance.pos.Value(posExp)
}
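// Editor's note (not part of the original commit): a worked example of the
// partial deduction above. With posFactor.RequestFactor == 1 and a positive
// balance of 30, serving a request of cost 100 attempts c == -100 but only
// cc == -30 can be applied, so fcost becomes 100*(1 - 30/100) == 70 and
// 70*negFactor.RequestFactor is then charged to the negative balance.
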
// Priority returns the actual priority based on the current balance
func (n *NodeBalance) Priority(now mclock.AbsTime, capacity uint64) int64 {
n.lock.Lock()
defer n.lock.Unlock()
n.updateBalance(now)
return n.balanceToPriority(n.balance, capacity)
}
// EstMinPriority gives a lower estimate for the priority at a given time in the future.
// It assumes an average request cost per unit of time that is twice the average cost
// observed in the current session.
// If update is true then a priority callback is added that turns UpdateFlag on and off
// in case the priority goes below the estimated minimum.
func (n *NodeBalance) EstMinPriority(at mclock.AbsTime, capacity uint64, update bool) int64 {
n.lock.Lock()
defer n.lock.Unlock()
var avgReqCost float64
dt := time.Duration(n.lastUpdate - n.initTime)
if dt > time.Second {
avgReqCost = float64(n.sumReqCost) * 2 / float64(dt)
}
pri := n.balanceToPriority(n.reducedBalance(at, capacity, avgReqCost), capacity)
if update {
n.addCallback(balanceCallbackUpdate, pri, n.signalPriorityUpdate)
}
return pri
}
// PosBalanceMissing calculates the missing amount of positive balance in order to
// connect at targetCapacity, stay connected for the given amount of time and then
// still have a priority of targetPriority
func (n *NodeBalance) PosBalanceMissing(targetPriority int64, targetCapacity uint64, after time.Duration) uint64 {
n.lock.Lock()
defer n.lock.Unlock()
now := n.bt.clock.Now()
if targetPriority < 0 {
timePrice := n.negFactor.timePrice(targetCapacity)
timeCost := uint64(float64(after) * timePrice)
negBalance := n.balance.neg.Value(n.bt.negExp.LogOffset(now))
if timeCost+negBalance < uint64(-targetPriority) {
return 0
}
if uint64(-targetPriority) > negBalance && timePrice > 1e-100 {
if negTime := time.Duration(float64(uint64(-targetPriority)-negBalance) / timePrice); negTime < after {
after -= negTime
} else {
after = 0
}
}
targetPriority = 0
}
timePrice := n.posFactor.timePrice(targetCapacity)
posRequired := uint64(float64(targetPriority)*float64(targetCapacity)+float64(after)*timePrice) + 1
if posRequired >= maxBalance {
return math.MaxUint64 // target not reachable
}
posBalance := n.balance.pos.Value(n.bt.posExp.LogOffset(now))
if posRequired > posBalance {
return posRequired - posBalance
}
return 0
}
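// Editor's note (not part of the original commit): with a time price of 1,
// capacity 1, zero current balance, a targetPriority of one second and an
// 'after' period of one second, posRequired is 1e9 + 1e9 + 1 nanosecond
// units, matching the uint64(2*time.Second)+1 expectation in
// TestPosBalanceMissing.
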
// SetPriceFactors sets the price factors. TimeFactor is the price of a nanosecond of
// connection while RequestFactor is the price of a request cost unit.
func (n *NodeBalance) SetPriceFactors(posFactor, negFactor PriceFactors) {
n.lock.Lock()
now := n.bt.clock.Now()
n.updateBalance(now)
n.posFactor, n.negFactor = posFactor, negFactor
callbacks := n.checkCallbacks(now)
n.lock.Unlock()
if callbacks != nil {
n.bt.ns.Operation(func() {
for _, cb := range callbacks {
cb()
}
})
}
}
// GetPriceFactors returns the price factors
func (n *NodeBalance) GetPriceFactors() (posFactor, negFactor PriceFactors) {
n.lock.Lock()
defer n.lock.Unlock()
return n.posFactor, n.negFactor
}
// activate starts time/capacity cost deduction.
func (n *NodeBalance) activate() {
n.bt.updateTotalBalance(n, func() bool {
if n.active {
return false
}
n.active = true
n.lastUpdate = n.bt.clock.Now()
return true
})
}
// deactivate stops time/capacity cost deduction and saves the balances in the database
func (n *NodeBalance) deactivate() {
n.bt.updateTotalBalance(n, func() bool {
if !n.active {
return false
}
n.updateBalance(n.bt.clock.Now())
if n.updateEvent != nil {
n.updateEvent.Stop()
n.updateEvent = nil
}
n.storeBalance(true, true)
n.active = false
return true
})
}
// updateBalance updates balance based on the time factor
func (n *NodeBalance) updateBalance(now mclock.AbsTime) {
if n.active && now > n.lastUpdate {
n.balance = n.reducedBalance(now, n.capacity, 0)
n.lastUpdate = now
}
}
// storeBalance stores the positive and/or negative balance of the node in the database
func (n *NodeBalance) storeBalance(pos, neg bool) {
if pos {
n.bt.storeBalance(n.node.ID().Bytes(), false, n.balance.pos)
}
if neg {
n.bt.storeBalance([]byte(n.connAddress), true, n.balance.neg)
}
}
// addCallback sets up a one-time callback to be called when priority reaches
// the threshold. If it has already reached the threshold the callback is called
// immediately.
// Note: should be called while n.lock is held
// Note 2: the callback function runs inside a NodeStateMachine operation
func (n *NodeBalance) addCallback(id int, threshold int64, callback func()) {
n.removeCallback(id)
idx := 0
for idx < n.callbackCount && threshold > n.callbacks[idx].threshold {
idx++
}
for i := n.callbackCount - 1; i >= idx; i-- {
n.callbackIndex[n.callbacks[i].id]++
n.callbacks[i+1] = n.callbacks[i]
}
n.callbackCount++
n.callbackIndex[id] = idx
n.callbacks[idx] = balanceCallback{id, threshold, callback}
now := n.bt.clock.Now()
n.updateBalance(now)
n.scheduleCheck(now)
}
// removeCallback removes the given callback and returns true if it was active
// Note: should be called while n.lock is held
func (n *NodeBalance) removeCallback(id int) bool {
idx := n.callbackIndex[id]
if idx == -1 {
return false
}
n.callbackIndex[id] = -1
for i := idx; i < n.callbackCount-1; i++ {
n.callbackIndex[n.callbacks[i+1].id]--
n.callbacks[i] = n.callbacks[i+1]
}
n.callbackCount--
return true
}
// checkCallbacks checks whether the threshold of any of the active callbacks
// has been reached and returns the triggered callbacks.
// Note: checkCallbacks assumes that the balance has been recently updated.
func (n *NodeBalance) checkCallbacks(now mclock.AbsTime) (callbacks []func()) {
if n.callbackCount == 0 || n.capacity == 0 {
return
}
pri := n.balanceToPriority(n.balance, n.capacity)
for n.callbackCount != 0 && n.callbacks[n.callbackCount-1].threshold >= pri {
n.callbackCount--
n.callbackIndex[n.callbacks[n.callbackCount].id] = -1
callbacks = append(callbacks, n.callbacks[n.callbackCount].callback)
}
n.scheduleCheck(now)
return
}
// scheduleCheck sets up or updates a scheduled event to ensure that it will be called
// again just after the next threshold has been reached.
func (n *NodeBalance) scheduleCheck(now mclock.AbsTime) {
if n.callbackCount != 0 {
d, ok := n.timeUntil(n.callbacks[n.callbackCount-1].threshold)
if !ok {
n.nextUpdate = 0
n.updateAfter(0)
return
}
if n.nextUpdate == 0 || n.nextUpdate > now+mclock.AbsTime(d) {
if d > time.Second {
// Note: if the scheduled update is not in the very near future then we
// schedule the update a bit earlier. This way we do need to update a few
// extra times but don't need to reschedule every time a processed request
// brings the expected firing time a little bit closer.
d = ((d - time.Second) * 7 / 8) + time.Second
}
n.nextUpdate = now + mclock.AbsTime(d)
n.updateAfter(d)
}
} else {
n.nextUpdate = 0
n.updateAfter(0)
}
}
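// Editor's note (not part of the original commit): the early-scheduling
// formula maps d == 1s to 1s and d == 9s to (8s*7/8)+1s == 8s, so waits
// longer than a second are shortened by an eighth of the excess and the
// timer fires at or slightly before the predicted threshold crossing.
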
// updateAfter schedules a balance update and callback check in the future
func (n *NodeBalance) updateAfter(dt time.Duration) {
if n.updateEvent == nil || n.updateEvent.Stop() {
if dt == 0 {
n.updateEvent = nil
} else {
n.updateEvent = n.bt.clock.AfterFunc(dt, func() {
var callbacks []func()
n.lock.Lock()
if n.callbackCount != 0 {
now := n.bt.clock.Now()
n.updateBalance(now)
callbacks = n.checkCallbacks(now)
}
n.lock.Unlock()
if callbacks != nil {
n.bt.ns.Operation(func() {
for _, cb := range callbacks {
cb()
}
})
}
})
}
}
}
// balanceExhausted should be called when the positive balance is exhausted (priority goes to zero/negative)
// Note: this function should run inside a NodeStateMachine operation
func (n *NodeBalance) balanceExhausted() {
n.lock.Lock()
n.storeBalance(true, false)
n.priority = false
n.lock.Unlock()
n.bt.ns.SetStateSub(n.node, nodestate.Flags{}, n.bt.PriorityFlag, 0)
}
// checkPriorityStatus checks whether the node has gained priority status and sets the priority
// callback and flag if necessary. It assumes that the balance has been recently updated.
// Note that the priority flag has to be set by the caller after the mutex has been released.
func (n *NodeBalance) checkPriorityStatus() bool {
if !n.priority && !n.balance.pos.IsZero() {
n.priority = true
n.addCallback(balanceCallbackZero, 0, func() { n.balanceExhausted() })
return true
}
return false
}
// signalPriorityUpdate signals that the priority fell below the previous minimum estimate
// Note: this function should run inside a NodeStateMachine operation
func (n *NodeBalance) signalPriorityUpdate() {
n.bt.ns.SetStateSub(n.node, n.bt.UpdateFlag, nodestate.Flags{}, 0)
n.bt.ns.SetStateSub(n.node, nodestate.Flags{}, n.bt.UpdateFlag, 0)
}
// setCapacity updates the capacity value used for priority calculation
// Note: capacity should never be zero
// Note 2: this function should run inside a NodeStateMachine operation
func (n *NodeBalance) setCapacity(capacity uint64) {
n.lock.Lock()
now := n.bt.clock.Now()
n.updateBalance(now)
n.capacity = capacity
callbacks := n.checkCallbacks(now)
n.lock.Unlock()
for _, cb := range callbacks {
cb()
}
}
// balanceToPriority converts a balance to a priority value. Lower priority means
// first to disconnect. Positive balance translates to positive priority. If positive
// balance is zero then negative balance translates to a negative priority.
func (n *NodeBalance) balanceToPriority(b balance, capacity uint64) int64 {
if !b.pos.IsZero() {
return int64(b.pos.Value(n.bt.posExp.LogOffset(n.bt.clock.Now())) / capacity)
}
return -int64(b.neg.Value(n.bt.negExp.LogOffset(n.bt.clock.Now())))
}
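// Editor's note (not part of the original commit): with a positive balance
// of 2,000,000 tokens at capacity 1000 the priority is 2000; with no
// positive balance and a negative balance of 500 the priority is -500,
// independent of capacity.
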
// reducedBalance estimates the reduced balance at a given time in the future based
// on the current balance, the time factor and an estimated average request cost per time ratio
func (n *NodeBalance) reducedBalance(at mclock.AbsTime, capacity uint64, avgReqCost float64) balance {
dt := float64(at - n.lastUpdate)
b := n.balance
if !b.pos.IsZero() {
factor := n.posFactor.timePrice(capacity) + n.posFactor.RequestFactor*avgReqCost
diff := -int64(dt * factor)
dd := b.pos.Add(diff, n.bt.posExp.LogOffset(at))
if dd == diff {
dt = 0
} else {
dt += float64(dd) / factor
}
}
if dt > 0 {
factor := n.negFactor.timePrice(capacity) + n.negFactor.RequestFactor*avgReqCost
b.neg.Add(int64(dt*factor), n.bt.negExp.LogOffset(at))
}
return b
}
// timeUntil calculates the remaining time needed to reach a given priority level
// assuming that no requests are processed until then. If the given level is never
// reached then (0, false) is returned.
// Note: the function assumes that the balance has been recently updated and
// calculates the time starting from the last update.
func (n *NodeBalance) timeUntil(priority int64) (time.Duration, bool) {
now := n.bt.clock.Now()
var dt float64
if !n.balance.pos.IsZero() {
posBalance := n.balance.pos.Value(n.bt.posExp.LogOffset(now))
timePrice := n.posFactor.timePrice(n.capacity)
if timePrice < 1e-100 {
return 0, false
}
if priority > 0 {
newBalance := uint64(priority) * n.capacity
if newBalance > posBalance {
return 0, false
}
dt = float64(posBalance-newBalance) / timePrice
return time.Duration(dt), true
}
dt = float64(posBalance) / timePrice
} else {
if priority > 0 {
return 0, false
}
}
// if we have a positive balance then dt equals the time needed to get it to zero
negBalance := n.balance.neg.Value(n.bt.negExp.LogOffset(now))
timePrice := n.negFactor.timePrice(n.capacity)
if uint64(-priority) > negBalance {
if timePrice < 1e-100 {
return 0, false
}
dt += float64(uint64(-priority)-negBalance) / timePrice
}
return time.Duration(dt), true
}
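// Editor's note (not part of the original commit): in TestCallbackChecking
// the node has capacity 1000000, a time price of 1 and a positive balance of
// 1e9, so timeUntil(500) yields (1e9 - 500*1000000)/1 == 5e8 ns == 500ms,
// matching the expected time difference in that test.
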

les/vflux/server/balance_test.go

@@ -0,0 +1,400 @@
// Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package server
import (
"math/rand"
"reflect"
"testing"
"time"
"github.com/ethereum/go-ethereum/common/mclock"
"github.com/ethereum/go-ethereum/ethdb/memorydb"
"github.com/ethereum/go-ethereum/les/utils"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/p2p/enr"
"github.com/ethereum/go-ethereum/p2p/nodestate"
)
var (
testFlag = testSetup.NewFlag("testFlag")
connAddrFlag = testSetup.NewField("connAddr", reflect.TypeOf(""))
btTestSetup = NewBalanceTrackerSetup(testSetup)
)
func init() {
btTestSetup.Connect(connAddrFlag, ppTestSetup.CapacityField)
}
type zeroExpirer struct{}
func (z zeroExpirer) SetRate(now mclock.AbsTime, rate float64) {}
func (z zeroExpirer) SetLogOffset(now mclock.AbsTime, logOffset utils.Fixed64) {}
func (z zeroExpirer) LogOffset(now mclock.AbsTime) utils.Fixed64 { return 0 }
type balanceTestSetup struct {
clock *mclock.Simulated
ns *nodestate.NodeStateMachine
bt *BalanceTracker
}
func newBalanceTestSetup() *balanceTestSetup {
clock := &mclock.Simulated{}
ns := nodestate.NewNodeStateMachine(nil, nil, clock, testSetup)
db := memorydb.New()
bt := NewBalanceTracker(ns, btTestSetup, db, clock, zeroExpirer{}, zeroExpirer{})
ns.Start()
return &balanceTestSetup{
clock: clock,
ns: ns,
bt: bt,
}
}
func (b *balanceTestSetup) newNode(capacity uint64) *NodeBalance {
node := enode.SignNull(&enr.Record{}, enode.ID{})
b.ns.SetState(node, testFlag, nodestate.Flags{}, 0)
b.ns.SetField(node, btTestSetup.connAddressField, "")
b.ns.SetField(node, ppTestSetup.CapacityField, capacity)
n, _ := b.ns.GetField(node, btTestSetup.BalanceField).(*NodeBalance)
return n
}
func (b *balanceTestSetup) stop() {
b.bt.Stop()
b.ns.Stop()
}
func TestAddBalance(t *testing.T) {
b := newBalanceTestSetup()
defer b.stop()
node := b.newNode(1000)
var inputs = []struct {
delta int64
expect [2]uint64
total uint64
expectErr bool
}{
{100, [2]uint64{0, 100}, 100, false},
{-100, [2]uint64{100, 0}, 0, false},
{-100, [2]uint64{0, 0}, 0, false},
{1, [2]uint64{0, 1}, 1, false},
{maxBalance, [2]uint64{0, 0}, 0, true},
}
for _, i := range inputs {
old, new, err := node.AddBalance(i.delta)
if i.expectErr {
if err == nil {
t.Fatalf("Expect get error but nil")
}
continue
} else if err != nil {
t.Fatalf("Expect get no error but %v", err)
}
if old != i.expect[0] || new != i.expect[1] {
t.Fatalf("Positive balance mismatch, got %v -> %v", old, new)
}
if b.bt.TotalTokenAmount() != i.total {
t.Fatalf("Total positive balance mismatch, want %v, got %v", i.total, b.bt.TotalTokenAmount())
}
}
}
func TestSetBalance(t *testing.T) {
b := newBalanceTestSetup()
defer b.stop()
node := b.newNode(1000)
var inputs = []struct {
pos, neg uint64
}{
{1000, 0},
{0, 1000},
{1000, 1000},
}
for _, i := range inputs {
node.SetBalance(i.pos, i.neg)
pos, neg := node.GetBalance()
if pos != i.pos {
t.Fatalf("Positive balance mismatch, want %v, got %v", i.pos, pos)
}
if neg != i.neg {
t.Fatalf("Negative balance mismatch, want %v, got %v", i.neg, neg)
}
}
}
func TestBalanceTimeCost(t *testing.T) {
b := newBalanceTestSetup()
defer b.stop()
node := b.newNode(1000)
b.ns.SetField(node.node, ppTestSetup.CapacityField, uint64(1))
node.SetPriceFactors(PriceFactors{1, 0, 1}, PriceFactors{1, 0, 1})
node.SetBalance(uint64(time.Minute), 0) // 1 minute time allowance
var inputs = []struct {
runTime time.Duration
expPos uint64
expNeg uint64
}{
{time.Second, uint64(time.Second * 59), 0},
{0, uint64(time.Second * 59), 0},
{time.Second * 59, 0, 0},
{time.Second, 0, uint64(time.Second)},
}
for _, i := range inputs {
b.clock.Run(i.runTime)
if pos, _ := node.GetBalance(); pos != i.expPos {
t.Fatalf("Positive balance mismatch, want %v, got %v", i.expPos, pos)
}
if _, neg := node.GetBalance(); neg != i.expNeg {
t.Fatalf("Negative balance mismatch, want %v, got %v", i.expNeg, neg)
}
}
node.SetBalance(uint64(time.Minute), 0) // Refill 1 minute time allowance
for _, i := range inputs {
b.clock.Run(i.runTime)
if pos, _ := node.GetBalance(); pos != i.expPos {
t.Fatalf("Positive balance mismatch, want %v, got %v", i.expPos, pos)
}
if _, neg := node.GetBalance(); neg != i.expNeg {
t.Fatalf("Negative balance mismatch, want %v, got %v", i.expNeg, neg)
}
}
}
func TestBalanceReqCost(t *testing.T) {
b := newBalanceTestSetup()
defer b.stop()
node := b.newNode(1000)
node.SetPriceFactors(PriceFactors{1, 0, 1}, PriceFactors{1, 0, 1})
b.ns.SetField(node.node, ppTestSetup.CapacityField, uint64(1))
node.SetBalance(uint64(time.Minute), 0) // 1 minute of serving time allowance
var inputs = []struct {
reqCost uint64
expPos uint64
expNeg uint64
}{
{uint64(time.Second), uint64(time.Second * 59), 0},
{0, uint64(time.Second * 59), 0},
{uint64(time.Second * 59), 0, 0},
{uint64(time.Second), 0, uint64(time.Second)},
}
for _, i := range inputs {
node.RequestServed(i.reqCost)
if pos, _ := node.GetBalance(); pos != i.expPos {
t.Fatalf("Positive balance mismatch, want %v, got %v", i.expPos, pos)
}
if _, neg := node.GetBalance(); neg != i.expNeg {
t.Fatalf("Negative balance mismatch, want %v, got %v", i.expNeg, neg)
}
}
}
func TestBalanceToPriority(t *testing.T) {
b := newBalanceTestSetup()
defer b.stop()
node := b.newNode(1000)
node.SetPriceFactors(PriceFactors{1, 0, 1}, PriceFactors{1, 0, 1})
var inputs = []struct {
pos uint64
neg uint64
priority int64
}{
{1000, 0, 1},
{2000, 0, 2}, // Higher balance, higher priority value
{0, 0, 0},
{0, 1000, -1000},
}
for _, i := range inputs {
node.SetBalance(i.pos, i.neg)
priority := node.Priority(b.clock.Now(), 1000)
if priority != i.priority {
t.Fatalf("Priority mismatch, want %v, got %v", i.priority, priority)
}
}
}
func TestEstimatedPriority(t *testing.T) {
b := newBalanceTestSetup()
defer b.stop()
node := b.newNode(1000000000)
node.SetPriceFactors(PriceFactors{1, 0, 1}, PriceFactors{1, 0, 1})
b.ns.SetField(node.node, ppTestSetup.CapacityField, uint64(1))
node.SetBalance(uint64(time.Minute), 0)
var inputs = []struct {
runTime time.Duration // time cost
futureTime time.Duration // diff of future time
reqCost uint64 // single request cost
priority int64 // expected estimated priority
}{
{time.Second, time.Second, 0, 58},
{0, time.Second, 0, 58},
// 2 seconds time cost, 1 second estimated time cost, 10^9 request cost,
// 10^9 estimated request cost per second.
{time.Second, time.Second, 1000000000, 55},
// 3 seconds time cost, 3 second estimated time cost, 10^9*2 request cost,
// 4*10^9 estimated request cost.
{time.Second, 3 * time.Second, 1000000000, 48},
// All positive balance is used up
{time.Second * 55, 0, 0, 0},
// 1 minute estimated time cost, 4/58 * 10^9 estimated request cost per sec.
{0, time.Minute, 0, -int64(time.Minute) - int64(time.Second)*120/29},
}
for _, i := range inputs {
b.clock.Run(i.runTime)
node.RequestServed(i.reqCost)
priority := node.EstMinPriority(b.clock.Now()+mclock.AbsTime(i.futureTime), 1000000000, false)
if priority != i.priority {
t.Fatalf("Estimated priority mismatch, want %v, got %v", i.priority, priority)
}
}
}
func TestPosBalanceMissing(t *testing.T) {
b := newBalanceTestSetup()
defer b.stop()
node := b.newNode(1000)
node.SetPriceFactors(PriceFactors{1, 0, 1}, PriceFactors{1, 0, 1})
b.ns.SetField(node.node, ppTestSetup.CapacityField, uint64(1))
var inputs = []struct {
pos, neg uint64
priority int64
cap uint64
after time.Duration
expect uint64
}{
{uint64(time.Second * 2), 0, 0, 1, time.Second, 0},
{uint64(time.Second * 2), 0, 0, 1, 2 * time.Second, 1},
{uint64(time.Second * 2), 0, int64(time.Second), 1, 2 * time.Second, uint64(time.Second) + 1},
{0, 0, int64(time.Second), 1, time.Second, uint64(2*time.Second) + 1},
{0, 0, -int64(time.Second), 1, time.Second, 1},
}
for _, i := range inputs {
node.SetBalance(i.pos, i.neg)
got := node.PosBalanceMissing(i.priority, i.cap, i.after)
if got != i.expect {
t.Fatalf("Missing budget mismatch, want %v, got %v", i.expect, got)
}
}
}
func TestPositiveBalanceCounting(t *testing.T) {
b := newBalanceTestSetup()
defer b.stop()
var nodes []*NodeBalance
for i := 0; i < 100; i++ {
node := b.newNode(1000000)
node.SetPriceFactors(PriceFactors{1, 0, 1}, PriceFactors{1, 0, 1})
nodes = append(nodes, node)
}
// Allocate service tokens
var sum uint64
for i := 0; i < 100; i++ {
amount := int64(rand.Intn(100) + 100)
nodes[i].AddBalance(amount)
sum += uint64(amount)
}
if b.bt.TotalTokenAmount() != sum {
t.Fatalf("Invalid token amount")
}
// Change client status
for i := 0; i < 100; i++ {
if rand.Intn(2) == 0 {
b.ns.SetField(nodes[i].node, ppTestSetup.CapacityField, uint64(1))
}
}
if b.bt.TotalTokenAmount() != sum {
t.Fatalf("Invalid token amount")
}
for i := 0; i < 100; i++ {
if rand.Intn(2) == 0 {
b.ns.SetField(nodes[i].node, ppTestSetup.CapacityField, uint64(1))
}
}
if b.bt.TotalTokenAmount() != sum {
t.Fatalf("Invalid token amount")
}
}
func TestCallbackChecking(t *testing.T) {
b := newBalanceTestSetup()
defer b.stop()
node := b.newNode(1000000)
node.SetPriceFactors(PriceFactors{1, 0, 1}, PriceFactors{1, 0, 1})
var inputs = []struct {
priority int64
expDiff time.Duration
}{
{500, time.Millisecond * 500},
{0, time.Second},
{-int64(time.Second), 2 * time.Second},
}
node.SetBalance(uint64(time.Second), 0)
for _, i := range inputs {
diff, _ := node.timeUntil(i.priority)
if diff != i.expDiff {
t.Fatalf("Time difference mismatch, want %v, got %v", i.expDiff, diff)
}
}
}
func TestCallback(t *testing.T) {
b := newBalanceTestSetup()
defer b.stop()
node := b.newNode(1000)
node.SetPriceFactors(PriceFactors{1, 0, 1}, PriceFactors{1, 0, 1})
b.ns.SetField(node.node, ppTestSetup.CapacityField, uint64(1))
callCh := make(chan struct{}, 1)
node.SetBalance(uint64(time.Minute), 0)
node.addCallback(balanceCallbackZero, 0, func() { callCh <- struct{}{} })
b.clock.Run(time.Minute)
select {
case <-callCh:
case <-time.NewTimer(time.Second).C:
t.Fatalf("Callback hasn't been called yet")
}
node.SetBalance(uint64(time.Minute), 0)
node.addCallback(balanceCallbackZero, 0, func() { callCh <- struct{}{} })
node.removeCallback(balanceCallbackZero)
b.clock.Run(time.Minute)
select {
case <-callCh:
t.Fatalf("Callback shouldn't be called")
case <-time.NewTimer(time.Millisecond * 100).C:
}
}

les/vflux/server/balancetracker.go

@@ -0,0 +1,290 @@
// Copyright 2020 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package server
import (
"reflect"
"sync"
"time"
"github.com/ethereum/go-ethereum/common/mclock"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/les/utils"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/p2p/nodestate"
)
const (
posThreshold = 1000000 // minimum positive balance that is persisted in the database
negThreshold = 1000000 // minimum negative balance that is persisted in the database
persistExpirationRefresh = time.Minute * 5 // refresh period of the token expiration persistence
)
// BalanceTrackerSetup contains node state flags and fields used by BalanceTracker
type BalanceTrackerSetup struct {
// controlled by PriorityPool
PriorityFlag, UpdateFlag nodestate.Flags
BalanceField nodestate.Field
// external connections
connAddressField, capacityField nodestate.Field
}
// NewBalanceTrackerSetup creates a new BalanceTrackerSetup and initializes the fields
// and flags controlled by BalanceTracker
func NewBalanceTrackerSetup(setup *nodestate.Setup) BalanceTrackerSetup {
return BalanceTrackerSetup{
// PriorityFlag is set if the node has a positive balance
PriorityFlag: setup.NewFlag("priorityNode"),
// UpdateFlag is set and then immediately reset if the balance has been updated and
// therefore priority is suddenly changed
UpdateFlag: setup.NewFlag("balanceUpdate"),
// BalanceField contains the NodeBalance struct which implements nodePriority,
// allowing on-demand priority calculation and future priority estimation
BalanceField: setup.NewField("balance", reflect.TypeOf(&NodeBalance{})),
}
}
// Connect sets the fields used by BalanceTracker as an input
func (bts *BalanceTrackerSetup) Connect(connAddressField, capacityField nodestate.Field) {
bts.connAddressField = connAddressField
bts.capacityField = capacityField
}
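// Editor's sketch (not part of the original commit; it mirrors the wiring in
// the accompanying tests). The field names below are illustrative:
//
//	setup := &nodestate.Setup{}
//	connAddrField := setup.NewField("connAddr", reflect.TypeOf(""))
//	capacityField := setup.NewField("capacity", reflect.TypeOf(uint64(0)))
//	bts := NewBalanceTrackerSetup(setup)
//	bts.Connect(connAddrField, capacityField)
//	bt := NewBalanceTracker(ns, bts, db, clock, posExp, negExp)
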
// BalanceTracker tracks positive and negative balances for connected nodes.
// After connAddressField is set externally, a NodeBalance is created and previous
// balance values are loaded from the database. Both balances are exponentially expired
// values. Costs are deducted from the positive balance if present, otherwise added to
// the negative balance. If the capacity is non-zero then a time cost is applied
// continuously while individual request costs are applied immediately.
// The two balances are translated into a single priority value that also depends
// on the actual capacity.
type BalanceTracker struct {
BalanceTrackerSetup
clock mclock.Clock
lock sync.Mutex
ns *nodestate.NodeStateMachine
ndb *nodeDB
posExp, negExp utils.ValueExpirer
posExpTC, negExpTC uint64
active, inactive utils.ExpiredValue
balanceTimer *utils.UpdateTimer
quit chan struct{}
}
// NewBalanceTracker creates a new BalanceTracker
func NewBalanceTracker(ns *nodestate.NodeStateMachine, setup BalanceTrackerSetup, db ethdb.KeyValueStore, clock mclock.Clock, posExp, negExp utils.ValueExpirer) *BalanceTracker {
ndb := newNodeDB(db, clock)
bt := &BalanceTracker{
ns: ns,
BalanceTrackerSetup: setup,
ndb: ndb,
clock: clock,
posExp: posExp,
negExp: negExp,
balanceTimer: utils.NewUpdateTimer(clock, time.Second*10),
quit: make(chan struct{}),
}
bt.ndb.forEachBalance(false, func(id enode.ID, balance utils.ExpiredValue) bool {
bt.inactive.AddExp(balance)
return true
})
ns.SubscribeField(bt.capacityField, func(node *enode.Node, state nodestate.Flags, oldValue, newValue interface{}) {
n, _ := ns.GetField(node, bt.BalanceField).(*NodeBalance)
if n == nil {
return
}
ov, _ := oldValue.(uint64)
nv, _ := newValue.(uint64)
if ov == 0 && nv != 0 {
n.activate()
}
if nv != 0 {
n.setCapacity(nv)
}
if ov != 0 && nv == 0 {
n.deactivate()
}
})
ns.SubscribeField(bt.connAddressField, func(node *enode.Node, state nodestate.Flags, oldValue, newValue interface{}) {
if newValue != nil {
ns.SetFieldSub(node, bt.BalanceField, bt.newNodeBalance(node, newValue.(string)))
} else {
ns.SetStateSub(node, nodestate.Flags{}, bt.PriorityFlag, 0)
if b, _ := ns.GetField(node, bt.BalanceField).(*NodeBalance); b != nil {
b.deactivate()
}
ns.SetFieldSub(node, bt.BalanceField, nil)
}
})
// The positive and negative balances of clients are stored in the database
// and both decay exponentially over time. They are deleted once their
// value becomes small enough.
bt.ndb.evictCallBack = bt.canDropBalance
go func() {
for {
select {
case <-clock.After(persistExpirationRefresh):
now := clock.Now()
bt.ndb.setExpiration(posExp.LogOffset(now), negExp.LogOffset(now))
case <-bt.quit:
return
}
}
}()
return bt
}
// Stop saves expiration offset and unsaved node balances and shuts BalanceTracker down
func (bt *BalanceTracker) Stop() {
now := bt.clock.Now()
bt.ndb.setExpiration(bt.posExp.LogOffset(now), bt.negExp.LogOffset(now))
close(bt.quit)
bt.ns.ForEach(nodestate.Flags{}, nodestate.Flags{}, func(node *enode.Node, state nodestate.Flags) {
if n, ok := bt.ns.GetField(node, bt.BalanceField).(*NodeBalance); ok {
n.lock.Lock()
n.storeBalance(true, true)
n.lock.Unlock()
bt.ns.SetField(node, bt.BalanceField, nil)
}
})
bt.ndb.close()
}
// TotalTokenAmount returns the current total amount of service tokens in existence
func (bt *BalanceTracker) TotalTokenAmount() uint64 {
bt.lock.Lock()
defer bt.lock.Unlock()
bt.balanceTimer.Update(func(_ time.Duration) bool {
bt.active = utils.ExpiredValue{}
bt.ns.ForEach(nodestate.Flags{}, nodestate.Flags{}, func(node *enode.Node, state nodestate.Flags) {
if n, ok := bt.ns.GetField(node, bt.BalanceField).(*NodeBalance); ok {
pos, _ := n.GetRawBalance()
bt.active.AddExp(pos)
}
})
return true
})
total := bt.active
total.AddExp(bt.inactive)
return total.Value(bt.posExp.LogOffset(bt.clock.Now()))
}
// GetPosBalanceIDs lists node IDs with an associated positive balance
func (bt *BalanceTracker) GetPosBalanceIDs(start, stop enode.ID, maxCount int) (result []enode.ID) {
return bt.ndb.getPosBalanceIDs(start, stop, maxCount)
}
// SetExpirationTCs sets positive and negative token expiration time constants.
// Specified in seconds, 0 means infinite (no expiration).
func (bt *BalanceTracker) SetExpirationTCs(pos, neg uint64) {
bt.lock.Lock()
defer bt.lock.Unlock()
bt.posExpTC, bt.negExpTC = pos, neg
now := bt.clock.Now()
if pos > 0 {
bt.posExp.SetRate(now, 1/float64(pos*uint64(time.Second)))
} else {
bt.posExp.SetRate(now, 0)
}
if neg > 0 {
bt.negExp.SetRate(now, 1/float64(neg*uint64(time.Second)))
} else {
bt.negExp.SetRate(now, 0)
}
}
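// Editor's note (not part of the original commit): the conversion above turns
// a time constant of pos seconds into a decay rate of 1/(pos*1e9) per
// nanosecond; assuming the base-2 logarithmic offset used by
// utils.ExpiredValue, a balance halves after pos seconds.
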
// GetExpirationTCs returns the current positive and negative token expiration
// time constants
func (bt *BalanceTracker) GetExpirationTCs() (pos, neg uint64) {
bt.lock.Lock()
defer bt.lock.Unlock()
return bt.posExpTC, bt.negExpTC
}
// newNodeBalance loads balances from the database and creates a NodeBalance instance
// for the given node. It also sets the PriorityFlag and adds balanceCallbackZero if
// the node has a positive balance.
// Note: this function should run inside a NodeStateMachine operation
func (bt *BalanceTracker) newNodeBalance(node *enode.Node, negBalanceKey string) *NodeBalance {
pb := bt.ndb.getOrNewBalance(node.ID().Bytes(), false)
nb := bt.ndb.getOrNewBalance([]byte(negBalanceKey), true)
n := &NodeBalance{
bt: bt,
node: node,
connAddress: negBalanceKey,
balance: balance{pos: pb, neg: nb},
initTime: bt.clock.Now(),
lastUpdate: bt.clock.Now(),
}
for i := range n.callbackIndex {
n.callbackIndex[i] = -1
}
if n.checkPriorityStatus() {
n.bt.ns.SetStateSub(n.node, n.bt.PriorityFlag, nodestate.Flags{}, 0)
}
return n
}
// storeBalance stores either a positive or a negative balance in the database
func (bt *BalanceTracker) storeBalance(id []byte, neg bool, value utils.ExpiredValue) {
if bt.canDropBalance(bt.clock.Now(), neg, value) {
bt.ndb.delBalance(id, neg) // balance is small enough, drop it directly.
} else {
bt.ndb.setBalance(id, neg, value)
}
}
// canDropBalance tells whether a positive or negative balance is below the threshold
// and therefore can be dropped from the database
func (bt *BalanceTracker) canDropBalance(now mclock.AbsTime, neg bool, b utils.ExpiredValue) bool {
if neg {
return b.Value(bt.negExp.LogOffset(now)) <= negThreshold
}
return b.Value(bt.posExp.LogOffset(now)) <= posThreshold
}
// updateTotalBalance adjusts the total balance after executing given callback.
func (bt *BalanceTracker) updateTotalBalance(n *NodeBalance, callback func() bool) {
bt.lock.Lock()
defer bt.lock.Unlock()
n.lock.Lock()
defer n.lock.Unlock()
original, active := n.balance.pos, n.active
if !callback() {
return
}
if active {
bt.active.SubExp(original)
} else {
bt.inactive.SubExp(original)
}
if n.active {
bt.active.AddExp(n.balance.pos)
} else {
bt.inactive.AddExp(n.balance.pos)
}
}

les/vflux/server/clientdb.go

@@ -0,0 +1,250 @@
// Copyright 2020 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package server
import (
"bytes"
"encoding/binary"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/mclock"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/les/utils"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/rlp"
lru "github.com/hashicorp/golang-lru"
)
const (
balanceCacheLimit = 8192 // the maximum number of cached items in the service token balance cache
// nodeDBVersion is the version identifier of the node data in db
//
// Changelog:
// Version 0 => 1
// * Replace `lastTotal` with `meta` in positive balance
//
// Version 1 => 2
// * Positive and negative balance encodings are changed:
// * Cumulative time is replaced with expiration
nodeDBVersion = 2
// dbCleanupCycle is the period of the database cleanup that drops expired balances
dbCleanupCycle = time.Hour
)
var (
positiveBalancePrefix = []byte("pb:") // dbVersion(uint16 big endian) + positiveBalancePrefix + id -> balance
negativeBalancePrefix = []byte("nb:") // dbVersion(uint16 big endian) + negativeBalancePrefix + ip -> balance
expirationKey = []byte("expiration:") // dbVersion(uint16 big endian) + expirationKey -> posExp, negExp
)
type nodeDB struct {
db ethdb.KeyValueStore
cache *lru.Cache
auxbuf []byte // 37-byte auxiliary buffer for key encoding
verbuf [2]byte // 2-byte auxiliary buffer for db version
evictCallBack func(mclock.AbsTime, bool, utils.ExpiredValue) bool // Callback to determine whether the balance can be evicted.
clock mclock.Clock
closeCh chan struct{}
cleanupHook func() // Test hook used for testing
}
func newNodeDB(db ethdb.KeyValueStore, clock mclock.Clock) *nodeDB {
cache, _ := lru.New(balanceCacheLimit)
ndb := &nodeDB{
db: db,
cache: cache,
auxbuf: make([]byte, 37),
clock: clock,
closeCh: make(chan struct{}),
}
binary.BigEndian.PutUint16(ndb.verbuf[:], uint16(nodeDBVersion))
go ndb.expirer()
return ndb
}
func (db *nodeDB) close() {
close(db.closeCh)
}
func (db *nodeDB) getPrefix(neg bool) []byte {
prefix := positiveBalancePrefix
if neg {
prefix = negativeBalancePrefix
}
return append(db.verbuf[:], prefix...)
}
func (db *nodeDB) key(id []byte, neg bool) []byte {
prefix := positiveBalancePrefix
if neg {
prefix = negativeBalancePrefix
}
if len(prefix)+len(db.verbuf)+len(id) > len(db.auxbuf) {
db.auxbuf = append(db.auxbuf, make([]byte, len(prefix)+len(db.verbuf)+len(id)-len(db.auxbuf))...)
}
copy(db.auxbuf[:len(db.verbuf)], db.verbuf[:])
copy(db.auxbuf[len(db.verbuf):len(db.verbuf)+len(prefix)], prefix)
copy(db.auxbuf[len(prefix)+len(db.verbuf):len(prefix)+len(db.verbuf)+len(id)], id)
return db.auxbuf[:len(prefix)+len(db.verbuf)+len(id)]
}
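// Editor's note (not part of the original commit): the 37-byte auxbuf covers
// the common case of 2 version bytes + 3 prefix bytes ("pb:" or "nb:") + a
// 32-byte enode.ID; negative-balance keys use a connection address string of
// arbitrary length, which is why the buffer is grown on demand above.
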
func (db *nodeDB) getExpiration() (utils.Fixed64, utils.Fixed64) {
blob, err := db.db.Get(append(db.verbuf[:], expirationKey...))
if err != nil || len(blob) != 16 {
return 0, 0
}
return utils.Fixed64(binary.BigEndian.Uint64(blob[:8])), utils.Fixed64(binary.BigEndian.Uint64(blob[8:16]))
}
func (db *nodeDB) setExpiration(pos, neg utils.Fixed64) {
var buff [16]byte
binary.BigEndian.PutUint64(buff[:8], uint64(pos))
binary.BigEndian.PutUint64(buff[8:16], uint64(neg))
db.db.Put(append(db.verbuf[:], expirationKey...), buff[:16])
}
func (db *nodeDB) getOrNewBalance(id []byte, neg bool) utils.ExpiredValue {
key := db.key(id, neg)
item, exist := db.cache.Get(string(key))
if exist {
return item.(utils.ExpiredValue)
}
var b utils.ExpiredValue
enc, err := db.db.Get(key)
if err != nil || len(enc) == 0 {
return b
}
if err := rlp.DecodeBytes(enc, &b); err != nil {
log.Crit("Failed to decode positive balance", "err", err)
}
db.cache.Add(string(key), b)
return b
}
func (db *nodeDB) setBalance(id []byte, neg bool, b utils.ExpiredValue) {
key := db.key(id, neg)
enc, err := rlp.EncodeToBytes(&(b))
if err != nil {
log.Crit("Failed to encode positive balance", "err", err)
}
db.db.Put(key, enc)
db.cache.Add(string(key), b)
}
func (db *nodeDB) delBalance(id []byte, neg bool) {
key := db.key(id, neg)
db.db.Delete(key)
db.cache.Remove(string(key))
}
// getPosBalanceIDs returns a lexicographically ordered list of IDs of accounts
// with a positive balance
func (db *nodeDB) getPosBalanceIDs(start, stop enode.ID, maxCount int) (result []enode.ID) {
if maxCount <= 0 {
return
}
prefix := db.getPrefix(false)
keylen := len(prefix) + len(enode.ID{})
it := db.db.NewIterator(prefix, start.Bytes())
defer it.Release()
for it.Next() {
var id enode.ID
if len(it.Key()) != keylen {
return
}
copy(id[:], it.Key()[keylen-len(id):])
if bytes.Compare(id.Bytes(), stop.Bytes()) >= 0 {
return
}
result = append(result, id)
if len(result) == maxCount {
return
}
}
return
}
// forEachBalance iterates all balances and passes values to callback.
func (db *nodeDB) forEachBalance(neg bool, callback func(id enode.ID, balance utils.ExpiredValue) bool) {
prefix := db.getPrefix(neg)
keylen := len(prefix) + len(enode.ID{})
it := db.db.NewIterator(prefix, nil)
defer it.Release()
for it.Next() {
var id enode.ID
if len(it.Key()) != keylen {
return
}
copy(id[:], it.Key()[keylen-len(id):])
var b utils.ExpiredValue
if err := rlp.DecodeBytes(it.Value(), &b); err != nil {
continue
}
if !callback(id, b) {
return
}
}
}
func (db *nodeDB) expirer() {
for {
select {
case <-db.clock.After(dbCleanupCycle):
db.expireNodes()
case <-db.closeCh:
return
}
}
}
// expireNodes iterates the whole node db and checks whether the
// token balances can be deleted.
func (db *nodeDB) expireNodes() {
var (
visited int
deleted int
start = time.Now()
)
for _, neg := range []bool{false, true} {
iter := db.db.NewIterator(db.getPrefix(neg), nil)
for iter.Next() {
visited++
var balance utils.ExpiredValue
if err := rlp.DecodeBytes(iter.Value(), &balance); err != nil {
log.Crit("Failed to decode negative balance", "err", err)
}
if db.evictCallBack != nil && db.evictCallBack(db.clock.Now(), neg, balance) {
deleted++
db.db.Delete(iter.Key())
}
}
}
// Invoke testing hook if it's not nil.
if db.cleanupHook != nil {
db.cleanupHook()
}
log.Debug("Expire nodes", "visited", visited, "deleted", deleted, "elapsed", common.PrettyDuration(time.Since(start)))
}

les/vflux/server/clientdb_test.go

@@ -0,0 +1,144 @@
// Copyright 2020 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package server
import (
"reflect"
"testing"
"time"
"github.com/ethereum/go-ethereum/common/mclock"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/les/utils"
"github.com/ethereum/go-ethereum/p2p/enode"
)
func expval(v uint64) utils.ExpiredValue {
return utils.ExpiredValue{Base: v}
}
func TestNodeDB(t *testing.T) {
ndb := newNodeDB(rawdb.NewMemoryDatabase(), mclock.System{})
defer ndb.close()
var cases = []struct {
id enode.ID
ip string
balance utils.ExpiredValue
positive bool
}{
{enode.ID{0x00, 0x01, 0x02}, "", expval(100), true},
{enode.ID{0x00, 0x01, 0x02}, "", expval(200), true},
{enode.ID{}, "127.0.0.1", expval(100), false},
{enode.ID{}, "127.0.0.1", expval(200), false},
}
for _, c := range cases {
if c.positive {
ndb.setBalance(c.id.Bytes(), false, c.balance)
if pb := ndb.getOrNewBalance(c.id.Bytes(), false); !reflect.DeepEqual(pb, c.balance) {
t.Fatalf("Positive balance mismatch, want %v, got %v", c.balance, pb)
}
} else {
ndb.setBalance([]byte(c.ip), true, c.balance)
if nb := ndb.getOrNewBalance([]byte(c.ip), true); !reflect.DeepEqual(nb, c.balance) {
t.Fatalf("Negative balance mismatch, want %v, got %v", c.balance, nb)
}
}
}
for _, c := range cases {
if c.positive {
ndb.delBalance(c.id.Bytes(), false)
if pb := ndb.getOrNewBalance(c.id.Bytes(), false); !reflect.DeepEqual(pb, utils.ExpiredValue{}) {
t.Fatalf("Positive balance mismatch, want %v, got %v", utils.ExpiredValue{}, pb)
}
} else {
ndb.delBalance([]byte(c.ip), true)
if nb := ndb.getOrNewBalance([]byte(c.ip), true); !reflect.DeepEqual(nb, utils.ExpiredValue{}) {
t.Fatalf("Negative balance mismatch, want %v, got %v", utils.ExpiredValue{}, nb)
}
}
}
posExp, negExp := utils.Fixed64(1000), utils.Fixed64(2000)
ndb.setExpiration(posExp, negExp)
if pos, neg := ndb.getExpiration(); pos != posExp || neg != negExp {
t.Fatalf("Expiration mismatch, want %v / %v, got %v / %v", posExp, negExp, pos, neg)
}
/* curBalance := currencyBalance{typ: "ETH", amount: 10000}
ndb.setCurrencyBalance(enode.ID{0x01, 0x02}, curBalance)
if got := ndb.getCurrencyBalance(enode.ID{0x01, 0x02}); !reflect.DeepEqual(got, curBalance) {
t.Fatalf("Currency balance mismatch, want %v, got %v", curBalance, got)
}*/
}
func TestNodeDBExpiration(t *testing.T) {
var (
iterated int
done = make(chan struct{}, 1)
)
callback := func(now mclock.AbsTime, neg bool, b utils.ExpiredValue) bool {
iterated++
return true
}
clock := &mclock.Simulated{}
ndb := newNodeDB(rawdb.NewMemoryDatabase(), clock)
defer ndb.close()
ndb.evictCallBack = callback
ndb.cleanupHook = func() { done <- struct{}{} }
var cases = []struct {
id []byte
neg bool
balance utils.ExpiredValue
}{
{[]byte{0x01, 0x02}, false, expval(1)},
{[]byte{0x03, 0x04}, false, expval(1)},
{[]byte{0x05, 0x06}, false, expval(1)},
{[]byte{0x07, 0x08}, false, expval(1)},
{[]byte("127.0.0.1"), true, expval(1)},
{[]byte("127.0.0.2"), true, expval(1)},
{[]byte("127.0.0.3"), true, expval(1)},
{[]byte("127.0.0.4"), true, expval(1)},
}
for _, c := range cases {
ndb.setBalance(c.id, c.neg, c.balance)
}
clock.WaitForTimers(1)
clock.Run(time.Hour + time.Minute)
select {
case <-done:
case <-time.NewTimer(time.Second).C:
t.Fatalf("timeout")
}
if iterated != 8 {
t.Fatalf("Failed to evict useless balances, want %v, got %d", 8, iterated)
}
for _, c := range cases {
ndb.setBalance(c.id, c.neg, c.balance)
}
clock.WaitForTimers(1)
clock.Run(time.Hour + time.Minute)
select {
case <-done:
case <-time.NewTimer(time.Second).C:
t.Fatalf("timeout")
}
if iterated != 16 {
t.Fatalf("Failed to evict useless balances, want %v, got %d", 16, iterated)
}
}

les/vflux/server/prioritypool.go

@@ -0,0 +1,502 @@
// Copyright 2020 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package server
import (
"math"
"reflect"
"sync"
"time"
"github.com/ethereum/go-ethereum/common/mclock"
"github.com/ethereum/go-ethereum/common/prque"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/p2p/nodestate"
)
const (
lazyQueueRefresh = time.Second * 10 // refresh period of the active queue
)
// PriorityPoolSetup contains node state flags and fields used by PriorityPool
// Note: ActiveFlag and InactiveFlag can be controlled both externally and by the pool,
// see PriorityPool description for details.
type PriorityPoolSetup struct {
// controlled by PriorityPool
ActiveFlag, InactiveFlag nodestate.Flags
CapacityField, ppNodeInfoField nodestate.Field
// external connections
updateFlag nodestate.Flags
priorityField nodestate.Field
}
// NewPriorityPoolSetup creates a new PriorityPoolSetup and initializes the fields
// and flags controlled by PriorityPool
func NewPriorityPoolSetup(setup *nodestate.Setup) PriorityPoolSetup {
return PriorityPoolSetup{
ActiveFlag: setup.NewFlag("active"),
InactiveFlag: setup.NewFlag("inactive"),
CapacityField: setup.NewField("capacity", reflect.TypeOf(uint64(0))),
ppNodeInfoField: setup.NewField("ppNodeInfo", reflect.TypeOf(&ppNodeInfo{})),
}
}
// Connect sets the fields and flags used by PriorityPool as an input
func (pps *PriorityPoolSetup) Connect(priorityField nodestate.Field, updateFlag nodestate.Flags) {
pps.priorityField = priorityField // should implement nodePriority
pps.updateFlag = updateFlag // triggers an immediate priority update
}
// PriorityPool handles a set of nodes where each node has a capacity (a scalar value)
// and a priority (which can change over time and can also depend on the capacity).
// A node is active if it has at least the necessary minimal amount of capacity while
// inactive nodes have 0 capacity (values between 0 and the minimum are not allowed).
// The pool ensures that the number and total capacity of all active nodes are limited
// and the highest priority nodes are active at all times (limits can be changed
// during operation with immediate effect).
//
// When activating clients a priority bias is applied in favor of the already active
// nodes in order to avoid nodes quickly alternating between active and inactive states
// when their priorities are close to each other. The bias is specified in terms of
// duration (time) because priorities are expected to usually get lower over time and
// therefore a future minimum prediction (see EstMinPriority) should monotonically
// decrease with the specified time parameter.
// This time bias can be interpreted as minimum expected active time at the given
// capacity (if the threshold priority stays the same).
//
// Nodes in the pool always have either InactiveFlag or ActiveFlag set. A new node is
// added to the pool by externally setting InactiveFlag. PriorityPool can switch a node
// between InactiveFlag and ActiveFlag at any time. Nodes can be removed from the pool
// by externally resetting both flags. ActiveFlag should not be set externally.
//
// The highest priority nodes in "inactive" state are moved to "active" state as soon as
// the minimum capacity can be granted for them. The capacity of lower priority active
// nodes is reduced or they are demoted to "inactive" state if their priority is
// insufficient even at minimal capacity.
type PriorityPool struct {
PriorityPoolSetup
ns *nodestate.NodeStateMachine
clock mclock.Clock
lock sync.Mutex
activeQueue *prque.LazyQueue
inactiveQueue *prque.Prque
changed []*ppNodeInfo
activeCount, activeCap uint64
maxCount, maxCap uint64
minCap uint64
activeBias time.Duration
capacityStepDiv uint64
}
// nodePriority interface provides current and estimated future priorities on demand
type nodePriority interface {
// Priority should return the current priority of the node (higher is better)
Priority(now mclock.AbsTime, cap uint64) int64
// EstMinPriority should return a lower estimate for the minimum of the node priority
// value starting from the current moment until the given time. If the priority goes
// under the returned estimate before the specified moment then it is the caller's
// responsibility to signal with updateFlag.
EstMinPriority(until mclock.AbsTime, cap uint64, update bool) int64
}
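// Editor's note (not part of the original commit): NodeBalance in balance.go
// is the implementation of this interface used in practice; it reaches the
// pool through the priorityField wired up in PriorityPoolSetup.Connect.
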
// ppNodeInfo is the internal node descriptor of PriorityPool
type ppNodeInfo struct {
nodePriority nodePriority
node *enode.Node
connected bool
capacity, origCap uint64
bias time.Duration
forced, changed bool
activeIndex, inactiveIndex int
}
// NewPriorityPool creates a new PriorityPool
func NewPriorityPool(ns *nodestate.NodeStateMachine, setup PriorityPoolSetup, clock mclock.Clock, minCap uint64, activeBias time.Duration, capacityStepDiv uint64) *PriorityPool {
pp := &PriorityPool{
ns: ns,
PriorityPoolSetup: setup,
clock: clock,
activeQueue: prque.NewLazyQueue(activeSetIndex, activePriority, activeMaxPriority, clock, lazyQueueRefresh),
inactiveQueue: prque.New(inactiveSetIndex),
minCap: minCap,
activeBias: activeBias,
capacityStepDiv: capacityStepDiv,
}
ns.SubscribeField(pp.priorityField, func(node *enode.Node, state nodestate.Flags, oldValue, newValue interface{}) {
if newValue != nil {
c := &ppNodeInfo{
node: node,
nodePriority: newValue.(nodePriority),
activeIndex: -1,
inactiveIndex: -1,
}
ns.SetFieldSub(node, pp.ppNodeInfoField, c)
} else {
ns.SetStateSub(node, nodestate.Flags{}, pp.ActiveFlag.Or(pp.InactiveFlag), 0)
if n, _ := pp.ns.GetField(node, pp.ppNodeInfoField).(*ppNodeInfo); n != nil {
pp.disconnectedNode(n)
}
ns.SetFieldSub(node, pp.CapacityField, nil)
ns.SetFieldSub(node, pp.ppNodeInfoField, nil)
}
})
ns.SubscribeState(pp.ActiveFlag.Or(pp.InactiveFlag), func(node *enode.Node, oldState, newState nodestate.Flags) {
if c, _ := pp.ns.GetField(node, pp.ppNodeInfoField).(*ppNodeInfo); c != nil {
if oldState.IsEmpty() {
pp.connectedNode(c)
}
if newState.IsEmpty() {
pp.disconnectedNode(c)
}
}
})
ns.SubscribeState(pp.updateFlag, func(node *enode.Node, oldState, newState nodestate.Flags) {
if !newState.IsEmpty() {
pp.updatePriority(node)
}
})
return pp
}
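
// Putting the pieces together: a hedged construction sketch (the values are
// illustrative; TestPriorityPool below follows the same pattern):
//
//	pp := NewPriorityPool(ns, setup, &mclock.Simulated{}, 100, 0, 100)
//	ns.Start()
//	pp.SetLimits(100, 1000000) // at most 100 active nodes and 1000000 total capacity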

// RequestCapacity checks whether changing the capacity of a node to the given target
// is possible (bias is applied in favor of other active nodes if the target is higher
// than the current capacity).
// If setCap is true then it also performs the change if possible. The function returns
// the minimum priority needed to make the change and whether it is currently allowed.
// If setCap and allowed are both true then the caller can assume that the change was
// successful.
// Note: priorityField should always be set before calling RequestCapacity. If setCap
// is false then both InactiveFlag and ActiveFlag can be unset and they are not changed
// by this function call either.
// Note 2: this function should run inside a NodeStateMachine operation
func (pp *PriorityPool) RequestCapacity(node *enode.Node, targetCap uint64, bias time.Duration, setCap bool) (minPriority int64, allowed bool) {
pp.lock.Lock()
pp.activeQueue.Refresh()
var updates []capUpdate
defer func() {
pp.lock.Unlock()
pp.updateFlags(updates)
}()
if targetCap < pp.minCap {
targetCap = pp.minCap
}
c, _ := pp.ns.GetField(node, pp.ppNodeInfoField).(*ppNodeInfo)
if c == nil {
log.Error("RequestCapacity called for unknown node", "id", node.ID())
return math.MaxInt64, false
}
var priority int64
if targetCap > c.capacity {
priority = c.nodePriority.EstMinPriority(pp.clock.Now()+mclock.AbsTime(bias), targetCap, false)
} else {
priority = c.nodePriority.Priority(pp.clock.Now(), targetCap)
}
pp.markForChange(c)
pp.setCapacity(c, targetCap)
c.forced = true
pp.activeQueue.Remove(c.activeIndex)
pp.inactiveQueue.Remove(c.inactiveIndex)
pp.activeQueue.Push(c)
minPriority = pp.enforceLimits()
// if capacity update is possible now then minPriority == math.MinInt64
// if it is not possible at all then minPriority == math.MaxInt64
allowed = priority > minPriority
updates = pp.finalizeChanges(setCap && allowed)
return
}
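
// A hedged usage sketch for RequestCapacity; per Note 2 above the call must be
// wrapped in a NodeStateMachine operation (node and targetCap are assumed from
// the caller's context):
//
//	var allowed bool
//	ns.Operation(func() {
//		_, allowed = pp.RequestCapacity(node, targetCap, 0, true)
//	})
//	// if allowed is true, the capacity change has been committed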

// SetLimits sets the maximum number and total capacity of simultaneously active nodes
func (pp *PriorityPool) SetLimits(maxCount, maxCap uint64) {
pp.lock.Lock()
pp.activeQueue.Refresh()
var updates []capUpdate
defer func() {
pp.lock.Unlock()
pp.ns.Operation(func() { pp.updateFlags(updates) })
}()
inc := (maxCount > pp.maxCount) || (maxCap > pp.maxCap)
dec := (maxCount < pp.maxCount) || (maxCap < pp.maxCap)
pp.maxCount, pp.maxCap = maxCount, maxCap
if dec {
pp.enforceLimits()
updates = pp.finalizeChanges(true)
}
if inc {
updates = pp.tryActivate()
}
}

// SetActiveBias sets the bias applied when trying to activate inactive nodes
func (pp *PriorityPool) SetActiveBias(bias time.Duration) {
	pp.lock.Lock()
	pp.activeBias = bias
	updates := pp.tryActivate()
	pp.lock.Unlock()
	pp.ns.Operation(func() { pp.updateFlags(updates) })
}

// Active returns the number and total capacity of currently active nodes
func (pp *PriorityPool) Active() (uint64, uint64) {
pp.lock.Lock()
defer pp.lock.Unlock()
return pp.activeCount, pp.activeCap
}

// inactiveSetIndex callback updates ppNodeInfo item index in inactiveQueue
func inactiveSetIndex(a interface{}, index int) {
a.(*ppNodeInfo).inactiveIndex = index
}

// activeSetIndex callback updates ppNodeInfo item index in activeQueue
func activeSetIndex(a interface{}, index int) {
a.(*ppNodeInfo).activeIndex = index
}

// invertPriority inverts a priority value. The active queue uses inverted priorities
// because the node on the top is the first to be deactivated.
func invertPriority(p int64) int64 {
if p == math.MinInt64 {
return math.MaxInt64
}
return -p
}
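
// Example of the inversion above: actual priorities 7 and 3 are stored as -7
// and -3, so the queue's maximum (-3) identifies the lowest priority node,
// which is the first candidate for deactivation.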

// activePriority callback returns the actual priority of a ppNodeInfo item in activeQueue
func activePriority(a interface{}, now mclock.AbsTime) int64 {
c := a.(*ppNodeInfo)
if c.forced {
return math.MinInt64
}
if c.bias == 0 {
return invertPriority(c.nodePriority.Priority(now, c.capacity))
}
return invertPriority(c.nodePriority.EstMinPriority(now+mclock.AbsTime(c.bias), c.capacity, true))
}

// activeMaxPriority callback returns the estimated maximum priority of a ppNodeInfo item in activeQueue
func activeMaxPriority(a interface{}, until mclock.AbsTime) int64 {
c := a.(*ppNodeInfo)
if c.forced {
return math.MinInt64
}
return invertPriority(c.nodePriority.EstMinPriority(until+mclock.AbsTime(c.bias), c.capacity, false))
}

// inactivePriority callback returns the actual priority of a ppNodeInfo item in inactiveQueue
func (pp *PriorityPool) inactivePriority(p *ppNodeInfo) int64 {
return p.nodePriority.Priority(pp.clock.Now(), pp.minCap)
}

// connectedNode is called when a new node has been added to the pool (InactiveFlag set)
// Note: this function should run inside a NodeStateMachine operation
func (pp *PriorityPool) connectedNode(c *ppNodeInfo) {
pp.lock.Lock()
pp.activeQueue.Refresh()
var updates []capUpdate
defer func() {
pp.lock.Unlock()
pp.updateFlags(updates)
}()
if c.connected {
return
}
c.connected = true
pp.inactiveQueue.Push(c, pp.inactivePriority(c))
updates = pp.tryActivate()
}

// disconnectedNode is called when a node has been removed from the pool (both InactiveFlag
// and ActiveFlag reset)
// Note: this function should run inside a NodeStateMachine operation
func (pp *PriorityPool) disconnectedNode(c *ppNodeInfo) {
pp.lock.Lock()
pp.activeQueue.Refresh()
var updates []capUpdate
defer func() {
pp.lock.Unlock()
pp.updateFlags(updates)
}()
if !c.connected {
return
}
c.connected = false
pp.activeQueue.Remove(c.activeIndex)
pp.inactiveQueue.Remove(c.inactiveIndex)
if c.capacity != 0 {
pp.setCapacity(c, 0)
updates = pp.tryActivate()
}
}

// markForChange internally puts a node in a temporary state that can either be reverted
// or confirmed later. This temporary state allows changing the capacity of a node and
// moving it between the active and inactive queue. ActiveFlag/InactiveFlag and
// CapacityField are not changed while the changes are still temporary.
func (pp *PriorityPool) markForChange(c *ppNodeInfo) {
if c.changed {
return
}
c.changed = true
c.origCap = c.capacity
pp.changed = append(pp.changed, c)
}

// setCapacity changes the capacity of a node and adjusts activeCap and activeCount
// accordingly. Note that this change is performed in the temporary state so it should
// be called after markForChange and before finalizeChanges.
func (pp *PriorityPool) setCapacity(n *ppNodeInfo, cap uint64) {
	pp.activeCap += cap - n.capacity // uint64 arithmetic wraps modulo 2^64, so this is also correct when cap < n.capacity
	if n.capacity == 0 {
		pp.activeCount++ // zero -> nonzero: the node counts as active
	}
	if cap == 0 {
		pp.activeCount-- // nonzero -> zero: the node is deactivated
	}
	n.capacity = cap
}

// enforceLimits enforces the active node count and total capacity limits. It returns
// the lowest active node priority. Note that this function operates on the temporary
// internal state.
func (pp *PriorityPool) enforceLimits() int64 {
if pp.activeCap <= pp.maxCap && pp.activeCount <= pp.maxCount {
return math.MinInt64
}
var maxActivePriority int64
pp.activeQueue.MultiPop(func(data interface{}, priority int64) bool {
c := data.(*ppNodeInfo)
pp.markForChange(c)
maxActivePriority = priority
if c.capacity == pp.minCap {
pp.setCapacity(c, 0)
} else {
sub := c.capacity / pp.capacityStepDiv
if c.capacity-sub < pp.minCap {
sub = c.capacity - pp.minCap
}
pp.setCapacity(c, c.capacity-sub)
pp.activeQueue.Push(c)
}
return pp.activeCap > pp.maxCap || pp.activeCount > pp.maxCount
})
return invertPriority(maxActivePriority)
}
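
// A worked example of the reduction step above: with minCap = 100 and
// capacityStepDiv = 100, a node at capacity 100000 loses 100000/100 = 1000 per
// pass, so limits are approached in ~1% steps rather than by immediate
// disconnection. With capacityStepDiv = 2 a node at capacity 150 would compute
// sub = 75, but since 150-75 < 100 the subtraction is clamped to 50, landing
// exactly on minCap; a node already at minCap is deactivated outright.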

// finalizeChanges either commits or reverts temporary changes. The necessary capacity
// field and corresponding flag updates are not performed here but returned in a list
// because they should be performed while the mutex is not held.
func (pp *PriorityPool) finalizeChanges(commit bool) (updates []capUpdate) {
for _, c := range pp.changed {
// always remove and push back in order to update biased/forced priority
pp.activeQueue.Remove(c.activeIndex)
pp.inactiveQueue.Remove(c.inactiveIndex)
c.bias = 0
c.forced = false
c.changed = false
if !commit {
pp.setCapacity(c, c.origCap)
}
if c.connected {
if c.capacity != 0 {
pp.activeQueue.Push(c)
} else {
pp.inactiveQueue.Push(c, pp.inactivePriority(c))
}
if c.capacity != c.origCap && commit {
updates = append(updates, capUpdate{c.node, c.origCap, c.capacity})
}
}
c.origCap = 0
}
pp.changed = nil
return
}
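
// The temporary-state pattern used throughout this file, sketched end to end
// (a summary of the existing calls, not a new API):
//
//	pp.markForChange(c)               // snapshot origCap so the change can be reverted
//	pp.setCapacity(c, newCap)         // tentatively adjust activeCap/activeCount
//	minPri := pp.enforceLimits()      // shrink or demote lower priority nodes as needed
//	updates := pp.finalizeChanges(ok) // commit or roll back; collect field/flag updates
//	// updates must then be applied via updateFlags once pp.lock is released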

// capUpdate describes a CapacityField and ActiveFlag/InactiveFlag update
type capUpdate struct {
node *enode.Node
oldCap, newCap uint64
}

// updateFlags performs CapacityField and ActiveFlag/InactiveFlag updates while the
// pool mutex is not held
// Note: this function should run inside a NodeStateMachine operation
func (pp *PriorityPool) updateFlags(updates []capUpdate) {
for _, f := range updates {
if f.oldCap == 0 {
pp.ns.SetStateSub(f.node, pp.ActiveFlag, pp.InactiveFlag, 0)
}
if f.newCap == 0 {
pp.ns.SetStateSub(f.node, pp.InactiveFlag, pp.ActiveFlag, 0)
pp.ns.SetFieldSub(f.node, pp.CapacityField, nil)
} else {
pp.ns.SetFieldSub(f.node, pp.CapacityField, f.newCap)
}
}
}

// tryActivate tries to activate inactive nodes if possible
func (pp *PriorityPool) tryActivate() []capUpdate {
var commit bool
for pp.inactiveQueue.Size() > 0 {
c := pp.inactiveQueue.PopItem().(*ppNodeInfo)
pp.markForChange(c)
pp.setCapacity(c, pp.minCap)
c.bias = pp.activeBias
pp.activeQueue.Push(c)
pp.enforceLimits()
if c.capacity > 0 {
commit = true
} else {
break
}
}
return pp.finalizeChanges(commit)
}
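
// Note that the loop above stops at the first candidate that cannot be kept
// active, while commit stays true if any earlier candidate succeeded; this is
// safe because enforceLimits has already forced the failed candidate's
// capacity back to zero, so finalizeChanges(true) re-queues it as inactive.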

// updatePriority gets the current priority value of the given node from the nodePriority
// interface and performs the necessary changes. It is triggered by updateFlag.
// Note: this function should run inside a NodeStateMachine operation
func (pp *PriorityPool) updatePriority(node *enode.Node) {
pp.lock.Lock()
pp.activeQueue.Refresh()
var updates []capUpdate
defer func() {
pp.lock.Unlock()
pp.updateFlags(updates)
}()
c, _ := pp.ns.GetField(node, pp.ppNodeInfoField).(*ppNodeInfo)
if c == nil || !c.connected {
return
}
pp.activeQueue.Remove(c.activeIndex)
pp.inactiveQueue.Remove(c.inactiveIndex)
if c.capacity != 0 {
pp.activeQueue.Push(c)
} else {
pp.inactiveQueue.Push(c, pp.inactivePriority(c))
}
updates = pp.tryActivate()
}

@@ -0,0 +1,129 @@
// Copyright 2020 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package server

import (
	"math/rand"
	"reflect"
	"testing"

	"github.com/ethereum/go-ethereum/common/mclock"
	"github.com/ethereum/go-ethereum/p2p/enode"
	"github.com/ethereum/go-ethereum/p2p/enr"
	"github.com/ethereum/go-ethereum/p2p/nodestate"
)

var (
testSetup = &nodestate.Setup{}
ppTestClientFlag = testSetup.NewFlag("ppTestClientFlag")
ppTestClientField = testSetup.NewField("ppTestClient", reflect.TypeOf(&ppTestClient{}))
ppUpdateFlag = testSetup.NewFlag("ppUpdateFlag")
ppTestSetup = NewPriorityPoolSetup(testSetup)
)

func init() {
ppTestSetup.Connect(ppTestClientField, ppUpdateFlag)
}

const (
testCapacityStepDiv = 100
testCapacityToleranceDiv = 10
)

type ppTestClient struct {
node *enode.Node
balance, cap uint64
}

func (c *ppTestClient) Priority(now mclock.AbsTime, cap uint64) int64 {
	return int64(c.balance / cap)
}

func (c *ppTestClient) EstMinPriority(until mclock.AbsTime, cap uint64, update bool) int64 {
	return int64(c.balance / cap) // the test balance only changes explicitly, so the current value is a valid minimum estimate
}

func TestPriorityPool(t *testing.T) {
clock := &mclock.Simulated{}
ns := nodestate.NewNodeStateMachine(nil, nil, clock, testSetup)
ns.SubscribeField(ppTestSetup.CapacityField, func(node *enode.Node, state nodestate.Flags, oldValue, newValue interface{}) {
if n := ns.GetField(node, ppTestSetup.priorityField); n != nil {
c := n.(*ppTestClient)
c.cap = newValue.(uint64)
}
})
pp := NewPriorityPool(ns, ppTestSetup, clock, 100, 0, testCapacityStepDiv)
ns.Start()
pp.SetLimits(100, 1000000)
clients := make([]*ppTestClient, 100)
raise := func(c *ppTestClient) {
for {
var ok bool
ns.Operation(func() {
_, ok = pp.RequestCapacity(c.node, c.cap+c.cap/testCapacityStepDiv, 0, true)
})
if !ok {
return
}
}
}
var sumBalance uint64
check := func(c *ppTestClient) {
expCap := 1000000 * c.balance / sumBalance
capTol := expCap / testCapacityToleranceDiv
if c.cap < expCap-capTol || c.cap > expCap+capTol {
t.Errorf("Wrong node capacity (expected %d, got %d)", expCap, c.cap)
}
}
for i := range clients {
c := &ppTestClient{
node: enode.SignNull(&enr.Record{}, enode.ID{byte(i)}),
balance: 1000000000,
cap: 1000,
}
sumBalance += c.balance
clients[i] = c
ns.SetState(c.node, ppTestClientFlag, nodestate.Flags{}, 0)
ns.SetField(c.node, ppTestSetup.priorityField, c)
ns.SetState(c.node, ppTestSetup.InactiveFlag, nodestate.Flags{}, 0)
raise(c)
check(c)
}
for count := 0; count < 100; count++ {
c := clients[rand.Intn(len(clients))]
oldBalance := c.balance
c.balance = uint64(rand.Int63n(1000000000) + 1000000000)
sumBalance += c.balance - oldBalance
		pp.ns.SetState(c.node, ppUpdateFlag, nodestate.Flags{}, 0) // pulse updateFlag so the pool re-evaluates the node's priority
		pp.ns.SetState(c.node, nodestate.Flags{}, ppUpdateFlag, 0)
if c.balance > oldBalance {
raise(c)
} else {
for _, c := range clients {
raise(c)
}
}
for _, c := range clients {
check(c)
}
}
ns.Stop()
}