les, les/lespay/server: refactor client pool (#21236)
* les, les/lespay/server: refactor client pool
* les: use ns.Operation and sub calls where needed
* les: fixed tests
* les: removed active/inactive logic from peerSet
* les: removed active/inactive peer logic
* les: fixed linter warnings
* les: fixed more linter errors and added missing metrics
* les: addressed comments
* cmd/geth: fixed TestPriorityClient
* les: simplified clientPool state machine
* les/lespay/server: do not use goroutine for balance callbacks
* internal/web3ext: fix addBalance required parameters
* les: removed freeCapacity, always connect at minCapacity initially
* les: only allow capacity change with priority status

Co-authored-by: rjl493456442 <garyrong0905@gmail.com>
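All hunks below are from les/clientpool_test.go. For orientation, this is roughly the API surface the refactor moves the tests onto, sketched from this diff alone (signatures inferred from the call sites, not an authoritative listing; peer is a *poolTestPeer from the test harness):

    // the connected bias is now a constructor argument
    pool := newClientPool(db, 1, defaultConnectedBias, &clock, removeFn)
    pool.setLimits(10, uint64(10)) // active peer limit, total capacity limit
    pool.setDefaultFactors(
        lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, // positive balance
        lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, // negative balance
    )
    // connect no longer takes a requested capacity: every peer starts at
    // minCapacity, and the granted capacity is returned (0 means rejected).
    cap, err := pool.connect(peer)
    // capacity can only be raised afterwards, and only while the client
    // has priority status
    _, err = pool.setCapacity(peer.node, "", 10, defaultConnectedBias, true)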
@@ -17,17 +17,17 @@
 package les
 
 import (
-	"bytes"
 	"fmt"
-	"math"
 	"math/rand"
-	"reflect"
 	"testing"
 	"time"
 
 	"github.com/ethereum/go-ethereum/common/mclock"
 	"github.com/ethereum/go-ethereum/core/rawdb"
+	lps "github.com/ethereum/go-ethereum/les/lespay/server"
 	"github.com/ethereum/go-ethereum/p2p/enode"
+	"github.com/ethereum/go-ethereum/p2p/enr"
+	"github.com/ethereum/go-ethereum/p2p/nodestate"
 )
 
 func TestClientPoolL10C100Free(t *testing.T) {
@@ -56,29 +56,68 @@ func TestClientPoolL100C300P20(t *testing.T) {
 
 const testClientPoolTicks = 100000
 
-type poolTestPeer int
-
-func (i poolTestPeer) ID() enode.ID {
-	return enode.ID{byte(i % 256), byte(i >> 8)}
+type poolTestPeer struct {
+	node            *enode.Node
+	index           int
+	disconnCh       chan int
+	cap             uint64
+	inactiveAllowed bool
 }
 
-func (i poolTestPeer) freeClientId() string {
-	return fmt.Sprintf("addr #%d", i)
+func newPoolTestPeer(i int, disconnCh chan int) *poolTestPeer {
+	return &poolTestPeer{
+		index:     i,
+		disconnCh: disconnCh,
+		node:      enode.SignNull(&enr.Record{}, enode.ID{byte(i % 256), byte(i >> 8)}),
+	}
 }
 
-func (i poolTestPeer) updateCapacity(uint64) {}
-
-type poolTestPeerWithCap struct {
-	poolTestPeer
-
-	cap uint64
+func (i *poolTestPeer) Node() *enode.Node {
+	return i.node
 }
 
-func (i *poolTestPeerWithCap) updateCapacity(cap uint64) { i.cap = cap }
+func (i *poolTestPeer) freeClientId() string {
+	return fmt.Sprintf("addr #%d", i.index)
+}
 
-func (i poolTestPeer) freezeClient() {}
+func (i *poolTestPeer) updateCapacity(cap uint64) {
+	i.cap = cap
+}
 
-func testClientPool(t *testing.T, connLimit, clientCount, paidCount int, randomDisconnect bool) {
+func (i *poolTestPeer) freeze() {}
+
+func (i *poolTestPeer) allowInactive() bool {
+	return i.inactiveAllowed
+}
+
+func getBalance(pool *clientPool, p *poolTestPeer) (pos, neg uint64) {
+	temp := pool.ns.GetField(p.node, clientField) == nil
+	if temp {
+		pool.ns.SetField(p.node, connAddressField, p.freeClientId())
+	}
+	n, _ := pool.ns.GetField(p.node, pool.BalanceField).(*lps.NodeBalance)
+	pos, neg = n.GetBalance()
+	if temp {
+		pool.ns.SetField(p.node, connAddressField, nil)
+	}
+	return
+}
+
+func addBalance(pool *clientPool, id enode.ID, amount int64) {
+	pool.forClients([]enode.ID{id}, func(c *clientInfo) {
+		c.balance.AddBalance(amount)
+	})
+}
+
+func checkDiff(a, b uint64) bool {
+	maxDiff := (a + b) / 2000
+	if maxDiff < 1 {
+		maxDiff = 1
+	}
+	return a > b+maxDiff || b > a+maxDiff
+}
+
+func testClientPool(t *testing.T, activeLimit, clientCount, paidCount int, randomDisconnect bool) {
 	rand.Seed(time.Now().UnixNano())
 	var (
 		clock mclock.Simulated
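Two of the new helpers deserve a note. getBalance briefly attaches connAddressField so that a balance object exists even for a peer that is not currently connected, then detaches it again. checkDiff replaces exact equality in the assertions below: it reports a mismatch only when two values differ by more than (a+b)/2000, roughly 0.05% of their mean, with a floor of 1. For the two-minute balance expected later that allows about 0.12s of drift:

    want := uint64(2 * time.Minute) // 120,000,000,000 ns
    slack := (want + want) / 2000   // 120,000,000 ns, about 0.12s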
@@ -89,15 +128,15 @@ func testClientPool(t *testing.T, connLimit, clientCount, paidCount int, randomDisconnect bool) {
 		disconnFn = func(id enode.ID) {
 			disconnCh <- int(id[0]) + int(id[1])<<8
 		}
-		pool = newClientPool(db, 1, &clock, disconnFn)
+		pool = newClientPool(db, 1, 0, &clock, disconnFn)
 	)
 	pool.setConnectedBias(0)
-	pool.setLimits(connLimit, uint64(connLimit))
-	pool.setDefaultFactors(priceFactors{1, 0, 1}, priceFactors{1, 0, 1})
+	pool.setLimits(activeLimit, uint64(activeLimit))
+	pool.setDefaultFactors(lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1})
 
 	// pool should accept new peers up to its connected limit
-	for i := 0; i < connLimit; i++ {
-		if pool.connect(poolTestPeer(i), 0) {
+	for i := 0; i < activeLimit; i++ {
+		if cap, _ := pool.connect(newPoolTestPeer(i, disconnCh)); cap != 0 {
 			connected[i] = true
 		} else {
 			t.Fatalf("Test peer #%d rejected", i)
@@ -111,28 +150,30 @@ func testClientPool(t *testing.T, connLimit, clientCount, paidCount int, randomDisconnect bool) {
 			// give a positive balance to some of the peers
 			amount := testClientPoolTicks / 2 * int64(time.Second) // enough for half of the simulation period
 			for i := 0; i < paidCount; i++ {
-				pool.addBalance(poolTestPeer(i).ID(), amount, "")
+				addBalance(pool, newPoolTestPeer(i, disconnCh).node.ID(), amount)
 			}
 		}
 
 		i := rand.Intn(clientCount)
 		if connected[i] {
 			if randomDisconnect {
-				pool.disconnect(poolTestPeer(i))
+				pool.disconnect(newPoolTestPeer(i, disconnCh))
 				connected[i] = false
 				connTicks[i] += tickCounter
 			}
 		} else {
-			if pool.connect(poolTestPeer(i), 0) {
+			if cap, _ := pool.connect(newPoolTestPeer(i, disconnCh)); cap != 0 {
 				connected[i] = true
 				connTicks[i] -= tickCounter
+			} else {
+				pool.disconnect(newPoolTestPeer(i, disconnCh))
 			}
 		}
 	pollDisconnects:
 		for {
 			select {
 			case i := <-disconnCh:
-				pool.disconnect(poolTestPeer(i))
+				pool.disconnect(newPoolTestPeer(i, disconnCh))
 				if connected[i] {
 					connTicks[i] += tickCounter
 					connected[i] = false
@@ -143,10 +184,10 @@ func testClientPool(t *testing.T, connLimit, clientCount, paidCount int, randomDisconnect bool) {
 			}
 		}
 
-	expTicks := testClientPoolTicks/2*connLimit/clientCount + testClientPoolTicks/2*(connLimit-paidCount)/(clientCount-paidCount)
+	expTicks := testClientPoolTicks/2*activeLimit/clientCount + testClientPoolTicks/2*(activeLimit-paidCount)/(clientCount-paidCount)
 	expMin := expTicks - expTicks/5
 	expMax := expTicks + expTicks/5
-	paidTicks := testClientPoolTicks/2*connLimit/clientCount + testClientPoolTicks/2
+	paidTicks := testClientPoolTicks/2*activeLimit/clientCount + testClientPoolTicks/2
 	paidMin := paidTicks - paidTicks/5
 	paidMax := paidTicks + paidTicks/5
 
@@ -167,22 +208,39 @@ func testClientPool(t *testing.T, connLimit, clientCount, paidCount int, randomDisconnect bool) {
 	pool.stop()
 }
 
+func testPriorityConnect(t *testing.T, pool *clientPool, p *poolTestPeer, cap uint64, expSuccess bool) {
+	if cap, _ := pool.connect(p); cap == 0 {
+		if expSuccess {
+			t.Fatalf("Failed to connect paid client")
+		} else {
+			return
+		}
+	}
+	if _, err := pool.setCapacity(p.node, "", cap, defaultConnectedBias, true); err != nil {
+		if expSuccess {
+			t.Fatalf("Failed to raise capacity of paid client")
+		} else {
+			return
+		}
+	}
+	if !expSuccess {
+		t.Fatalf("Should reject high capacity paid client")
+	}
+}
+
 func TestConnectPaidClient(t *testing.T) {
 	var (
 		clock mclock.Simulated
 		db    = rawdb.NewMemoryDatabase()
 	)
-	pool := newClientPool(db, 1, &clock, nil)
+	pool := newClientPool(db, 1, defaultConnectedBias, &clock, func(id enode.ID) {})
 	defer pool.stop()
 	pool.setLimits(10, uint64(10))
-	pool.setDefaultFactors(priceFactors{1, 0, 1}, priceFactors{1, 0, 1})
+	pool.setDefaultFactors(lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1})
 
 	// Add balance for an external client and mark it as paid client
-	pool.addBalance(poolTestPeer(0).ID(), 1000, "")
-
-	if !pool.connect(poolTestPeer(0), 10) {
-		t.Fatalf("Failed to connect paid client")
-	}
+	addBalance(pool, newPoolTestPeer(0, nil).node.ID(), int64(time.Minute))
+	testPriorityConnect(t, pool, newPoolTestPeer(0, nil), 10, true)
 }
 
 func TestConnectPaidClientToSmallPool(t *testing.T) {
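The testPriorityConnect helper encodes the last two commit-message bullets: a peer now always connects at minCapacity, and anything higher must be requested afterwards through setCapacity, which only succeeds while the client has priority status. A typical call from the hunks below:

    // expect both steps to succeed for a funded peer: connect at
    // minCapacity, then raise the capacity to 10
    testPriorityConnect(t, pool, newPoolTestPeer(0, nil), 10, true)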
@@ -190,18 +248,16 @@ func TestConnectPaidClientToSmallPool(t *testing.T) {
 		clock mclock.Simulated
 		db    = rawdb.NewMemoryDatabase()
 	)
-	pool := newClientPool(db, 1, &clock, nil)
+	pool := newClientPool(db, 1, defaultConnectedBias, &clock, func(id enode.ID) {})
 	defer pool.stop()
 	pool.setLimits(10, uint64(10)) // Total capacity limit is 10
-	pool.setDefaultFactors(priceFactors{1, 0, 1}, priceFactors{1, 0, 1})
+	pool.setDefaultFactors(lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1})
 
 	// Add balance for an external client and mark it as paid client
-	pool.addBalance(poolTestPeer(0).ID(), 1000, "")
+	addBalance(pool, newPoolTestPeer(0, nil).node.ID(), int64(time.Minute))
 
 	// Connect a fat paid client to pool, should reject it.
-	if pool.connect(poolTestPeer(0), 100) {
-		t.Fatalf("Connected fat paid client, should reject it")
-	}
+	testPriorityConnect(t, pool, newPoolTestPeer(0, nil), 100, false)
 }
 
 func TestConnectPaidClientToFullPool(t *testing.T) {
@@ -210,23 +266,23 @@ func TestConnectPaidClientToFullPool(t *testing.T) {
 		db    = rawdb.NewMemoryDatabase()
 	)
 	removeFn := func(enode.ID) {} // Noop
-	pool := newClientPool(db, 1, &clock, removeFn)
+	pool := newClientPool(db, 1, defaultConnectedBias, &clock, removeFn)
 	defer pool.stop()
 	pool.setLimits(10, uint64(10)) // Total capacity limit is 10
-	pool.setDefaultFactors(priceFactors{1, 0, 1}, priceFactors{1, 0, 1})
+	pool.setDefaultFactors(lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1})
 
 	for i := 0; i < 10; i++ {
-		pool.addBalance(poolTestPeer(i).ID(), 1000000000, "")
-		pool.connect(poolTestPeer(i), 1)
+		addBalance(pool, newPoolTestPeer(i, nil).node.ID(), int64(time.Second*20))
+		pool.connect(newPoolTestPeer(i, nil))
 	}
-	pool.addBalance(poolTestPeer(11).ID(), 1000, "") // Add low balance to new paid client
-	if pool.connect(poolTestPeer(11), 1) {
+	addBalance(pool, newPoolTestPeer(11, nil).node.ID(), int64(time.Second*2)) // Add low balance to new paid client
+	if cap, _ := pool.connect(newPoolTestPeer(11, nil)); cap != 0 {
 		t.Fatalf("Low balance paid client should be rejected")
 	}
 	clock.Run(time.Second)
-	pool.addBalance(poolTestPeer(12).ID(), 1000000000*60*3, "") // Add high balance to new paid client
-	if !pool.connect(poolTestPeer(12), 1) {
-		t.Fatalf("High balance paid client should be accpected")
+	addBalance(pool, newPoolTestPeer(12, nil).node.ID(), int64(time.Minute*5)) // Add high balance to new paid client
+	if cap, _ := pool.connect(newPoolTestPeer(12, nil)); cap == 0 {
+		t.Fatalf("High balance paid client should be accepted")
 	}
 }
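A note on the balance amounts in these tests: they are durations in nanoseconds. With TimeFactor 1 a capacity-1 connection appears to cost one nanosecond of balance per nanosecond online (the "10 second allowance" comment next to addBalance(..., 10000000000) in the next hunk reads the same way). So the int64(time.Second*2) given to peer #11 is spent almost immediately, while the int64(time.Minute*5) given to peer #12 comfortably outlasts the connection bias.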
@@ -234,23 +290,25 @@ func TestPaidClientKickedOut(t *testing.T) {
 	var (
 		clock    mclock.Simulated
 		db       = rawdb.NewMemoryDatabase()
-		kickedCh = make(chan int, 1)
+		kickedCh = make(chan int, 100)
 	)
-	removeFn := func(id enode.ID) { kickedCh <- int(id[0]) }
-	pool := newClientPool(db, 1, &clock, removeFn)
+	removeFn := func(id enode.ID) {
+		kickedCh <- int(id[0])
+	}
+	pool := newClientPool(db, 1, defaultConnectedBias, &clock, removeFn)
+	pool.bt.SetExpirationTCs(0, 0)
 	defer pool.stop()
 	pool.setLimits(10, uint64(10)) // Total capacity limit is 10
-	pool.setDefaultFactors(priceFactors{1, 0, 1}, priceFactors{1, 0, 1})
+	pool.setDefaultFactors(lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1})
 
 	for i := 0; i < 10; i++ {
-		pool.addBalance(poolTestPeer(i).ID(), 1000000000, "") // 1 second allowance
-		pool.connect(poolTestPeer(i), 1)
+		addBalance(pool, newPoolTestPeer(i, kickedCh).node.ID(), 10000000000) // 10 second allowance
+		pool.connect(newPoolTestPeer(i, kickedCh))
 		clock.Run(time.Millisecond)
 	}
-	clock.Run(time.Second)
-	clock.Run(defaultConnectedBias)
-	if !pool.connect(poolTestPeer(11), 0) {
-		t.Fatalf("Free client should be accectped")
+	clock.Run(defaultConnectedBias + time.Second*11)
+	if cap, _ := pool.connect(newPoolTestPeer(11, kickedCh)); cap == 0 {
+		t.Fatalf("Free client should be accepted")
 	}
 	select {
 	case id := <-kickedCh:
@@ -267,13 +325,14 @@ func TestConnectFreeClient(t *testing.T) {
 		clock mclock.Simulated
 		db    = rawdb.NewMemoryDatabase()
 	)
-	pool := newClientPool(db, 1, &clock, nil)
+	pool := newClientPool(db, 1, defaultConnectedBias, &clock, func(id enode.ID) {})
 	defer pool.stop()
 	pool.setLimits(10, uint64(10))
-	pool.setDefaultFactors(priceFactors{1, 0, 1}, priceFactors{1, 0, 1})
-	if !pool.connect(poolTestPeer(0), 10) {
+	pool.setDefaultFactors(lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1})
+	if cap, _ := pool.connect(newPoolTestPeer(0, nil)); cap == 0 {
 		t.Fatalf("Failed to connect free client")
 	}
+	testPriorityConnect(t, pool, newPoolTestPeer(0, nil), 2, false)
 }
 
 func TestConnectFreeClientToFullPool(t *testing.T) {
@@ -282,24 +341,24 @@ func TestConnectFreeClientToFullPool(t *testing.T) {
 		clock mclock.Simulated
 		db    = rawdb.NewMemoryDatabase()
 	)
 	removeFn := func(enode.ID) {} // Noop
-	pool := newClientPool(db, 1, &clock, removeFn)
+	pool := newClientPool(db, 1, defaultConnectedBias, &clock, removeFn)
 	defer pool.stop()
 	pool.setLimits(10, uint64(10)) // Total capacity limit is 10
-	pool.setDefaultFactors(priceFactors{1, 0, 1}, priceFactors{1, 0, 1})
+	pool.setDefaultFactors(lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1})
 
 	for i := 0; i < 10; i++ {
-		pool.connect(poolTestPeer(i), 1)
+		pool.connect(newPoolTestPeer(i, nil))
 	}
-	if pool.connect(poolTestPeer(11), 1) {
+	if cap, _ := pool.connect(newPoolTestPeer(11, nil)); cap != 0 {
 		t.Fatalf("New free client should be rejected")
 	}
 	clock.Run(time.Minute)
-	if pool.connect(poolTestPeer(12), 1) {
+	if cap, _ := pool.connect(newPoolTestPeer(12, nil)); cap != 0 {
 		t.Fatalf("New free client should be rejected")
 	}
-	clock.Run(time.Millisecond)
-	if !pool.connect(poolTestPeer(13), 1) {
+	clock.Run(4 * time.Minute)
+	if cap, _ := pool.connect(newPoolTestPeer(13, nil)); cap == 0 {
 		t.Fatalf("Old client connects more than 5min should be kicked")
 	}
 }
@@ -308,24 +367,30 @@ func TestFreeClientKickedOut(t *testing.T) {
 	var (
 		clock  mclock.Simulated
 		db     = rawdb.NewMemoryDatabase()
-		kicked = make(chan int, 10)
+		kicked = make(chan int, 100)
 	)
 	removeFn := func(id enode.ID) { kicked <- int(id[0]) }
-	pool := newClientPool(db, 1, &clock, removeFn)
+	pool := newClientPool(db, 1, defaultConnectedBias, &clock, removeFn)
 	defer pool.stop()
 	pool.setLimits(10, uint64(10)) // Total capacity limit is 10
-	pool.setDefaultFactors(priceFactors{1, 0, 1}, priceFactors{1, 0, 1})
+	pool.setDefaultFactors(lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1})
 
 	for i := 0; i < 10; i++ {
-		pool.connect(poolTestPeer(i), 1)
+		pool.connect(newPoolTestPeer(i, kicked))
 		clock.Run(time.Millisecond)
 	}
-	if pool.connect(poolTestPeer(10), 1) {
+	if cap, _ := pool.connect(newPoolTestPeer(10, kicked)); cap != 0 {
 		t.Fatalf("New free client should be rejected")
 	}
+	select {
+	case <-kicked:
+	case <-time.NewTimer(time.Second).C:
+		t.Fatalf("timeout")
+	}
+	pool.disconnect(newPoolTestPeer(10, kicked))
 	clock.Run(5 * time.Minute)
 	for i := 0; i < 10; i++ {
-		pool.connect(poolTestPeer(i+10), 1)
+		pool.connect(newPoolTestPeer(i+10, kicked))
 	}
 	for i := 0; i < 10; i++ {
 		select {
@@ -346,19 +411,19 @@ func TestPositiveBalanceCalculation(t *testing.T) {
 		kicked = make(chan int, 10)
 	)
 	removeFn := func(id enode.ID) { kicked <- int(id[0]) } // Noop
-	pool := newClientPool(db, 1, &clock, removeFn)
+	pool := newClientPool(db, 1, defaultConnectedBias, &clock, removeFn)
 	defer pool.stop()
 	pool.setLimits(10, uint64(10)) // Total capacity limit is 10
-	pool.setDefaultFactors(priceFactors{1, 0, 1}, priceFactors{1, 0, 1})
+	pool.setDefaultFactors(lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1})
 
-	pool.addBalance(poolTestPeer(0).ID(), int64(time.Minute*3), "")
-	pool.connect(poolTestPeer(0), 10)
+	addBalance(pool, newPoolTestPeer(0, kicked).node.ID(), int64(time.Minute*3))
+	testPriorityConnect(t, pool, newPoolTestPeer(0, kicked), 10, true)
 	clock.Run(time.Minute)
 
-	pool.disconnect(poolTestPeer(0))
-	pb := pool.ndb.getOrNewPB(poolTestPeer(0).ID())
-	if pb.value != uint64(time.Minute*2) {
-		t.Fatalf("Positive balance mismatch, want %v, got %v", uint64(time.Minute*2), pb.value)
+	pool.disconnect(newPoolTestPeer(0, kicked))
+	pb, _ := getBalance(pool, newPoolTestPeer(0, kicked))
+	if checkDiff(pb, uint64(time.Minute*2)) {
+		t.Fatalf("Positive balance mismatch, want %v, got %v", uint64(time.Minute*2), pb)
 	}
 }
 
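The assertion style changes along with the storage model: the test no longer reads pool.ndb directly but goes through getBalance, and exact comparison gives way to checkDiff, presumably because the refactored balance tracker updates continuously instead of persisting a value on disconnect. After one minute connected with TimeFactor 1 (CapacityFactor is 0, so the raised capacity does not change the rate), roughly two of the original three minutes should remain, within checkDiff's ~0.05% tolerance.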
@@ -369,18 +434,16 @@ func TestDowngradePriorityClient(t *testing.T) {
 		kicked = make(chan int, 10)
 	)
 	removeFn := func(id enode.ID) { kicked <- int(id[0]) } // Noop
-	pool := newClientPool(db, 1, &clock, removeFn)
+	pool := newClientPool(db, 1, defaultConnectedBias, &clock, removeFn)
 	defer pool.stop()
 	pool.setLimits(10, uint64(10)) // Total capacity limit is 10
-	pool.setDefaultFactors(priceFactors{1, 0, 1}, priceFactors{1, 0, 1})
+	pool.setDefaultFactors(lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1})
 
-	p := &poolTestPeerWithCap{
-		poolTestPeer: poolTestPeer(0),
-	}
-	pool.addBalance(p.ID(), int64(time.Minute), "")
-	pool.connect(p, 10)
+	p := newPoolTestPeer(0, kicked)
+	addBalance(pool, p.node.ID(), int64(time.Minute))
+	testPriorityConnect(t, pool, p, 10, true)
 	if p.cap != 10 {
-		t.Fatalf("The capcacity of priority peer hasn't been updated, got: %d", p.cap)
+		t.Fatalf("The capacity of priority peer hasn't been updated, got: %d", p.cap)
 	}
 
 	clock.Run(time.Minute) // All positive balance should be used up.
@@ -388,156 +451,131 @@ func TestDowngradePriorityClient(t *testing.T) {
 	if p.cap != 1 {
 		t.Fatalf("The capcacity of peer should be downgraded, got: %d", p.cap)
 	}
-	pb := pool.ndb.getOrNewPB(poolTestPeer(0).ID())
-	if pb.value != 0 {
-		t.Fatalf("Positive balance mismatch, want %v, got %v", 0, pb.value)
+	pb, _ := getBalance(pool, newPoolTestPeer(0, kicked))
+	if pb != 0 {
+		t.Fatalf("Positive balance mismatch, want %v, got %v", 0, pb)
 	}
 
-	pool.addBalance(poolTestPeer(0).ID(), int64(time.Minute), "")
-	pb = pool.ndb.getOrNewPB(poolTestPeer(0).ID())
-	if pb.value != uint64(time.Minute) {
-		t.Fatalf("Positive balance mismatch, want %v, got %v", uint64(time.Minute), pb.value)
+	addBalance(pool, newPoolTestPeer(0, kicked).node.ID(), int64(time.Minute))
+	pb, _ = getBalance(pool, newPoolTestPeer(0, kicked))
+	if checkDiff(pb, uint64(time.Minute)) {
+		t.Fatalf("Positive balance mismatch, want %v, got %v", uint64(time.Minute), pb)
 	}
 }
 
 func TestNegativeBalanceCalculation(t *testing.T) {
 	var (
-		clock  mclock.Simulated
-		db     = rawdb.NewMemoryDatabase()
-		kicked = make(chan int, 10)
+		clock mclock.Simulated
+		db    = rawdb.NewMemoryDatabase()
 	)
-	removeFn := func(id enode.ID) { kicked <- int(id[0]) } // Noop
-	pool := newClientPool(db, 1, &clock, removeFn)
+	pool := newClientPool(db, 1, defaultConnectedBias, &clock, func(id enode.ID) {})
 	defer pool.stop()
 	pool.setLimits(10, uint64(10)) // Total capacity limit is 10
-	pool.setDefaultFactors(priceFactors{1, 0, 1}, priceFactors{1, 0, 1})
+	pool.setDefaultFactors(lps.PriceFactors{TimeFactor: 1e-3, CapacityFactor: 0, RequestFactor: 1}, lps.PriceFactors{TimeFactor: 1e-3, CapacityFactor: 0, RequestFactor: 1})
 
 	for i := 0; i < 10; i++ {
-		pool.connect(poolTestPeer(i), 1)
+		pool.connect(newPoolTestPeer(i, nil))
 	}
 	clock.Run(time.Second)
 
 	for i := 0; i < 10; i++ {
-		pool.disconnect(poolTestPeer(i))
-		nb := pool.ndb.getOrNewNB(poolTestPeer(i).freeClientId())
-		if nb.logValue != 0 {
+		pool.disconnect(newPoolTestPeer(i, nil))
+		_, nb := getBalance(pool, newPoolTestPeer(i, nil))
+		if nb != 0 {
 			t.Fatalf("Short connection shouldn't be recorded")
 		}
 	}
 
 	for i := 0; i < 10; i++ {
-		pool.connect(poolTestPeer(i), 1)
+		pool.connect(newPoolTestPeer(i, nil))
 	}
 	clock.Run(time.Minute)
 	for i := 0; i < 10; i++ {
-		pool.disconnect(poolTestPeer(i))
-		nb := pool.ndb.getOrNewNB(poolTestPeer(i).freeClientId())
-		nb.logValue -= pool.logOffset(clock.Now())
-		nb.logValue /= fixedPointMultiplier
-		if nb.logValue != int64(math.Log(float64(time.Minute/time.Second))) {
-			t.Fatalf("Negative balance mismatch, want %v, got %v", int64(math.Log(float64(time.Minute/time.Second))), nb.logValue)
+		pool.disconnect(newPoolTestPeer(i, nil))
+		_, nb := getBalance(pool, newPoolTestPeer(i, nil))
+		if checkDiff(nb, uint64(time.Minute)/1000) {
+			t.Fatalf("Negative balance mismatch, want %v, got %v", uint64(time.Minute)/1000, nb)
 		}
 	}
 }
 
-func TestNodeDB(t *testing.T) {
-	ndb := newNodeDB(rawdb.NewMemoryDatabase(), mclock.System{})
-	defer ndb.close()
-
-	if !bytes.Equal(ndb.verbuf[:], []byte{0x00, nodeDBVersion}) {
-		t.Fatalf("version buffer mismatch, want %v, got %v", []byte{0x00, nodeDBVersion}, ndb.verbuf)
-	}
-	var cases = []struct {
-		id       enode.ID
-		ip       string
-		balance  interface{}
-		positive bool
-	}{
-		{enode.ID{0x00, 0x01, 0x02}, "", posBalance{value: 100}, true},
-		{enode.ID{0x00, 0x01, 0x02}, "", posBalance{value: 200}, true},
-		{enode.ID{}, "127.0.0.1", negBalance{logValue: 10}, false},
-		{enode.ID{}, "127.0.0.1", negBalance{logValue: 20}, false},
-	}
-	for _, c := range cases {
-		if c.positive {
-			ndb.setPB(c.id, c.balance.(posBalance))
-			if pb := ndb.getOrNewPB(c.id); !reflect.DeepEqual(pb, c.balance.(posBalance)) {
-				t.Fatalf("Positive balance mismatch, want %v, got %v", c.balance.(posBalance), pb)
-			}
-		} else {
-			ndb.setNB(c.ip, c.balance.(negBalance))
-			if nb := ndb.getOrNewNB(c.ip); !reflect.DeepEqual(nb, c.balance.(negBalance)) {
-				t.Fatalf("Negative balance mismatch, want %v, got %v", c.balance.(negBalance), nb)
-			}
-		}
-	}
-	for _, c := range cases {
-		if c.positive {
-			ndb.delPB(c.id)
-			if pb := ndb.getOrNewPB(c.id); !reflect.DeepEqual(pb, posBalance{}) {
-				t.Fatalf("Positive balance mismatch, want %v, got %v", posBalance{}, pb)
-			}
-		} else {
-			ndb.delNB(c.ip)
-			if nb := ndb.getOrNewNB(c.ip); !reflect.DeepEqual(nb, negBalance{}) {
-				t.Fatalf("Negative balance mismatch, want %v, got %v", negBalance{}, nb)
-			}
-		}
-	}
-	ndb.setCumulativeTime(100)
-	if ndb.getCumulativeTime() != 100 {
-		t.Fatalf("Cumulative time mismatch, want %v, got %v", 100, ndb.getCumulativeTime())
-	}
-}
-
-func TestNodeDBExpiration(t *testing.T) {
-	var (
-		iterated int
-		done     = make(chan struct{}, 1)
-	)
-	callback := func(now mclock.AbsTime, b negBalance) bool {
-		iterated += 1
-		return true
-	}
-	clock := &mclock.Simulated{}
-	ndb := newNodeDB(rawdb.NewMemoryDatabase(), clock)
-	defer ndb.close()
-	ndb.nbEvictCallBack = callback
-	ndb.cleanupHook = func() { done <- struct{}{} }
-
-	var cases = []struct {
-		ip      string
-		balance negBalance
-	}{
-		{"127.0.0.1", negBalance{logValue: 1}},
-		{"127.0.0.2", negBalance{logValue: 1}},
-		{"127.0.0.3", negBalance{logValue: 1}},
-		{"127.0.0.4", negBalance{logValue: 1}},
-	}
-	for _, c := range cases {
-		ndb.setNB(c.ip, c.balance)
-	}
-	clock.WaitForTimers(1)
-	clock.Run(time.Hour + time.Minute)
-	select {
-	case <-done:
-	case <-time.NewTimer(time.Second).C:
-		t.Fatalf("timeout")
-	}
-	if iterated != 4 {
-		t.Fatalf("Failed to evict useless negative balances, want %v, got %d", 4, iterated)
-	}
-	clock.WaitForTimers(1)
-	for _, c := range cases {
-		ndb.setNB(c.ip, c.balance)
-	}
-	clock.Run(time.Hour + time.Minute)
-	select {
-	case <-done:
-	case <-time.NewTimer(time.Second).C:
-		t.Fatalf("timeout")
-	}
-	if iterated != 8 {
-		t.Fatalf("Failed to evict useless negative balances, want %v, got %d", 4, iterated)
+func TestInactiveClient(t *testing.T) {
+	var (
+		clock mclock.Simulated
+		db    = rawdb.NewMemoryDatabase()
+	)
+	pool := newClientPool(db, 1, defaultConnectedBias, &clock, func(id enode.ID) {})
+	defer pool.stop()
+	pool.setLimits(2, uint64(2))
+
+	p1 := newPoolTestPeer(1, nil)
+	p1.inactiveAllowed = true
+	p2 := newPoolTestPeer(2, nil)
+	p2.inactiveAllowed = true
+	p3 := newPoolTestPeer(3, nil)
+	p3.inactiveAllowed = true
+	addBalance(pool, p1.node.ID(), 1000*int64(time.Second))
+	addBalance(pool, p3.node.ID(), 2000*int64(time.Second))
+	// p1: 1000 p2: 0 p3: 2000
+	p1.cap, _ = pool.connect(p1)
+	if p1.cap != 1 {
+		t.Fatalf("Failed to connect peer #1")
+	}
+	p2.cap, _ = pool.connect(p2)
+	if p2.cap != 1 {
+		t.Fatalf("Failed to connect peer #2")
+	}
+	p3.cap, _ = pool.connect(p3)
+	if p3.cap != 1 {
+		t.Fatalf("Failed to connect peer #3")
+	}
+	if p2.cap != 0 {
+		t.Fatalf("Failed to deactivate peer #2")
+	}
+	addBalance(pool, p2.node.ID(), 3000*int64(time.Second))
+	// p1: 1000 p2: 3000 p3: 2000
+	if p2.cap != 1 {
+		t.Fatalf("Failed to activate peer #2")
+	}
+	if p1.cap != 0 {
+		t.Fatalf("Failed to deactivate peer #1")
+	}
+	addBalance(pool, p2.node.ID(), -2500*int64(time.Second))
+	// p1: 1000 p2: 500 p3: 2000
+	if p1.cap != 1 {
+		t.Fatalf("Failed to activate peer #1")
+	}
+	if p2.cap != 0 {
+		t.Fatalf("Failed to deactivate peer #2")
+	}
+	pool.setDefaultFactors(lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 0}, lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 0})
+	p4 := newPoolTestPeer(4, nil)
+	addBalance(pool, p4.node.ID(), 1500*int64(time.Second))
+	// p1: 1000 p2: 500 p3: 2000 p4: 1500
+	p4.cap, _ = pool.connect(p4)
+	if p4.cap != 1 {
+		t.Fatalf("Failed to activate peer #4")
+	}
+	if p1.cap != 0 {
+		t.Fatalf("Failed to deactivate peer #1")
+	}
+	clock.Run(time.Second * 600)
+	// manually trigger a check to avoid a long real-time wait
+	pool.ns.SetState(p1.node, pool.UpdateFlag, nodestate.Flags{}, 0)
+	pool.ns.SetState(p1.node, nodestate.Flags{}, pool.UpdateFlag, 0)
+	// p1: 1000 p2: 500 p3: 2000 p4: 900
+	if p1.cap != 1 {
+		t.Fatalf("Failed to activate peer #1")
+	}
+	if p4.cap != 0 {
+		t.Fatalf("Failed to deactivate peer #4")
+	}
+	pool.disconnect(p2)
+	pool.disconnect(p4)
+	addBalance(pool, p1.node.ID(), -1000*int64(time.Second))
+	if p1.cap != 1 {
+		t.Fatalf("Should not deactivate peer #1")
+	}
+	if p2.cap != 0 {
+		t.Fatalf("Should not activate peer #2")
 	}
 }
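The expected capacities in TestInactiveClient follow from the balance bookkeeping in its comments. Take the step annotated "// p1: 1000 p2: 500 p3: 2000 p4: 900": p4 started with 1500*int64(time.Second), and clock.Run(time.Second * 600) at TimeFactor 1 spends 600 of it, leaving 900. With setLimits(2, uint64(2)) only two peers can be active; p3's 2000 holds one slot, and p1's 1000 now beats p4's 900, so p1 is reactivated and p4 deactivated. The paired SetState calls on pool.UpdateFlag force an immediate re-evaluation instead of a long real-time wait, as the inline comment notes.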