les, les/flowcontrol: improved request serving and flow control (#18230)
This change:

- implements concurrent LES request serving even for a single peer.
- replaces the request cost estimation method with a cost table based on benchmarks, which gives much more consistent results. Until now the allowed number of light peers was just a guess, which probably contributed a lot to the fluctuating quality of available service. Everything related to request cost is implemented in a single object, the 'cost tracker'. It uses a fixed cost table with a global 'correction factor'. Benchmark code is included and can be run at any time to adapt costs to low-level implementation changes.
- reimplements flowcontrol.ClientManager in a cleaner and more efficient way, with added capabilities: there is now control over bandwidth, which allows using the flow control parameters for client prioritization. Target utilization over 100 percent is now supported, to model concurrent request processing. Total serving bandwidth is reduced during block processing to prevent database contention.
- implements an RPC API for the LES servers, allowing server operators to assign priority bandwidth to certain clients and change prioritized status even while the client is connected. The new API is meant for cases where server operators charge for LES using an off-protocol mechanism.
- adds a unit test for the new client manager.
- adds an end-to-end test using the network simulator that tests bandwidth control functions through the new API.
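For context, a minimal sketch of the LES flow-control model these changes build on: each client is assigned a buffer limit (BL) and a minimum recharge rate (MRR), every request is charged against the buffer up front, and the buffer recharges over time. The names and structure below are illustrative only, not the actual les/flowcontrol types:

```go
package main

import (
	"fmt"
	"time"
)

type clientBuffer struct {
	bufValue    uint64 // current buffer value (BV)
	bufLimit    uint64 // buffer limit (BL)
	minRecharge uint64 // minimum recharge rate (MRR), cost units per second
	lastUpdate  time.Time
}

// recharge credits the buffer for elapsed time, capped at the limit.
func (c *clientBuffer) recharge(now time.Time) {
	dt := now.Sub(c.lastUpdate).Seconds()
	c.lastUpdate = now
	c.bufValue += uint64(dt * float64(c.minRecharge))
	if c.bufValue > c.bufLimit {
		c.bufValue = c.bufLimit
	}
}

// accept checks whether a request with the given maximum cost fits into the
// buffer; if so, the cost is deducted up front.
func (c *clientBuffer) accept(maxCost uint64, now time.Time) bool {
	c.recharge(now)
	if c.bufValue < maxCost {
		return false // client must wait for its buffer to recharge
	}
	c.bufValue -= maxCost
	return true
}

func main() {
	c := &clientBuffer{bufValue: 300000, bufLimit: 300000, minRecharge: 50000, lastUpdate: time.Now()}
	fmt.Println(c.accept(200000, time.Now())) // true: fits into the buffer
	fmt.Println(c.accept(200000, time.Now())) // false: must recharge first
}
```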
Committed by: Felix Lange
Parent: c2b33a117f
Commit: c2003ed63b

Changed file: les/server.go
```diff
@@ -14,40 +14,46 @@
 // You should have received a copy of the GNU Lesser General Public License
 // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
 
 // Package les implements the Light Ethereum Subprotocol.
 package les
 
 import (
 	"crypto/ecdsa"
-	"encoding/binary"
-	"math"
 	"sync"
 
-	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/common/mclock"
 	"github.com/ethereum/go-ethereum/core"
 	"github.com/ethereum/go-ethereum/core/rawdb"
 	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/eth"
-	"github.com/ethereum/go-ethereum/ethdb"
 	"github.com/ethereum/go-ethereum/les/flowcontrol"
 	"github.com/ethereum/go-ethereum/light"
 	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/p2p"
 	"github.com/ethereum/go-ethereum/p2p/discv5"
 	"github.com/ethereum/go-ethereum/params"
-	"github.com/ethereum/go-ethereum/rlp"
 	"github.com/ethereum/go-ethereum/rpc"
 )
 
+const bufLimitRatio = 6000 // fixed bufLimit/MRR ratio
+
 type LesServer struct {
 	lesCommons
 
 	fcManager    *flowcontrol.ClientManager // nil if our node is client only
-	fcCostStats  *requestCostStats
-	defParams    *flowcontrol.ServerParams
+	costTracker  *costTracker
+	defParams    flowcontrol.ServerParams
 	lesTopics    []discv5.Topic
 	privateKey   *ecdsa.PrivateKey
 	quitSync     chan struct{}
+	onlyAnnounce bool
+
+	thcNormal, thcBlockProcessing int // serving thread count for normal operation and block processing mode
+
+	maxPeers           int
+	freeClientCap      uint64
+	freeClientPool     *freeClientPool
+	priorityClientPool *priorityClientPool
 }
 
 func NewLesServer(eth *eth.Ethereum, config *eth.Config) (*LesServer, error) {
```
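The new bufLimitRatio constant fixes the ratio between a client's burst allowance and its guaranteed recharge rate. A sketch of how the default parameters scale with capacity, mirroring the Start method later in this diff (local types; illustrative only):

```go
// serverParams stands in for flowcontrol.ServerParams from the patch.
type serverParams struct {
	BufLimit    uint64
	MinRecharge uint64
}

// defaultParams gives a client of capacity C a buffer worth bufLimitRatio
// times its guaranteed recharge rate, so it can burst well above its
// sustained rate before flow control throttles it.
func defaultParams(capacity uint64) serverParams {
	return serverParams{
		BufLimit:    capacity * 6000, // bufLimitRatio
		MinRecharge: capacity,
	}
}
```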
```diff
@@ -87,12 +93,20 @@ func NewLesServer(eth *eth.Ethereum, config *eth.Config) (*LesServer, error) {
 			bloomTrieIndexer: light.NewBloomTrieIndexer(eth.ChainDb(), nil, params.BloomBitsBlocks, params.BloomTrieFrequency),
 			protocolManager:  pm,
 		},
+		costTracker:  newCostTracker(eth.ChainDb(), config),
 		quitSync:     quitSync,
 		lesTopics:    lesTopics,
+		onlyAnnounce: config.OnlyAnnounce,
 	}
 
 	logger := log.New()
+	pm.server = srv
+	srv.thcNormal = config.LightServ * 4 / 100
+	if srv.thcNormal < 4 {
+		srv.thcNormal = 4
+	}
+	srv.thcBlockProcessing = config.LightServ/100 + 1
+	srv.fcManager = flowcontrol.NewClientManager(nil, &mclock.System{})
 
 	chtV1SectionCount, _, _ := srv.chtIndexer.Sections() // indexer still uses LES/1 4k section size for backwards server compatibility
 	chtV2SectionCount := chtV1SectionCount / (params.CHTFrequencyClient / params.CHTFrequencyServer)
```
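A quick check of the serving-thread formulas above, using hypothetical --lightserv percentages (not values from the patch):

```go
package main

import "fmt"

// servingThreads applies the formulas from NewLesServer above.
func servingThreads(lightServ int) (normal, blockProcessing int) {
	normal = lightServ * 4 / 100
	if normal < 4 {
		normal = 4 // lower bound applied in NewLesServer
	}
	return normal, lightServ/100 + 1
}

func main() {
	fmt.Println(servingThreads(50))  // 4 1
	fmt.Println(servingThreads(200)) // 8 3
}
```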
```diff
@@ -114,23 +128,92 @@ func NewLesServer(eth *eth.Ethereum, config *eth.Config) (*LesServer, error) {
 	}
 
 	srv.chtIndexer.Start(eth.BlockChain())
-	pm.server = srv
-
-	srv.defParams = &flowcontrol.ServerParams{
-		BufLimit:    300000000,
-		MinRecharge: 50000,
-	}
-	srv.fcManager = flowcontrol.NewClientManager(uint64(config.LightServ), 10, 1000000000)
-	srv.fcCostStats = newCostStats(eth.ChainDb())
 	return srv, nil
 }
 
+func (s *LesServer) APIs() []rpc.API {
+	return []rpc.API{
+		{
+			Namespace: "les",
+			Version:   "1.0",
+			Service:   NewPrivateLightServerAPI(s),
+			Public:    false,
+		},
+	}
+}
+
+// startEventLoop starts an event handler loop that updates the recharge curve of
+// the client manager and adjusts the client pool's size according to the total
+// capacity updates coming from the client manager
+func (s *LesServer) startEventLoop() {
+	s.protocolManager.wg.Add(1)
+
+	var processing bool
+	blockProcFeed := make(chan bool, 100)
+	s.protocolManager.blockchain.(*core.BlockChain).SubscribeBlockProcessingEvent(blockProcFeed)
+	totalRechargeCh := make(chan uint64, 100)
+	totalRecharge := s.costTracker.subscribeTotalRecharge(totalRechargeCh)
+	totalCapacityCh := make(chan uint64, 100)
+	updateRecharge := func() {
+		if processing {
+			s.protocolManager.servingQueue.setThreads(s.thcBlockProcessing)
+			s.fcManager.SetRechargeCurve(flowcontrol.PieceWiseLinear{{0, 0}, {totalRecharge, totalRecharge}})
+		} else {
+			s.protocolManager.servingQueue.setThreads(s.thcNormal)
+			s.fcManager.SetRechargeCurve(flowcontrol.PieceWiseLinear{{0, 0}, {totalRecharge / 10, totalRecharge}, {totalRecharge, totalRecharge}})
+		}
+	}
+	updateRecharge()
+	totalCapacity := s.fcManager.SubscribeTotalCapacity(totalCapacityCh)
+	s.priorityClientPool.setLimits(s.maxPeers, totalCapacity)
+
+	go func() {
+		for {
+			select {
+			case processing = <-blockProcFeed:
+				updateRecharge()
+			case totalRecharge = <-totalRechargeCh:
+				updateRecharge()
+			case totalCapacity = <-totalCapacityCh:
+				s.priorityClientPool.setLimits(s.maxPeers, totalCapacity)
+			case <-s.protocolManager.quitSync:
+				s.protocolManager.wg.Done()
+				return
+			}
+		}
+	}()
+}
```
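The two recharge curves encode the policy from the commit message: during block processing total recharge appears to track demand 1:1, while in normal mode full recharge is already granted at one tenth of the total, modeling target utilization above 100 percent. A sketch of how such a piecewise-linear curve evaluates (local types; the real flowcontrol.PieceWiseLinear is assumed to interpolate like this):

```go
package main

import "fmt"

// point mirrors the {X, Y} pairs passed to SetRechargeCurve above.
type point struct{ X, Y uint64 }

// valueAt returns the curve value at x, interpolating between control points
// and clamping outside the defined range.
func valueAt(curve []point, x uint64) uint64 {
	if len(curve) == 0 {
		return 0
	}
	if x <= curve[0].X {
		return curve[0].Y
	}
	for i := 1; i < len(curve); i++ {
		if x <= curve[i].X {
			dx, dy := curve[i].X-curve[i-1].X, curve[i].Y-curve[i-1].Y
			return curve[i-1].Y + (x-curve[i-1].X)*dy/dx
		}
	}
	return curve[len(curve)-1].Y
}

func main() {
	tr := uint64(1000000) // a hypothetical totalRecharge
	normal := []point{{0, 0}, {tr / 10, tr}, {tr, tr}}
	// At 5% demand the normal-mode curve already grants half of the total
	// recharge capacity.
	fmt.Println(valueAt(normal, tr/20)) // 500000
}
```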
```diff
 func (s *LesServer) Protocols() []p2p.Protocol {
 	return s.makeProtocols(ServerProtocolVersions)
 }
 
 // Start starts the LES server
 func (s *LesServer) Start(srvr *p2p.Server) {
+	s.maxPeers = s.config.LightPeers
+	totalRecharge := s.costTracker.totalRecharge()
+	if s.maxPeers > 0 {
+		s.freeClientCap = minCapacity //totalRecharge / uint64(s.maxPeers)
+		if s.freeClientCap < minCapacity {
+			s.freeClientCap = minCapacity
+		}
+		if s.freeClientCap > 0 {
+			s.defParams = flowcontrol.ServerParams{
+				BufLimit:    s.freeClientCap * bufLimitRatio,
+				MinRecharge: s.freeClientCap,
+			}
+		}
+	}
+	freePeers := int(totalRecharge / s.freeClientCap)
+	if freePeers < s.maxPeers {
+		log.Warn("Light peer count limited", "specified", s.maxPeers, "allowed", freePeers)
+	}
+
+	s.freeClientPool = newFreeClientPool(s.chainDb, s.freeClientCap, 10000, mclock.System{}, func(id string) { go s.protocolManager.removePeer(id) })
+	s.priorityClientPool = newPriorityClientPool(s.freeClientCap, s.protocolManager.peers, s.freeClientPool)
+
+	s.protocolManager.peers.notify(s.priorityClientPool)
+	s.startEventLoop()
 	s.protocolManager.Start(s.config.LightPeers)
 	if srvr.DiscV5 != nil {
 		for _, topic := range s.lesTopics {
```
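To make the free-client accounting in Start concrete, with made-up numbers (minCapacity's real value is defined elsewhere in the patch):

```go
package main

import "fmt"

func main() {
	// Hypothetical figures, illustrating the capacity check only.
	totalRecharge := uint64(3000000) // as returned by costTracker.totalRecharge()
	freeClientCap := uint64(100000)  // stand-in for minCapacity
	freePeers := int(totalRecharge / freeClientCap)
	fmt.Println(freePeers) // 30: configuring more light peers than this logs a warning
}
```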
```diff
@@ -156,185 +239,14 @@ func (s *LesServer) SetBloomBitsIndexer(bloomIndexer *core.ChainIndexer) {
 func (s *LesServer) Stop() {
 	s.chtIndexer.Close()
 	// bloom trie indexer is closed by parent bloombits indexer
-	s.fcCostStats.store()
-	s.fcManager.Stop()
 	go func() {
 		<-s.protocolManager.noMorePeers
 	}()
+	s.freeClientPool.stop()
+	s.costTracker.stop()
 	s.protocolManager.Stop()
 }
 
-type requestCosts struct {
-	baseCost, reqCost uint64
-}
-
-type requestCostTable map[uint64]*requestCosts
-
-type RequestCostList []struct {
-	MsgCode, BaseCost, ReqCost uint64
-}
-
-func (list RequestCostList) decode() requestCostTable {
-	table := make(requestCostTable)
-	for _, e := range list {
-		table[e.MsgCode] = &requestCosts{
-			baseCost: e.BaseCost,
-			reqCost:  e.ReqCost,
-		}
-	}
-	return table
-}
-
-type linReg struct {
-	sumX, sumY, sumXX, sumXY float64
-	cnt                      uint64
-}
-
-const linRegMaxCnt = 100000
-
-func (l *linReg) add(x, y float64) {
-	if l.cnt >= linRegMaxCnt {
-		sub := float64(l.cnt+1-linRegMaxCnt) / linRegMaxCnt
-		l.sumX -= l.sumX * sub
-		l.sumY -= l.sumY * sub
-		l.sumXX -= l.sumXX * sub
-		l.sumXY -= l.sumXY * sub
-		l.cnt = linRegMaxCnt - 1
-	}
-	l.cnt++
-	l.sumX += x
-	l.sumY += y
-	l.sumXX += x * x
-	l.sumXY += x * y
-}
-
-func (l *linReg) calc() (b, m float64) {
-	if l.cnt == 0 {
-		return 0, 0
-	}
-	cnt := float64(l.cnt)
-	d := cnt*l.sumXX - l.sumX*l.sumX
-	if d < 0.001 {
-		return l.sumY / cnt, 0
-	}
-	m = (cnt*l.sumXY - l.sumX*l.sumY) / d
-	b = (l.sumY / cnt) - (m * l.sumX / cnt)
-	return b, m
-}
```
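For reference, the removed calc method is plain ordinary least squares over the running sums, falling back to the mean with zero slope when the denominator is near-singular:

```latex
m = \frac{n \sum x_i y_i - \sum x_i \sum y_i}{n \sum x_i^2 - \left(\sum x_i\right)^2},
\qquad
b = \bar{y} - m\,\bar{x}
```

The add method keeps the sample count near linRegMaxCnt by proportionally shrinking all sums, giving the estimator a sliding-window flavor; this is exactly the per-request-type estimation the fixed cost table replaces.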
```diff
-
-func (l *linReg) toBytes() []byte {
-	var arr [40]byte
-	binary.BigEndian.PutUint64(arr[0:8], math.Float64bits(l.sumX))
-	binary.BigEndian.PutUint64(arr[8:16], math.Float64bits(l.sumY))
-	binary.BigEndian.PutUint64(arr[16:24], math.Float64bits(l.sumXX))
-	binary.BigEndian.PutUint64(arr[24:32], math.Float64bits(l.sumXY))
-	binary.BigEndian.PutUint64(arr[32:40], l.cnt)
-	return arr[:]
-}
-
-func linRegFromBytes(data []byte) *linReg {
-	if len(data) != 40 {
-		return nil
-	}
-	l := &linReg{}
-	l.sumX = math.Float64frombits(binary.BigEndian.Uint64(data[0:8]))
-	l.sumY = math.Float64frombits(binary.BigEndian.Uint64(data[8:16]))
-	l.sumXX = math.Float64frombits(binary.BigEndian.Uint64(data[16:24]))
-	l.sumXY = math.Float64frombits(binary.BigEndian.Uint64(data[24:32]))
-	l.cnt = binary.BigEndian.Uint64(data[32:40])
-	return l
-}
-
-type requestCostStats struct {
-	lock  sync.RWMutex
-	db    ethdb.Database
-	stats map[uint64]*linReg
-}
-
-type requestCostStatsRlp []struct {
-	MsgCode uint64
-	Data    []byte
-}
-
-var rcStatsKey = []byte("_requestCostStats")
-
-func newCostStats(db ethdb.Database) *requestCostStats {
-	stats := make(map[uint64]*linReg)
-	for _, code := range reqList {
-		stats[code] = &linReg{cnt: 100}
-	}
-
-	if db != nil {
-		data, err := db.Get(rcStatsKey)
-		var statsRlp requestCostStatsRlp
-		if err == nil {
-			err = rlp.DecodeBytes(data, &statsRlp)
-		}
-		if err == nil {
-			for _, r := range statsRlp {
-				if stats[r.MsgCode] != nil {
-					if l := linRegFromBytes(r.Data); l != nil {
-						stats[r.MsgCode] = l
-					}
-				}
-			}
-		}
-	}
-
-	return &requestCostStats{
-		db:    db,
-		stats: stats,
-	}
-}
-
-func (s *requestCostStats) store() {
-	s.lock.Lock()
-	defer s.lock.Unlock()
-
-	statsRlp := make(requestCostStatsRlp, len(reqList))
-	for i, code := range reqList {
-		statsRlp[i].MsgCode = code
-		statsRlp[i].Data = s.stats[code].toBytes()
-	}
-
-	if data, err := rlp.EncodeToBytes(statsRlp); err == nil {
-		s.db.Put(rcStatsKey, data)
-	}
-}
-
-func (s *requestCostStats) getCurrentList() RequestCostList {
-	s.lock.Lock()
-	defer s.lock.Unlock()
-
-	list := make(RequestCostList, len(reqList))
-	for idx, code := range reqList {
-		b, m := s.stats[code].calc()
-		if m < 0 {
-			b += m
-			m = 0
-		}
-		if b < 0 {
-			b = 0
-		}
-
-		list[idx].MsgCode = code
-		list[idx].BaseCost = uint64(b * 2)
-		list[idx].ReqCost = uint64(m * 2)
-	}
-	return list
-}
-
-func (s *requestCostStats) update(msgCode, reqCnt, cost uint64) {
-	s.lock.Lock()
-	defer s.lock.Unlock()
-
-	c, ok := s.stats[msgCode]
-	if !ok || reqCnt == 0 {
-		return
-	}
-	c.add(float64(reqCnt), float64(cost))
-}
```
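A worked example of the clamping and the 2x safety margin in the removed getCurrentList, with made-up fit results:

```go
package main

import "fmt"

func main() {
	// Hypothetical intercept and slope from linReg.calc; getCurrentList
	// doubled both before announcing them to clients.
	b, m := 1000.0, 50.0
	fmt.Println(uint64(b*2), uint64(m*2)) // BaseCost=2000, ReqCost=100
}
```

A negative slope was folded into the intercept and zeroed, so the advertised costs could never decrease with request count.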
```diff
 
 func (pm *ProtocolManager) blockLoop() {
 	pm.wg.Add(1)
 	headCh := make(chan core.ChainHeadEvent, 10)
@@ -371,12 +283,7 @@ func (pm *ProtocolManager) blockLoop() {
 				switch p.announceType {
 
 				case announceTypeSimple:
-					select {
-					case p.announceChn <- announce:
-					default:
-						pm.removePeer(p.id)
-					}
-
+					p.queueSend(func() { p.SendAnnounce(announce) })
 				case announceTypeSigned:
 					if !signed {
 						signedAnnounce = announce
@@ -384,11 +291,7 @@ func (pm *ProtocolManager) blockLoop() {
 						signed = true
 					}
 
-					select {
-					case p.announceChn <- signedAnnounce:
-					default:
-						pm.removePeer(p.id)
-					}
+					p.queueSend(func() { p.SendAnnounce(signedAnnounce) })
 				}
 			}
 		}
```