committed by Felföldi Zsolt
parent 4aee0d1994
commit 2ed729d38e
360 les/server.go
@@ -18,15 +18,11 @@ package les
 import (
 	"crypto/ecdsa"
-	"sync"
 	"time"
 
 	"github.com/ethereum/go-ethereum/accounts/abi/bind"
-	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/common/mclock"
 	"github.com/ethereum/go-ethereum/core"
-	"github.com/ethereum/go-ethereum/core/rawdb"
-	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/eth"
 	"github.com/ethereum/go-ethereum/les/flowcontrol"
 	"github.com/ethereum/go-ethereum/light"
@@ -38,80 +34,94 @@ import (
 	"github.com/ethereum/go-ethereum/rpc"
 )
 
 const bufLimitRatio = 6000 // fixed bufLimit/MRR ratio
 
 type LesServer struct {
 	lesCommons
 
 	archiveMode bool // Flag whether the ethereum node runs in archive mode.
+	handler     *serverHandler
+	lesTopics   []discv5.Topic
+	privateKey  *ecdsa.PrivateKey
 
-	fcManager    *flowcontrol.ClientManager // nil if our node is client only
+	// Flow control and capacity management
+	fcManager    *flowcontrol.ClientManager
 	costTracker  *costTracker
-	testCost     uint64
 	defParams    flowcontrol.ServerParams
-	lesTopics    []discv5.Topic
-	privateKey   *ecdsa.PrivateKey
-	quitSync     chan struct{}
-	onlyAnnounce bool
+	servingQueue *servingQueue
+	clientPool   *clientPool
 
-	thcNormal, thcBlockProcessing int // serving thread count for normal operation and block processing mode
-
-	maxPeers                                int
-	minCapacity, maxCapacity, freeClientCap uint64
-	clientPool                              *clientPool
+	freeCapacity uint64 // The minimal client capacity used for free client.
+	threadsIdle  int    // Request serving threads count when system is idle.
+	threadsBusy  int    // Request serving threads count when system is busy(block insertion).
 }
 
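The bufLimitRatio constant fixes how large a client's flow control buffer (BufLimit) is relative to its minimum guaranteed recharge rate (MinRecharge, the "MRR"): a client may buffer up to 6000 cost units for every unit of recharge rate before it has to wait. A minimal standalone sketch of that arithmetic follows; the freeCapacity value of 16000 is a made-up example, the real one comes from newCostTracker.

package main

import "fmt"

const bufLimitRatio = 6000 // same fixed bufLimit/MRR ratio as in the diff

// ServerParams mirrors the two flowcontrol.ServerParams fields used here.
type ServerParams struct {
	BufLimit    uint64 // maximum request cost a client may have buffered
	MinRecharge uint64 // minimum guaranteed recharge rate (MRR)
}

func main() {
	// Assumed example value; the real one is returned by newCostTracker
	// based on the configured serving capacity.
	freeCapacity := uint64(16000)
	p := ServerParams{
		BufLimit:    freeCapacity * bufLimitRatio,
		MinRecharge: freeCapacity,
	}
	fmt.Printf("BufLimit=%d MinRecharge=%d ratio=%d\n", p.BufLimit, p.MinRecharge, p.BufLimit/p.MinRecharge)
}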
 func NewLesServer(e *eth.Ethereum, config *eth.Config) (*LesServer, error) {
+	// Collect les protocol version information supported by local node.
 	lesTopics := make([]discv5.Topic, len(AdvertiseProtocolVersions))
 	for i, pv := range AdvertiseProtocolVersions {
 		lesTopics[i] = lesTopic(e.BlockChain().Genesis().Hash(), pv)
 	}
-	quitSync := make(chan struct{})
+	// Calculate the number of threads used to service the light client
+	// requests based on the user-specified value.
+	threads := config.LightServ * 4 / 100
+	if threads < 4 {
+		threads = 4
+	}
 	srv := &LesServer{
 		lesCommons: lesCommons{
 			genesis:          e.BlockChain().Genesis().Hash(),
 			config:           config,
 			chainConfig:      e.BlockChain().Config(),
 			iConfig:          light.DefaultServerIndexerConfig,
 			chainDb:          e.ChainDb(),
 			peers:            newPeerSet(),
 			chainReader:      e.BlockChain(),
 			chtIndexer:       light.NewChtIndexer(e.ChainDb(), nil, params.CHTFrequency, params.HelperTrieProcessConfirmations),
 			bloomTrieIndexer: light.NewBloomTrieIndexer(e.ChainDb(), nil, params.BloomBitsBlocks, params.BloomTrieFrequency),
 			closeCh:          make(chan struct{}),
 		},
 		archiveMode:  e.ArchiveMode(),
-		quitSync:     quitSync,
 		lesTopics:    lesTopics,
-		onlyAnnounce: config.UltraLightOnlyAnnounce,
+		fcManager:    flowcontrol.NewClientManager(nil, &mclock.System{}),
+		servingQueue: newServingQueue(int64(time.Millisecond*10), float64(config.LightServ)/100),
+		threadsBusy:  config.LightServ/100 + 1,
+		threadsIdle:  threads,
 	}
-	srv.costTracker, srv.minCapacity = newCostTracker(e.ChainDb(), config)
-
-	logger := log.New()
-	srv.thcNormal = config.LightServ * 4 / 100
-	if srv.thcNormal < 4 {
-		srv.thcNormal = 4
-	}
-	srv.thcBlockProcessing = config.LightServ/100 + 1
-	srv.fcManager = flowcontrol.NewClientManager(nil, &mclock.System{})
-
-	checkpoint := srv.latestLocalCheckpoint()
-	if !checkpoint.Empty() {
-		logger.Info("Loaded latest checkpoint", "section", checkpoint.SectionIndex, "head", checkpoint.SectionHead,
-			"chtroot", checkpoint.CHTRoot, "bloomroot", checkpoint.BloomRoot)
-	}
-
-	srv.chtIndexer.Start(e.BlockChain())
+	srv.handler = newServerHandler(srv, e.BlockChain(), e.ChainDb(), e.TxPool(), e.Synced)
+	srv.costTracker, srv.freeCapacity = newCostTracker(e.ChainDb(), config)
 
+	// Set up checkpoint oracle.
 	oracle := config.CheckpointOracle
 	if oracle == nil {
 		oracle = params.CheckpointOracles[e.BlockChain().Genesis().Hash()]
 	}
-	registrar := newCheckpointOracle(oracle, srv.getLocalCheckpoint)
-	// TODO(rjl493456442) Checkpoint is useless for les server, separate handler for client and server.
-	pm, err := NewProtocolManager(e.BlockChain().Config(), nil, light.DefaultServerIndexerConfig, config.UltraLightServers, config.UltraLightFraction, false, config.NetworkId, e.EventMux(), newPeerSet(), e.BlockChain(), e.TxPool(), e.ChainDb(), nil, nil, registrar, quitSync, new(sync.WaitGroup), e.Synced)
-	if err != nil {
-		return nil, err
-	}
-	srv.protocolManager = pm
-	pm.servingQueue = newServingQueue(int64(time.Millisecond*10), float64(config.LightServ)/100)
-	pm.server = srv
+	srv.oracle = newCheckpointOracle(oracle, srv.localCheckpoint)
 
+	// Initialize server capacity management fields.
+	srv.defParams = flowcontrol.ServerParams{
+		BufLimit:    srv.freeCapacity * bufLimitRatio,
+		MinRecharge: srv.freeCapacity,
+	}
+	// LES flow control tries to more or less guarantee the possibility for the
+	// clients to send a certain amount of requests at any time and get a quick
+	// response. Most of the clients want this guarantee but don't actually need
+	// to send requests most of the time. Our goal is to serve as many clients as
+	// possible while the actually used server capacity does not exceed the limits
+	totalRecharge := srv.costTracker.totalRecharge()
+	maxCapacity := srv.freeCapacity * uint64(srv.config.LightPeers)
+	if totalRecharge > maxCapacity {
+		maxCapacity = totalRecharge
+	}
+	srv.fcManager.SetCapacityLimits(srv.freeCapacity, maxCapacity, srv.freeCapacity*2)
+	srv.clientPool = newClientPool(srv.chainDb, srv.freeCapacity, 10000, mclock.System{}, func(id enode.ID) { go srv.peers.Unregister(peerIdToString(id)) })
+	srv.peers.notify(srv.clientPool)
+
+	checkpoint := srv.latestLocalCheckpoint()
+	if !checkpoint.Empty() {
+		log.Info("Loaded latest checkpoint", "section", checkpoint.SectionIndex, "head", checkpoint.SectionHead,
+			"chtroot", checkpoint.CHTRoot, "bloomroot", checkpoint.BloomRoot)
+	}
+	srv.chtIndexer.Start(e.BlockChain())
 	return srv, nil
 }
 
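Two small derivations in NewLesServer are worth tracing: the serving thread counts derived from the LightServ percentage, and the capacity ceiling, which is the larger of freeCapacity times LightPeers and the cost tracker's total recharge estimate. A standalone sketch of both calculations, with assumed example values for LightServ, LightPeers, freeCapacity and totalRecharge:

package main

import "fmt"

func main() {
	// Assumed example inputs; real values come from the CLI flags and the cost tracker.
	lightServ := 90                  // --lightserv serving-time percentage
	lightPeers := 100                // --lightpeers client limit
	freeCapacity := uint64(16000)    // minimal per-client capacity (assumed)
	totalRecharge := uint64(3000000) // total recharge estimate (assumed)

	// Serving thread counts, mirroring the arithmetic in NewLesServer.
	threadsIdle := lightServ * 4 / 100
	if threadsIdle < 4 {
		threadsIdle = 4 // never drop below four serving threads
	}
	threadsBusy := lightServ/100 + 1 // minimal thread count during block processing

	// Capacity ceiling: enough for every potential free client, but never
	// below what the recharge estimate says the server can sustain.
	maxCapacity := freeCapacity * uint64(lightPeers)
	if totalRecharge > maxCapacity {
		maxCapacity = totalRecharge
	}
	fmt.Printf("threadsIdle=%d threadsBusy=%d maxCapacity=%d\n", threadsIdle, threadsBusy, maxCapacity)
}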
@@ -120,102 +130,29 @@ func (s *LesServer) APIs() []rpc.API {
 		{
 			Namespace: "les",
 			Version:   "1.0",
-			Service:   NewPrivateLightAPI(&s.lesCommons, s.protocolManager.reg),
+			Service:   NewPrivateLightAPI(&s.lesCommons),
 			Public:    false,
 		},
 	}
 }
 
-// startEventLoop starts an event handler loop that updates the recharge curve of
-// the client manager and adjusts the client pool's size according to the total
-// capacity updates coming from the client manager
-func (s *LesServer) startEventLoop() {
-	s.protocolManager.wg.Add(1)
-
-	var (
-		processing, procLast bool
-		procStarted          time.Time
-	)
-	blockProcFeed := make(chan bool, 100)
-	s.protocolManager.blockchain.(*core.BlockChain).SubscribeBlockProcessingEvent(blockProcFeed)
-	totalRechargeCh := make(chan uint64, 100)
-	totalRecharge := s.costTracker.subscribeTotalRecharge(totalRechargeCh)
-	totalCapacityCh := make(chan uint64, 100)
-	updateRecharge := func() {
-		if processing {
-			if !procLast {
-				procStarted = time.Now()
-			}
-			s.protocolManager.servingQueue.setThreads(s.thcBlockProcessing)
-			s.fcManager.SetRechargeCurve(flowcontrol.PieceWiseLinear{{0, 0}, {totalRecharge, totalRecharge}})
-		} else {
-			if procLast {
-				blockProcessingTimer.UpdateSince(procStarted)
-			}
-			s.protocolManager.servingQueue.setThreads(s.thcNormal)
-			s.fcManager.SetRechargeCurve(flowcontrol.PieceWiseLinear{{0, 0}, {totalRecharge / 16, totalRecharge / 2}, {totalRecharge / 2, totalRecharge / 2}, {totalRecharge, totalRecharge}})
-		}
-		procLast = processing
-	}
-	updateRecharge()
-	totalCapacity := s.fcManager.SubscribeTotalCapacity(totalCapacityCh)
-	s.clientPool.setLimits(s.maxPeers, totalCapacity)
-
-	var maxFreePeers uint64
-	go func() {
-		for {
-			select {
-			case processing = <-blockProcFeed:
-				updateRecharge()
-			case totalRecharge = <-totalRechargeCh:
-				updateRecharge()
-			case totalCapacity = <-totalCapacityCh:
-				totalCapacityGauge.Update(int64(totalCapacity))
-				newFreePeers := totalCapacity / s.freeClientCap
-				if newFreePeers < maxFreePeers && newFreePeers < uint64(s.maxPeers) {
-					log.Warn("Reduced total capacity", "maxFreePeers", newFreePeers)
-				}
-				maxFreePeers = newFreePeers
-				s.clientPool.setLimits(s.maxPeers, totalCapacity)
-			case <-s.protocolManager.quitSync:
-				s.protocolManager.wg.Done()
-				return
-			}
-		}
-	}()
-}
 
 func (s *LesServer) Protocols() []p2p.Protocol {
-	return s.makeProtocols(ServerProtocolVersions)
+	return s.makeProtocols(ServerProtocolVersions, s.handler.runPeer, func(id enode.ID) interface{} {
+		if p := s.peers.Peer(peerIdToString(id)); p != nil {
+			return p.Info()
+		}
+		return nil
+	})
}
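The two callbacks handed to makeProtocols end up in the Run and PeerInfo fields of the p2p.Protocol descriptors it builds. A compilable sketch of how such a descriptor is typically assembled; runPeer and peerInfo here are stand-ins for s.handler.runPeer and the closure above, and the Length value is illustrative:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/p2p"
	"github.com/ethereum/go-ethereum/p2p/enode"
)

func main() {
	// Stand-ins for s.handler.runPeer and the peer-info closure in the diff.
	runPeer := func(version uint, peer *p2p.Peer, rw p2p.MsgReadWriter) error { return nil }
	peerInfo := func(id enode.ID) interface{} { return nil }

	proto := p2p.Protocol{
		Name:    "les",
		Version: 2,
		Length:  22, // number of message codes; illustrative value
		Run: func(peer *p2p.Peer, rw p2p.MsgReadWriter) error {
			return runPeer(2, peer, rw) // drive the per-peer message loop
		},
		PeerInfo: peerInfo, // consulted by the admin_peers RPC for metadata
	}
	fmt.Println(proto.Name, proto.Version)
}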
 
 // Start starts the LES server
 func (s *LesServer) Start(srvr *p2p.Server) {
-	s.maxPeers = s.config.LightPeers
-	totalRecharge := s.costTracker.totalRecharge()
-	if s.maxPeers > 0 {
-		s.freeClientCap = s.minCapacity //totalRecharge / uint64(s.maxPeers)
-		if s.freeClientCap < s.minCapacity {
-			s.freeClientCap = s.minCapacity
-		}
-		if s.freeClientCap > 0 {
-			s.defParams = flowcontrol.ServerParams{
-				BufLimit:    s.freeClientCap * bufLimitRatio,
-				MinRecharge: s.freeClientCap,
-			}
-		}
-	}
+	s.privateKey = srvr.PrivateKey
+	s.handler.start()
 
-	s.maxCapacity = s.freeClientCap * uint64(s.maxPeers)
-	if totalRecharge > s.maxCapacity {
-		s.maxCapacity = totalRecharge
-	}
-	s.fcManager.SetCapacityLimits(s.freeClientCap, s.maxCapacity, s.freeClientCap*2)
-	s.clientPool = newClientPool(s.chainDb, s.freeClientCap, 10000, mclock.System{}, func(id enode.ID) { go s.protocolManager.removePeer(peerIdToString(id)) })
-	s.clientPool.setPriceFactors(priceFactors{0, 1, 1}, priceFactors{0, 1, 1})
-	s.protocolManager.peers.notify(s.clientPool)
-	s.startEventLoop()
-	s.protocolManager.Start(s.config.LightPeers)
+	s.wg.Add(1)
+	go s.capacityManagement()
+
 	if srvr.DiscV5 != nil {
 		for _, topic := range s.lesTopics {
 			topic := topic
@@ -224,12 +161,32 @@ func (s *LesServer) Start(srvr *p2p.Server) {
 				logger.Info("Starting topic registration")
 				defer logger.Info("Terminated topic registration")
 
-				srvr.DiscV5.RegisterTopic(topic, s.quitSync)
+				srvr.DiscV5.RegisterTopic(topic, s.closeCh)
 			}()
 		}
 	}
-	s.privateKey = srvr.PrivateKey
-	s.protocolManager.blockLoop()
 }
 
+// Stop stops the LES service
+func (s *LesServer) Stop() {
+	close(s.closeCh)
+
+	// Disconnect existing sessions.
+	// This also closes the gate for any new registrations on the peer set.
+	// sessions which are already established but not added to pm.peers yet
+	// will exit when they try to register.
+	s.peers.Close()
+
+	s.fcManager.Stop()
+	s.clientPool.stop()
+	s.costTracker.stop()
+	s.handler.stop()
+	s.servingQueue.stop()
+
+	// Note, bloom trie indexer is closed by parent bloombits indexer.
+	s.chtIndexer.Close()
+	s.wg.Wait()
+	log.Info("Les server stopped")
+}
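The new Stop relies on the standard Go shutdown recipe that the closeCh/wg pair enables: close a channel to broadcast shutdown to every background loop, then wait on a WaitGroup until all of them have returned. A generic, self-contained sketch of the same pattern; the server type and names here are illustrative, not taken from the diff:

package main

import (
	"fmt"
	"sync"
)

type server struct {
	closeCh chan struct{}
	wg      sync.WaitGroup
}

func (s *server) start() {
	s.wg.Add(1)
	go func() {
		defer s.wg.Done()
		<-s.closeCh // block until stop signals shutdown
		fmt.Println("background loop exiting")
	}()
}

func (s *server) stop() {
	close(s.closeCh) // broadcast shutdown to every listener
	s.wg.Wait()      // block until all loops have returned
}

func main() {
	s := &server{closeCh: make(chan struct{})}
	s.start()
	s.stop()
}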
 
 func (s *LesServer) SetBloomBitsIndexer(bloomIndexer *core.ChainIndexer) {
@@ -238,78 +195,67 @@ func (s *LesServer) SetBloomBitsIndexer(bloomIndexer *core.ChainIndexer) {
 
 // SetClient sets the rpc client and starts running checkpoint contract if it is not yet watched.
 func (s *LesServer) SetContractBackend(backend bind.ContractBackend) {
-	if s.protocolManager.reg != nil {
-		s.protocolManager.reg.start(backend)
+	if s.oracle == nil {
+		return
 	}
+	s.oracle.start(backend)
 }
 
+// capacityManagement starts an event handler loop that updates the recharge curve of
+// the client manager and adjusts the client pool's size according to the total
+// capacity updates coming from the client manager
+func (s *LesServer) capacityManagement() {
+	defer s.wg.Done()
+
+	processCh := make(chan bool, 100)
+	sub := s.handler.blockchain.SubscribeBlockProcessingEvent(processCh)
+	defer sub.Unsubscribe()
+
+	totalRechargeCh := make(chan uint64, 100)
+	totalRecharge := s.costTracker.subscribeTotalRecharge(totalRechargeCh)
+
+	totalCapacityCh := make(chan uint64, 100)
+	totalCapacity := s.fcManager.SubscribeTotalCapacity(totalCapacityCh)
+	s.clientPool.setLimits(s.config.LightPeers, totalCapacity)
+
+	var (
+		busy         bool
+		freePeers    uint64
+		blockProcess mclock.AbsTime
+	)
+	updateRecharge := func() {
+		if busy {
+			s.servingQueue.setThreads(s.threadsBusy)
+			s.fcManager.SetRechargeCurve(flowcontrol.PieceWiseLinear{{0, 0}, {totalRecharge, totalRecharge}})
+		} else {
+			s.servingQueue.setThreads(s.threadsIdle)
+			s.fcManager.SetRechargeCurve(flowcontrol.PieceWiseLinear{{0, 0}, {totalRecharge / 10, totalRecharge}, {totalRecharge, totalRecharge}})
+		}
+	}
+	updateRecharge()
+
+	for {
+		select {
+		case busy = <-processCh:
+			if busy {
+				blockProcess = mclock.Now()
+			} else {
+				blockProcessingTimer.Update(time.Duration(mclock.Now() - blockProcess))
+			}
+			updateRecharge()
+		case totalRecharge = <-totalRechargeCh:
+			totalRechargeGauge.Update(int64(totalRecharge))
+			updateRecharge()
+		case totalCapacity = <-totalCapacityCh:
+			totalCapacityGauge.Update(int64(totalCapacity))
+			newFreePeers := totalCapacity / s.freeCapacity
+			if newFreePeers < freePeers && newFreePeers < uint64(s.config.LightPeers) {
+				log.Warn("Reduced free peer connections", "from", freePeers, "to", newFreePeers)
+			}
+			freePeers = newFreePeers
+			s.clientPool.setLimits(s.config.LightPeers, totalCapacity)
+		case <-s.closeCh:
+			return
+		}
+	}
+}
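The two recharge curves encode the serving policy. On a simplified reading of the flow control model, x is the aggregate recharge demand and y the recharge rate actually granted: while busy importing a block, clients recharge strictly linearly, whereas the idle curve {0,0} -> {totalRecharge/10, totalRecharge} -> {totalRecharge, totalRecharge} hands out the full recharge budget already at 10% demand. A standalone sketch of piecewise-linear evaluation over those control points; the point type is a stand-in for flowcontrol.PieceWiseLinear, and totalRecharge is an assumed value:

package main

import "fmt"

type point struct{ x, y uint64 }

// valueAt linearly interpolates y between control points, clamping at the ends.
func valueAt(curve []point, x uint64) uint64 {
	if x <= curve[0].x {
		return curve[0].y
	}
	for i := 1; i < len(curve); i++ {
		if x <= curve[i].x {
			p0, p1 := curve[i-1], curve[i]
			return p0.y + (x-p0.x)*(p1.y-p0.y)/(p1.x-p0.x)
		}
	}
	return curve[len(curve)-1].y
}

func main() {
	totalRecharge := uint64(1000000) // assumed example value
	idle := []point{{0, 0}, {totalRecharge / 10, totalRecharge}, {totalRecharge, totalRecharge}}
	for _, x := range []uint64{0, totalRecharge / 20, totalRecharge / 10, totalRecharge / 2} {
		fmt.Printf("demand=%d -> recharge=%d\n", x, valueAt(idle, x))
	}
}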
 
-// Stop stops the LES service
-func (s *LesServer) Stop() {
-	s.fcManager.Stop()
-	s.chtIndexer.Close()
-	// bloom trie indexer is closed by parent bloombits indexer
-	go func() {
-		<-s.protocolManager.noMorePeers
-	}()
-	s.clientPool.stop()
-	s.costTracker.stop()
-	s.protocolManager.Stop()
-}
 
-// todo(rjl493456442) separate client and server implementation.
-func (pm *ProtocolManager) blockLoop() {
-	pm.wg.Add(1)
-	headCh := make(chan core.ChainHeadEvent, 10)
-	headSub := pm.blockchain.SubscribeChainHeadEvent(headCh)
-	go func() {
-		var lastHead *types.Header
-		lastBroadcastTd := common.Big0
-		for {
-			select {
-			case ev := <-headCh:
-				peers := pm.peers.AllPeers()
-				if len(peers) > 0 {
-					header := ev.Block.Header()
-					hash := header.Hash()
-					number := header.Number.Uint64()
-					td := rawdb.ReadTd(pm.chainDb, hash, number)
-					if td != nil && td.Cmp(lastBroadcastTd) > 0 {
-						var reorg uint64
-						if lastHead != nil {
-							reorg = lastHead.Number.Uint64() - rawdb.FindCommonAncestor(pm.chainDb, header, lastHead).Number.Uint64()
-						}
-						lastHead = header
-						lastBroadcastTd = td
-
-						log.Debug("Announcing block to peers", "number", number, "hash", hash, "td", td, "reorg", reorg)
-
-						announce := announceData{Hash: hash, Number: number, Td: td, ReorgDepth: reorg}
-						var (
-							signed         bool
-							signedAnnounce announceData
-						)
-
-						for _, p := range peers {
-							p := p
-							switch p.announceType {
-							case announceTypeSimple:
-								p.queueSend(func() { p.SendAnnounce(announce) })
-							case announceTypeSigned:
-								if !signed {
-									signedAnnounce = announce
-									signedAnnounce.sign(pm.server.privateKey)
-									signed = true
-								}
-								p.queueSend(func() { p.SendAnnounce(signedAnnounce) })
-							}
-						}
-					}
-				}
-			case <-pm.quitSync:
-				headSub.Unsubscribe()
-				pm.wg.Done()
-				return
-			}
-		}
-	}()
-}
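The removed blockLoop signs head announcements lazily: the signature is computed at most once, only if some peer asked for signed announcements, and the signed copy is then reused for every such peer. A cut-down, runnable sketch of that sign-once, broadcast-many pattern; the announce type and hashing here are stand-ins for announceData and its sign method, not the LES wire format:

package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/sha256"
	"fmt"
)

type announce struct {
	number uint64
	sig    []byte
}

func main() {
	key, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	peersWantSigned := []bool{false, true, true} // per-peer announceType, illustrative

	a := announce{number: 12345}
	var signed *announce // lazily populated signed copy

	for _, wantSigned := range peersWantSigned {
		if !wantSigned {
			fmt.Println("send plain announce", a.number)
			continue
		}
		if signed == nil { // sign at most once, no matter how many peers need it
			h := sha256.Sum256([]byte(fmt.Sprint(a.number)))
			sig, _ := ecdsa.SignASN1(rand.Reader, key, h[:])
			signed = &announce{number: a.number, sig: sig}
		}
		fmt.Println("send signed announce", signed.number, len(signed.sig))
	}
}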