eth: rework tx fetcher to use O(1) ops + manage network requests
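Note: the fetcher-side changes promised in the commit title (the O(1) bookkeeping and the management of network requests) live under eth/fetcher and are not part of the eth/sync.go hunks shown below. Purely as an illustration of what constant-time announcement bookkeeping tends to look like, and not as the actual tx fetcher implementation, a map-indexed structure such as the following sketch avoids linear scans on every announcement and every peer drop:

package main

import "fmt"

// hash stands in for common.Hash in this sketch.
type hash = [32]byte

// announceIndex is a hypothetical illustration of constant-time bookkeeping:
// every insert, lookup and delete goes through maps keyed by hash or peer, so
// nothing ever scans a slice of pending announcements.
type announceIndex struct {
	byHash map[hash]map[string]struct{} // hash -> peers that announced it
	byPeer map[string]map[hash]struct{} // peer -> hashes it announced
}

func newAnnounceIndex() *announceIndex {
	return &announceIndex{
		byHash: make(map[hash]map[string]struct{}),
		byPeer: make(map[string]map[hash]struct{}),
	}
}

// add records that peer announced h; both map insertions are O(1).
func (idx *announceIndex) add(peer string, h hash) {
	if idx.byHash[h] == nil {
		idx.byHash[h] = make(map[string]struct{})
	}
	idx.byHash[h][peer] = struct{}{}
	if idx.byPeer[peer] == nil {
		idx.byPeer[peer] = make(map[hash]struct{})
	}
	idx.byPeer[peer][h] = struct{}{}
}

// drop forgets a disconnecting peer, touching only its own announcements
// instead of rescanning the whole index.
func (idx *announceIndex) drop(peer string) {
	for h := range idx.byPeer[peer] {
		delete(idx.byHash[h], peer)
		if len(idx.byHash[h]) == 0 {
			delete(idx.byHash, h)
		}
	}
	delete(idx.byPeer, peer)
}

func main() {
	idx := newAnnounceIndex()
	idx.add("peer1", hash{0x01})
	idx.drop("peer1")
	fmt.Println(len(idx.byHash), len(idx.byPeer)) // 0 0
}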
eth/sync.go (75 lines changed)
@@ -38,13 +38,18 @@ const (
 )
 
 type txsync struct {
-	p      *peer
-	hashes []common.Hash
-	txs    []*types.Transaction
+	p   *peer
+	txs []*types.Transaction
 }
 
 // syncTransactions starts sending all currently pending transactions to the given peer.
 func (pm *ProtocolManager) syncTransactions(p *peer) {
+	// Assemble the set of transaction to broadcast or announce to the remote
+	// peer. Fun fact, this is quite an expensive operation as it needs to sort
+	// the transactions if the sorting is not cached yet. However, with a random
+	// order, insertions could overflow the non-executable queues and get dropped.
+	//
+	// TODO(karalabe): Figure out if we could get away with random order somehow
 	var txs types.Transactions
 	pending, _ := pm.txpool.Pending()
 	for _, batch := range pending {
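The body of the range loop above falls outside this hunk; presumably it just flattens each account's sorted batch into the single txs slice before the length check that follows. A tiny standalone sketch of that flattening step, using ints as a stand-in for types.Transactions:

package main

import "fmt"

func main() {
	// Hypothetical stand-in for the txpool's Pending() result: one sorted
	// batch of transaction IDs per account address.
	pending := map[string][]int{
		"0xaaaa": {1, 2, 3},
		"0xbbbb": {4, 5},
	}
	// Flatten every batch into a single slice, mirroring what the elided
	// loop body in syncTransactions presumably does.
	var txs []int
	for _, batch := range pending {
		txs = append(txs, batch...)
	}
	fmt.Println(len(txs)) // 5
}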
@@ -53,17 +58,29 @@ func (pm *ProtocolManager) syncTransactions(p *peer) {
 	if len(txs) == 0 {
 		return
 	}
+	// The eth/65 protocol introduces proper transaction announcements, so instead
+	// of dripping transactions across multiple peers, just send the entire list as
+	// an announcement and let the remote side decide what they need (likely nothing).
+	if p.version >= eth65 {
+		hashes := make([]common.Hash, len(txs))
+		for i, tx := range txs {
+			hashes[i] = tx.Hash()
+		}
+		p.AsyncSendPooledTransactionHashes(hashes)
+		return
+	}
+	// Out of luck, peer is running legacy protocols, drop the txs over
 	select {
 	case pm.txsyncCh <- &txsync{p: p, txs: txs}:
 	case <-pm.quitSync:
 	}
 }
 
-// txsyncLoop takes care of the initial transaction sync for each new
+// txsyncLoop64 takes care of the initial transaction sync for each new
 // connection. When a new peer appears, we relay all currently pending
 // transactions. In order to minimise egress bandwidth usage, we send
 // the transactions in small packs to one peer at a time.
-func (pm *ProtocolManager) txsyncLoop() {
+func (pm *ProtocolManager) txsyncLoop64() {
 	var (
 		pending = make(map[enode.ID]*txsync)
 		sending = false               // whether a send is active
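AsyncSendPooledTransactionHashes is a method on the peer (defined in eth/peer.go, not shown in this diff); the point of the new path is that the announcement is queued for the peer's background broadcast loop instead of being written to the network synchronously. A minimal sketch of that kind of non-blocking hand-off, with hypothetical names (queuedAnns, asyncSendHashes, broadcastLoop) rather than the real peer API:

package main

import (
	"fmt"
	"time"
)

type hash = [32]byte

// peer is a hypothetical stand-in for the eth peer object; queuedAnns feeds a
// background writer so callers of asyncSendHashes never block on the network.
type peer struct {
	queuedAnns chan []hash
	quit       chan struct{}
}

// asyncSendHashes queues an announcement, dropping it if the peer is too slow
// to drain its queue (better to lose an announcement than to stall the caller).
func (p *peer) asyncSendHashes(hashes []hash) {
	select {
	case p.queuedAnns <- hashes:
	default:
		fmt.Println("dropping announcement, peer is saturated")
	}
}

// broadcastLoop is the background writer that actually touches the network.
func (p *peer) broadcastLoop(send func([]hash) error) {
	for {
		select {
		case anns := <-p.queuedAnns:
			if err := send(anns); err != nil {
				return
			}
		case <-p.quit:
			return
		}
	}
}

func main() {
	p := &peer{queuedAnns: make(chan []hash, 4), quit: make(chan struct{})}
	go p.broadcastLoop(func(h []hash) error {
		fmt.Println("sent", len(h), "hashes")
		return nil
	})
	p.asyncSendHashes([]hash{{0x01}, {0x02}})
	time.Sleep(10 * time.Millisecond) // give the background loop time to drain
	close(p.quit)
}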
@@ -72,44 +89,26 @@ func (pm *ProtocolManager) txsyncLoop() {
 	)
 	// send starts a sending a pack of transactions from the sync.
 	send := func(s *txsync) {
+		if s.p.version >= eth65 {
+			panic("initial transaction syncer running on eth/65+")
+		}
 		// Fill pack with transactions up to the target size.
 		size := common.StorageSize(0)
 		pack.p = s.p
-		pack.hashes = pack.hashes[:0]
 		pack.txs = pack.txs[:0]
-		if s.p.version >= eth65 {
-			// Eth65 introduces transaction announcement https://github.com/ethereum/EIPs/pull/2464,
-			// only txhashes are transferred here.
-			for i := 0; i < len(s.txs) && size < txsyncPackSize; i++ {
-				pack.hashes = append(pack.hashes, s.txs[i].Hash())
-				size += common.HashLength
-			}
-			// Remove the transactions that will be sent.
-			s.txs = s.txs[:copy(s.txs, s.txs[len(pack.hashes):])]
-			if len(s.txs) == 0 {
-				delete(pending, s.p.ID())
-			}
-			// Send the pack in the background.
-			s.p.Log().Trace("Sending batch of transaction announcements", "count", len(pack.hashes), "bytes", size)
-			sending = true
-			go func() { done <- pack.p.SendNewTransactionHashes(pack.hashes) }()
-		} else {
-			// Legacy eth protocol doesn't have transaction announcement protocol
-			// message, transfer the whole pending transaction slice.
-			for i := 0; i < len(s.txs) && size < txsyncPackSize; i++ {
-				pack.txs = append(pack.txs, s.txs[i])
-				size += s.txs[i].Size()
-			}
-			// Remove the transactions that will be sent.
-			s.txs = s.txs[:copy(s.txs, s.txs[len(pack.txs):])]
-			if len(s.txs) == 0 {
-				delete(pending, s.p.ID())
-			}
-			// Send the pack in the background.
-			s.p.Log().Trace("Sending batch of transactions", "count", len(pack.txs), "bytes", size)
-			sending = true
-			go func() { done <- pack.p.SendNewTransactions(pack.txs) }()
+		for i := 0; i < len(s.txs) && size < txsyncPackSize; i++ {
+			pack.txs = append(pack.txs, s.txs[i])
+			size += s.txs[i].Size()
 		}
+		// Remove the transactions that will be sent.
+		s.txs = s.txs[:copy(s.txs, s.txs[len(pack.txs):])]
+		if len(s.txs) == 0 {
+			delete(pending, s.p.ID())
+		}
+		// Send the pack in the background.
+		s.p.Log().Trace("Sending batch of transactions", "count", len(pack.txs), "bytes", size)
+		sending = true
+		go func() { done <- pack.p.SendTransactions64(pack.txs) }()
 	}
 
 	// pick chooses the next pending sync.
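The line s.txs = s.txs[:copy(s.txs, s.txs[len(pack.txs):])] that survives into the new code is a compact in-place dequeue: copy shifts the unsent tail of the slice to the front of the same backing array and returns how many elements it moved, and reslicing to that length drops the already-sent entries without allocating. A standalone illustration of the idiom, with ints instead of transactions:

package main

import "fmt"

func main() {
	// Pretend the first three elements were just packed and sent.
	txs := []int{1, 2, 3, 4, 5}
	sent := 3

	// Shift the unsent tail to the front and reslice; the backing array is
	// reused, so repeated packs do not allocate new slices.
	txs = txs[:copy(txs, txs[sent:])]

	fmt.Println(txs) // [4 5]
}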