core: fix race conditions in txpool (#23474)
* core: fix race conditions in txpool
* core: fixed races in the txpool
* core: rebased on master
* core: move reheap mutex
* core: renamed mutex
* core: revert Reheap changes
committed by GitHub
parent d019e90162
commit 067084feda
@@ -21,6 +21,8 @@ import (
 	"math"
 	"math/big"
 	"sort"
+	"sync"
+	"sync/atomic"
 	"time"
 
 	"github.com/ethereum/go-ethereum/common"
@@ -478,9 +480,10 @@ func (h *priceHeap) Pop() interface{} {
 // better candidates for inclusion while in other cases (at the top of the baseFee peak)
 // the floating heap is better. When baseFee is decreasing they behave similarly.
 type txPricedList struct {
-	all              *txLookup // Pointer to the map of all transactions
-	urgent, floating priceHeap // Heaps of prices of all the stored **remote** transactions
-	stales           int       // Number of stale price points to (re-heap trigger)
+	all              *txLookup  // Pointer to the map of all transactions
+	urgent, floating priceHeap  // Heaps of prices of all the stored **remote** transactions
+	stales           int64      // Number of stale price points to (re-heap trigger)
+	reheapMu         sync.Mutex // Mutex asserts that only one routine is reheaping the list
 }
 
 const (
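The struct change is the heart of the fix: `stales` is widened from `int` to `int64` so it can be read and written through `sync/atomic`, and the new `reheapMu` ensures only one goroutine rebuilds the heaps at a time. A minimal sketch of that layout, with illustrative names (`demoList`, `bump`) that are not part of the patch:

package txpricedemo

import (
    "sync"
    "sync/atomic"
)

// demoList mirrors the shape of the patched txPricedList: an int64 counter
// that is only touched through sync/atomic, plus a mutex that makes the
// expensive rebuild step mutually exclusive.
type demoList struct {
    stales   int64      // stale-entry counter, updated via atomic.AddInt64
    reheapMu sync.Mutex // asserts that only one routine is reheaping at a time
}

// bump adds count to the stale counter without racing other goroutines and
// returns the new value, which is what the patched Removed method needs.
func (l *demoList) bump(count int) int64 {
    return atomic.AddInt64(&l.stales, int64(count))
}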
@@ -510,8 +513,8 @@ func (l *txPricedList) Put(tx *types.Transaction, local bool) {
 // the heap if a large enough ratio of transactions go stale.
 func (l *txPricedList) Removed(count int) {
 	// Bump the stale counter, but exit if still too low (< 25%)
-	l.stales += count
-	if l.stales <= (len(l.urgent.list)+len(l.floating.list))/4 {
+	stales := atomic.AddInt64(&l.stales, int64(count))
+	if int(stales) <= (len(l.urgent.list)+len(l.floating.list))/4 {
 		return
 	}
 	// Seems we've reached a critical number of stale transactions, reheap
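The old code did a non-atomic read-modify-write (`l.stales += count`) and then read the field again for the 25% check, which races with other goroutines. `atomic.AddInt64` performs the increment and returns the updated value in one step, so the threshold comparison uses a consistent snapshot. A stand-alone illustration of that return-value pattern; `markRemoved`, `staleCounter` and `heapSize` are made up for the example:

package txpricedemo

import "sync/atomic"

// markRemoved bumps a shared stale counter and reports whether more than a
// quarter of the given heap size is now stale. Using the value returned by
// AddInt64 avoids a second, racy read of the counter for the comparison.
func markRemoved(staleCounter *int64, count, heapSize int) bool {
    stales := atomic.AddInt64(staleCounter, int64(count))
    return int(stales) > heapSize/4
}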
@@ -535,7 +538,7 @@ func (l *txPricedList) underpricedFor(h *priceHeap, tx *types.Transaction) bool
 	for len(h.list) > 0 {
 		head := h.list[0]
 		if l.all.GetRemote(head.Hash()) == nil { // Removed or migrated
-			l.stales--
+			atomic.AddInt64(&l.stales, -1)
 			heap.Pop(h)
 			continue
 		}
@@ -561,7 +564,7 @@ func (l *txPricedList) Discard(slots int, force bool) (types.Transactions, bool)
 		// Discard stale transactions if found during cleanup
 		tx := heap.Pop(&l.urgent).(*types.Transaction)
 		if l.all.GetRemote(tx.Hash()) == nil { // Removed or migrated
-			l.stales--
+			atomic.AddInt64(&l.stales, -1)
 			continue
 		}
 		// Non stale transaction found, move to floating heap
@@ -574,7 +577,7 @@ func (l *txPricedList) Discard(slots int, force bool) (types.Transactions, bool)
 		// Discard stale transactions if found during cleanup
 		tx := heap.Pop(&l.floating).(*types.Transaction)
 		if l.all.GetRemote(tx.Hash()) == nil { // Removed or migrated
-			l.stales--
+			atomic.AddInt64(&l.stales, -1)
 			continue
 		}
 		// Non stale transaction found, discard it
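The plain `l.stales--` decrements in `underpricedFor` and in both halves of `Discard` become atomic as well, since a decrement is just an add of -1. A tiny sketch with an illustrative name:

package txpricedemo

import "sync/atomic"

// dropStale is the race-free equivalent of the old "l.stales--": an atomic
// add of -1 on the shared counter.
func dropStale(staleCounter *int64) {
    atomic.AddInt64(staleCounter, -1)
}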
@@ -594,8 +597,10 @@ func (l *txPricedList) Discard(slots int, force bool) (types.Transactions, bool)
 
 // Reheap forcibly rebuilds the heap based on the current remote transaction set.
 func (l *txPricedList) Reheap() {
+	l.reheapMu.Lock()
+	defer l.reheapMu.Unlock()
 	start := time.Now()
-	l.stales = 0
+	atomic.StoreInt64(&l.stales, 0)
 	l.urgent.list = make([]*types.Transaction, 0, l.all.RemoteCount())
 	l.all.Range(func(hash common.Hash, tx *types.Transaction, local bool) bool {
 		l.urgent.list = append(l.urgent.list, tx)
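Reheap now holds `reheapMu` for its whole duration and resets the counter with `atomic.StoreInt64`, so concurrent callers of `Removed` can keep bumping `stales` atomically while only one goroutine rebuilds the heaps and no reader ever observes a torn plain write. A compilable sketch of that locking shape, with stand-in names (`rebuilder`, `reheap`, `rebuild`) that are not from go-ethereum:

package txpricedemo

import (
    "sync"
    "sync/atomic"
    "time"
)

// rebuilder holds the two pieces of shared state the patch protects: an
// atomically updated stale counter and a mutex around the heap rebuild.
type rebuilder struct {
    stales   int64
    reheapMu sync.Mutex
}

// reheap serializes the rebuild behind reheapMu and resets the counter with
// an atomic store, so concurrent counter updates never race a plain write.
func (r *rebuilder) reheap(rebuild func()) {
    r.reheapMu.Lock()
    defer r.reheapMu.Unlock()

    start := time.Now()
    atomic.StoreInt64(&r.stales, 0)
    rebuild()             // e.g. repopulate the urgent/floating heaps
    _ = time.Since(start) // the real code feeds this into a metrics timer
}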