eth, p2p/msgrate: move peer QoS tracking to its own package and use it for snap (#22876)
This change extracts the peer QoS tracking logic from eth/downloader and moves it into the new p2p/msgrate package. The job of msgrate.Tracker is to determine suitable timeout values and request sizes for each peer. The snap sync scheduler now uses msgrate.Tracker instead of a hard-coded 15s timeout, which should make sync work better on high-latency network links.
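The idea behind the tracker is easy to sketch. The following is a minimal, hypothetical illustration, not the actual msgrate API: each peer gets a smoothed round-trip estimate, and request timeouts are derived from that estimate instead of a fixed constant. Every name in it (rttTracker, observe, timeout) is invented for the example.

	package main

	import (
		"fmt"
		"time"
	)

	// rttTracker is a toy stand-in for the kind of per-peer QoS state the
	// tracker keeps: a smoothed round-trip estimate from which timeouts
	// are derived, instead of being hard-coded.
	type rttTracker struct {
		rtt time.Duration // exponentially weighted moving average of samples
	}

	// observe folds a new round-trip measurement into the moving average,
	// weighting history at 90% and the fresh sample at 10%.
	func (t *rttTracker) observe(sample time.Duration) {
		if t.rtt == 0 {
			t.rtt = sample
			return
		}
		t.rtt = time.Duration(0.9*float64(t.rtt) + 0.1*float64(sample))
	}

	// timeout grants a request three smoothed round trips before it is
	// declared failed, so peers on slow links aren't penalized for latency.
	func (t *rttTracker) timeout() time.Duration {
		return 3 * t.rtt
	}

	func main() {
		t := new(rttTracker)
		for _, s := range []time.Duration{120 * time.Millisecond, 300 * time.Millisecond, 90 * time.Millisecond} {
			t.observe(s)
		}
		fmt.Println("per-peer timeout:", t.timeout()) // scales with the link, unlike a fixed 15s
	}

The real Tracker also measures per-request-type throughput, so request sizes can be tuned alongside timeouts, as the commit message notes.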
eth/protocols/snap/sync_test.go

@@ -796,12 +796,6 @@ func TestMultiSyncManyUseless(t *testing.T) {
 
 // TestMultiSyncManyUseless contains one good peer, and many which doesn't return anything valuable at all
 func TestMultiSyncManyUselessWithLowTimeout(t *testing.T) {
-	// We're setting the timeout to very low, to increase the chance of the timeout
-	// being triggered. This was previously a cause of panic, when a response
-	// arrived simultaneously as a timeout was triggered.
-	defer func(old time.Duration) { requestTimeout = old }(requestTimeout)
-	requestTimeout = time.Millisecond
-
 	var (
 		once   sync.Once
 		cancel = make(chan struct{})
@@ -838,6 +832,11 @@ func TestMultiSyncManyUselessWithLowTimeout(t *testing.T) {
 		mkSource("noStorage", true, false, true),
 		mkSource("noTrie", true, true, false),
 	)
+	// We're setting the timeout to very low, to increase the chance of the timeout
+	// being triggered. This was previously a cause of panic, when a response
+	// arrived simultaneously as a timeout was triggered.
+	syncer.rates.OverrideTTLLimit = time.Millisecond
+
 	done := checkStall(t, term)
 	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
 		t.Fatalf("sync failed: %v", err)
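Taken together, the two hunks above show the pattern this change removes. Previously the test mutated a package-global requestTimeout and restored it with a defer; now the override is scoped to the one syncer under test through its rate tracker's OverrideTTLLimit field, so nothing leaks into tests running in parallel. Excerpted from the hunks:

	// Before: mutate shared package state, then restore it on exit.
	defer func(old time.Duration) { requestTimeout = old }(requestTimeout)
	requestTimeout = time.Millisecond

	// After: the override lives on the syncer itself; no global, no restore.
	syncer.rates.OverrideTTLLimit = time.Millisecond

The comment explaining why the test wants a tiny timeout moves along with the override, which is why it reappears as added lines later in the same function.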
@@ -848,10 +847,6 @@ func TestMultiSyncManyUselessWithLowTimeout(t *testing.T) {
 
 // TestMultiSyncManyUnresponsive contains one good peer, and many which doesn't respond at all
 func TestMultiSyncManyUnresponsive(t *testing.T) {
-	// We're setting the timeout to very low, to make the test run a bit faster
-	defer func(old time.Duration) { requestTimeout = old }(requestTimeout)
-	requestTimeout = time.Millisecond
-
 	var (
 		once   sync.Once
 		cancel = make(chan struct{})
@@ -888,6 +883,9 @@ func TestMultiSyncManyUnresponsive(t *testing.T) {
 		mkSource("noStorage", true, false, true),
 		mkSource("noTrie", true, true, false),
 	)
+	// We're setting the timeout to very low, to make the test run a bit faster
+	syncer.rates.OverrideTTLLimit = time.Millisecond
+
 	done := checkStall(t, term)
 	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
 		t.Fatalf("sync failed: %v", err)
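For context on what the new knob does: OverrideTTLLimit caps the timeout that the tracker set computes from its round-trip measurements. The sketch below paraphrases that clamping with invented names and an assumed scaling factor; it is not the msgrate implementation, just the shape of it. Capping at one millisecond, as these tests do, makes every in-flight request time out almost instantly, which is exactly the race the first test wants to provoke.

	package sketch

	import "time"

	// targetTimeout derives a request timeout from the smoothed round-trip
	// estimate, but never lets it exceed the configured ceiling. The factor
	// of 3 is an assumption for illustration, not msgrate's real scaling.
	func targetTimeout(roundtrip, ttlLimit time.Duration) time.Duration {
		timeout := 3 * roundtrip
		if timeout > ttlLimit {
			timeout = ttlLimit
		}
		return timeout
	}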