This reverts commit 0b724bd4d5.
parent 0b724bd4d5
commit 604960938b
network/fetcher.go (new file, 336 lines)
@@ -0,0 +1,336 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package network

import (
    "context"
    "fmt"
    "sync"
    "time"

    "github.com/ethereum/go-ethereum/log"
    "github.com/ethereum/go-ethereum/p2p/enode"
    "github.com/ethersphere/swarm/storage"
    "github.com/ethersphere/swarm/tracing"
    olog "github.com/opentracing/opentracing-go/log"
)

const (
    defaultSearchTimeout = 1 * time.Second
    // maximum number of forwarded requests (hops), to make sure requests are not
    // forwarded forever in peer loops
    maxHopCount uint8 = 20
)

// RequestTimeout is the time period after which a peer is no longer considered skipped.
// It is also used in stream delivery.
var RequestTimeout = 10 * time.Second

type RequestFunc func(context.Context, *Request) (*enode.ID, chan struct{}, error)

// Fetcher is created when a chunk is not found locally. It starts a request handler loop once and
// keeps it alive until all active requests are completed. This can happen:
// 1. either because the chunk is delivered
// 2. or because the requester cancelled/timed out
// The Fetcher self-destructs once it is completed.
// TODO: cancel all forward requests after termination
type Fetcher struct {
    protoRequestFunc RequestFunc     // request function the fetcher calls to issue a retrieve request for a chunk
    addr             storage.Address // the address of the chunk to be fetched
    offerC           chan *enode.ID  // channel of sources (peer node id strings)
    requestC         chan uint8      // channel for incoming requests (with the hopCount value in it)
    searchTimeout    time.Duration
    skipCheck        bool
    ctx              context.Context
}

type Request struct {
    Addr        storage.Address // chunk address
    Source      *enode.ID       // nodeID of the peer to request from (can be nil)
    SkipCheck   bool            // whether to offer the chunk first or deliver directly
    peersToSkip *sync.Map       // peers not to request the chunk from (only makes sense if Source is nil)
    HopCount    uint8           // number of forwarded requests (hops)
}

// NewRequest returns a new instance of Request based on chunk address, skip check flag and
// a map of peers to skip.
func NewRequest(addr storage.Address, skipCheck bool, peersToSkip *sync.Map) *Request {
    return &Request{
        Addr:        addr,
        SkipCheck:   skipCheck,
        peersToSkip: peersToSkip,
    }
}

// SkipPeer returns whether the peer with nodeID should not be requested to deliver a chunk.
// Peers to skip are kept per Request and for a time period of RequestTimeout.
// This function is used in the stream package in Delivery.RequestFromPeers to optimize
// requests for chunks.
func (r *Request) SkipPeer(nodeID string) bool {
    val, ok := r.peersToSkip.Load(nodeID)
    if !ok {
        return false
    }
    t, ok := val.(time.Time)
    if ok && time.Now().After(t.Add(RequestTimeout)) {
        // deadline expired
        r.peersToSkip.Delete(nodeID)
        return false
    }
    return true
}

// FetcherFactory is initialised with a request function and can create fetchers
type FetcherFactory struct {
    request   RequestFunc
    skipCheck bool
}

// NewFetcherFactory takes a request function and a skip check parameter and creates a FetcherFactory
func NewFetcherFactory(request RequestFunc, skipCheck bool) *FetcherFactory {
    return &FetcherFactory{
        request:   request,
        skipCheck: skipCheck,
    }
}

// New constructs a new Fetcher for the given chunk. None of the peers in peersToSkip
// are requested to deliver the given chunk. peersToSkip should always
// contain the peers which are actively requesting this chunk, to make sure we
// don't request the chunk back from them.
// The created Fetcher is started and returned.
func (f *FetcherFactory) New(ctx context.Context, source storage.Address, peers *sync.Map) storage.NetFetcher {
    fetcher := NewFetcher(ctx, source, f.request, f.skipCheck)
    go fetcher.run(peers)
    return fetcher
}

// NewFetcher creates a new Fetcher for the given chunk address using the given request function.
func NewFetcher(ctx context.Context, addr storage.Address, rf RequestFunc, skipCheck bool) *Fetcher {
    return &Fetcher{
        addr:             addr,
        protoRequestFunc: rf,
        offerC:           make(chan *enode.ID),
        requestC:         make(chan uint8),
        searchTimeout:    defaultSearchTimeout,
        skipCheck:        skipCheck,
        ctx:              ctx,
    }
}

// Offer is called when an upstream peer offers the chunk via syncing as part of `OfferedHashesMsg` and the node does not have the chunk locally.
func (f *Fetcher) Offer(source *enode.ID) {
    // First we need this select to make sure that we return if the context is done
    select {
    case <-f.ctx.Done():
        return
    default:
    }

    // This select alone would not guarantee that we return if the context is done; it could potentially
    // push to offerC instead if offerC is available (see number 2 in https://golang.org/ref/spec#Select_statements)
    select {
    case f.offerC <- source:
    case <-f.ctx.Done():
    }
}

// Request is called when an upstream peer requests the chunk as part of `RetrieveRequestMsg`, or from a local request through FileStore, and the node does not have the chunk locally.
func (f *Fetcher) Request(hopCount uint8) {
    // First we need this select to make sure that we return if the context is done
    select {
    case <-f.ctx.Done():
        return
    default:
    }

    if hopCount >= maxHopCount {
        log.Debug("fetcher request hop count limit reached", "hops", hopCount)
        return
    }

    // This select alone would not guarantee that we return if the context is done; it could potentially
    // push to requestC instead if requestC is available (see number 2 in https://golang.org/ref/spec#Select_statements)
    select {
    case f.requestC <- hopCount + 1:
    case <-f.ctx.Done():
    }
}

// run is the request handler loop of the Fetcher.
// It keeps the Fetcher alive within the lifecycle of the passed context.
func (f *Fetcher) run(peers *sync.Map) {
    var (
        doRequest bool             // determines if retrieval is initiated in the current iteration
        wait      *time.Timer      // timer for search timeout
        waitC     <-chan time.Time // timer channel
        sources   []*enode.ID      // known sources, i.e. peers that offered the chunk
        requested bool             // true if the chunk was actually requested
        hopCount  uint8
    )
    gone := make(chan *enode.ID) // channel to signal that a peer we requested from disconnected

    // The loop that keeps the fetching process alive.
    // After every request a timer is set. If this goes off we request again from another peer.
    // Note that the previous request is still alive and has the chance to deliver, so
    // requesting again extends the search, i.e.
    // if a peer we requested from is gone we issue a new request, so the number of active
    // requests never decreases.
    for {
        select {

        // incoming offer
        case source := <-f.offerC:
            log.Trace("new source", "peer addr", source, "request addr", f.addr)
            // 1) the chunk is offered by a syncing peer
            // add to known sources
            sources = append(sources, source)
            // launch a request to the source iff the chunk was requested (not just expected because it's offered by a syncing peer)
            doRequest = requested

        // incoming request
        case hopCount = <-f.requestC:
            // 2) chunk is requested, set requested flag
            // launch a request iff none has been launched yet
            doRequest = !requested
            log.Trace("new request", "request addr", f.addr, "doRequest", doRequest)
            requested = true

        // a peer we requested from is gone: fall back to another
        // and remove the peer from the peers map
        case id := <-gone:
            peers.Delete(id.String())
            doRequest = requested
            log.Trace("peer gone", "peer id", id.String(), "request addr", f.addr, "doRequest", doRequest)

        // search timeout: too much time passed since the last request,
        // extend the search to a new peer if we can find one
        case <-waitC:
            doRequest = requested
            log.Trace("search timed out: requesting", "request addr", f.addr, "doRequest", doRequest)

        // the Fetcher's context is closed, we can quit
        case <-f.ctx.Done():
            log.Trace("terminate fetcher", "request addr", f.addr)
            // TODO: send cancellations to all peers left over in the peers map (i.e., those we requested from)
            return
        }

        // need to issue a new request
        if doRequest {
            var err error
            sources, err = f.doRequest(gone, peers, sources, hopCount)
            if err != nil {
                log.Info("unable to request", "request addr", f.addr, "err", err)
            }
        }

        // if the wait channel is not set, set it to a timer
        if requested {
            if wait == nil {
                wait = time.NewTimer(f.searchTimeout)
                defer wait.Stop()
                waitC = wait.C
            } else {
                // stop the timer and drain the channel if it was not drained earlier
                if !wait.Stop() {
                    select {
                    case <-wait.C:
                    default:
                    }
                }
                // reset the timer to go off after searchTimeout
                wait.Reset(f.searchTimeout)
            }
        }
        doRequest = false
    }
}

// doRequest attempts to find a peer to request the chunk from:
// * first it tries to request explicitly from peers that are known to have offered the chunk
// * if there are no such peers (available) it tries to request it from a peer closest to the chunk address,
//   excluding those in the peersToSkip map
// * if no such peer is found an error is returned
//
// If a request is successful,
// * the peer's address is added to the set of peers to skip
// * the peer's address is removed from prospective sources, and
// * a goroutine is started that reports on the gone channel if the peer is disconnected (or terminated its streamer)
func (f *Fetcher) doRequest(gone chan *enode.ID, peersToSkip *sync.Map, sources []*enode.ID, hopCount uint8) ([]*enode.ID, error) {
    var i int
    var sourceID *enode.ID
    var quit chan struct{}

    req := &Request{
        Addr:        f.addr,
        SkipCheck:   f.skipCheck,
        peersToSkip: peersToSkip,
        HopCount:    hopCount,
    }

    foundSource := false
    // iterate over known sources
    for i = 0; i < len(sources); i++ {
        req.Source = sources[i]
        var err error
        log.Trace("fetcher.doRequest", "request addr", f.addr, "peer", req.Source.String())
        sourceID, quit, err = f.protoRequestFunc(f.ctx, req)
        if err == nil {
            // remove the peer from known sources
            // Note: we can modify the slice although we are looping over it, because we break from the loop immediately
            sources = append(sources[:i], sources[i+1:]...)
            foundSource = true
            break
        }
    }

    // if there are no known sources, or none are available, we try to request from the closest node
    if !foundSource {
        req.Source = nil
        var err error
        sourceID, quit, err = f.protoRequestFunc(f.ctx, req)
        if err != nil {
            // no peers found to request from
            return sources, err
        }
    }
    // add the peer to the set of peers to skip from now on
    peersToSkip.Store(sourceID.String(), time.Now())

    // If the quit channel is closed, it indicates that the source peer we requested from
    // disconnected or terminated its streamer.
    // Here we start a goroutine that watches this channel and reports the source peer on the gone channel;
    // this goroutine quits if the fetcher's global context is done, to prevent a goroutine leak.
    go func() {
        select {
        case <-quit:
            gone <- sourceID
        case <-f.ctx.Done():
        }

        // finish the request span
        spanId := fmt.Sprintf("stream.send.request.%v.%v", *sourceID, req.Addr)
        span := tracing.ShiftSpanByKey(spanId)

        if span != nil {
            span.LogFields(olog.String("finish", "from doRequest"))
            span.Finish()
        }
    }()
    return sources, nil
}
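The guarded sends in Offer and Request above rely on a standard Go idiom: a non-blocking ctx.Done() check before the blocking select, because a select with several ready cases picks one at random and could perform the send even after cancellation. A minimal, self-contained sketch of the idiom (the names are illustrative, not part of the file above):

package main

import (
    "context"
    "fmt"
)

// guardedSend delivers v on ch unless ctx is already cancelled.
// Without the first select, a cancelled context and a ready receiver
// would race: a single select chooses among ready cases at random.
func guardedSend(ctx context.Context, ch chan<- int, v int) bool {
    select {
    case <-ctx.Done():
        return false // cancelled: never attempt the send
    default:
    }
    select {
    case ch <- v:
        return true
    case <-ctx.Done():
        return false
    }
}

func main() {
    ctx, cancel := context.WithCancel(context.Background())
    ch := make(chan int, 1)
    fmt.Println(guardedSend(ctx, ch, 1)) // true: context still live
    cancel()
    fmt.Println(guardedSend(ctx, ch, 2)) // false: cancelled, send skipped
}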
network/fetcher_test.go (new file, 476 lines)
@@ -0,0 +1,476 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package network

import (
    "context"
    "sync"
    "testing"
    "time"

    "github.com/ethereum/go-ethereum/p2p/enode"
)

var requestedPeerID = enode.HexID("3431c3939e1ee2a6345e976a8234f9870152d64879f30bc272a074f6859e75e8")
var sourcePeerID = enode.HexID("99d8594b52298567d2ca3f4c441a5ba0140ee9245e26460d01102a52773c73b9")

// mockRequester pushes every request to the requestC channel when its doRequest function is called
type mockRequester struct {
    // requests []Request
    requestC  chan *Request   // when a request comes in it is pushed to requestC
    waitTimes []time.Duration // with waitTimes[i] you can define how long to wait on the ith request (optional)
    count     int             // counts the number of requests
    quitC     chan struct{}
}

func newMockRequester(waitTimes ...time.Duration) *mockRequester {
    return &mockRequester{
        requestC:  make(chan *Request),
        waitTimes: waitTimes,
        quitC:     make(chan struct{}),
    }
}

func (m *mockRequester) doRequest(ctx context.Context, request *Request) (*enode.ID, chan struct{}, error) {
    waitTime := time.Duration(0)
    if m.count < len(m.waitTimes) {
        waitTime = m.waitTimes[m.count]
        m.count++
    }
    time.Sleep(waitTime)
    m.requestC <- request

    // if there is a Source in the request use that, if not use the global requestedPeerID
    source := request.Source
    if source == nil {
        source = &requestedPeerID
    }
    return source, m.quitC, nil
}

// TestFetcherSingleRequest creates a Fetcher using mockRequester, and runs it with a sample set of peers to skip.
// mockRequester pushes a Request on a channel every time the request function is called. Using
// this channel we test if calling Fetcher.Request calls the request function, and whether it uses
// the correct peers to skip which we provided for the fetcher.run function.
func TestFetcherSingleRequest(t *testing.T) {
    requester := newMockRequester()
    addr := make([]byte, 32)

    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()

    fetcher := NewFetcher(ctx, addr, requester.doRequest, true)

    peers := []string{"a", "b", "c", "d"}
    peersToSkip := &sync.Map{}
    for _, p := range peers {
        peersToSkip.Store(p, time.Now())
    }

    go fetcher.run(peersToSkip)

    fetcher.Request(0)

    select {
    case request := <-requester.requestC:
        // request should contain all peers from peersToSkip provided to the fetcher
        for _, p := range peers {
            if _, ok := request.peersToSkip.Load(p); !ok {
                t.Fatalf("request.peersToSkip misses peer")
            }
        }

        // the source peer should also be added to peersToSkip eventually
        time.Sleep(100 * time.Millisecond)
        if _, ok := request.peersToSkip.Load(requestedPeerID.String()); !ok {
            t.Fatalf("request.peersToSkip does not contain peer returned by the request function")
        }

        // hopCount in the forwarded request should be incremented
        if request.HopCount != 1 {
            t.Fatalf("Expected request.HopCount 1 got %v", request.HopCount)
        }

    // fetch should trigger a request; if it doesn't happen in time, the test should fail
    case <-time.After(200 * time.Millisecond):
        t.Fatalf("fetch timeout")
    }
}

// TestFetcherCancelStopsFetcher tests that a cancelled fetcher does not initiate further requests even if its fetch function is called
func TestFetcherCancelStopsFetcher(t *testing.T) {
    requester := newMockRequester()
    addr := make([]byte, 32)

    ctx, cancel := context.WithCancel(context.Background())

    fetcher := NewFetcher(ctx, addr, requester.doRequest, true)

    peersToSkip := &sync.Map{}

    // we start the fetcher, and then we immediately cancel the context
    go fetcher.run(peersToSkip)
    cancel()

    // we call Request on the cancelled fetcher
    fetcher.Request(0)

    // fetcher should not initiate a request; we can only check by waiting a bit and making sure no request is happening
    select {
    case <-requester.requestC:
        t.Fatalf("cancelled fetcher initiated request")
    case <-time.After(200 * time.Millisecond):
    }
}

// TestFetcherCancelStopsRequest tests that calling the Request function with a cancelled context does not initiate a request
func TestFetcherCancelStopsRequest(t *testing.T) {
    t.Skip("since context is now per fetcher, this test is likely redundant")

    requester := newMockRequester(100 * time.Millisecond)
    addr := make([]byte, 32)

    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()

    fetcher := NewFetcher(ctx, addr, requester.doRequest, true)

    peersToSkip := &sync.Map{}

    // we start the fetcher with an active context
    go fetcher.run(peersToSkip)

    // we call Request with a cancelled context
    fetcher.Request(0)

    // fetcher should not initiate a request; we can only check by waiting a bit and making sure no request is happening
    select {
    case <-requester.requestC:
        t.Fatalf("cancelled fetch function initiated request")
    case <-time.After(200 * time.Millisecond):
    }

    // if there is another Request with an active context, there should be a request, because the fetcher itself is not cancelled
    fetcher.Request(0)

    select {
    case <-requester.requestC:
    case <-time.After(200 * time.Millisecond):
        t.Fatalf("expected request")
    }
}

// TestFetcherOfferUsesSource tests Fetcher Offer behavior.
// In this case there should be 1 (and only one) request initiated from the source peer, and the
// source nodeid should appear in the peersToSkip map.
func TestFetcherOfferUsesSource(t *testing.T) {
    requester := newMockRequester(100 * time.Millisecond)
    addr := make([]byte, 32)

    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()

    fetcher := NewFetcher(ctx, addr, requester.doRequest, true)

    peersToSkip := &sync.Map{}

    // start the fetcher
    go fetcher.run(peersToSkip)

    // call the Offer function with the source peer
    fetcher.Offer(&sourcePeerID)

    // fetcher should not initiate a request
    select {
    case <-requester.requestC:
        t.Fatalf("fetcher initiated request")
    case <-time.After(200 * time.Millisecond):
    }

    // call Request after the Offer
    fetcher.Request(0)

    // there should be exactly 1 request coming from the fetcher
    var request *Request
    select {
    case request = <-requester.requestC:
        if *request.Source != sourcePeerID {
            t.Fatalf("Expected source id %v got %v", sourcePeerID, request.Source)
        }
    case <-time.After(200 * time.Millisecond):
        t.Fatalf("fetcher did not initiate request")
    }

    select {
    case <-requester.requestC:
        t.Fatalf("Fetcher number of requests expected 1 got 2")
    case <-time.After(200 * time.Millisecond):
    }

    // the source peer should be added to peersToSkip eventually
    time.Sleep(100 * time.Millisecond)
    if _, ok := request.peersToSkip.Load(sourcePeerID.String()); !ok {
        t.Fatalf("SourcePeerId not added to peersToSkip")
    }
}

func TestFetcherOfferAfterRequestUsesSourceFromContext(t *testing.T) {
    requester := newMockRequester(100 * time.Millisecond)
    addr := make([]byte, 32)

    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()

    fetcher := NewFetcher(ctx, addr, requester.doRequest, true)

    peersToSkip := &sync.Map{}

    // start the fetcher
    go fetcher.run(peersToSkip)

    // call Request first
    fetcher.Request(0)

    // there should be a request coming from the fetcher
    var request *Request
    select {
    case request = <-requester.requestC:
        if request.Source != nil {
            t.Fatalf("Incorrect source peer id, expected nil got %v", request.Source)
        }
    case <-time.After(200 * time.Millisecond):
        t.Fatalf("fetcher did not initiate request")
    }

    // after the Request call Offer
    fetcher.Offer(&sourcePeerID)

    // there should be a request coming from the fetcher
    select {
    case request = <-requester.requestC:
        if *request.Source != sourcePeerID {
            t.Fatalf("Incorrect source peer id, expected %v got %v", sourcePeerID, request.Source)
        }
    case <-time.After(200 * time.Millisecond):
        t.Fatalf("fetcher did not initiate request")
    }

    // the source peer should be added to peersToSkip eventually
    time.Sleep(100 * time.Millisecond)
    if _, ok := request.peersToSkip.Load(sourcePeerID.String()); !ok {
        t.Fatalf("SourcePeerId not added to peersToSkip")
    }
}

// TestFetcherRetryOnTimeout tests that fetch retries after searchTimeout has passed
func TestFetcherRetryOnTimeout(t *testing.T) {
    requester := newMockRequester()
    addr := make([]byte, 32)

    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()

    fetcher := NewFetcher(ctx, addr, requester.doRequest, true)
    // set searchTimeout to a low value so the test is quicker
    fetcher.searchTimeout = 250 * time.Millisecond

    peersToSkip := &sync.Map{}

    // start the fetcher
    go fetcher.run(peersToSkip)

    // call the fetch function with an active context
    fetcher.Request(0)

    // after 100ms the first request should be initiated
    time.Sleep(100 * time.Millisecond)

    select {
    case <-requester.requestC:
    default:
        t.Fatalf("fetch did not initiate request")
    }

    // after another 100ms no new request should be initiated, because the search timeout is 250ms
    time.Sleep(100 * time.Millisecond)

    select {
    case <-requester.requestC:
        t.Fatalf("unexpected request from fetcher")
    default:
    }

    // after another 300ms the search timeout is over, there should be a new request
    time.Sleep(300 * time.Millisecond)

    select {
    case <-requester.requestC:
    default:
        t.Fatalf("fetch did not retry request")
    }
}

// TestFetcherFactory creates a FetcherFactory and checks if the factory really creates and starts
// a Fetcher when its New method is called. We test the fetching functionality just by checking if
// a request is initiated when Request is called on the returned fetcher.
func TestFetcherFactory(t *testing.T) {
    requester := newMockRequester(100 * time.Millisecond)
    addr := make([]byte, 32)
    fetcherFactory := NewFetcherFactory(requester.doRequest, false)

    peersToSkip := &sync.Map{}

    fetcher := fetcherFactory.New(context.Background(), addr, peersToSkip)

    fetcher.Request(0)

    // check if the created fetcher really starts and initiates a request
    select {
    case <-requester.requestC:
    case <-time.After(200 * time.Millisecond):
        t.Fatalf("fetch timeout")
    }
}

func TestFetcherRequestQuitRetriesRequest(t *testing.T) {
    requester := newMockRequester()
    addr := make([]byte, 32)

    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()

    fetcher := NewFetcher(ctx, addr, requester.doRequest, true)

    // make sure the searchTimeout is long, so we can be sure the request is not
    // retried because of a timeout
    fetcher.searchTimeout = 10 * time.Second

    peersToSkip := &sync.Map{}

    go fetcher.run(peersToSkip)

    fetcher.Request(0)

    select {
    case <-requester.requestC:
    case <-time.After(200 * time.Millisecond):
        t.Fatalf("request is not initiated")
    }

    close(requester.quitC)

    select {
    case <-requester.requestC:
    case <-time.After(200 * time.Millisecond):
        t.Fatalf("request is not initiated after failed request")
    }
}

// TestRequestSkipPeer checks that the SkipPeer function skips the provided peer
// and does not skip an unknown one.
func TestRequestSkipPeer(t *testing.T) {
    addr := make([]byte, 32)
    peers := []enode.ID{
        enode.HexID("3431c3939e1ee2a6345e976a8234f9870152d64879f30bc272a074f6859e75e8"),
        enode.HexID("99d8594b52298567d2ca3f4c441a5ba0140ee9245e26460d01102a52773c73b9"),
    }

    peersToSkip := new(sync.Map)
    peersToSkip.Store(peers[0].String(), time.Now())
    r := NewRequest(addr, false, peersToSkip)

    if !r.SkipPeer(peers[0].String()) {
        t.Errorf("peer not skipped")
    }

    if r.SkipPeer(peers[1].String()) {
        t.Errorf("peer skipped")
    }
}

// TestRequestSkipPeerExpired checks that a peer to skip is no longer skipped
// after RequestTimeout has passed.
func TestRequestSkipPeerExpired(t *testing.T) {
    addr := make([]byte, 32)
    peer := enode.HexID("3431c3939e1ee2a6345e976a8234f9870152d64879f30bc272a074f6859e75e8")

    // set RequestTimeout to a low value and reset it after the test
    defer func(t time.Duration) { RequestTimeout = t }(RequestTimeout)
    RequestTimeout = 250 * time.Millisecond

    peersToSkip := new(sync.Map)
    peersToSkip.Store(peer.String(), time.Now())
    r := NewRequest(addr, false, peersToSkip)

    if !r.SkipPeer(peer.String()) {
        t.Errorf("peer not skipped")
    }

    time.Sleep(500 * time.Millisecond)

    if r.SkipPeer(peer.String()) {
        t.Errorf("peer skipped")
    }
}

// TestRequestSkipPeerPermanent checks that a peer to skip is still skipped
// after RequestTimeout has passed, if it is set for permanent skipping,
// i.e. the value stored in the peersToSkip map is not a time.Time.
func TestRequestSkipPeerPermanent(t *testing.T) {
    addr := make([]byte, 32)
    peer := enode.HexID("3431c3939e1ee2a6345e976a8234f9870152d64879f30bc272a074f6859e75e8")

    // set RequestTimeout to a low value and reset it after the test
    defer func(t time.Duration) { RequestTimeout = t }(RequestTimeout)
    RequestTimeout = 250 * time.Millisecond

    peersToSkip := new(sync.Map)
    peersToSkip.Store(peer.String(), true)
    r := NewRequest(addr, false, peersToSkip)

    if !r.SkipPeer(peer.String()) {
        t.Errorf("peer not skipped")
    }

    time.Sleep(500 * time.Millisecond)

    if !r.SkipPeer(peer.String()) {
        t.Errorf("peer not skipped")
    }
}

func TestFetcherMaxHopCount(t *testing.T) {
    requester := newMockRequester()
    addr := make([]byte, 32)

    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()

    fetcher := NewFetcher(ctx, addr, requester.doRequest, true)

    peersToSkip := &sync.Map{}

    go fetcher.run(peersToSkip)

    fetcher.Request(maxHopCount)

    // if hopCount is already at max no request should be initiated
    select {
    case <-requester.requestC:
        t.Fatalf("fetcher initiated request")
    case <-time.After(200 * time.Millisecond):
    }
}
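The three SkipPeer tests above pin down the skip-map semantics: a time.Time value expires after RequestTimeout, while any other value skips permanently. A condensed, runnable restatement of that rule (a standalone sketch, not part of the package):

package main

import (
    "fmt"
    "sync"
    "time"
)

// skip mirrors Request.SkipPeer: a time.Time entry expires after
// timeout and is deleted; any non-time.Time entry skips forever.
func skip(m *sync.Map, id string, timeout time.Duration) bool {
    val, ok := m.Load(id)
    if !ok {
        return false
    }
    if t, ok := val.(time.Time); ok && time.Now().After(t.Add(timeout)) {
        m.Delete(id) // deadline expired, stop skipping
        return false
    }
    return true
}

func main() {
    m := new(sync.Map)
    m.Store("temporary", time.Now())
    m.Store("permanent", true)
    time.Sleep(20 * time.Millisecond)
    fmt.Println(skip(m, "temporary", 10*time.Millisecond)) // false: entry expired
    fmt.Println(skip(m, "permanent", 10*time.Millisecond)) // true: no expiry
    fmt.Println(skip(m, "unknown", 10*time.Millisecond))   // false: never stored
}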
@@ -56,6 +56,7 @@ var (

 	bucketKeyStore     = simulation.BucketKey("store")
 	bucketKeyFileStore = simulation.BucketKey("filestore")
+	bucketKeyNetStore  = simulation.BucketKey("netstore")
 	bucketKeyDelivery  = simulation.BucketKey("delivery")
 	bucketKeyRegistry  = simulation.BucketKey("registry")

@@ -80,7 +81,7 @@ func newNetStoreAndDelivery(ctx *adapters.ServiceContext, bucket *sync.Map) (*ne
 		return nil, nil, nil, nil, err
 	}

-	netStore.RemoteGet = delivery.RequestFromPeers
+	netStore.NewNetFetcherFunc = network.NewFetcherFactory(delivery.RequestFromPeers, true).New

 	return addr, netStore, delivery, cleanup, nil
 }
@@ -92,13 +93,13 @@ func newNetStoreAndDeliveryWithBzzAddr(ctx *adapters.ServiceContext, bucket *syn
 		return nil, nil, nil, err
 	}

-	netStore.RemoteGet = delivery.RequestFromPeers
+	netStore.NewNetFetcherFunc = network.NewFetcherFactory(delivery.RequestFromPeers, true).New

 	return netStore, delivery, cleanup, nil
 }

 // newNetStoreAndDeliveryWithRequestFunc is a constructor for NetStore and Delivery, used in Simulations, accepting any NetStore.RequestFunc
-func newNetStoreAndDeliveryWithRequestFunc(ctx *adapters.ServiceContext, bucket *sync.Map, rf storage.RemoteGetFunc) (*network.BzzAddr, *storage.NetStore, *Delivery, func(), error) {
+func newNetStoreAndDeliveryWithRequestFunc(ctx *adapters.ServiceContext, bucket *sync.Map, rf network.RequestFunc) (*network.BzzAddr, *storage.NetStore, *Delivery, func(), error) {
 	addr := network.NewAddr(ctx.Config.Node())

 	netStore, delivery, cleanup, err := netStoreAndDeliveryWithAddr(ctx, bucket, addr)
@@ -106,7 +107,7 @@ func newNetStoreAndDeliveryWithRequestFunc(ctx *adapters.ServiceContext, bucket
 		return nil, nil, nil, nil, err
 	}

-	netStore.RemoteGet = rf
+	netStore.NewNetFetcherFunc = network.NewFetcherFactory(rf, true).New

 	return addr, netStore, delivery, cleanup, nil
 }
@@ -119,9 +120,14 @@ func netStoreAndDeliveryWithAddr(ctx *adapters.ServiceContext, bucket *sync.Map,
 		return nil, nil, nil, err
 	}

-	netStore := storage.NewNetStore(localStore, enode.ID{})
-	lnetStore := storage.NewLNetStore(netStore)
-	fileStore := storage.NewFileStore(lnetStore, storage.NewFileStoreParams(), chunk.NewTags())
+	netStore, err := storage.NewNetStore(localStore, nil)
+	if err != nil {
+		localStore.Close()
+		localStoreCleanup()
+		return nil, nil, nil, err
+	}
+
+	fileStore := storage.NewFileStore(netStore, storage.NewFileStoreParams(), chunk.NewTags())

 	kad := network.NewKademlia(addr.Over(), network.NewKadParams())
 	delivery := NewDelivery(kad, netStore)
@@ -161,11 +167,15 @@ func newStreamerTester(registryOptions *RegistryOptions) (*p2ptest.ProtocolTeste
 		return nil, nil, nil, nil, err
 	}

-	netStore := storage.NewNetStore(localStore, enode.ID{})
+	netStore, err := storage.NewNetStore(localStore, nil)
+	if err != nil {
+		localStore.Close()
+		removeDataDir()
+		return nil, nil, nil, nil, err
+	}

 	delivery := NewDelivery(to, netStore)
-	netStore.RemoteGet = delivery.RequestFromPeers
+	netStore.NewNetFetcherFunc = network.NewFetcherFactory(delivery.RequestFromPeers, true).New
 	intervalsStore := state.NewInmemoryStore()
 	streamer := NewRegistry(addr.ID(), delivery, netStore, intervalsStore, registryOptions, nil)
@@ -27,9 +27,9 @@ import (
 	"github.com/ethersphere/swarm/chunk"
 	"github.com/ethersphere/swarm/log"
 	"github.com/ethersphere/swarm/network"
-	"github.com/ethersphere/swarm/network/timeouts"
 	"github.com/ethersphere/swarm/spancontext"
 	"github.com/ethersphere/swarm/storage"
+	"github.com/ethersphere/swarm/tracing"
 	opentracing "github.com/opentracing/opentracing-go"
 	olog "github.com/opentracing/opentracing-go/log"
 )
@@ -39,6 +39,9 @@ var (
 	handleRetrieveRequestMsgCount = metrics.NewRegisteredCounter("network.stream.handle_retrieve_request_msg.count", nil)
 	retrieveChunkFail             = metrics.NewRegisteredCounter("network.stream.retrieve_chunks_fail.count", nil)

+	requestFromPeersCount     = metrics.NewRegisteredCounter("network.stream.request_from_peers.count", nil)
+	requestFromPeersEachCount = metrics.NewRegisteredCounter("network.stream.request_from_peers_each.count", nil)
+
 	lastReceivedChunksMsg = metrics.GetOrRegisterGauge("network.stream.received_chunks", nil)
 )
@@ -59,42 +62,52 @@ func NewDelivery(kad *network.Kademlia, netStore *storage.NetStore) *Delivery {

 // RetrieveRequestMsg is the protocol msg for chunk retrieve requests
 type RetrieveRequestMsg struct {
-	Addr storage.Address
+	Addr      storage.Address
+	SkipCheck bool
+	HopCount  uint8
 }

 func (d *Delivery) handleRetrieveRequestMsg(ctx context.Context, sp *Peer, req *RetrieveRequestMsg) error {
-	log.Trace("handle retrieve request", "peer", sp.ID(), "hash", req.Addr)
+	log.Trace("received request", "peer", sp.ID(), "hash", req.Addr)
 	handleRetrieveRequestMsgCount.Inc(1)

-	ctx, osp := spancontext.StartSpan(
+	var osp opentracing.Span
+	ctx, osp = spancontext.StartSpan(
 		ctx,
-		"handle.retrieve.request")
+		"stream.handle.retrieve")

 	osp.LogFields(olog.String("ref", req.Addr.String()))

-	defer osp.Finish()
+	var cancel func()
+	// TODO: do something with this hardcoded timeout, maybe use TTL in the future
+	ctx = context.WithValue(ctx, "peer", sp.ID().String())
+	ctx = context.WithValue(ctx, "hopcount", req.HopCount)
+	ctx, cancel = context.WithTimeout(ctx, network.RequestTimeout)

-	ctx, cancel := context.WithTimeout(ctx, timeouts.FetcherGlobalTimeout)
-	defer cancel()
+	go func() {
+		select {
+		case <-ctx.Done():
+		case <-d.quit:
+		}
+		cancel()
+	}()

-	r := &storage.Request{
-		Addr:   req.Addr,
-		Origin: sp.ID(),
-	}
-	chunk, err := d.netStore.Get(ctx, chunk.ModeGetRequest, r)
-	if err != nil {
-		retrieveChunkFail.Inc(1)
-		log.Debug("ChunkStore.Get can not retrieve chunk", "peer", sp.ID().String(), "addr", req.Addr, "err", err)
-		return nil
-	}
+	go func() {
+		defer osp.Finish()
+		ch, err := d.netStore.Get(ctx, chunk.ModeGetRequest, req.Addr)
+		if err != nil {
+			retrieveChunkFail.Inc(1)
+			log.Debug("ChunkStore.Get can not retrieve chunk", "peer", sp.ID().String(), "addr", req.Addr, "hopcount", req.HopCount, "err", err)
+			return
+		}
+		syncing := false

-	log.Trace("retrieve request, delivery", "ref", req.Addr, "peer", sp.ID())
-	syncing := false
-	err = sp.Deliver(ctx, chunk, 0, syncing)
-	if err != nil {
-		log.Error("sp.Deliver errored", "err", err)
-	}
-	osp.LogFields(olog.Bool("delivered", true))
+		err = sp.Deliver(ctx, ch, Top, syncing)
+		if err != nil {
+			log.Warn("ERROR in handleRetrieveRequestMsg", "err", err)
+		}
+		osp.LogFields(olog.Bool("delivered", true))
+	}()

 	return nil
 }
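The handler added back above ties the request context's lifetime to whichever comes first, context expiry or delivery shutdown, by spawning a goroutine that waits on both and then cancels. A small self-contained sketch of that pattern (the names quit and the timeout value are illustrative, not Swarm API):

package main

import (
    "context"
    "fmt"
    "time"
)

// cancelOnEither cancels the returned context when either the timeout
// elapses or quit is closed, whichever happens first.
func cancelOnEither(parent context.Context, timeout time.Duration, quit <-chan struct{}) context.Context {
    ctx, cancel := context.WithTimeout(parent, timeout)
    go func() {
        select {
        case <-ctx.Done(): // timeout or parent cancellation
        case <-quit: // component shutting down
        }
        cancel() // releases the timer in both cases
    }()
    return ctx
}

func main() {
    quit := make(chan struct{})
    ctx := cancelOnEither(context.Background(), time.Hour, quit)
    close(quit) // simulate shutdown
    <-ctx.Done()
    fmt.Println(ctx.Err()) // context canceled
}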
@@ -176,166 +189,57 @@ func (d *Delivery) Close() {
 	close(d.quit)
 }

-// getOriginPo returns the originPo if the incoming Request has an Origin
-// if our node is the first node that requests this chunk, then we don't have an Origin,
-// and return -1
-// this is used only for tracing, and can probably be refactored so that we don't have to
-// iterate over Kademlia
-func (d *Delivery) getOriginPo(req *storage.Request) int {
-	originPo := -1
-
-	d.kad.EachConn(req.Addr[:], 255, func(p *network.Peer, po int) bool {
-		id := p.ID()
-
-		// get po between chunk and origin
-		if req.Origin.String() == id.String() {
-			originPo = po
-			return false
-		}
-
-		return true
-	})
-
-	return originPo
-}
-
-// FindPeer is returning the closest peer from Kademlia that a chunk
-// request hasn't already been sent to
-func (d *Delivery) FindPeer(ctx context.Context, req *storage.Request) (*Peer, error) {
+// RequestFromPeers sends a chunk retrieve request to a peer
+// The most eligible peer that hasn't already been sent to is chosen
+// TODO: define "eligible"
+func (d *Delivery) RequestFromPeers(ctx context.Context, req *network.Request) (*enode.ID, chan struct{}, error) {
+	requestFromPeersCount.Inc(1)
 	var sp *Peer
-	var err error
+	spID := req.Source

-	osp, _ := ctx.Value("remote.fetch").(opentracing.Span)
-
-	// originPo - proximity of the node that made the request; -1 if the request originator is our node;
-	// myPo - this node's proximity with the requested chunk
-	// selectedPeerPo - kademlia suggested node's proximity with the requested chunk (computed further below)
-	originPo := d.getOriginPo(req)
-	myPo := chunk.Proximity(req.Addr, d.kad.BaseAddr())
-	selectedPeerPo := -1
-
-	depth := d.kad.NeighbourhoodDepth()
-
-	if osp != nil {
-		osp.LogFields(olog.Int("originPo", originPo))
-		osp.LogFields(olog.Int("depth", depth))
-		osp.LogFields(olog.Int("myPo", myPo))
-	}
-
-	// do not forward requests if origin proximity is bigger than our node's proximity
-	// this means that origin is closer to the chunk
-	if originPo > myPo {
-		return nil, errors.New("not forwarding request, origin node is closer to chunk than this node")
-	}
-
-	d.kad.EachConn(req.Addr[:], 255, func(p *network.Peer, po int) bool {
-		id := p.ID()
-
-		// skip light nodes
-		if p.LightNode {
-			return true
-		}
-
-		// do not send request back to peer who asked us. maybe merge with SkipPeer at some point
-		if req.Origin.String() == id.String() {
-			return true
-		}
-
-		// skip peers that we have already tried
-		if req.SkipPeer(id.String()) {
-			log.Trace("findpeer skip peer", "peer", id, "ref", req.Addr.String())
-			return true
-		}
-
-		if myPo < depth { // chunk is NOT within the neighbourhood
-			if po <= myPo { // always choose a peer strictly closer to chunk than us
-				log.Trace("findpeer1a", "originpo", originPo, "mypo", myPo, "po", po, "depth", depth, "peer", id, "ref", req.Addr.String())
-				return false
-			} else {
-				log.Trace("findpeer1b", "originpo", originPo, "mypo", myPo, "po", po, "depth", depth, "peer", id, "ref", req.Addr.String())
-			}
-		} else { // chunk IS WITHIN neighbourhood
-			if po < depth { // do not select peer outside the neighbourhood. But allows peers further from the chunk than us
-				log.Trace("findpeer2a", "originpo", originPo, "mypo", myPo, "po", po, "depth", depth, "peer", id, "ref", req.Addr.String())
-				return false
-			} else if po <= originPo { // avoid loop in neighbourhood, so not forward when a request comes from the neighbourhood
-				log.Trace("findpeer2b", "originpo", originPo, "mypo", myPo, "po", po, "depth", depth, "peer", id, "ref", req.Addr.String())
-				return false
-			} else {
-				log.Trace("findpeer2c", "originpo", originPo, "mypo", myPo, "po", po, "depth", depth, "peer", id, "ref", req.Addr.String())
-			}
-		}
-
-		// if selected peer is not in the depth (2nd condition; if depth <= po, then peer is in nearest neighbourhood)
-		// and they have a lower po than ours, return error
-		if po < myPo && depth > po {
-			log.Trace("findpeer4 skip peer because origin was closer", "originpo", originPo, "po", po, "depth", depth, "peer", id, "ref", req.Addr.String())
-			err = fmt.Errorf("not asking peers further away from origin; ref=%s originpo=%v po=%v depth=%v myPo=%v", req.Addr.String(), originPo, po, depth, myPo)
-			return false
-		}
-
-		// if chunk falls in our nearest neighbourhood (1st condition), but suggested peer is not in
-		// the nearest neighbourhood (2nd condition), don't forward the request to suggested peer
-		if depth <= myPo && depth > po {
-			log.Trace("findpeer5 skip peer because depth", "originpo", originPo, "po", po, "depth", depth, "peer", id, "ref", req.Addr.String())
-			err = fmt.Errorf("not going outside of depth; ref=%s originpo=%v po=%v depth=%v myPo=%v", req.Addr.String(), originPo, po, depth, myPo)
-			return false
-		}
-
-		sp = d.getPeer(id)
-
-		// sp could be nil, if we encountered a peer that is not registered for delivery, i.e. doesn't support the `stream` protocol
-		// if sp is not nil, then we have selected the next peer and we stop iterating
-		// if sp is nil, we continue iterating
-		if sp != nil {
-			selectedPeerPo = po
-
-			return false
-		}
-
-		// continue iterating
-		return true
-	})
-
-	if osp != nil {
-		osp.LogFields(olog.Int("selectedPeerPo", selectedPeerPo))
-	}
-
-	if err != nil {
-		return nil, err
-	}
-
-	if sp == nil {
-		return nil, errors.New("no peer found")
-	}
-
-	return sp, nil
-}
-
-// RequestFromPeers sends a chunk retrieve request to the next found peer
-func (d *Delivery) RequestFromPeers(ctx context.Context, req *storage.Request, localID enode.ID) (*enode.ID, error) {
-	metrics.GetOrRegisterCounter("delivery.requestfrompeers", nil).Inc(1)
-
-	sp, err := d.FindPeer(ctx, req)
-	if err != nil {
-		log.Trace(err.Error())
-		return nil, err
-	}
-
-	r := &RetrieveRequestMsg{
-		Addr: req.Addr,
-	}
-	log.Trace("sending retrieve request", "ref", r.Addr, "peer", sp.ID().String(), "origin", localID)
-	err = sp.Send(ctx, r)
+	if spID != nil {
+		sp = d.getPeer(*spID)
+		if sp == nil {
+			return nil, nil, fmt.Errorf("source peer %v not found", spID.String())
+		}
+	} else {
+		d.kad.EachConn(req.Addr[:], 255, func(p *network.Peer, po int) bool {
+			id := p.ID()
+			if p.LightNode {
+				// skip light nodes
+				return true
+			}
+			if req.SkipPeer(id.String()) {
+				log.Trace("Delivery.RequestFromPeers: skip peer", "peer id", id)
+				return true
+			}
+			sp = d.getPeer(id)
+			// sp is nil, when we encounter a peer that is not registered for delivery, i.e. doesn't support the `stream` protocol
+			if sp == nil {
+				return true
+			}
+			spID = &id
+			return false
+		})
+		if sp == nil {
+			return nil, nil, errors.New("no peer found")
+		}
+	}

+	// setting this value in the context creates a new span that can persist across the sendpriority queue and the network roundtrip
+	// this span will finish only when delivery is handled (or times out)
+	ctx = context.WithValue(ctx, tracing.StoreLabelId, "stream.send.request")
+	ctx = context.WithValue(ctx, tracing.StoreLabelMeta, fmt.Sprintf("%v.%v", sp.ID(), req.Addr))
+	log.Trace("request.from.peers", "peer", sp.ID(), "ref", req.Addr)
+	err := sp.SendPriority(ctx, &RetrieveRequestMsg{
+		Addr:      req.Addr,
+		SkipCheck: req.SkipCheck,
+		HopCount:  req.HopCount,
+	}, Top)
 	if err != nil {
-		log.Error(err.Error())
-		return nil, err
+		return nil, nil, err
 	}
 	requestFromPeersEachCount.Inc(1)

-	spID := sp.ID()
-	return &spID, nil
+	return spID, sp.quit, nil
 }
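RequestFromPeers, as restored above, is exactly the shape the fetcher consumes as its RequestFunc: the chosen peer's ID, a quit channel that closes when that peer goes away, and an error when no peer is eligible. A toy illustration of how a caller consumes that triple (a sketch with invented stub names, not Swarm API):

package main

import (
    "errors"
    "fmt"
)

// requestFunc mirrors the shape of network.RequestFunc: it either
// forwards a request and hands back (peer, quit, nil), or it fails.
type requestFunc func(addr string) (peer string, quit <-chan struct{}, err error)

func main() {
    quitC := make(chan struct{})
    var rf requestFunc = func(addr string) (string, <-chan struct{}, error) {
        if addr == "" {
            return "", nil, errors.New("no peer found")
        }
        return "peer-1", quitC, nil
    }

    peer, quit, err := rf("chunk-addr")
    if err != nil {
        panic(err)
    }
    fmt.Println("requested from", peer)

    // the fetcher watches quit: a close means the peer disconnected
    // and the request should be retried with another peer
    go close(quitC)
    <-quit
    fmt.Println("peer gone, retry with another peer")
}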
@@ -19,17 +19,26 @@ package stream
 import (
 	"bytes"
 	"context"
+	"errors"
+	"fmt"
 	"sync"
 	"testing"
 	"time"

+	"github.com/ethereum/go-ethereum/node"
 	"github.com/ethereum/go-ethereum/p2p"
 	"github.com/ethereum/go-ethereum/p2p/enode"
+	"github.com/ethereum/go-ethereum/p2p/simulations/adapters"
 	p2ptest "github.com/ethereum/go-ethereum/p2p/testing"
 	"github.com/ethersphere/swarm/chunk"
 	"github.com/ethersphere/swarm/log"
 	"github.com/ethersphere/swarm/network"
 	pq "github.com/ethersphere/swarm/network/priorityqueue"
+	"github.com/ethersphere/swarm/network/simulation"
 	"github.com/ethersphere/swarm/p2p/protocols"
+	"github.com/ethersphere/swarm/state"
 	"github.com/ethersphere/swarm/storage"
+	"github.com/ethersphere/swarm/testutil"
 )

 //Test requesting a chunk from a peer then issuing an "empty" OfferedHashesMsg (no hashes available yet)
@@ -151,12 +160,18 @@ func TestRequestFromPeers(t *testing.T) {
 		streamer: r,
 	}
 	r.setPeer(sp)
-	req := storage.NewRequest(storage.Address(hash0[:]))
-	id, err := delivery.FindPeer(context.TODO(), req)
+	req := network.NewRequest(
+		storage.Address(hash0[:]),
+		true,
+		&sync.Map{},
+	)
+	ctx := context.Background()
+	id, _, err := delivery.RequestFromPeers(ctx, req)

 	if err != nil {
 		t.Fatal(err)
 	}
-	if id.ID() != dummyPeerID {
+	if *id != dummyPeerID {
 		t.Fatalf("Expected an id, got %v", id)
 	}
 }
@@ -186,10 +201,15 @@ func TestRequestFromPeersWithLightNode(t *testing.T) {
 	}
 	r.setPeer(sp)

-	req := storage.NewRequest(storage.Address(hash0[:]))
+	req := network.NewRequest(
+		storage.Address(hash0[:]),
+		true,
+		&sync.Map{},
+	)
+
+	ctx := context.Background()
 	// making a request which should return with "no peer found"
-	_, err := delivery.FindPeer(context.TODO(), req)
+	_, _, err := delivery.RequestFromPeers(ctx, req)

 	expectedError := "no peer found"
 	if err.Error() != expectedError {
@@ -280,3 +300,293 @@ func TestStreamerDownstreamChunkDeliveryMsgExchange(t *testing.T) {
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func TestDeliveryFromNodes(t *testing.T) {
|
||||
testDeliveryFromNodes(t, 2, dataChunkCount, true)
|
||||
testDeliveryFromNodes(t, 2, dataChunkCount, false)
|
||||
testDeliveryFromNodes(t, 4, dataChunkCount, true)
|
||||
testDeliveryFromNodes(t, 4, dataChunkCount, false)
|
||||
|
||||
if testutil.RaceEnabled {
|
||||
// Travis cannot handle more nodes with -race; would time out.
|
||||
return
|
||||
}
|
||||
|
||||
testDeliveryFromNodes(t, 8, dataChunkCount, true)
|
||||
testDeliveryFromNodes(t, 8, dataChunkCount, false)
|
||||
testDeliveryFromNodes(t, 16, dataChunkCount, true)
|
||||
testDeliveryFromNodes(t, 16, dataChunkCount, false)
|
||||
}
|
||||
|
||||
func testDeliveryFromNodes(t *testing.T, nodes, chunkCount int, skipCheck bool) {
|
||||
t.Helper()
|
||||
t.Run(fmt.Sprintf("testDeliveryFromNodes_%d_%d_skipCheck_%t", nodes, chunkCount, skipCheck), func(t *testing.T) {
|
||||
sim := simulation.New(map[string]simulation.ServiceFunc{
|
||||
"streamer": func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) {
|
||||
addr, netStore, delivery, clean, err := newNetStoreAndDelivery(ctx, bucket)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
r := NewRegistry(addr.ID(), delivery, netStore, state.NewInmemoryStore(), &RegistryOptions{
|
||||
SkipCheck: skipCheck,
|
||||
Syncing: SyncingDisabled,
|
||||
}, nil)
|
||||
bucket.Store(bucketKeyRegistry, r)
|
||||
|
||||
cleanup = func() {
|
||||
r.Close()
|
||||
clean()
|
||||
}
|
||||
|
||||
return r, cleanup, nil
|
||||
},
|
||||
})
|
||||
defer sim.Close()
|
||||
|
||||
log.Info("Adding nodes to simulation")
|
||||
_, err := sim.AddNodesAndConnectChain(nodes)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
log.Info("Starting simulation")
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) (err error) {
|
||||
nodeIDs := sim.UpNodeIDs()
|
||||
//determine the pivot node to be the first node of the simulation
|
||||
pivot := nodeIDs[0]
|
||||
|
||||
//distribute chunks of a random file into Stores of nodes 1 to nodes
|
||||
//we will do this by creating a file store with an underlying round-robin store:
|
||||
//the file store will create a hash for the uploaded file, but every chunk will be
|
||||
//distributed to different nodes via round-robin scheduling
|
||||
log.Debug("Writing file to round-robin file store")
|
||||
//to do this, we create an array for chunkstores (length minus one, the pivot node)
|
||||
stores := make([]storage.ChunkStore, len(nodeIDs)-1)
|
||||
//we then need to get all stores from the sim....
|
||||
lStores := sim.NodesItems(bucketKeyStore)
|
||||
i := 0
|
||||
//...iterate the buckets...
|
||||
for id, bucketVal := range lStores {
|
||||
//...and remove the one which is the pivot node
|
||||
if id == pivot {
|
||||
continue
|
||||
}
|
||||
//the other ones are added to the array...
|
||||
stores[i] = bucketVal.(storage.ChunkStore)
|
||||
i++
|
||||
}
|
||||
//...which then gets passed to the round-robin file store
|
||||
roundRobinFileStore := storage.NewFileStore(newRoundRobinStore(stores...), storage.NewFileStoreParams(), chunk.NewTags())
|
||||
//now we can actually upload a (random) file to the round-robin store
|
||||
size := chunkCount * chunkSize
|
||||
log.Debug("Storing data to file store")
|
||||
fileHash, wait, err := roundRobinFileStore.Store(ctx, testutil.RandomReader(1, size), int64(size), false)
|
||||
// wait until all chunks stored
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = wait(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
//get the pivot node's filestore
|
||||
item, ok := sim.NodeItem(pivot, bucketKeyFileStore)
|
||||
if !ok {
|
||||
return fmt.Errorf("No filestore")
|
||||
}
|
||||
pivotFileStore := item.(*storage.FileStore)
|
||||
log.Debug("Starting retrieval routine")
|
||||
retErrC := make(chan error)
|
||||
go func() {
|
||||
// start the retrieval on the pivot node - this will spawn retrieve requests for missing chunks
|
||||
// we must wait for the peer connections to have started before requesting
|
||||
n, err := readAll(pivotFileStore, fileHash)
|
||||
log.Info(fmt.Sprintf("retrieved %v", fileHash), "read", n, "err", err)
|
||||
retErrC <- err
|
||||
}()
|
||||
|
||||
disconnected := watchDisconnections(ctx, sim)
|
||||
defer func() {
|
||||
if err != nil && disconnected.bool() {
|
||||
err = errors.New("disconnect events received")
|
||||
}
|
||||
}()
|
||||
|
||||
//finally check that the pivot node gets all chunks via the root hash
|
||||
log.Debug("Check retrieval")
|
||||
success := true
|
||||
var total int64
|
||||
total, err = readAll(pivotFileStore, fileHash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
log.Info(fmt.Sprintf("check if %08x is available locally: number of bytes read %v/%v (error: %v)", fileHash, total, size, err))
|
||||
if err != nil || total != int64(size) {
|
||||
success = false
|
||||
}
|
||||
|
||||
if !success {
|
||||
return fmt.Errorf("Test failed, chunks not available on all nodes")
|
||||
}
|
||||
if err := <-retErrC; err != nil {
|
||||
return fmt.Errorf("requesting chunks: %v", err)
|
||||
}
|
||||
log.Debug("Test terminated successfully")
|
||||
return nil
|
||||
})
|
||||
if result.Error != nil {
|
||||
t.Fatal(result.Error)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func BenchmarkDeliveryFromNodesWithoutCheck(b *testing.B) {
|
||||
for chunks := 32; chunks <= 128; chunks *= 2 {
|
||||
for i := 2; i < 32; i *= 2 {
|
||||
b.Run(
|
||||
fmt.Sprintf("nodes=%v,chunks=%v", i, chunks),
|
||||
func(b *testing.B) {
|
||||
benchmarkDeliveryFromNodes(b, i, chunks, true)
|
||||
},
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkDeliveryFromNodesWithCheck(b *testing.B) {
|
||||
for chunks := 32; chunks <= 128; chunks *= 2 {
|
||||
for i := 2; i < 32; i *= 2 {
|
||||
b.Run(
|
||||
fmt.Sprintf("nodes=%v,chunks=%v", i, chunks),
|
||||
func(b *testing.B) {
|
||||
benchmarkDeliveryFromNodes(b, i, chunks, false)
|
||||
},
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func benchmarkDeliveryFromNodes(b *testing.B, nodes, chunkCount int, skipCheck bool) {
	sim := simulation.New(map[string]simulation.ServiceFunc{
		"streamer": func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) {
			addr, netStore, delivery, clean, err := newNetStoreAndDelivery(ctx, bucket)
			if err != nil {
				return nil, nil, err
			}

			r := NewRegistry(addr.ID(), delivery, netStore, state.NewInmemoryStore(), &RegistryOptions{
				SkipCheck:       skipCheck,
				Syncing:         SyncingDisabled,
				SyncUpdateDelay: 0,
			}, nil)
			bucket.Store(bucketKeyRegistry, r)

			cleanup = func() {
				r.Close()
				clean()
			}

			return r, cleanup, nil
		},
	})
	defer sim.Close()

	log.Info("Initializing test config")
	_, err := sim.AddNodesAndConnectChain(nodes)
	if err != nil {
		b.Fatal(err)
	}

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) (err error) {
		nodeIDs := sim.UpNodeIDs()
		node := nodeIDs[len(nodeIDs)-1]

		item, ok := sim.NodeItem(node, bucketKeyFileStore)
		if !ok {
			return errors.New("no filestore")
		}
		remoteFileStore := item.(*storage.FileStore)

		pivotNode := nodeIDs[0]
		item, ok = sim.NodeItem(pivotNode, bucketKeyNetStore)
		if !ok {
			return errors.New("no netstore")
		}
		netStore := item.(*storage.NetStore)

		if _, err := sim.WaitTillHealthy(ctx); err != nil {
			return err
		}

		disconnected := watchDisconnections(ctx, sim)
		defer func() {
			if err != nil && disconnected.bool() {
				err = errors.New("disconnect events received")
			}
		}()

		// benchmark loop
		b.ResetTimer()
		b.StopTimer()
	Loop:
		for i := 0; i < b.N; i++ {
			// upload chunkCount random chunks to the last node
			hashes := make([]storage.Address, chunkCount)
			for i := 0; i < chunkCount; i++ {
				// create actual-size real chunks
				ctx := context.TODO()
				hash, wait, err := remoteFileStore.Store(ctx, testutil.RandomReader(i, chunkSize), int64(chunkSize), false)
				if err != nil {
					return fmt.Errorf("store: %v", err)
				}
				// wait until all chunks are stored
				err = wait(ctx)
				if err != nil {
					return fmt.Errorf("wait store: %v", err)
				}
				// collect the hashes
				hashes[i] = hash
			}
			// now benchmark the actual retrieval:
			// netStore.Get is called for each hash in a goroutine and errors are collected
			b.StartTimer()
			errs := make(chan error)
			for _, hash := range hashes {
				go func(h storage.Address) {
					_, err := netStore.Get(ctx, chunk.ModeGetRequest, h)
					log.Warn("test check netstore get", "hash", h, "err", err)
					errs <- err
				}(hash)
			}
			// count and report retrieval errors;
			// if there are misses then the chunk timeout is too low for the distance and volume (?)
			var total, misses int
			for err := range errs {
				if err != nil {
					log.Warn(err.Error())
					misses++
				}
				total++
				if total == chunkCount {
					break
				}
			}
			b.StopTimer()

			if misses > 0 {
				err = fmt.Errorf("%v chunks not found out of %v", misses, total)
				break Loop
			}
		}
		return err
	})
	if result.Error != nil {
		b.Fatal(result.Error)
	}
}

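The retrieval loop above fans out one goroutine per hash and then drains exactly chunkCount results from the unbuffered errs channel; breaking out of the receive loop early would leave blocked senders behind. The same fan-out/fan-in pattern in isolation, as a self-contained sketch with placeholder work (not the project's API):

func collectErrors(work []func() error) (misses int) {
	errs := make(chan error)
	for _, w := range work {
		go func(w func() error) {
			errs <- w() // every goroutine sends exactly one result
		}(w)
	}
	// receive exactly len(work) results so no sender stays blocked forever
	for i := 0; i < len(work); i++ {
		if err := <-errs; err != nil {
			misses++
		}
	}
	return misses
}
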
@@ -30,7 +30,6 @@ import (
 	"github.com/ethereum/go-ethereum/p2p/enode"
 	"github.com/ethereum/go-ethereum/p2p/simulations/adapters"
 	"github.com/ethersphere/swarm/network/simulation"
-	"github.com/ethersphere/swarm/network/timeouts"
 	"github.com/ethersphere/swarm/state"
 	"github.com/ethersphere/swarm/storage"
 	"github.com/ethersphere/swarm/testutil"
@@ -76,7 +75,7 @@ func testIntervals(t *testing.T, live bool, history *Range, skipCheck bool) {
 		return newTestExternalClient(netStore), nil
 	})
 	r.RegisterServerFunc(externalStreamName, func(p *Peer, t string, live bool) (Server, error) {
-		return newTestExternalServer(t, externalStreamSessionAt, externalStreamMaxKeys), nil
+		return newTestExternalServer(t, externalStreamSessionAt, externalStreamMaxKeys, nil), nil
 	})

 	cleanup := func() {
@@ -299,42 +298,38 @@ func newTestExternalClient(netStore *storage.NetStore) *testExternalClient {
 	}
 }

-func (c *testExternalClient) NeedData(ctx context.Context, key []byte) (bool, func(context.Context) error) {
-	fi, loaded, ok := c.netStore.GetOrCreateFetcher(ctx, key, "syncer")
-	if !ok {
-		return loaded, nil
+func (c *testExternalClient) NeedData(ctx context.Context, hash []byte) func(context.Context) error {
+	wait := c.netStore.FetchFunc(ctx, storage.Address(hash))
+	if wait == nil {
+		return nil
 	}

 	select {
-	case c.hashes <- key:
+	case c.hashes <- hash:
 	case <-ctx.Done():
 		log.Warn("testExternalClient NeedData context", "err", ctx.Err())
-		return false, func(_ context.Context) error {
+		return func(_ context.Context) error {
 			return ctx.Err()
 		}
 	}
-
-	return loaded, func(ctx context.Context) error {
-		select {
-		case <-fi.Delivered:
-		case <-time.After(timeouts.SyncerClientWaitTimeout):
-			return fmt.Errorf("chunk not delivered through syncing after %dsec. ref=%s", timeouts.SyncerClientWaitTimeout, fmt.Sprintf("%x", key))
-		}
-		return nil
-	}
+	return wait
 }

 func (c *testExternalClient) Close() {}

 type testExternalServer struct {
 	t string
+	keyFunc func(key []byte, index uint64)
 	sessionAt uint64
 	maxKeys uint64
 }

-func newTestExternalServer(t string, sessionAt, maxKeys uint64) *testExternalServer {
+func newTestExternalServer(t string, sessionAt, maxKeys uint64, keyFunc func(key []byte, index uint64)) *testExternalServer {
+	if keyFunc == nil {
+		keyFunc = binary.BigEndian.PutUint64
+	}
 	return &testExternalServer{
 		t: t,
+		keyFunc: keyFunc,
 		sessionAt: sessionAt,
 		maxKeys: maxKeys,
 	}
@@ -350,7 +345,7 @@ func (s *testExternalServer) SetNextBatch(from uint64, to uint64) ([]byte, uint6
 	}
 	b := make([]byte, HashSize*(to-from+1))
 	for i := from; i <= to; i++ {
-		binary.BigEndian.PutUint64(b[(i-from)*HashSize:(i-from+1)*HashSize], i)
+		s.keyFunc(b[(i-from)*HashSize:(i-from+1)*HashSize], i)
 	}
 	return b, from, to, nil
 }
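With the new constructor parameter, tests can inject how offered keys are generated; passing nil keeps the previous big-endian index encoding, as the default branch above shows. A hypothetical use, purely for illustration (the little-endian encoder is made up, not taken from the codebase):

// hypothetical: encode batch indexes little-endian instead of the default big-endian
srv := newTestExternalServer("ext", externalStreamSessionAt, externalStreamMaxKeys,
	func(key []byte, index uint64) {
		binary.LittleEndian.PutUint64(key, index)
	})
_ = srv
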
@@ -225,14 +225,9 @@ func (p *Peer) handleOfferedHashesMsg(ctx context.Context, req *OfferedHashesMsg
 	for i := 0; i < lenHashes; i += HashSize {
 		hash := hashes[i : i+HashSize]

 		log.Trace("checking offered hash", "ref", fmt.Sprintf("%x", hash))

-		if _, wait := c.NeedData(ctx, hash); wait != nil {
+		if wait := c.NeedData(ctx, hash); wait != nil {
 			ctr++

 			// set the bit, so create a request
 			want.Set(i/HashSize, true)
 			log.Trace("need data", "ref", fmt.Sprintf("%x", hash), "request", true)

 			// measure how long it takes before we mark chunks for retrieval, and actually send the request
 			if !wantDelaySet {
@@ -63,8 +63,8 @@ const (

 // Tests in this file should not request chunks from peers.
 // This function will panic indicating that there is a problem if request has been made.
-func dummyRequestFromPeers(_ context.Context, req *storage.Request, _ enode.ID) (*enode.ID, error) {
-	panic(fmt.Sprintf("unexpected request: address %s", req.Addr.String()))
+func dummyRequestFromPeers(_ context.Context, req *network.Request) (*enode.ID, chan struct{}, error) {
+	panic(fmt.Sprintf("unexpected request: address %s, source %s", req.Addr.String(), req.Source.String()))
 }

 // This test is a syncing test for nodes.
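The new dummyRequestFromPeers signature lines up with the fetcher's request-function type, so the test package can assert compatibility at compile time. A sketch, assuming the network package exports a matching function type named RequestFunc (an assumption here):

// compile-time check that the dummy satisfies the assumed request-function type
var _ network.RequestFunc = dummyRequestFromPeers
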
@@ -543,7 +543,7 @@ func (c *client) NextInterval() (start, end uint64, err error) {

 // Client interface for incoming peer Streamer
 type Client interface {
-	NeedData(context.Context, []byte) (bool, func(context.Context) error)
+	NeedData(context.Context, []byte) func(context.Context) error
 	Close()
 }

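The slimmed-down contract: NeedData returns nil when the chunk is already available locally, otherwise a wait function that blocks until delivery. A minimal consumer sketch (c, ctx and hash are assumed to be in scope), mirroring how handleOfferedHashesMsg uses it:

if wait := c.NeedData(ctx, hash); wait != nil {
	// the chunk is missing locally; block until it is delivered or ctx expires
	if err := wait(ctx); err != nil {
		return fmt.Errorf("chunk %x not delivered: %v", hash, err)
	}
}
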
@@ -93,20 +93,20 @@ func newTestClient(t string) *testClient {
 	}
 }

-func (self *testClient) NeedData(ctx context.Context, hash []byte) (bool, func(context.Context) error) {
+func (self *testClient) NeedData(ctx context.Context, hash []byte) func(context.Context) error {
 	self.receivedHashes[string(hash)] = hash
 	if bytes.Equal(hash, hash0[:]) {
-		return false, func(context.Context) error {
+		return func(context.Context) error {
 			<-self.wait0
 			return nil
 		}
 	} else if bytes.Equal(hash, hash2[:]) {
-		return false, func(context.Context) error {
+		return func(context.Context) error {
 			<-self.wait2
 			return nil
 		}
 	}
-	return false, nil
+	return nil
 }

 func (self *testClient) Close() {}
@@ -25,7 +25,6 @@ import (
 	"github.com/ethereum/go-ethereum/metrics"
 	"github.com/ethersphere/swarm/chunk"
 	"github.com/ethersphere/swarm/log"
-	"github.com/ethersphere/swarm/network/timeouts"
 	"github.com/ethersphere/swarm/storage"
 )

@@ -74,7 +73,7 @@ func (s *SwarmSyncerServer) Close() {

 // GetData retrieves the actual chunk from netstore
 func (s *SwarmSyncerServer) GetData(ctx context.Context, key []byte) ([]byte, error) {
-	ch, err := s.netStore.Store.Get(ctx, chunk.ModeGetSync, storage.Address(key))
+	ch, err := s.netStore.Get(ctx, chunk.ModeGetSync, storage.Address(key))
 	if err != nil {
 		return nil, err
 	}
@@ -199,24 +198,9 @@ func RegisterSwarmSyncerClient(streamer *Registry, netStore *storage.NetStore) {
 	})
 }

-func (s *SwarmSyncerClient) NeedData(ctx context.Context, key []byte) (loaded bool, wait func(context.Context) error) {
-	start := time.Now()
-
-	fi, loaded, ok := s.netStore.GetOrCreateFetcher(ctx, key, "syncer")
-	if !ok {
-		return loaded, nil
-	}
-
-	return loaded, func(ctx context.Context) error {
-		select {
-		case <-fi.Delivered:
-			metrics.GetOrRegisterResettingTimer(fmt.Sprintf("fetcher.%s.syncer", fi.CreatedBy), nil).UpdateSince(start)
-		case <-time.After(timeouts.SyncerClientWaitTimeout):
-			metrics.GetOrRegisterCounter("fetcher.syncer.timeout", nil).Inc(1)
-			return fmt.Errorf("chunk not delivered through syncing after %dsec. ref=%s", timeouts.SyncerClientWaitTimeout, fmt.Sprintf("%x", key))
-		}
-		return nil
-	}
+// NeedData
+func (s *SwarmSyncerClient) NeedData(ctx context.Context, key []byte) (wait func(context.Context) error) {
+	return s.netStore.FetchFunc(ctx, key)
 }

 func (s *SwarmSyncerClient) Close() {}
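After this change, NeedData simply forwards to NetStore.FetchFunc, whose contract as used here is: a nil return means the chunk is already stored locally, otherwise the returned function blocks until the chunk is delivered or the context ends. A sketch of a caller relying on that contract (fetchIfMissing is a hypothetical helper, not part of the codebase):

// fetchIfMissing illustrates the FetchFunc contract assumed by the syncer client
func fetchIfMissing(ctx context.Context, netStore *storage.NetStore, ref storage.Address) error {
	wait := netStore.FetchFunc(ctx, ref)
	if wait == nil {
		return nil // chunk already present locally, nothing to fetch
	}
	return wait(ctx) // block until delivered, or fail with the context error
}
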
@@ -1,24 +0,0 @@
-package timeouts
-
-import "time"
-
-// FailedPeerSkipDelay is the time we consider a peer to be skipped for a particular request/chunk,
-// because this peer failed to deliver it during the SearchTimeout interval
-var FailedPeerSkipDelay = 20 * time.Second
-
-// FetcherGlobalTimeout is the max time a node tries to find a chunk for a client, after which it returns a 404
-// Basically this is the amount of time a singleflight request for a given chunk lives
-var FetcherGlobalTimeout = 10 * time.Second
-
-// SearchTimeout is the max time requests wait for a peer to deliver a chunk, after which another peer is tried
-var SearchTimeout = 500 * time.Millisecond
-
-// SyncerClientWaitTimeout is the max time a syncer client waits for a chunk to be delivered during syncing
-var SyncerClientWaitTimeout = 20 * time.Second
-
-// Within handleOfferedHashesMsg - how long to wait for a given batch of chunks to be delivered by the peer offering them
-var SyncBatchTimeout = 10 * time.Second
-
-// Within SwarmSyncerServer - If at least one chunk is added to the batch and no new chunks
-// are added in BatchTimeout period, the batch will be returned.
-var BatchTimeout = 2 * time.Second