Merge netsim mig to master (#17241)

* swarm: merged stream-tests migration to develop

* swarm/network: expose simulation RandomUpNode to use in stream tests

* swarm/network: wait for subs in PeerEvents and fix stream.runSyncTest

* swarm: enforce waitkademlia for snapshot tests

* swarm: fixed syncer tests and snapshot_sync_test

* swarm: linting of simulation package

* swarm: address review comments

* swarm/network/stream: fix delivery_test bugs and refactor

* swarm/network/stream: addressed PR comments @janos

* swarm/network/stream: enforce waitKademlia, improve TestIntervals

* swarm/network/stream: TestIntervals not waiting for chunk to be stored
Author: holisticode
Date: 2018-07-30 15:55:25 -05:00
Committed by: Balint Gabor
Parent: 3ea8ac6a9a
Commit: d6efa69187
14 changed files with 1411 additions and 2479 deletions
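Editor's note: as a rough sketch of the overall test shape after this migration, assembled only from the API calls visible in the diffs below (simulation.New, ServiceFunc, AddNodesAndConnectChain, Run, Close). The noopService type and the test name are placeholders invented for illustration; real tests build the streamer Registry in the ServiceFunc and also wait for kademlia health.

package stream

import (
	"context"
	"sync"
	"testing"
	"time"

	"github.com/ethereum/go-ethereum/node"
	"github.com/ethereum/go-ethereum/p2p"
	"github.com/ethereum/go-ethereum/p2p/simulations/adapters"
	"github.com/ethereum/go-ethereum/rpc"
	"github.com/ethereum/go-ethereum/swarm/network/simulation"
)

// noopService stands in for the real streamer Registry; it exists only so the
// sketch compiles and runs.
type noopService struct{}

func (s *noopService) Protocols() []p2p.Protocol   { return nil }
func (s *noopService) APIs() []rpc.API             { return nil }
func (s *noopService) Start(srv *p2p.Server) error { return nil }
func (s *noopService) Stop() error                 { return nil }

func TestSimulationShapeSketch(t *testing.T) {
	sim := simulation.New(map[string]simulation.ServiceFunc{
		"streamer": func(ctx *adapters.ServiceContext, bucket *sync.Map) (node.Service, func(), error) {
			// real tests construct the Registry here, stash per-node objects
			// in the bucket and return a cleanup func (see delivery_test.go below)
			return &noopService{}, func() {}, nil
		},
	})
	defer sim.Close()

	if _, err := sim.AddNodesAndConnectChain(4); err != nil {
		t.Fatal(err)
	}

	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()
	result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) error {
		// real tests call sim.WaitTillHealthy(ctx, 2) here before asserting
		// anything about syncing or retrieval
		return nil
	})
	if result.Error != nil {
		t.Fatal(result.Error)
	}
}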

View File

@@ -43,7 +43,7 @@ func (s *Simulation) SetNodeItem(id discover.NodeID, key interface{}, value inte
 	s.buckets[id].Store(key, value)
 }
-// NodeItems returns a map of items from all nodes that are all set under the
+// NodesItems returns a map of items from all nodes that are all set under the
 // same BucketKey.
 func (s *Simulation) NodesItems(key interface{}) (values map[discover.NodeID]interface{}) {
 	s.mu.RLock()

View File

@@ -54,7 +54,7 @@ func (s *Simulation) ConnectToLastNode(id discover.NodeID) (err error) {
 // ConnectToRandomNode connects the node with provieded NodeID
 // to a random node that is up.
 func (s *Simulation) ConnectToRandomNode(id discover.NodeID) (err error) {
-	n := s.randomUpNode(id)
+	n := s.RandomUpNode(id)
 	if n == nil {
 		return ErrNodeNotFound
 	}
@@ -135,7 +135,7 @@ func (s *Simulation) ConnectNodesStar(id discover.NodeID, ids []discover.NodeID)
 	return nil
 }
-// ConnectNodesStar connects all nodes in a star topology
+// ConnectNodesStarPivot connects all nodes in a star topology
 // with the center at already set pivot node.
 // If ids argument is nil, all nodes that are up will be connected.
 func (s *Simulation) ConnectNodesStarPivot(ids []discover.NodeID) (err error) {
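A small hedged sketch of how ConnectNodesStarPivot is meant to be called (SetPivotNode and UpNodeIDs appear in the stream test diffs further down; node creation is elided and the helper name is invented for illustration):

func connectStarAroundPivot(t *testing.T, sim *simulation.Simulation) {
	ids := sim.UpNodeIDs() // assumes nodes have already been added and started
	sim.SetPivotNode(ids[0])
	// passing nil connects all nodes that are up to the pivot
	if err := sim.ConnectNodesStarPivot(nil); err != nil {
		t.Fatal(err)
	}
}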

View File

@@ -18,6 +18,7 @@ package simulation
 import (
 	"context"
+	"sync"
 	"github.com/ethereum/go-ethereum/p2p/discover"
@@ -71,24 +72,32 @@ func (f *PeerEventsFilter) MsgCode(c uint64) *PeerEventsFilter {
 func (s *Simulation) PeerEvents(ctx context.Context, ids []discover.NodeID, filters ...*PeerEventsFilter) <-chan PeerEvent {
 	eventC := make(chan PeerEvent)
+	// wait group to make sure all subscriptions to admin peerEvents are established
+	// before this function returns.
+	var subsWG sync.WaitGroup
 	for _, id := range ids {
 		s.shutdownWG.Add(1)
+		subsWG.Add(1)
 		go func(id discover.NodeID) {
 			defer s.shutdownWG.Done()
 			client, err := s.Net.GetNode(id).Client()
 			if err != nil {
+				subsWG.Done()
 				eventC <- PeerEvent{NodeID: id, Error: err}
 				return
 			}
 			events := make(chan *p2p.PeerEvent)
 			sub, err := client.Subscribe(ctx, "admin", events, "peerEvents")
 			if err != nil {
+				subsWG.Done()
 				eventC <- PeerEvent{NodeID: id, Error: err}
 				return
 			}
 			defer sub.Unsubscribe()
+			subsWG.Done()
 			for {
 				select {
 				case <-ctx.Done():
@@ -153,5 +162,7 @@ func (s *Simulation) PeerEvents(ctx context.Context, ids []discover.NodeID, filt
 		}(id)
 	}
+	// wait all subscriptions
+	subsWG.Wait()
 	return eventC
 }
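Since PeerEvents now blocks until every per-node subscription is established, a caller can start watching before it changes the network without racing the subscription setup. A hedged consumer sketch, modelled on how the stream tests below watch for drops (the helper name is illustrative; the usual imports — context, p2p, log and the simulation package — are assumed):

// watchDisconnections reports the first peer-drop error seen on any node.
func watchDisconnections(ctx context.Context, sim *simulation.Simulation) <-chan error {
	errc := make(chan error, 1)
	disconnections := sim.PeerEvents(
		ctx,
		sim.NodeIDs(),
		simulation.NewPeerEventsFilter().Type(p2p.PeerEventTypeDrop),
	)
	go func() {
		for d := range disconnections {
			if d.Error != nil {
				log.Error("peer drop", "node", d.NodeID, "peer", d.Event.Peer)
				errc <- d.Error
				return
			}
		}
	}()
	return errc
}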

View File

@@ -29,7 +29,7 @@ var (
 	DefaultHTTPSimAddr = ":8888"
 )
-//`With`(builder) pattern constructor for Simulation to
+//WithServer implements the builder pattern constructor for Simulation to
 //start with a HTTP server
 func (s *Simulation) WithServer(addr string) *Simulation {
 	//assign default addr if nothing provided
@@ -46,7 +46,12 @@ func (s *Simulation) WithServer(addr string) *Simulation {
 		Addr:    addr,
 		Handler: s.handler,
 	}
-	go s.httpSrv.ListenAndServe()
+	go func() {
+		err := s.httpSrv.ListenAndServe()
+		if err != nil {
+			log.Error("Error starting the HTTP server", "error", err)
+		}
+	}()
 	return s
 }
@@ -55,7 +60,7 @@ func (s *Simulation) addSimulationRoutes() {
 	s.handler.POST("/runsim", s.RunSimulation)
 }
-// StartNetwork starts all nodes in the network
+// RunSimulation is the actual POST endpoint runner
 func (s *Simulation) RunSimulation(w http.ResponseWriter, req *http.Request) {
 	log.Debug("RunSimulation endpoint running")
 	s.runC <- struct{}{}
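A hedged sketch of the intended HTTP-driven flow: build the simulation with WithServer, then POST to /runsim to deliver the run signal. That Run blocks until this signal arrives is an assumption based on the runC channel above; the helper name and the sleep are illustrative only.

func runWithFrontend(t *testing.T, services map[string]simulation.ServiceFunc) {
	sim := simulation.New(services).WithServer(simulation.DefaultHTTPSimAddr)
	defer sim.Close()

	go func() {
		// crude wait so the HTTP server is listening and Run is waiting
		time.Sleep(time.Second)
		resp, err := http.Post("http://localhost"+simulation.DefaultHTTPSimAddr+"/runsim", "application/json", nil)
		if err != nil {
			t.Error("sending run signal", err)
			return
		}
		resp.Body.Close()
	}()

	result := sim.Run(context.Background(), func(ctx context.Context, sim *simulation.Simulation) error {
		return nil // test body
	})
	if result.Error != nil {
		t.Fatal(result.Error)
	}
}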

View File

@@ -96,7 +96,12 @@ func sendRunSignal(t *testing.T) {
 	if err != nil {
 		t.Fatalf("Request failed: %v", err)
 	}
-	defer resp.Body.Close()
+	defer func() {
+		err := resp.Body.Close()
+		if err != nil {
+			log.Error("Error closing response body", "err", err)
+		}
+	}()
 	log.Debug("Signal sent")
 	if resp.StatusCode != http.StatusOK {
 		t.Fatalf("err %s", resp.Status)

View File

@@ -195,7 +195,7 @@ func (s *Simulation) AddNodesAndConnectStar(count int, opts ...AddNodeOption) (i
 	return ids, nil
 }
-//Upload a snapshot
+//UploadSnapshot uploads a snapshot to the simulation
 //This method tries to open the json file provided, applies the config to all nodes
 //and then loads the snapshot into the Simulation network
 func (s *Simulation) UploadSnapshot(snapshotFile string, opts ...AddNodeOption) error {
@@ -203,7 +203,12 @@ func (s *Simulation) UploadSnapshot(snapshotFile string, opts ...AddNodeOption)
 	if err != nil {
 		return err
 	}
-	defer f.Close()
+	defer func() {
+		err := f.Close()
+		if err != nil {
+			log.Error("Error closing snapshot file", "err", err)
+		}
+	}()
 	jsonbyte, err := ioutil.ReadAll(f)
 	if err != nil {
 		return err
@@ -294,7 +299,7 @@ func (s *Simulation) StopNode(id discover.NodeID) (err error) {
 // StopRandomNode stops a random node.
 func (s *Simulation) StopRandomNode() (id discover.NodeID, err error) {
-	n := s.randomUpNode()
+	n := s.RandomUpNode()
 	if n == nil {
 		return id, ErrNodeNotFound
 	}
@@ -324,18 +329,18 @@ func init() {
 	rand.Seed(time.Now().UnixNano())
 }
-// randomUpNode returns a random SimNode that is up.
+// RandomUpNode returns a random SimNode that is up.
 // Arguments are NodeIDs for nodes that should not be returned.
-func (s *Simulation) randomUpNode(exclude ...discover.NodeID) *adapters.SimNode {
+func (s *Simulation) RandomUpNode(exclude ...discover.NodeID) *adapters.SimNode {
 	return s.randomNode(s.UpNodeIDs(), exclude...)
 }
-// randomUpNode returns a random SimNode that is not up.
+// randomDownNode returns a random SimNode that is not up.
 func (s *Simulation) randomDownNode(exclude ...discover.NodeID) *adapters.SimNode {
 	return s.randomNode(s.DownNodeIDs(), exclude...)
 }
-// randomUpNode returns a random SimNode from the slice of NodeIDs.
+// randomNode returns a random SimNode from the slice of NodeIDs.
 func (s *Simulation) randomNode(ids []discover.NodeID, exclude ...discover.NodeID) *adapters.SimNode {
 	for _, e := range exclude {
 		var i int
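RandomUpNode is exported (and renamed from randomUpNode) precisely so the stream tests can grab an arbitrary healthy node. A hedged sketch of typical use, with bucketKeyFileStore taken from common_test.go below and the helper name invented for illustration:

func randomFileStore(t *testing.T, sim *simulation.Simulation) *storage.FileStore {
	n := sim.RandomUpNode()
	if n == nil {
		t.Fatal("no node is up")
	}
	item, ok := sim.NodeItem(n.ID(), bucketKeyFileStore)
	if !ok {
		t.Fatalf("no filestore for node %s", n.ID())
	}
	return item.(*storage.FileStore)
}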

View File

@@ -39,7 +39,7 @@ func (s *Simulation) Service(name string, id discover.NodeID) node.Service {
 // RandomService returns a single Service by name on a
 // randomly chosen node that is up.
 func (s *Simulation) RandomService(name string) node.Service {
-	n := s.randomUpNode()
+	n := s.RandomUpNode()
 	if n == nil {
 		return nil
 	}

View File

@ -18,135 +18,70 @@ package stream
import ( import (
"context" "context"
"encoding/binary" crand "crypto/rand"
"errors" "errors"
"flag" "flag"
"fmt" "fmt"
"io" "io"
"io/ioutil" "io/ioutil"
"math/rand"
"os" "os"
"strings"
"sync/atomic" "sync/atomic"
"testing" "testing"
"time" "time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/node"
"github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/p2p/discover" "github.com/ethereum/go-ethereum/p2p/discover"
"github.com/ethereum/go-ethereum/p2p/simulations/adapters"
p2ptest "github.com/ethereum/go-ethereum/p2p/testing" p2ptest "github.com/ethereum/go-ethereum/p2p/testing"
"github.com/ethereum/go-ethereum/rpc"
"github.com/ethereum/go-ethereum/swarm/network" "github.com/ethereum/go-ethereum/swarm/network"
"github.com/ethereum/go-ethereum/swarm/network/simulation"
"github.com/ethereum/go-ethereum/swarm/pot"
"github.com/ethereum/go-ethereum/swarm/state" "github.com/ethereum/go-ethereum/swarm/state"
"github.com/ethereum/go-ethereum/swarm/storage" "github.com/ethereum/go-ethereum/swarm/storage"
"github.com/ethereum/go-ethereum/swarm/storage/mock" mockdb "github.com/ethereum/go-ethereum/swarm/storage/mock/db"
"github.com/ethereum/go-ethereum/swarm/storage/mock/db"
colorable "github.com/mattn/go-colorable" colorable "github.com/mattn/go-colorable"
) )
var ( var (
deliveries map[discover.NodeID]*Delivery
stores map[discover.NodeID]storage.ChunkStore
toAddr func(discover.NodeID) *network.BzzAddr
peerCount func(discover.NodeID) int
adapter = flag.String("adapter", "sim", "type of simulation: sim|exec|docker")
loglevel = flag.Int("loglevel", 2, "verbosity of logs") loglevel = flag.Int("loglevel", 2, "verbosity of logs")
nodes = flag.Int("nodes", 0, "number of nodes") nodes = flag.Int("nodes", 0, "number of nodes")
chunks = flag.Int("chunks", 0, "number of chunks") chunks = flag.Int("chunks", 0, "number of chunks")
useMockStore = flag.Bool("mockstore", false, "disabled mock store (default: enabled)") useMockStore = flag.Bool("mockstore", false, "disabled mock store (default: enabled)")
) longrunning = flag.Bool("longrunning", false, "do run long-running tests")
bucketKeyDB = simulation.BucketKey("db")
bucketKeyStore = simulation.BucketKey("store")
bucketKeyFileStore = simulation.BucketKey("filestore")
bucketKeyNetStore = simulation.BucketKey("netstore")
bucketKeyDelivery = simulation.BucketKey("delivery")
bucketKeyRegistry = simulation.BucketKey("registry")
var (
defaultSkipCheck bool
waitPeerErrC chan error
chunkSize = 4096 chunkSize = 4096
registries map[discover.NodeID]*TestRegistry pof = pot.DefaultPof(256)
createStoreFunc func(id discover.NodeID, addr *network.BzzAddr) (storage.ChunkStore, error)
getRetrieveFunc = defaultRetrieveFunc
subscriptionCount = 0
globalStore mock.GlobalStorer
globalStoreDir string
) )
var services = adapters.Services{
"streamer": NewStreamerService,
"intervalsStreamer": newIntervalsStreamerService,
}
func init() { func init() {
flag.Parse() flag.Parse()
// register the Delivery service which will run as a devp2p rand.Seed(time.Now().UnixNano())
// protocol when using the exec adapter
adapters.RegisterServices(services)
log.PrintOrigins(true) log.PrintOrigins(true)
log.Root().SetHandler(log.LvlFilterHandler(log.Lvl(*loglevel), log.StreamHandler(colorable.NewColorableStderr(), log.TerminalFormat(true)))) log.Root().SetHandler(log.LvlFilterHandler(log.Lvl(*loglevel), log.StreamHandler(colorable.NewColorableStderr(), log.TerminalFormat(true))))
} }
func createGlobalStore() { func createGlobalStore() (string, *mockdb.GlobalStore, error) {
var err error var globalStore *mockdb.GlobalStore
globalStoreDir, err = ioutil.TempDir("", "global.store") globalStoreDir, err := ioutil.TempDir("", "global.store")
if err != nil { if err != nil {
log.Error("Error initiating global store temp directory!", "err", err) log.Error("Error initiating global store temp directory!", "err", err)
return return "", nil, err
} }
globalStore, err = db.NewGlobalStore(globalStoreDir) globalStore, err = mockdb.NewGlobalStore(globalStoreDir)
if err != nil { if err != nil {
log.Error("Error initiating global store!", "err", err) log.Error("Error initiating global store!", "err", err)
return "", nil, err
} }
} return globalStoreDir, globalStore, nil
// NewStreamerService
func NewStreamerService(ctx *adapters.ServiceContext) (node.Service, error) {
var err error
id := ctx.Config.ID
addr := toAddr(id)
kad := network.NewKademlia(addr.Over(), network.NewKadParams())
stores[id], err = createStoreFunc(id, addr)
if err != nil {
return nil, err
}
store := stores[id].(*storage.LocalStore)
db := storage.NewDBAPI(store)
delivery := NewDelivery(kad, db)
deliveries[id] = delivery
r := NewRegistry(addr, delivery, db, state.NewInmemoryStore(), &RegistryOptions{
SkipCheck: defaultSkipCheck,
DoRetrieve: false,
})
RegisterSwarmSyncerServer(r, db)
RegisterSwarmSyncerClient(r, db)
go func() {
waitPeerErrC <- waitForPeers(r, 1*time.Second, peerCount(id))
}()
fileStore := storage.NewFileStore(storage.NewNetStore(store, getRetrieveFunc(id)), storage.NewFileStoreParams())
testRegistry := &TestRegistry{Registry: r, fileStore: fileStore}
registries[id] = testRegistry
return testRegistry, nil
}
func defaultRetrieveFunc(id discover.NodeID) func(ctx context.Context, chunk *storage.Chunk) error {
return nil
}
func datadirsCleanup() {
for _, id := range ids {
os.RemoveAll(datadirs[id])
}
if globalStoreDir != "" {
os.RemoveAll(globalStoreDir)
}
}
//local stores need to be cleaned up after the sim is done
func localStoreCleanup() {
log.Info("Cleaning up...")
for _, id := range ids {
registries[id].Close()
stores[id].Close()
}
log.Info("Local store cleanup done")
} }
func newStreamerTester(t *testing.T) (*p2ptest.ProtocolTester, *Registry, *storage.LocalStore, func(), error) { func newStreamerTester(t *testing.T) (*p2ptest.ProtocolTester, *Registry, *storage.LocalStore, func(), error) {
@ -174,9 +109,7 @@ func newStreamerTester(t *testing.T) (*p2ptest.ProtocolTester, *Registry, *stora
db := storage.NewDBAPI(localStore) db := storage.NewDBAPI(localStore)
delivery := NewDelivery(to, db) delivery := NewDelivery(to, db)
streamer := NewRegistry(addr, delivery, db, state.NewInmemoryStore(), &RegistryOptions{ streamer := NewRegistry(addr, delivery, db, state.NewInmemoryStore(), nil)
SkipCheck: defaultSkipCheck,
})
teardown := func() { teardown := func() {
streamer.Close() streamer.Close()
removeDataDir() removeDataDir()
@ -233,22 +166,6 @@ func (rrs *roundRobinStore) Close() {
} }
} }
type TestRegistry struct {
*Registry
fileStore *storage.FileStore
}
func (r *TestRegistry) APIs() []rpc.API {
a := r.Registry.APIs()
a = append(a, rpc.API{
Namespace: "stream",
Version: "3.0",
Service: r,
Public: true,
})
return a
}
func readAll(fileStore *storage.FileStore, hash []byte) (int64, error) { func readAll(fileStore *storage.FileStore, hash []byte) (int64, error) {
r, _ := fileStore.Retrieve(context.TODO(), hash) r, _ := fileStore.Retrieve(context.TODO(), hash)
buf := make([]byte, 1024) buf := make([]byte, 1024)
@ -265,185 +182,74 @@ func readAll(fileStore *storage.FileStore, hash []byte) (int64, error) {
return total, nil return total, nil
} }
func (r *TestRegistry) ReadAll(hash common.Hash) (int64, error) { func uploadFilesToNodes(sim *simulation.Simulation) ([]storage.Address, []string, error) {
return readAll(r.fileStore, hash[:]) nodes := sim.UpNodeIDs()
} nodeCnt := len(nodes)
log.Debug(fmt.Sprintf("Uploading %d files to nodes", nodeCnt))
//array holding generated files
rfiles := make([]string, nodeCnt)
//array holding the root hashes of the files
rootAddrs := make([]storage.Address, nodeCnt)
func (r *TestRegistry) Start(server *p2p.Server) error { var err error
return r.Registry.Start(server) //for every node, generate a file and upload
} for i, id := range nodes {
item, ok := sim.NodeItem(id, bucketKeyFileStore)
func (r *TestRegistry) Stop() error { if !ok {
return r.Registry.Stop() return nil, nil, fmt.Errorf("Error accessing localstore")
} }
fileStore := item.(*storage.FileStore)
type TestExternalRegistry struct { //generate a file
*Registry rfiles[i], err = generateRandomFile()
}
func (r *TestExternalRegistry) APIs() []rpc.API {
a := r.Registry.APIs()
a = append(a, rpc.API{
Namespace: "stream",
Version: "3.0",
Service: r,
Public: true,
})
return a
}
func (r *TestExternalRegistry) GetHashes(ctx context.Context, peerId discover.NodeID, s Stream) (*rpc.Subscription, error) {
peer := r.getPeer(peerId)
client, err := peer.getClient(ctx, s)
if err != nil { if err != nil {
return nil, err return nil, nil, err
} }
//store it (upload it) on the FileStore
c := client.Client.(*testExternalClient) ctx := context.TODO()
rk, wait, err := fileStore.Store(ctx, strings.NewReader(rfiles[i]), int64(len(rfiles[i])), false)
notifier, supported := rpc.NotifierFromContext(ctx) log.Debug("Uploaded random string file to node")
if !supported {
return nil, fmt.Errorf("Subscribe not supported")
}
sub := notifier.CreateSubscription()
go func() {
// if we begin sending event immediately some events
// will probably be dropped since the subscription ID might not be send to
// the client.
// ref: rpc/subscription_test.go#L65
time.Sleep(1 * time.Second)
for {
select {
case h := <-c.hashes:
<-c.enableNotificationsC // wait for notification subscription to complete
if err := notifier.Notify(sub.ID, h); err != nil {
log.Warn(fmt.Sprintf("rpc sub notifier notify stream %s: %v", s, err))
}
case err := <-sub.Err():
if err != nil { if err != nil {
log.Warn(fmt.Sprintf("caught subscription error in stream %s: %v", s, err)) return nil, nil, err
} }
case <-notifier.Closed(): err = wait(ctx)
log.Trace(fmt.Sprintf("rpc sub notifier closed"))
return
}
}
}()
return sub, nil
}
func (r *TestExternalRegistry) EnableNotifications(peerId discover.NodeID, s Stream) error {
peer := r.getPeer(peerId)
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
client, err := peer.getClient(ctx, s)
if err != nil { if err != nil {
return err return nil, nil, err
} }
rootAddrs[i] = rk
close(client.Client.(*testExternalClient).enableNotificationsC) }
return rootAddrs, rfiles, nil
return nil
} }
// TODO: merge functionalities of testExternalClient and testExternalServer //generate a random file (string)
// with testClient and testServer. func generateRandomFile() (string, error) {
//generate a random file size between minFileSize and maxFileSize
type testExternalClient struct { fileSize := rand.Intn(maxFileSize-minFileSize) + minFileSize
hashes chan []byte log.Debug(fmt.Sprintf("Generated file with filesize %d kB", fileSize))
db *storage.DBAPI b := make([]byte, fileSize*1024)
enableNotificationsC chan struct{} _, err := crand.Read(b)
if err != nil {
log.Error("Error generating random file.", "err", err)
return "", err
}
return string(b), nil
} }
func newTestExternalClient(db *storage.DBAPI) *testExternalClient { //create a local store for the given node
return &testExternalClient{ func createTestLocalStorageForID(id discover.NodeID, addr *network.BzzAddr) (storage.ChunkStore, string, error) {
hashes: make(chan []byte), var datadir string
db: db, var err error
enableNotificationsC: make(chan struct{}), datadir, err = ioutil.TempDir("", fmt.Sprintf("syncer-test-%s", id.TerminalString()))
if err != nil {
return nil, "", err
} }
} var store storage.ChunkStore
params := storage.NewDefaultLocalStoreParams()
func (c *testExternalClient) NeedData(ctx context.Context, hash []byte) func() { params.ChunkDbPath = datadir
chunk, _ := c.db.GetOrCreateRequest(ctx, hash) params.BaseKey = addr.Over()
if chunk.ReqC == nil { store, err = storage.NewTestLocalStoreForAddr(params)
return nil if err != nil {
} os.RemoveAll(datadir)
c.hashes <- hash return nil, "", err
return func() { }
chunk.WaitToStore() return store, datadir, nil
}
}
func (c *testExternalClient) BatchDone(Stream, uint64, []byte, []byte) func() (*TakeoverProof, error) {
return nil
}
func (c *testExternalClient) Close() {}
const testExternalServerBatchSize = 10
type testExternalServer struct {
t string
keyFunc func(key []byte, index uint64)
sessionAt uint64
maxKeys uint64
streamer *TestExternalRegistry
}
func newTestExternalServer(t string, sessionAt, maxKeys uint64, keyFunc func(key []byte, index uint64)) *testExternalServer {
if keyFunc == nil {
keyFunc = binary.BigEndian.PutUint64
}
return &testExternalServer{
t: t,
keyFunc: keyFunc,
sessionAt: sessionAt,
maxKeys: maxKeys,
}
}
func (s *testExternalServer) SetNextBatch(from uint64, to uint64) ([]byte, uint64, uint64, *HandoverProof, error) {
if from == 0 && to == 0 {
from = s.sessionAt
to = s.sessionAt + testExternalServerBatchSize
}
if to-from > testExternalServerBatchSize {
to = from + testExternalServerBatchSize - 1
}
if from >= s.maxKeys && to > s.maxKeys {
return nil, 0, 0, nil, io.EOF
}
if to > s.maxKeys {
to = s.maxKeys
}
b := make([]byte, HashSize*(to-from+1))
for i := from; i <= to; i++ {
s.keyFunc(b[(i-from)*HashSize:(i-from+1)*HashSize], i)
}
return b, from, to, nil, nil
}
func (s *testExternalServer) GetData(context.Context, []byte) ([]byte, error) {
return make([]byte, 4096), nil
}
func (s *testExternalServer) Close() {}
// Sets the global value defaultSkipCheck.
// It should be used in test function defer to reset the global value
// to the original value.
//
// defer setDefaultSkipCheck(defaultSkipCheck)
// defaultSkipCheck = skipCheck
//
// This works as defer function arguments evaluations are evaluated as ususal,
// but only the function body invocation is deferred.
func setDefaultSkipCheck(skipCheck bool) {
defaultSkipCheck = skipCheck
} }
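The helpers above (the bucket keys, createGlobalStore, uploadFilesToNodes, generateRandomFile, createTestLocalStorageForID) are shared by the migrated tests, and because the flags are parsed in init the suites can be scaled from the shell with something like go test ./swarm/network/stream -run TestRetrieval -nodes=16 -chunks=32 -loglevel=3. A hedged sketch of how the helpers are typically combined inside a sim.Run body (the surrounding setup and the exact assertions are elided):

result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) error {
	// wait for kademlia health before uploading, as the snapshot tests now enforce
	if _, err := sim.WaitTillHealthy(ctx, 2); err != nil {
		return err
	}
	rootAddrs, files, err := uploadFilesToNodes(sim)
	if err != nil {
		return err
	}
	log.Debug("uploaded one random file per node", "files", len(files), "roots", len(rootAddrs))
	// ... retrieval / syncing assertions go here ...
	return nil
})
if result.Error != nil {
	t.Fatal(result.Error)
}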

View File

@ -22,18 +22,19 @@ import (
crand "crypto/rand" crand "crypto/rand"
"fmt" "fmt"
"io" "io"
"os"
"sync" "sync"
"testing" "testing"
"time" "time"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/node"
"github.com/ethereum/go-ethereum/p2p/discover" "github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/p2p/simulations" "github.com/ethereum/go-ethereum/p2p/simulations/adapters"
p2ptest "github.com/ethereum/go-ethereum/p2p/testing" p2ptest "github.com/ethereum/go-ethereum/p2p/testing"
"github.com/ethereum/go-ethereum/rpc"
"github.com/ethereum/go-ethereum/swarm/log" "github.com/ethereum/go-ethereum/swarm/log"
"github.com/ethereum/go-ethereum/swarm/network" "github.com/ethereum/go-ethereum/swarm/network"
streamTesting "github.com/ethereum/go-ethereum/swarm/network/stream/testing" "github.com/ethereum/go-ethereum/swarm/network/simulation"
"github.com/ethereum/go-ethereum/swarm/state"
"github.com/ethereum/go-ethereum/swarm/storage" "github.com/ethereum/go-ethereum/swarm/storage"
) )
@ -308,159 +309,164 @@ func TestDeliveryFromNodes(t *testing.T) {
} }
func testDeliveryFromNodes(t *testing.T, nodes, conns, chunkCount int, skipCheck bool) { func testDeliveryFromNodes(t *testing.T, nodes, conns, chunkCount int, skipCheck bool) {
defaultSkipCheck = skipCheck sim := simulation.New(map[string]simulation.ServiceFunc{
toAddr = network.NewAddrFromNodeID "streamer": func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) {
createStoreFunc = createTestLocalStorageFromSim
conf := &streamTesting.RunConfig{
Adapter: *adapter,
NodeCount: nodes,
ConnLevel: conns,
ToAddr: toAddr,
Services: services,
EnableMsgEvents: false,
}
sim, teardown, err := streamTesting.NewSimulation(conf) id := ctx.Config.ID
var rpcSubscriptionsWg sync.WaitGroup addr := network.NewAddrFromNodeID(id)
defer func() { store, datadir, err := createTestLocalStorageForID(id, addr)
rpcSubscriptionsWg.Wait()
teardown()
}()
if err != nil { if err != nil {
t.Fatal(err.Error()) return nil, nil, err
} }
stores = make(map[discover.NodeID]storage.ChunkStore) bucket.Store(bucketKeyStore, store)
for i, id := range sim.IDs { cleanup = func() {
stores[id] = sim.Stores[i] os.RemoveAll(datadir)
} store.Close()
registries = make(map[discover.NodeID]*TestRegistry)
deliveries = make(map[discover.NodeID]*Delivery)
peerCount = func(id discover.NodeID) int {
if sim.IDs[0] == id || sim.IDs[nodes-1] == id {
return 1
}
return 2
} }
localStore := store.(*storage.LocalStore)
db := storage.NewDBAPI(localStore)
kad := network.NewKademlia(addr.Over(), network.NewKadParams())
delivery := NewDelivery(kad, db)
// here we distribute chunks of a random file into Stores of nodes 1 to nodes r := NewRegistry(addr, delivery, db, state.NewInmemoryStore(), &RegistryOptions{
rrFileStore := storage.NewFileStore(newRoundRobinStore(sim.Stores[1:]...), storage.NewFileStoreParams()) SkipCheck: skipCheck,
size := chunkCount * chunkSize
ctx := context.TODO()
fileHash, wait, err := rrFileStore.Store(ctx, io.LimitReader(crand.Reader, int64(size)), int64(size), false)
// wait until all chunks stored
if err != nil {
t.Fatal(err.Error())
}
err = wait(ctx)
if err != nil {
t.Fatal(err.Error())
}
errc := make(chan error, 1)
waitPeerErrC = make(chan error)
quitC := make(chan struct{})
defer close(quitC)
action := func(ctx context.Context) error {
// each node Subscribes to each other's swarmChunkServerStreamName
// need to wait till an aynchronous process registers the peers in streamer.peers
// that is used by Subscribe
// using a global err channel to share betweem action and node service
i := 0
for err := range waitPeerErrC {
if err != nil {
return fmt.Errorf("error waiting for peers: %s", err)
}
i++
if i == nodes {
break
}
}
// each node subscribes to the upstream swarm chunk server stream
// which responds to chunk retrieve requests all but the last node in the chain does not
for j := 0; j < nodes-1; j++ {
id := sim.IDs[j]
err := sim.CallClient(id, func(client *rpc.Client) error {
doneC, err := streamTesting.WatchDisconnections(id, client, errc, quitC)
if err != nil {
return err
}
rpcSubscriptionsWg.Add(1)
go func() {
<-doneC
rpcSubscriptionsWg.Done()
}()
ctx, cancel := context.WithTimeout(ctx, 1*time.Second)
defer cancel()
sid := sim.IDs[j+1]
return client.CallContext(ctx, nil, "stream_subscribeStream", sid, NewStream(swarmChunkServerStreamName, "", false), NewRange(0, 0), Top)
}) })
if err != nil { bucket.Store(bucketKeyRegistry, r)
return err
}
}
// create a retriever FileStore for the pivot node
delivery := deliveries[sim.IDs[0]]
retrieveFunc := func(ctx context.Context, chunk *storage.Chunk) error { retrieveFunc := func(ctx context.Context, chunk *storage.Chunk) error {
return delivery.RequestFromPeers(ctx, chunk.Addr[:], skipCheck) return delivery.RequestFromPeers(ctx, chunk.Addr[:], skipCheck)
} }
netStore := storage.NewNetStore(sim.Stores[0].(*storage.LocalStore), retrieveFunc) netStore := storage.NewNetStore(localStore, retrieveFunc)
fileStore := storage.NewFileStore(netStore, storage.NewFileStoreParams()) fileStore := storage.NewFileStore(netStore, storage.NewFileStoreParams())
bucket.Store(bucketKeyFileStore, fileStore)
return r, cleanup, nil
},
})
defer sim.Close()
log.Info("Adding nodes to simulation")
_, err := sim.AddNodesAndConnectChain(nodes)
if err != nil {
t.Fatal(err)
}
log.Info("Starting simulation")
ctx := context.Background()
result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) error {
nodeIDs := sim.UpNodeIDs()
//determine the pivot node to be the first node of the simulation
sim.SetPivotNode(nodeIDs[0])
//distribute chunks of a random file into Stores of nodes 1 to nodes
//we will do this by creating a file store with an underlying round-robin store:
//the file store will create a hash for the uploaded file, but every chunk will be
//distributed to different nodes via round-robin scheduling
log.Debug("Writing file to round-robin file store")
//to do this, we create an array for chunkstores (length minus one, the pivot node)
stores := make([]storage.ChunkStore, len(nodeIDs)-1)
//we then need to get all stores from the sim....
lStores := sim.NodesItems(bucketKeyStore)
i := 0
//...iterate the buckets...
for id, bucketVal := range lStores {
//...and remove the one which is the pivot node
if id == *sim.PivotNodeID() {
continue
}
//the other ones are added to the array...
stores[i] = bucketVal.(storage.ChunkStore)
i++
}
//...which then gets passed to the round-robin file store
roundRobinFileStore := storage.NewFileStore(newRoundRobinStore(stores...), storage.NewFileStoreParams())
//now we can actually upload a (random) file to the round-robin store
size := chunkCount * chunkSize
log.Debug("Storing data to file store")
fileHash, wait, err := roundRobinFileStore.Store(ctx, io.LimitReader(crand.Reader, int64(size)), int64(size), false)
// wait until all chunks stored
if err != nil {
return err
}
err = wait(ctx)
if err != nil {
return err
}
//each of the nodes (except pivot node) subscribes to the stream of the next node
for j, node := range nodeIDs[0 : nodes-1] {
sid := nodeIDs[j+1]
item, ok := sim.NodeItem(node, bucketKeyRegistry)
if !ok {
return fmt.Errorf("No registry")
}
registry := item.(*Registry)
err = registry.Subscribe(sid, NewStream(swarmChunkServerStreamName, "", false), NewRange(0, 0), Top)
if err != nil {
return err
}
}
//get the pivot node's filestore
item, ok := sim.NodeItem(*sim.PivotNodeID(), bucketKeyFileStore)
if !ok {
return fmt.Errorf("No filestore")
}
pivotFileStore := item.(*storage.FileStore)
log.Debug("Starting retrieval routine")
go func() { go func() {
// start the retrieval on the pivot node - this will spawn retrieve requests for missing chunks // start the retrieval on the pivot node - this will spawn retrieve requests for missing chunks
// we must wait for the peer connections to have started before requesting // we must wait for the peer connections to have started before requesting
n, err := readAll(fileStore, fileHash) n, err := readAll(pivotFileStore, fileHash)
log.Info(fmt.Sprintf("retrieved %v", fileHash), "read", n, "err", err) log.Info(fmt.Sprintf("retrieved %v", fileHash), "read", n, "err", err)
if err != nil { if err != nil {
errc <- fmt.Errorf("requesting chunks action error: %v", err) t.Fatalf("requesting chunks action error: %v", err)
} }
}() }()
return nil
} log.Debug("Waiting for kademlia")
check := func(ctx context.Context, id discover.NodeID) (bool, error) { if _, err := sim.WaitTillHealthy(ctx, 2); err != nil {
select { return err
case err := <-errc:
return false, err
case <-ctx.Done():
return false, ctx.Err()
default:
}
var total int64
err := sim.CallClient(id, func(client *rpc.Client) error {
ctx, cancel := context.WithTimeout(ctx, 5*time.Second)
defer cancel()
return client.CallContext(ctx, &total, "stream_readAll", common.BytesToHash(fileHash))
})
log.Info(fmt.Sprintf("check if %08x is available locally: number of bytes read %v/%v (error: %v)", fileHash, total, size, err))
if err != nil || total != int64(size) {
return false, nil
}
return true, nil
} }
conf.Step = &simulations.Step{ log.Debug("Watching for disconnections")
Action: action, disconnections := sim.PeerEvents(
Trigger: streamTesting.Trigger(10*time.Millisecond, quitC, sim.IDs[0]), context.Background(),
// we are only testing the pivot node (net.Nodes[0]) sim.NodeIDs(),
Expect: &simulations.Expectation{ simulation.NewPeerEventsFilter().Type(p2p.PeerEventTypeDrop),
Nodes: sim.IDs[0:1], )
Check: check,
}, go func() {
for d := range disconnections {
if d.Error != nil {
log.Error("peer drop", "node", d.NodeID, "peer", d.Event.Peer)
t.Fatal(d.Error)
} }
startedAt := time.Now() }
timeout := 300 * time.Second }()
ctx, cancel := context.WithTimeout(context.Background(), timeout)
defer cancel() //finally check that the pivot node gets all chunks via the root hash
result, err := sim.Run(ctx, conf) log.Debug("Check retrieval")
finishedAt := time.Now() success := true
var total int64
total, err = readAll(pivotFileStore, fileHash)
if err != nil { if err != nil {
t.Fatalf("Setting up simulation failed: %v", err) return err
} }
log.Info(fmt.Sprintf("check if %08x is available locally: number of bytes read %v/%v (error: %v)", fileHash, total, size, err))
if err != nil || total != int64(size) {
success = false
}
if !success {
return fmt.Errorf("Test failed, chunks not available on all nodes")
}
log.Debug("Test terminated successfully")
return nil
})
if result.Error != nil { if result.Error != nil {
t.Fatalf("Simulation failed: %s", result.Error) t.Fatal(result.Error)
} }
streamTesting.CheckResult(t, result, startedAt, finishedAt)
} }
func BenchmarkDeliveryFromNodesWithoutCheck(b *testing.B) { func BenchmarkDeliveryFromNodesWithoutCheck(b *testing.B) {
@ -490,142 +496,90 @@ func BenchmarkDeliveryFromNodesWithCheck(b *testing.B) {
} }
func benchmarkDeliveryFromNodes(b *testing.B, nodes, conns, chunkCount int, skipCheck bool) { func benchmarkDeliveryFromNodes(b *testing.B, nodes, conns, chunkCount int, skipCheck bool) {
defaultSkipCheck = skipCheck sim := simulation.New(map[string]simulation.ServiceFunc{
toAddr = network.NewAddrFromNodeID "streamer": func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) {
createStoreFunc = createTestLocalStorageFromSim
registries = make(map[discover.NodeID]*TestRegistry)
timeout := 300 * time.Second id := ctx.Config.ID
ctx, cancel := context.WithTimeout(context.Background(), timeout) addr := network.NewAddrFromNodeID(id)
defer cancel() store, datadir, err := createTestLocalStorageForID(id, addr)
conf := &streamTesting.RunConfig{
Adapter: *adapter,
NodeCount: nodes,
ConnLevel: conns,
ToAddr: toAddr,
Services: services,
EnableMsgEvents: false,
}
sim, teardown, err := streamTesting.NewSimulation(conf)
var rpcSubscriptionsWg sync.WaitGroup
defer func() {
rpcSubscriptionsWg.Wait()
teardown()
}()
if err != nil { if err != nil {
b.Fatal(err.Error()) return nil, nil, err
} }
bucket.Store(bucketKeyStore, store)
cleanup = func() {
os.RemoveAll(datadir)
store.Close()
}
localStore := store.(*storage.LocalStore)
db := storage.NewDBAPI(localStore)
kad := network.NewKademlia(addr.Over(), network.NewKadParams())
delivery := NewDelivery(kad, db)
stores = make(map[discover.NodeID]storage.ChunkStore) r := NewRegistry(addr, delivery, db, state.NewInmemoryStore(), &RegistryOptions{
deliveries = make(map[discover.NodeID]*Delivery) SkipCheck: skipCheck,
for i, id := range sim.IDs { DoSync: true,
stores[id] = sim.Stores[i] SyncUpdateDelay: 0,
}
peerCount = func(id discover.NodeID) int {
if sim.IDs[0] == id || sim.IDs[nodes-1] == id {
return 1
}
return 2
}
// wait channel for all nodes all peer connections to set up
waitPeerErrC = make(chan error)
// create a FileStore for the last node in the chain which we are gonna write to
remoteFileStore := storage.NewFileStore(sim.Stores[nodes-1], storage.NewFileStoreParams())
// channel to signal simulation initialisation with action call complete
// or node disconnections
disconnectC := make(chan error)
quitC := make(chan struct{})
initC := make(chan error)
action := func(ctx context.Context) error {
// each node Subscribes to each other's swarmChunkServerStreamName
// need to wait till an aynchronous process registers the peers in streamer.peers
// that is used by Subscribe
// waitPeerErrC using a global err channel to share betweem action and node service
i := 0
for err := range waitPeerErrC {
if err != nil {
return fmt.Errorf("error waiting for peers: %s", err)
}
i++
if i == nodes {
break
}
}
var err error
// each node except the last one subscribes to the upstream swarm chunk server stream
// which responds to chunk retrieve requests
for j := 0; j < nodes-1; j++ {
id := sim.IDs[j]
err = sim.CallClient(id, func(client *rpc.Client) error {
doneC, err := streamTesting.WatchDisconnections(id, client, disconnectC, quitC)
if err != nil {
return err
}
rpcSubscriptionsWg.Add(1)
go func() {
<-doneC
rpcSubscriptionsWg.Done()
}()
ctx, cancel := context.WithTimeout(ctx, 1*time.Second)
defer cancel()
sid := sim.IDs[j+1] // the upstream peer's id
return client.CallContext(ctx, nil, "stream_subscribeStream", sid, NewStream(swarmChunkServerStreamName, "", false), NewRange(0, 0), Top)
}) })
if err != nil {
break
}
}
initC <- err
return nil
}
// the check function is only triggered when the benchmark finishes
trigger := make(chan discover.NodeID)
check := func(ctx context.Context, id discover.NodeID) (_ bool, err error) {
return true, nil
}
conf.Step = &simulations.Step{
Action: action,
Trigger: trigger,
// we are only testing the pivot node (net.Nodes[0])
Expect: &simulations.Expectation{
Nodes: sim.IDs[0:1],
Check: check,
},
}
// run the simulation in the background
errc := make(chan error)
go func() {
_, err := sim.Run(ctx, conf)
close(quitC)
errc <- err
}()
// wait for simulation action to complete stream subscriptions
err = <-initC
if err != nil {
b.Fatalf("simulation failed to initialise. expected no error. got %v", err)
}
// create a retriever FileStore for the pivot node
// by now deliveries are set for each node by the streamer service
delivery := deliveries[sim.IDs[0]]
retrieveFunc := func(ctx context.Context, chunk *storage.Chunk) error { retrieveFunc := func(ctx context.Context, chunk *storage.Chunk) error {
return delivery.RequestFromPeers(ctx, chunk.Addr[:], skipCheck) return delivery.RequestFromPeers(ctx, chunk.Addr[:], skipCheck)
} }
netStore := storage.NewNetStore(sim.Stores[0].(*storage.LocalStore), retrieveFunc) netStore := storage.NewNetStore(localStore, retrieveFunc)
fileStore := storage.NewFileStore(netStore, storage.NewFileStoreParams())
bucket.Store(bucketKeyFileStore, fileStore)
return r, cleanup, nil
},
})
defer sim.Close()
log.Info("Initializing test config")
_, err := sim.AddNodesAndConnectChain(nodes)
if err != nil {
b.Fatal(err)
}
ctx := context.Background()
result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) error {
nodeIDs := sim.UpNodeIDs()
node := nodeIDs[len(nodeIDs)-1]
item, ok := sim.NodeItem(node, bucketKeyFileStore)
if !ok {
b.Fatal("No filestore")
}
remoteFileStore := item.(*storage.FileStore)
pivotNode := nodeIDs[0]
item, ok = sim.NodeItem(pivotNode, bucketKeyNetStore)
if !ok {
b.Fatal("No filestore")
}
netStore := item.(*storage.NetStore)
if _, err := sim.WaitTillHealthy(ctx, 2); err != nil {
return err
}
disconnections := sim.PeerEvents(
context.Background(),
sim.NodeIDs(),
simulation.NewPeerEventsFilter().Type(p2p.PeerEventTypeDrop),
)
go func() {
for d := range disconnections {
if d.Error != nil {
log.Error("peer drop", "node", d.NodeID, "peer", d.Event.Peer)
b.Fatal(d.Error)
}
}
}()
// benchmark loop // benchmark loop
b.ResetTimer() b.ResetTimer()
b.StopTimer() b.StopTimer()
Loop: Loop:
for i := 0; i < b.N; i++ { for i := 0; i < b.N; i++ {
// uploading chunkCount random chunks to the last node // uploading chunkCount random chunks to the last node
hashes := make([]storage.Address, chunkCount) hashes := make([]storage.Address, chunkCount)
@ -670,38 +624,18 @@ Loop:
} }
b.StopTimer() b.StopTimer()
select {
case err = <-disconnectC:
if err != nil {
break Loop
}
default:
}
if misses > 0 { if misses > 0 {
err = fmt.Errorf("%v chunk not found out of %v", misses, total) err = fmt.Errorf("%v chunk not found out of %v", misses, total)
break Loop break Loop
} }
} }
select {
case <-quitC:
case trigger <- sim.IDs[0]:
}
if err == nil {
err = <-errc
} else {
if e := <-errc; e != nil {
b.Errorf("sim.Run function error: %v", e)
}
}
// benchmark over, trigger the check function to conclude the simulation
if err != nil { if err != nil {
b.Fatalf("expected no error. got %v", err) b.Fatal(err)
}
return nil
})
if result.Error != nil {
b.Fatal(result.Error)
} }
}
func createTestLocalStorageFromSim(id discover.NodeID, addr *network.BzzAddr) (storage.ChunkStore, error) {
return stores[id], nil
} }
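One step of testDeliveryFromNodes above that is easy to miss is the bucket iteration that excludes the pivot node before building the round-robin FileStore. A hedged helper-style sketch of that step (the function name is illustrative; NodesItems, PivotNodeID and bucketKeyStore are all shown in the diffs):

func storesExceptPivot(sim *simulation.Simulation) []storage.ChunkStore {
	stores := make([]storage.ChunkStore, 0, len(sim.UpNodeIDs())-1)
	for id, item := range sim.NodesItems(bucketKeyStore) {
		if id == *sim.PivotNodeID() {
			continue // the pivot only retrieves; it must not hold the chunks up front
		}
		stores = append(stores, item.(storage.ChunkStore))
	}
	return stores
}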

View File

@ -22,52 +22,22 @@ import (
"encoding/binary" "encoding/binary"
"fmt" "fmt"
"io" "io"
"os"
"sync" "sync"
"testing" "testing"
"time" "time"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/node" "github.com/ethereum/go-ethereum/node"
"github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/p2p/discover" "github.com/ethereum/go-ethereum/p2p/discover"
"github.com/ethereum/go-ethereum/p2p/simulations"
"github.com/ethereum/go-ethereum/p2p/simulations/adapters" "github.com/ethereum/go-ethereum/p2p/simulations/adapters"
"github.com/ethereum/go-ethereum/rpc"
"github.com/ethereum/go-ethereum/swarm/network" "github.com/ethereum/go-ethereum/swarm/network"
streamTesting "github.com/ethereum/go-ethereum/swarm/network/stream/testing" "github.com/ethereum/go-ethereum/swarm/network/simulation"
"github.com/ethereum/go-ethereum/swarm/state" "github.com/ethereum/go-ethereum/swarm/state"
"github.com/ethereum/go-ethereum/swarm/storage" "github.com/ethereum/go-ethereum/swarm/storage"
) )
var (
externalStreamName = "externalStream"
externalStreamSessionAt uint64 = 50
externalStreamMaxKeys uint64 = 100
)
func newIntervalsStreamerService(ctx *adapters.ServiceContext) (node.Service, error) {
id := ctx.Config.ID
addr := toAddr(id)
kad := network.NewKademlia(addr.Over(), network.NewKadParams())
store := stores[id].(*storage.LocalStore)
db := storage.NewDBAPI(store)
delivery := NewDelivery(kad, db)
deliveries[id] = delivery
r := NewRegistry(addr, delivery, db, state.NewInmemoryStore(), &RegistryOptions{
SkipCheck: defaultSkipCheck,
})
r.RegisterClientFunc(externalStreamName, func(p *Peer, t string, live bool) (Client, error) {
return newTestExternalClient(db), nil
})
r.RegisterServerFunc(externalStreamName, func(p *Peer, t string, live bool) (Server, error) {
return newTestExternalServer(t, externalStreamSessionAt, externalStreamMaxKeys, nil), nil
})
go func() {
waitPeerErrC <- waitForPeers(r, 1*time.Second, peerCount(id))
}()
return &TestExternalRegistry{r}, nil
}
func TestIntervals(t *testing.T) { func TestIntervals(t *testing.T) {
testIntervals(t, true, nil, false) testIntervals(t, true, nil, false)
testIntervals(t, false, NewRange(9, 26), false) testIntervals(t, false, NewRange(9, 26), false)
@ -81,95 +51,112 @@ func TestIntervals(t *testing.T) {
func testIntervals(t *testing.T, live bool, history *Range, skipCheck bool) { func testIntervals(t *testing.T, live bool, history *Range, skipCheck bool) {
nodes := 2 nodes := 2
chunkCount := dataChunkCount chunkCount := dataChunkCount
externalStreamName := "externalStream"
externalStreamSessionAt := uint64(50)
externalStreamMaxKeys := uint64(100)
defer setDefaultSkipCheck(defaultSkipCheck) sim := simulation.New(map[string]simulation.ServiceFunc{
defaultSkipCheck = skipCheck "intervalsStreamer": func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) {
toAddr = network.NewAddrFromNodeID id := ctx.Config.ID
conf := &streamTesting.RunConfig{ addr := network.NewAddrFromNodeID(id)
Adapter: *adapter, store, datadir, err := createTestLocalStorageForID(id, addr)
NodeCount: nodes, if err != nil {
ConnLevel: 1, return nil, nil, err
ToAddr: toAddr,
Services: services,
DefaultService: "intervalsStreamer",
} }
bucket.Store(bucketKeyStore, store)
cleanup = func() {
store.Close()
os.RemoveAll(datadir)
}
localStore := store.(*storage.LocalStore)
db := storage.NewDBAPI(localStore)
kad := network.NewKademlia(addr.Over(), network.NewKadParams())
delivery := NewDelivery(kad, db)
sim, teardown, err := streamTesting.NewSimulation(conf) r := NewRegistry(addr, delivery, db, state.NewInmemoryStore(), &RegistryOptions{
var rpcSubscriptionsWg sync.WaitGroup SkipCheck: skipCheck,
defer func() { })
rpcSubscriptionsWg.Wait() bucket.Store(bucketKeyRegistry, r)
teardown()
}() r.RegisterClientFunc(externalStreamName, func(p *Peer, t string, live bool) (Client, error) {
return newTestExternalClient(db), nil
})
r.RegisterServerFunc(externalStreamName, func(p *Peer, t string, live bool) (Server, error) {
return newTestExternalServer(t, externalStreamSessionAt, externalStreamMaxKeys, nil), nil
})
fileStore := storage.NewFileStore(localStore, storage.NewFileStoreParams())
bucket.Store(bucketKeyFileStore, fileStore)
return r, cleanup, nil
},
})
defer sim.Close()
log.Info("Adding nodes to simulation")
_, err := sim.AddNodesAndConnectChain(nodes)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
stores = make(map[discover.NodeID]storage.ChunkStore) ctx, cancel := context.WithTimeout(context.Background(), 100*time.Second)
deliveries = make(map[discover.NodeID]*Delivery) defer cancel()
for i, id := range sim.IDs {
stores[id] = sim.Stores[i]
}
peerCount = func(id discover.NodeID) int { result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) error {
return 1 nodeIDs := sim.UpNodeIDs()
} storer := nodeIDs[0]
checker := nodeIDs[1]
item, ok := sim.NodeItem(storer, bucketKeyFileStore)
if !ok {
return fmt.Errorf("No filestore")
}
fileStore := item.(*storage.FileStore)
fileStore := storage.NewFileStore(sim.Stores[0], storage.NewFileStoreParams())
size := chunkCount * chunkSize size := chunkCount * chunkSize
ctx := context.TODO()
_, wait, err := fileStore.Store(ctx, io.LimitReader(crand.Reader, int64(size)), int64(size), false) _, wait, err := fileStore.Store(ctx, io.LimitReader(crand.Reader, int64(size)), int64(size), false)
if err != nil { if err != nil {
log.Error("Store error: %v", "err", err)
t.Fatal(err) t.Fatal(err)
} }
err = wait(ctx) err = wait(ctx)
if err != nil { if err != nil {
log.Error("Wait error: %v", "err", err)
t.Fatal(err) t.Fatal(err)
} }
errc := make(chan error, 1) item, ok = sim.NodeItem(checker, bucketKeyRegistry)
waitPeerErrC = make(chan error) if !ok {
quitC := make(chan struct{}) return fmt.Errorf("No registry")
defer close(quitC)
action := func(ctx context.Context) error {
i := 0
for err := range waitPeerErrC {
if err != nil {
return fmt.Errorf("error waiting for peers: %s", err)
}
i++
if i == nodes {
break
}
}
id := sim.IDs[1]
err := sim.CallClient(id, func(client *rpc.Client) error {
sid := sim.IDs[0]
doneC, err := streamTesting.WatchDisconnections(id, client, errc, quitC)
if err != nil {
return err
}
rpcSubscriptionsWg.Add(1)
go func() {
<-doneC
rpcSubscriptionsWg.Done()
}()
ctx, cancel := context.WithTimeout(ctx, 100*time.Second)
defer cancel()
err = client.CallContext(ctx, nil, "stream_subscribeStream", sid, NewStream(externalStreamName, "", live), history, Top)
if err != nil {
return err
} }
registry := item.(*Registry)
liveErrC := make(chan error) liveErrC := make(chan error)
historyErrC := make(chan error) historyErrC := make(chan error)
if _, err := sim.WaitTillHealthy(ctx, 2); err != nil {
log.Error("WaitKademlia error: %v", "err", err)
return err
}
log.Debug("Watching for disconnections")
disconnections := sim.PeerEvents(
context.Background(),
sim.NodeIDs(),
simulation.NewPeerEventsFilter().Type(p2p.PeerEventTypeDrop),
)
go func() {
for d := range disconnections {
if d.Error != nil {
log.Error("peer drop", "node", d.NodeID, "peer", d.Event.Peer)
t.Fatal(d.Error)
}
}
}()
go func() { go func() {
if !live { if !live {
close(liveErrC) close(liveErrC)
@ -182,17 +169,16 @@ func testIntervals(t *testing.T, live bool, history *Range, skipCheck bool) {
}() }()
// live stream // live stream
liveHashesChan := make(chan []byte) var liveHashesChan chan []byte
liveSubscription, err := client.Subscribe(ctx, "stream", liveHashesChan, "getHashes", sid, NewStream(externalStreamName, "", true)) liveHashesChan, err = getHashes(ctx, registry, storer, NewStream(externalStreamName, "", true))
if err != nil { if err != nil {
log.Error("Subscription error: %v", "err", err)
return return
} }
defer liveSubscription.Unsubscribe()
i := externalStreamSessionAt i := externalStreamSessionAt
// we have subscribed, enable notifications // we have subscribed, enable notifications
err = client.CallContext(ctx, nil, "stream_enableNotifications", sid, NewStream(externalStreamName, "", true)) err = enableNotifications(registry, storer, NewStream(externalStreamName, "", true))
if err != nil { if err != nil {
return return
} }
@ -209,8 +195,6 @@ func testIntervals(t *testing.T, live bool, history *Range, skipCheck bool) {
if i > externalStreamMaxKeys { if i > externalStreamMaxKeys {
return return
} }
case err = <-liveSubscription.Err():
return
case <-ctx.Done(): case <-ctx.Done():
return return
} }
@ -229,12 +213,11 @@ func testIntervals(t *testing.T, live bool, history *Range, skipCheck bool) {
}() }()
// history stream // history stream
historyHashesChan := make(chan []byte) var historyHashesChan chan []byte
historySubscription, err := client.Subscribe(ctx, "stream", historyHashesChan, "getHashes", sid, NewStream(externalStreamName, "", false)) historyHashesChan, err = getHashes(ctx, registry, storer, NewStream(externalStreamName, "", false))
if err != nil { if err != nil {
return return
} }
defer historySubscription.Unsubscribe()
var i uint64 var i uint64
historyTo := externalStreamMaxKeys historyTo := externalStreamMaxKeys
@ -246,7 +229,7 @@ func testIntervals(t *testing.T, live bool, history *Range, skipCheck bool) {
} }
// we have subscribed, enable notifications // we have subscribed, enable notifications
err = client.CallContext(ctx, nil, "stream_enableNotifications", sid, NewStream(externalStreamName, "", false)) err = enableNotifications(registry, storer, NewStream(externalStreamName, "", false))
if err != nil { if err != nil {
return return
} }
@ -263,14 +246,16 @@ func testIntervals(t *testing.T, live bool, history *Range, skipCheck bool) {
if i > historyTo { if i > historyTo {
return return
} }
case err = <-historySubscription.Err():
return
case <-ctx.Done(): case <-ctx.Done():
return return
} }
} }
}() }()
err = registry.Subscribe(storer, NewStream(externalStreamName, "", live), history, Top)
if err != nil {
return err
}
if err := <-liveErrC; err != nil { if err := <-liveErrC; err != nil {
return err return err
} }
@ -280,38 +265,123 @@ func testIntervals(t *testing.T, live bool, history *Range, skipCheck bool) {
return nil return nil
}) })
return err
if result.Error != nil {
t.Fatal(result.Error)
} }
check := func(ctx context.Context, id discover.NodeID) (bool, error) { }
select {
case err := <-errc: func getHashes(ctx context.Context, r *Registry, peerID discover.NodeID, s Stream) (chan []byte, error) {
return false, err peer := r.getPeer(peerID)
case <-ctx.Done():
return false, ctx.Err() client, err := peer.getClient(ctx, s)
default: if err != nil {
} return nil, err
return true, nil
} }
conf.Step = &simulations.Step{ c := client.Client.(*testExternalClient)
Action: action,
Trigger: streamTesting.Trigger(10*time.Millisecond, quitC, sim.IDs[0]), return c.hashes, nil
Expect: &simulations.Expectation{
Nodes: sim.IDs[1:1],
Check: check,
},
}
startedAt := time.Now()
timeout := 300 * time.Second
ctx, cancel := context.WithTimeout(context.Background(), timeout)
defer cancel()
result, err := sim.Run(ctx, conf)
finishedAt := time.Now()
if err != nil {
t.Fatalf("Setting up simulation failed: %v", err)
}
if result.Error != nil {
t.Fatalf("Simulation failed: %s", result.Error)
}
streamTesting.CheckResult(t, result, startedAt, finishedAt)
} }
func enableNotifications(r *Registry, peerID discover.NodeID, s Stream) error {
peer := r.getPeer(peerID)
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
client, err := peer.getClient(ctx, s)
if err != nil {
return err
}
close(client.Client.(*testExternalClient).enableNotificationsC)
return nil
}
type testExternalClient struct {
hashes chan []byte
db *storage.DBAPI
enableNotificationsC chan struct{}
}
func newTestExternalClient(db *storage.DBAPI) *testExternalClient {
return &testExternalClient{
hashes: make(chan []byte),
db: db,
enableNotificationsC: make(chan struct{}),
}
}
func (c *testExternalClient) NeedData(ctx context.Context, hash []byte) func() {
chunk, _ := c.db.GetOrCreateRequest(ctx, hash)
if chunk.ReqC == nil {
return nil
}
c.hashes <- hash
//NOTE: This was failing on go1.9.x with a deadlock.
//Sometimes this function would just block
//It is commented now, but it may be well worth after the chunk refactor
//to re-enable this and see if the problem has been addressed
/*
return func() {
return chunk.WaitToStore()
}
*/
return nil
}
func (c *testExternalClient) BatchDone(Stream, uint64, []byte, []byte) func() (*TakeoverProof, error) {
return nil
}
func (c *testExternalClient) Close() {}
const testExternalServerBatchSize = 10
type testExternalServer struct {
t string
keyFunc func(key []byte, index uint64)
sessionAt uint64
maxKeys uint64
}
func newTestExternalServer(t string, sessionAt, maxKeys uint64, keyFunc func(key []byte, index uint64)) *testExternalServer {
if keyFunc == nil {
keyFunc = binary.BigEndian.PutUint64
}
return &testExternalServer{
t: t,
keyFunc: keyFunc,
sessionAt: sessionAt,
maxKeys: maxKeys,
}
}
func (s *testExternalServer) SetNextBatch(from uint64, to uint64) ([]byte, uint64, uint64, *HandoverProof, error) {
if from == 0 && to == 0 {
from = s.sessionAt
to = s.sessionAt + testExternalServerBatchSize
}
if to-from > testExternalServerBatchSize {
to = from + testExternalServerBatchSize - 1
}
if from >= s.maxKeys && to > s.maxKeys {
return nil, 0, 0, nil, io.EOF
}
if to > s.maxKeys {
to = s.maxKeys
}
b := make([]byte, HashSize*(to-from+1))
for i := from; i <= to; i++ {
s.keyFunc(b[(i-from)*HashSize:(i-from+1)*HashSize], i)
}
return b, from, to, nil, nil
}
func (s *testExternalServer) GetData(context.Context, []byte) ([]byte, error) {
return make([]byte, 4096), nil
}
func (s *testExternalServer) Close() {}
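To make the batching in testExternalServer.SetNextBatch concrete: with sessionAt=50, maxKeys=100 and testExternalServerBatchSize=10, a first call with from=0, to=0 is rewritten to the session window and yields keys 50 through 60; later windows advance in batches of at most 10 until maxKeys, after which io.EOF ends the stream. A small hedged check of that first window (the test name is invented for illustration):

func TestExternalServerFirstBatchSketch(t *testing.T) {
	s := newTestExternalServer("t", 50, 100, nil)
	hashes, from, to, _, err := s.SetNextBatch(0, 0)
	if err != nil {
		t.Fatal(err)
	}
	if from != 50 || to != 60 || len(hashes) != HashSize*11 {
		t.Fatalf("unexpected first batch: from=%d to=%d len=%d", from, to, len(hashes))
	}
}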

View File

@ -17,20 +17,19 @@ package stream
import ( import (
"context" "context"
crand "crypto/rand"
"fmt" "fmt"
"math/rand" "os"
"strings"
"sync" "sync"
"testing" "testing"
"time" "time"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/node"
"github.com/ethereum/go-ethereum/p2p/discover" "github.com/ethereum/go-ethereum/p2p/discover"
"github.com/ethereum/go-ethereum/p2p/simulations" "github.com/ethereum/go-ethereum/p2p/simulations/adapters"
"github.com/ethereum/go-ethereum/swarm/log" "github.com/ethereum/go-ethereum/swarm/log"
"github.com/ethereum/go-ethereum/swarm/network" "github.com/ethereum/go-ethereum/swarm/network"
streamTesting "github.com/ethereum/go-ethereum/swarm/network/stream/testing" "github.com/ethereum/go-ethereum/swarm/network/simulation"
"github.com/ethereum/go-ethereum/swarm/state"
"github.com/ethereum/go-ethereum/swarm/storage" "github.com/ethereum/go-ethereum/swarm/storage"
) )
@ -40,40 +39,6 @@ const (
maxFileSize = 40 maxFileSize = 40
) )
func initRetrievalTest() {
//global func to get overlay address from discover ID
toAddr = func(id discover.NodeID) *network.BzzAddr {
addr := network.NewAddrFromNodeID(id)
return addr
}
//global func to create local store
createStoreFunc = createTestLocalStorageForId
//local stores
stores = make(map[discover.NodeID]storage.ChunkStore)
//data directories for each node and store
datadirs = make(map[discover.NodeID]string)
//deliveries for each node
deliveries = make(map[discover.NodeID]*Delivery)
//global retrieve func
getRetrieveFunc = func(id discover.NodeID) func(ctx context.Context, chunk *storage.Chunk) error {
return func(ctx context.Context, chunk *storage.Chunk) error {
skipCheck := true
return deliveries[id].RequestFromPeers(ctx, chunk.Addr[:], skipCheck)
}
}
//registries, map of discover.NodeID to its streamer
registries = make(map[discover.NodeID]*TestRegistry)
//not needed for this test but required from common_test for NewStreamService
waitPeerErrC = make(chan error)
//also not needed for this test but required for NewStreamService
peerCount = func(id discover.NodeID) int {
if ids[0] == id || ids[len(ids)-1] == id {
return 1
}
return 2
}
}
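
// getRetrieveFunc above is a closure factory: for every node ID it hands back
// a retrieve function already bound to that node's Delivery. A tiny,
// self-contained sketch of that shape (deliver is a hypothetical stand-in for
// Delivery.RequestFromPeers):
package main

import "fmt"

func makeRetrieveFunc(id string, deliver func(id, chunk string) error) func(chunk string) error {
	return func(chunk string) error {
		// the returned closure carries the node id with it
		return deliver(id, chunk)
	}
}

func main() {
	retrieve := makeRetrieveFunc("node-1", func(id, chunk string) error {
		fmt.Printf("requesting chunk %s from peers of %s\n", chunk, id)
		return nil
	})
	_ = retrieve("chunk-abc")
}
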
//This test is a retrieval test for nodes. //This test is a retrieval test for nodes.
//A configurable number of nodes can be //A configurable number of nodes can be
//provided to the test. //provided to the test.
@ -81,7 +46,10 @@ func initRetrievalTest() {
//Number of nodes can be provided via commandline too. //Number of nodes can be provided via commandline too.
func TestFileRetrieval(t *testing.T) { func TestFileRetrieval(t *testing.T) {
if *nodes != 0 { if *nodes != 0 {
fileRetrievalTest(t, *nodes) err := runFileRetrievalTest(*nodes)
if err != nil {
t.Fatal(err)
}
} else { } else {
nodeCnt := []int{16} nodeCnt := []int{16}
//if the `longrunning` flag has been provided //if the `longrunning` flag has been provided
@ -90,7 +58,10 @@ func TestFileRetrieval(t *testing.T) {
nodeCnt = append(nodeCnt, 32, 64, 128) nodeCnt = append(nodeCnt, 32, 64, 128)
} }
for _, n := range nodeCnt { for _, n := range nodeCnt {
fileRetrievalTest(t, n) err := runFileRetrievalTest(n)
if err != nil {
t.Fatal(err)
}
} }
} }
} }
@ -105,7 +76,10 @@ func TestRetrieval(t *testing.T) {
//if nodes/chunks have been provided via commandline, //if nodes/chunks have been provided via commandline,
//run the tests with these values //run the tests with these values
if *nodes != 0 && *chunks != 0 { if *nodes != 0 && *chunks != 0 {
retrievalTest(t, *chunks, *nodes) err := runRetrievalTest(*chunks, *nodes)
if err != nil {
t.Fatal(err)
}
} else { } else {
var nodeCnt []int var nodeCnt []int
var chnkCnt []int var chnkCnt []int
@ -121,76 +95,17 @@ func TestRetrieval(t *testing.T) {
} }
for _, n := range nodeCnt { for _, n := range nodeCnt {
for _, c := range chnkCnt { for _, c := range chnkCnt {
retrievalTest(t, c, n) err := runRetrievalTest(c, n)
}
}
}
}
//Every test runs 3 times, a live, a history, and a live AND history
func fileRetrievalTest(t *testing.T, nodeCount int) {
//test live and NO history
log.Info("Testing live and no history", "nodeCount", nodeCount)
live = true
history = false
err := runFileRetrievalTest(nodeCount)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
//test history only
log.Info("Testing history only", "nodeCount", nodeCount)
live = false
history = true
err = runFileRetrievalTest(nodeCount)
if err != nil {
t.Fatal(err)
} }
//finally test live and history
log.Info("Testing live and history", "nodeCount", nodeCount)
live = true
err = runFileRetrievalTest(nodeCount)
if err != nil {
t.Fatal(err)
} }
}
//Every test runs 3 times, a live, a history, and a live AND history
func retrievalTest(t *testing.T, chunkCount int, nodeCount int) {
//test live and NO history
log.Info("Testing live and no history", "chunkCount", chunkCount, "nodeCount", nodeCount)
live = true
history = false
err := runRetrievalTest(chunkCount, nodeCount)
if err != nil {
t.Fatal(err)
}
//test history only
log.Info("Testing history only", "chunkCount", chunkCount, "nodeCount", nodeCount)
live = false
history = true
err = runRetrievalTest(chunkCount, nodeCount)
if err != nil {
t.Fatal(err)
}
//finally test live and history
log.Info("Testing live and history", "chunkCount", chunkCount, "nodeCount", nodeCount)
live = true
err = runRetrievalTest(chunkCount, nodeCount)
if err != nil {
t.Fatal(err)
} }
} }
/* /*
The upload depends on the global
`live` and `history` variables;
If `live` is set, first stream subscriptions are established,
then files are uploaded to nodes.
If `history` is enabled, first upload files, then build up subscriptions.
The test loads a snapshot file to construct the swarm network, The test loads a snapshot file to construct the swarm network,
assuming that the snapshot file identifies a healthy assuming that the snapshot file identifies a healthy
kademlia network. Nevertheless a health check runs in the kademlia network. Nevertheless a health check runs in the
@ -199,261 +114,129 @@ simulation's `action` function.
The snapshot should have 'streamer' in its service list. The snapshot should have 'streamer' in its service list.
*/ */
func runFileRetrievalTest(nodeCount int) error { func runFileRetrievalTest(nodeCount int) error {
//for every run (live, history), int the variables sim := simulation.New(map[string]simulation.ServiceFunc{
initRetrievalTest() "streamer": func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) {
//the ids of the snapshot nodes, initiate only now as we need nodeCount
ids = make([]discover.NodeID, nodeCount) id := ctx.Config.ID
//channel to check for disconnection errors addr := network.NewAddrFromNodeID(id)
disconnectC := make(chan error) store, datadir, err := createTestLocalStorageForID(id, addr)
//channel to close disconnection watcher routine if err != nil {
quitC := make(chan struct{}) return nil, nil, err
	//the test conf (using the same as in `snapshot_sync_test`) }
conf = &synctestConfig{} bucket.Store(bucketKeyStore, store)
cleanup = func() {
os.RemoveAll(datadir)
store.Close()
}
localStore := store.(*storage.LocalStore)
db := storage.NewDBAPI(localStore)
kad := network.NewKademlia(addr.Over(), network.NewKadParams())
delivery := NewDelivery(kad, db)
r := NewRegistry(addr, delivery, db, state.NewInmemoryStore(), &RegistryOptions{
DoSync: true,
SyncUpdateDelay: 3 * time.Second,
})
fileStore := storage.NewFileStore(storage.NewNetStore(localStore, nil), storage.NewFileStoreParams())
bucket.Store(bucketKeyFileStore, fileStore)
return r, cleanup, nil
},
})
defer sim.Close()
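
// The ServiceFunc above stashes per-node objects (store, file store, registry)
// in the *sync.Map bucket so the test body can fetch them later via
// sim.NodeItem. A sketch of the bucket pattern in isolation; the key type and
// value used here are illustrative only:
package main

import (
	"fmt"
	"sync"
)

type bucketKeyType string

const bucketKeyExample = bucketKeyType("filestore")

func main() {
	bucket := new(sync.Map)
	// what the ServiceFunc does at node startup
	bucket.Store(bucketKeyExample, "a *storage.FileStore would go here")
	// what the test body does when it needs the item back
	if v, ok := bucket.Load(bucketKeyExample); ok {
		fmt.Println("got:", v)
	}
}
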
log.Info("Initializing test config")
conf := &synctestConfig{}
//map of discover ID to indexes of chunks expected at that ID
conf.idToChunksMap = make(map[discover.NodeID][]int)
//map of overlay address to discover ID //map of overlay address to discover ID
conf.addrToIdMap = make(map[string]discover.NodeID) conf.addrToIDMap = make(map[string]discover.NodeID)
//array where the generated chunk hashes will be stored //array where the generated chunk hashes will be stored
conf.hashes = make([]storage.Address, 0) conf.hashes = make([]storage.Address, 0)
//load nodes from the snapshot file
net, err := initNetWithSnapshot(nodeCount) err := sim.UploadSnapshot(fmt.Sprintf("testing/snapshot_%d.json", nodeCount))
if err != nil { if err != nil {
return err return err
} }
var rpcSubscriptionsWg sync.WaitGroup
//do cleanup after test is terminated ctx, cancelSimRun := context.WithTimeout(context.Background(), 1*time.Minute)
defer func() { defer cancelSimRun()
//shutdown the snapshot network
net.Shutdown() result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) error {
//after the test, clean up local stores initialized with createLocalStoreForId nodeIDs := sim.UpNodeIDs()
localStoreCleanup() for _, n := range nodeIDs {
//finally clear all data directories //get the kademlia overlay address from this ID
datadirsCleanup() a := network.ToOverlayAddr(n.Bytes())
}()
//get the nodes of the network
nodes := net.GetNodes()
//iterate over all nodes...
for c := 0; c < len(nodes); c++ {
//create an array of discovery nodeIDS
ids[c] = nodes[c].ID()
a := network.ToOverlayAddr(ids[c].Bytes())
//append it to the array of all overlay addresses //append it to the array of all overlay addresses
conf.addrs = append(conf.addrs, a) conf.addrs = append(conf.addrs, a)
conf.addrToIdMap[string(a)] = ids[c] //the proximity calculation is on overlay addr,
//the p2p/simulations check func triggers on discover.NodeID,
//so we need to know which overlay addr maps to which nodeID
conf.addrToIDMap[string(a)] = n
} }
//needed for healthy call
ppmap = network.NewPeerPotMap(testMinProxBinSize, conf.addrs)
//an array for the random files //an array for the random files
var randomFiles []string var randomFiles []string
//channel to signal when the upload has finished //channel to signal when the upload has finished
uploadFinished := make(chan struct{}) //uploadFinished := make(chan struct{})
//channel to trigger new node checks //channel to trigger new node checks
trigger := make(chan discover.NodeID)
//simulation action
action := func(ctx context.Context) error {
//first run the health check on all nodes,
//wait until nodes are all healthy
ticker := time.NewTicker(200 * time.Millisecond)
defer ticker.Stop()
for range ticker.C {
healthy := true
for _, id := range ids {
r := registries[id]
//PeerPot for this node
addr := common.Bytes2Hex(r.addr.OAddr)
pp := ppmap[addr]
//call Healthy RPC
h := r.delivery.overlay.Healthy(pp)
//print info
log.Debug(r.delivery.overlay.String())
log.Debug(fmt.Sprintf("IS HEALTHY: %t", h.GotNN && h.KnowNN && h.Full))
if !h.GotNN || !h.Full {
healthy = false
break
}
}
if healthy {
break
}
}
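
// The removed action func above polls every node's Healthy() result on a
// ticker until all nodes report healthy; the new code delegates this to
// sim.WaitTillHealthy. A self-contained sketch of that polling pattern, with
// checkHealthy as a hypothetical stand-in for the per-node health call:
package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

func waitAllHealthy(ctx context.Context, nodes []string, checkHealthy func(string) bool) error {
	ticker := time.NewTicker(200 * time.Millisecond)
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			return errors.New("timed out waiting for a healthy kademlia")
		case <-ticker.C:
			healthy := true
			for _, n := range nodes {
				if !checkHealthy(n) {
					healthy = false
					break
				}
			}
			if healthy {
				return nil
			}
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()
	up := map[string]bool{"a": true, "b": true}
	err := waitAllHealthy(ctx, []string{"a", "b"}, func(n string) bool { return up[n] })
	fmt.Println("all healthy:", err == nil)
}
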
if history { conf.hashes, randomFiles, err = uploadFilesToNodes(sim)
log.Info("Uploading for history")
//If testing only history, we upload the chunk(s) first
conf.hashes, randomFiles, err = uploadFilesToNodes(nodes)
if err != nil { if err != nil {
return err return err
} }
} if _, err := sim.WaitTillHealthy(ctx, 2); err != nil {
//variables needed to wait for all subscriptions established before uploading
errc := make(chan error)
//now setup and start event watching in order to know when we can upload
ctx, watchCancel := context.WithTimeout(context.Background(), MaxTimeout*time.Second)
defer watchCancel()
log.Info("Setting up stream subscription")
//We need two iterations, one to subscribe to the subscription events
//(so we know when setup phase is finished), and one to
//actually run the stream subscriptions. We can't do it in the same iteration,
//because while the first nodes in the loop are setting up subscriptions,
//the latter ones have not subscribed to listen to peer events yet,
//and then we miss events.
//first iteration: setup disconnection watcher and subscribe to peer events
for j, id := range ids {
log.Trace(fmt.Sprintf("Subscribe to subscription events: %d", j))
client, err := net.GetNode(id).Client()
if err != nil {
return err return err
} }
wsDoneC := watchSubscriptionEvents(ctx, id, client, errc, quitC)
// doneC is nil, the error happened which is sent to errc channel, already
if wsDoneC == nil {
continue
}
rpcSubscriptionsWg.Add(1)
go func() {
<-wsDoneC
rpcSubscriptionsWg.Done()
}()
//watch for peers disconnecting
wdDoneC, err := streamTesting.WatchDisconnections(id, client, disconnectC, quitC)
if err != nil {
return err
}
rpcSubscriptionsWg.Add(1)
go func() {
<-wdDoneC
rpcSubscriptionsWg.Done()
}()
}
//second iteration: start syncing and setup stream subscriptions
for j, id := range ids {
log.Trace(fmt.Sprintf("Start syncing and stream subscriptions: %d", j))
client, err := net.GetNode(id).Client()
if err != nil {
return err
}
//start syncing!
var cnt int
err = client.CallContext(ctx, &cnt, "stream_startSyncing")
if err != nil {
return err
}
//increment the number of subscriptions we need to wait for
//by the count returned from startSyncing (SYNC subscriptions)
subscriptionCount += cnt
//now also add the number of RETRIEVAL_REQUEST subscriptions
for snid := range registries[id].peers {
subscriptionCount++
err = client.CallContext(ctx, nil, "stream_subscribeStream", snid, NewStream(swarmChunkServerStreamName, "", false), nil, Top)
if err != nil {
return err
}
}
}
//now wait until the number of expected subscriptions has been finished
//`watchSubscriptionEvents` will write with a `nil` value to errc
//every time a `SubscriptionMsg` has been received
for err := range errc {
if err != nil {
return err
}
//`nil` received, decrement count
subscriptionCount--
//all subscriptions received
if subscriptionCount == 0 {
break
}
}
log.Info("Stream subscriptions successfully requested, action terminated")
if live {
//upload generated files to nodes
var hashes []storage.Address
var rfiles []string
hashes, rfiles, err = uploadFilesToNodes(nodes)
if err != nil {
return err
}
conf.hashes = append(conf.hashes, hashes...)
randomFiles = append(randomFiles, rfiles...)
//signal to the trigger loop that the upload has finished
uploadFinished <- struct{}{}
}
return nil
}
//check defines what will be checked during the test
check := func(ctx context.Context, id discover.NodeID) (bool, error) {
select {
case <-ctx.Done():
return false, ctx.Err()
case e := <-disconnectC:
log.Error(e.Error())
return false, fmt.Errorf("Disconnect event detected, network unhealthy")
default:
}
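
// The select with an empty default above is a non-blocking poll of the
// context and the disconnect channel: fail the check if a disconnect error
// has already arrived, otherwise carry on. The idiom on its own:
package main

import (
	"errors"
	"fmt"
)

func networkStillHealthy(disconnectC <-chan error) error {
	select {
	case err := <-disconnectC:
		return fmt.Errorf("disconnect event detected, network unhealthy: %v", err)
	default:
		return nil
	}
}

func main() {
	disconnectC := make(chan error, 1)
	fmt.Println("healthy:", networkStillHealthy(disconnectC) == nil)
	disconnectC <- errors.New("peer dropped")
	fmt.Println("healthy:", networkStillHealthy(disconnectC) == nil)
}
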
log.Trace(fmt.Sprintf("Checking node: %s", id))
		//if there is more than one chunk, the test only succeeds if all expected chunks are found
allSuccess := true
// File retrieval check is repeated until all uploaded files are retrieved from all nodes
// or until the timeout is reached.
allSuccess := false
for !allSuccess {
for _, id := range nodeIDs {
//for each expected chunk, check if it is in the local store
localChunks := conf.idToChunksMap[id]
localSuccess := true
for _, ch := range localChunks {
//get the real chunk by the index in the index array
chunk := conf.hashes[ch]
log.Trace(fmt.Sprintf("node has chunk: %s:", chunk))
//check if the expected chunk is indeed in the localstore
var err error
//check on the node's FileStore (netstore) //check on the node's FileStore (netstore)
fileStore := registries[id].fileStore item, ok := sim.NodeItem(id, bucketKeyFileStore)
if !ok {
return fmt.Errorf("No registry")
}
fileStore := item.(*storage.FileStore)
//check all chunks //check all chunks
for i, hash := range conf.hashes { for i, hash := range conf.hashes {
reader, _ := fileStore.Retrieve(context.TODO(), hash) reader, _ := fileStore.Retrieve(context.TODO(), hash)
//check that we can read the file size and that it corresponds to the generated file size //check that we can read the file size and that it corresponds to the generated file size
if s, err := reader.Size(context.TODO(), nil); err != nil || s != int64(len(randomFiles[i])) { if s, err := reader.Size(ctx, nil); err != nil || s != int64(len(randomFiles[i])) {
allSuccess = false allSuccess = false
log.Warn("Retrieve error", "err", err, "hash", hash, "nodeId", id) log.Warn("Retrieve error", "err", err, "hash", hash, "nodeId", id)
} else { } else {
log.Debug(fmt.Sprintf("File with root hash %x successfully retrieved", hash)) log.Debug(fmt.Sprintf("File with root hash %x successfully retrieved", hash))
} }
} }
if err != nil {
return allSuccess, nil log.Warn(fmt.Sprintf("Chunk %s NOT found for id %s", chunk, id))
} localSuccess = false
} else {
//for each tick, run the checks on all nodes log.Debug(fmt.Sprintf("Chunk %s IS FOUND for id %s", chunk, id))
timingTicker := time.NewTicker(5 * time.Second)
defer timingTicker.Stop()
go func() {
//for live upload, we should wait for uploads to have finished
//before starting to trigger the checks, due to file size
if live {
<-uploadFinished
}
for range timingTicker.C {
for i := 0; i < len(ids); i++ {
log.Trace(fmt.Sprintf("triggering step %d, id %s", i, ids[i]))
trigger <- ids[i]
} }
} }
}() allSuccess = localSuccess
}
log.Info("Starting simulation run...") }
if !allSuccess {
timeout := MaxTimeout * time.Second return fmt.Errorf("Not all chunks succeeded!")
ctx, cancel := context.WithTimeout(context.Background(), timeout) }
defer cancel() return nil
//run the simulation
result := simulations.NewSimulation(net).Run(ctx, &simulations.Step{
Action: action,
Trigger: trigger,
Expect: &simulations.Expectation{
Nodes: ids,
Check: check,
},
}) })
if result.Error != nil { if result.Error != nil {
@ -466,14 +249,6 @@ func runFileRetrievalTest(nodeCount int) error {
/* /*
The test generates the given number of chunks. The test generates the given number of chunks.
The upload depends on the global
`live` and `history` variables;
If `live` is set, first stream subscriptions are established, then
upload to a random node.
If `history` is enabled, first upload then build up subscriptions.
The test loads a snapshot file to construct the swarm network, The test loads a snapshot file to construct the swarm network,
assuming that the snapshot file identifies a healthy assuming that the snapshot file identifies a healthy
kademlia network. Nevertheless a health check runs in the kademlia network. Nevertheless a health check runs in the
@ -482,259 +257,129 @@ simulation's `action` function.
The snapshot should have 'streamer' in its service list. The snapshot should have 'streamer' in its service list.
*/ */
func runRetrievalTest(chunkCount int, nodeCount int) error { func runRetrievalTest(chunkCount int, nodeCount int) error {
//for every run (live, history), int the variables sim := simulation.New(map[string]simulation.ServiceFunc{
initRetrievalTest() "streamer": func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) {
//the ids of the snapshot nodes, initiate only now as we need nodeCount
ids = make([]discover.NodeID, nodeCount) id := ctx.Config.ID
//channel to check for disconnection errors addr := network.NewAddrFromNodeID(id)
disconnectC := make(chan error) store, datadir, err := createTestLocalStorageForID(id, addr)
//channel to close disconnection watcher routine if err != nil {
quitC := make(chan struct{}) return nil, nil, err
	//the test conf (using the same as in `snapshot_sync_test`) }
conf = &synctestConfig{} bucket.Store(bucketKeyStore, store)
cleanup = func() {
os.RemoveAll(datadir)
store.Close()
}
localStore := store.(*storage.LocalStore)
db := storage.NewDBAPI(localStore)
kad := network.NewKademlia(addr.Over(), network.NewKadParams())
delivery := NewDelivery(kad, db)
r := NewRegistry(addr, delivery, db, state.NewInmemoryStore(), &RegistryOptions{
DoSync: true,
SyncUpdateDelay: 0,
})
fileStore := storage.NewFileStore(storage.NewNetStore(localStore, nil), storage.NewFileStoreParams())
bucketKeyFileStore = simulation.BucketKey("filestore")
bucket.Store(bucketKeyFileStore, fileStore)
return r, cleanup, nil
},
})
defer sim.Close()
conf := &synctestConfig{}
//map of discover ID to indexes of chunks expected at that ID
conf.idToChunksMap = make(map[discover.NodeID][]int)
//map of overlay address to discover ID //map of overlay address to discover ID
conf.addrToIdMap = make(map[string]discover.NodeID) conf.addrToIDMap = make(map[string]discover.NodeID)
//array where the generated chunk hashes will be stored //array where the generated chunk hashes will be stored
conf.hashes = make([]storage.Address, 0) conf.hashes = make([]storage.Address, 0)
//load nodes from the snapshot file
net, err := initNetWithSnapshot(nodeCount) err := sim.UploadSnapshot(fmt.Sprintf("testing/snapshot_%d.json", nodeCount))
if err != nil { if err != nil {
return err return err
} }
var rpcSubscriptionsWg sync.WaitGroup
//do cleanup after test is terminated ctx := context.Background()
defer func() { result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) error {
//shutdown the snapshot network nodeIDs := sim.UpNodeIDs()
net.Shutdown() for _, n := range nodeIDs {
//after the test, clean up local stores initialized with createLocalStoreForId //get the kademlia overlay address from this ID
localStoreCleanup() a := network.ToOverlayAddr(n.Bytes())
//finally clear all data directories
datadirsCleanup()
}()
//get the nodes of the network
nodes := net.GetNodes()
//select one index at random...
idx := rand.Intn(len(nodes))
	//...and get the node at that index
//this is the node selected for upload
uploadNode := nodes[idx]
//iterate over all nodes...
for c := 0; c < len(nodes); c++ {
//create an array of discovery nodeIDS
ids[c] = nodes[c].ID()
a := network.ToOverlayAddr(ids[c].Bytes())
//append it to the array of all overlay addresses //append it to the array of all overlay addresses
conf.addrs = append(conf.addrs, a) conf.addrs = append(conf.addrs, a)
conf.addrToIdMap[string(a)] = ids[c] //the proximity calculation is on overlay addr,
//the p2p/simulations check func triggers on discover.NodeID,
//so we need to know which overlay addr maps to which nodeID
conf.addrToIDMap[string(a)] = n
} }
//needed for healthy call //an array for the random files
ppmap = network.NewPeerPotMap(testMinProxBinSize, conf.addrs) var randomFiles []string
//this is the node selected for upload
trigger := make(chan discover.NodeID) node := sim.RandomUpNode()
//simulation action item, ok := sim.NodeItem(node.ID, bucketKeyStore)
action := func(ctx context.Context) error { if !ok {
//first run the health check on all nodes, return fmt.Errorf("No localstore")
//wait until nodes are all healthy
ticker := time.NewTicker(200 * time.Millisecond)
defer ticker.Stop()
for range ticker.C {
healthy := true
for _, id := range ids {
r := registries[id]
//PeerPot for this node
addr := common.Bytes2Hex(network.ToOverlayAddr(id.Bytes()))
pp := ppmap[addr]
//call Healthy RPC
h := r.delivery.overlay.Healthy(pp)
//print info
log.Debug(r.delivery.overlay.String())
log.Debug(fmt.Sprintf("IS HEALTHY: %t", h.GotNN && h.KnowNN && h.Full))
if !h.GotNN || !h.Full {
healthy = false
break
} }
} lstore := item.(*storage.LocalStore)
if healthy { conf.hashes, err = uploadFileToSingleNodeStore(node.ID, chunkCount, lstore)
break
}
}
if history {
log.Info("Uploading for history")
//If testing only history, we upload the chunk(s) first
conf.hashes, err = uploadFileToSingleNodeStore(uploadNode.ID(), chunkCount)
if err != nil { if err != nil {
return err return err
} }
} if _, err := sim.WaitTillHealthy(ctx, 2); err != nil {
//variables needed to wait for all subscriptions established before uploading
errc := make(chan error)
//now setup and start event watching in order to know when we can upload
ctx, watchCancel := context.WithTimeout(context.Background(), MaxTimeout*time.Second)
defer watchCancel()
log.Info("Setting up stream subscription")
//We need two iterations, one to subscribe to the subscription events
//(so we know when setup phase is finished), and one to
//actually run the stream subscriptions. We can't do it in the same iteration,
//because while the first nodes in the loop are setting up subscriptions,
//the latter ones have not subscribed to listen to peer events yet,
//and then we miss events.
//first iteration: setup disconnection watcher and subscribe to peer events
for j, id := range ids {
log.Trace(fmt.Sprintf("Subscribe to subscription events: %d", j))
client, err := net.GetNode(id).Client()
if err != nil {
return err return err
} }
//check for `SubscribeMsg` events to know when setup phase is complete // File retrieval check is repeated until all uploaded files are retrieved from all nodes
wsDoneC := watchSubscriptionEvents(ctx, id, client, errc, quitC) // or until the timeout is reached.
// doneC is nil, the error happened which is sent to errc channel, already allSuccess := false
if wsDoneC == nil { for !allSuccess {
continue for _, id := range nodeIDs {
} //for each expected chunk, check if it is in the local store
rpcSubscriptionsWg.Add(1) localChunks := conf.idToChunksMap[id]
go func() { localSuccess := true
<-wsDoneC for _, ch := range localChunks {
rpcSubscriptionsWg.Done() //get the real chunk by the index in the index array
}() chunk := conf.hashes[ch]
log.Trace(fmt.Sprintf("node has chunk: %s:", chunk))
//watch for peers disconnecting //check if the expected chunk is indeed in the localstore
wdDoneC, err := streamTesting.WatchDisconnections(id, client, disconnectC, quitC) var err error
if err != nil {
return err
}
rpcSubscriptionsWg.Add(1)
go func() {
<-wdDoneC
rpcSubscriptionsWg.Done()
}()
}
//second iteration: start syncing and setup stream subscriptions
for j, id := range ids {
log.Trace(fmt.Sprintf("Start syncing and stream subscriptions: %d", j))
client, err := net.GetNode(id).Client()
if err != nil {
return err
}
//start syncing!
var cnt int
err = client.CallContext(ctx, &cnt, "stream_startSyncing")
if err != nil {
return err
}
//increment the number of subscriptions we need to wait for
//by the count returned from startSyncing (SYNC subscriptions)
subscriptionCount += cnt
//now also add the number of RETRIEVAL_REQUEST subscriptions
for snid := range registries[id].peers {
subscriptionCount++
err = client.CallContext(ctx, nil, "stream_subscribeStream", snid, NewStream(swarmChunkServerStreamName, "", false), nil, Top)
if err != nil {
return err
}
}
}
//now wait until the number of expected subscriptions has been finished
//`watchSubscriptionEvents` will write with a `nil` value to errc
//every time a `SubscriptionMsg` has been received
for err := range errc {
if err != nil {
return err
}
//`nil` received, decrement count
subscriptionCount--
//all subscriptions received
if subscriptionCount == 0 {
break
}
}
log.Info("Stream subscriptions successfully requested, action terminated")
if live {
//now upload the chunks to the selected random single node
chnks, err := uploadFileToSingleNodeStore(uploadNode.ID(), chunkCount)
if err != nil {
return err
}
conf.hashes = append(conf.hashes, chnks...)
}
return nil
}
chunkSize := storage.DefaultChunkSize
//check defines what will be checked during the test
check := func(ctx context.Context, id discover.NodeID) (bool, error) {
//don't check the uploader node
if id == uploadNode.ID() {
return true, nil
}
select {
case <-ctx.Done():
return false, ctx.Err()
case e := <-disconnectC:
log.Error(e.Error())
return false, fmt.Errorf("Disconnect event detected, network unhealthy")
default:
}
log.Trace(fmt.Sprintf("Checking node: %s", id))
		//if there is more than one chunk, the test only succeeds if all expected chunks are found
allSuccess := true
//check on the node's FileStore (netstore) //check on the node's FileStore (netstore)
fileStore := registries[id].fileStore item, ok := sim.NodeItem(id, bucketKeyFileStore)
if !ok {
return fmt.Errorf("No registry")
}
fileStore := item.(*storage.FileStore)
//check all chunks //check all chunks
for _, chnk := range conf.hashes { for i, hash := range conf.hashes {
reader, _ := fileStore.Retrieve(context.TODO(), chnk) reader, _ := fileStore.Retrieve(context.TODO(), hash)
//assuming that reading the Size of the chunk is enough to know we found it //check that we can read the file size and that it corresponds to the generated file size
if s, err := reader.Size(context.TODO(), nil); err != nil || s != chunkSize { if s, err := reader.Size(ctx, nil); err != nil || s != int64(len(randomFiles[i])) {
allSuccess = false allSuccess = false
log.Warn("Retrieve error", "err", err, "chunk", chnk, "nodeId", id) log.Warn("Retrieve error", "err", err, "hash", hash, "nodeId", id)
} else { } else {
log.Debug(fmt.Sprintf("Chunk %x found", chnk)) log.Debug(fmt.Sprintf("File with root hash %x successfully retrieved", hash))
} }
} }
return allSuccess, nil if err != nil {
} log.Warn(fmt.Sprintf("Chunk %s NOT found for id %s", chunk, id))
localSuccess = false
//for each tick, run the checks on all nodes } else {
timingTicker := time.NewTicker(5 * time.Second) log.Debug(fmt.Sprintf("Chunk %s IS FOUND for id %s", chunk, id))
defer timingTicker.Stop()
go func() {
for range timingTicker.C {
for i := 0; i < len(ids); i++ {
log.Trace(fmt.Sprintf("triggering step %d, id %s", i, ids[i]))
trigger <- ids[i]
} }
} }
}() allSuccess = localSuccess
}
log.Info("Starting simulation run...") }
if !allSuccess {
timeout := MaxTimeout * time.Second return fmt.Errorf("Not all chunks succeeded!")
ctx, cancel := context.WithTimeout(context.Background(), timeout) }
defer cancel() return nil
//run the simulation
result := simulations.NewSimulation(net).Run(ctx, &simulations.Step{
Action: action,
Trigger: trigger,
Expect: &simulations.Expectation{
Nodes: ids,
Check: check,
},
}) })
if result.Error != nil { if result.Error != nil {
@ -743,53 +388,3 @@ func runRetrievalTest(chunkCount int, nodeCount int) error {
return nil return nil
} }
//upload generated files to nodes
//every node gets one file uploaded
func uploadFilesToNodes(nodes []*simulations.Node) ([]storage.Address, []string, error) {
nodeCnt := len(nodes)
log.Debug(fmt.Sprintf("Uploading %d files to nodes", nodeCnt))
//array holding generated files
rfiles := make([]string, nodeCnt)
//array holding the root hashes of the files
rootAddrs := make([]storage.Address, nodeCnt)
var err error
//for every node, generate a file and upload
for i, n := range nodes {
id := n.ID()
fileStore := registries[id].fileStore
//generate a file
rfiles[i], err = generateRandomFile()
if err != nil {
return nil, nil, err
}
//store it (upload it) on the FileStore
ctx := context.TODO()
rk, wait, err := fileStore.Store(ctx, strings.NewReader(rfiles[i]), int64(len(rfiles[i])), false)
log.Debug("Uploaded random string file to node")
if err != nil {
return nil, nil, err
}
err = wait(ctx)
if err != nil {
return nil, nil, err
}
rootAddrs[i] = rk
}
return rootAddrs, rfiles, nil
}
//generate a random file (string)
func generateRandomFile() (string, error) {
//generate a random file size between minFileSize and maxFileSize
fileSize := rand.Intn(maxFileSize-minFileSize) + minFileSize
log.Debug(fmt.Sprintf("Generated file with filesize %d kB", fileSize))
b := make([]byte, fileSize*1024)
_, err := crand.Read(b)
if err != nil {
log.Error("Error generating random file.", "err", err)
return "", err
}
return string(b), nil
}

View File

@ -18,12 +18,8 @@ package stream
import ( import (
"context" "context"
crand "crypto/rand" crand "crypto/rand"
"encoding/json"
"flag"
"fmt" "fmt"
"io" "io"
"io/ioutil"
"math/rand"
"os" "os"
"sync" "sync"
"testing" "testing"
@ -31,82 +27,27 @@ import (
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/node"
"github.com/ethereum/go-ethereum/p2p" "github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/p2p/discover" "github.com/ethereum/go-ethereum/p2p/discover"
"github.com/ethereum/go-ethereum/p2p/simulations"
"github.com/ethereum/go-ethereum/p2p/simulations/adapters" "github.com/ethereum/go-ethereum/p2p/simulations/adapters"
"github.com/ethereum/go-ethereum/rpc"
"github.com/ethereum/go-ethereum/swarm/network" "github.com/ethereum/go-ethereum/swarm/network"
streamTesting "github.com/ethereum/go-ethereum/swarm/network/stream/testing" "github.com/ethereum/go-ethereum/swarm/network/simulation"
"github.com/ethereum/go-ethereum/swarm/pot" "github.com/ethereum/go-ethereum/swarm/pot"
"github.com/ethereum/go-ethereum/swarm/state"
"github.com/ethereum/go-ethereum/swarm/storage" "github.com/ethereum/go-ethereum/swarm/storage"
mockdb "github.com/ethereum/go-ethereum/swarm/storage/mock/db"
) )
const testMinProxBinSize = 2 const testMinProxBinSize = 2
const MaxTimeout = 600 const MaxTimeout = 600
var (
pof = pot.DefaultPof(256)
conf *synctestConfig
ids []discover.NodeID
datadirs map[discover.NodeID]string
ppmap map[string]*network.PeerPot
live bool
history bool
longrunning = flag.Bool("longrunning", false, "do run long-running tests")
)
type synctestConfig struct { type synctestConfig struct {
addrs [][]byte addrs [][]byte
hashes []storage.Address hashes []storage.Address
idToChunksMap map[discover.NodeID][]int idToChunksMap map[discover.NodeID][]int
chunksToNodesMap map[string][]int chunksToNodesMap map[string][]int
addrToIdMap map[string]discover.NodeID addrToIDMap map[string]discover.NodeID
}
func init() {
rand.Seed(time.Now().Unix())
}
//common_test needs to initialize the test in a init() func
//in order for adapters to register the NewStreamerService;
//this service is dependent on some global variables
//we thus need to initialize first as init() as well.
func initSyncTest() {
//assign the toAddr func so NewStreamerService can build the addr
toAddr = func(id discover.NodeID) *network.BzzAddr {
addr := network.NewAddrFromNodeID(id)
return addr
}
//global func to create local store
if *useMockStore {
createStoreFunc = createMockStore
} else {
createStoreFunc = createTestLocalStorageForId
}
//local stores
stores = make(map[discover.NodeID]storage.ChunkStore)
//data directories for each node and store
datadirs = make(map[discover.NodeID]string)
//deliveries for each node
deliveries = make(map[discover.NodeID]*Delivery)
//registries, map of discover.NodeID to its streamer
registries = make(map[discover.NodeID]*TestRegistry)
//not needed for this test but required from common_test for NewStreamService
waitPeerErrC = make(chan error)
//also not needed for this test but required for NewStreamService
peerCount = func(id discover.NodeID) int {
if ids[0] == id || ids[len(ids)-1] == id {
return 1
}
return 2
}
if *useMockStore {
createGlobalStore()
}
} }
//This test is a syncing test for nodes. //This test is a syncing test for nodes.
@ -116,12 +57,12 @@ func initSyncTest() {
//to the pivot node, and we check that nodes get the chunks //to the pivot node, and we check that nodes get the chunks
//they are expected to store based on the syncing protocol. //they are expected to store based on the syncing protocol.
//Number of chunks and nodes can be provided via commandline too. //Number of chunks and nodes can be provided via commandline too.
func TestSyncing(t *testing.T) { func TestSyncingViaGlobalSync(t *testing.T) {
//if nodes/chunks have been provided via commandline, //if nodes/chunks have been provided via commandline,
//run the tests with these values //run the tests with these values
if *nodes != 0 && *chunks != 0 { if *nodes != 0 && *chunks != 0 {
log.Info(fmt.Sprintf("Running test with %d chunks and %d nodes...", *chunks, *nodes)) log.Info(fmt.Sprintf("Running test with %d chunks and %d nodes...", *chunks, *nodes))
testSyncing(t, *chunks, *nodes) testSyncingViaGlobalSync(t, *chunks, *nodes)
} else { } else {
var nodeCnt []int var nodeCnt []int
var chnkCnt []int var chnkCnt []int
@ -138,51 +79,194 @@ func TestSyncing(t *testing.T) {
for _, chnk := range chnkCnt { for _, chnk := range chnkCnt {
for _, n := range nodeCnt { for _, n := range nodeCnt {
log.Info(fmt.Sprintf("Long running test with %d chunks and %d nodes...", chnk, n)) log.Info(fmt.Sprintf("Long running test with %d chunks and %d nodes...", chnk, n))
testSyncing(t, chnk, n) testSyncingViaGlobalSync(t, chnk, n)
} }
} }
} }
} }
//Do run the tests func TestSyncingViaDirectSubscribe(t *testing.T) {
//Every test runs 3 times, a live, a history, and a live AND history //if nodes/chunks have been provided via commandline,
func testSyncing(t *testing.T, chunkCount int, nodeCount int) { //run the tests with these values
//test live and NO history if *nodes != 0 && *chunks != 0 {
log.Info("Testing live and no history") log.Info(fmt.Sprintf("Running test with %d chunks and %d nodes...", *chunks, *nodes))
live = true err := testSyncingViaDirectSubscribe(*chunks, *nodes)
history = false
err := runSyncTest(chunkCount, nodeCount, live, history)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
//test history only } else {
log.Info("Testing history only") var nodeCnt []int
live = false var chnkCnt []int
history = true //if the `longrunning` flag has been provided
err = runSyncTest(chunkCount, nodeCount, live, history) //run more test combinations
if *longrunning {
chnkCnt = []int{1, 8, 32, 256, 1024}
nodeCnt = []int{32, 16}
} else {
//default test
chnkCnt = []int{4, 32}
nodeCnt = []int{32, 16}
}
for _, chnk := range chnkCnt {
for _, n := range nodeCnt {
log.Info(fmt.Sprintf("Long running test with %d chunks and %d nodes...", chnk, n))
err := testSyncingViaDirectSubscribe(chnk, n)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
//finally test live and history }
log.Info("Testing live and history") }
live = true }
err = runSyncTest(chunkCount, nodeCount, live, history) }
func testSyncingViaGlobalSync(t *testing.T, chunkCount int, nodeCount int) {
sim := simulation.New(map[string]simulation.ServiceFunc{
"streamer": func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) {
id := ctx.Config.ID
addr := network.NewAddrFromNodeID(id)
store, datadir, err := createTestLocalStorageForID(id, addr)
if err != nil {
return nil, nil, err
}
bucket.Store(bucketKeyStore, store)
cleanup = func() {
os.RemoveAll(datadir)
store.Close()
}
localStore := store.(*storage.LocalStore)
db := storage.NewDBAPI(localStore)
kad := network.NewKademlia(addr.Over(), network.NewKadParams())
delivery := NewDelivery(kad, db)
r := NewRegistry(addr, delivery, db, state.NewInmemoryStore(), &RegistryOptions{
DoSync: true,
SyncUpdateDelay: 3 * time.Second,
})
bucket.Store(bucketKeyRegistry, r)
return r, cleanup, nil
},
})
defer sim.Close()
log.Info("Initializing test config")
conf := &synctestConfig{}
//map of discover ID to indexes of chunks expected at that ID
conf.idToChunksMap = make(map[discover.NodeID][]int)
//map of overlay address to discover ID
conf.addrToIDMap = make(map[string]discover.NodeID)
//array where the generated chunk hashes will be stored
conf.hashes = make([]storage.Address, 0)
err := sim.UploadSnapshot(fmt.Sprintf("testing/snapshot_%d.json", nodeCount))
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
ctx, cancelSimRun := context.WithTimeout(context.Background(), 1*time.Minute)
defer cancelSimRun()
result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) error {
nodeIDs := sim.UpNodeIDs()
for _, n := range nodeIDs {
//get the kademlia overlay address from this ID
a := network.ToOverlayAddr(n.Bytes())
//append it to the array of all overlay addresses
conf.addrs = append(conf.addrs, a)
//the proximity calculation is on overlay addr,
//the p2p/simulations check func triggers on discover.NodeID,
//so we need to know which overlay addr maps to which nodeID
conf.addrToIDMap[string(a)] = n
}
		//get the node at that index
//this is the node selected for upload
node := sim.RandomUpNode()
item, ok := sim.NodeItem(node.ID, bucketKeyStore)
if !ok {
return fmt.Errorf("No localstore")
}
lstore := item.(*storage.LocalStore)
hashes, err := uploadFileToSingleNodeStore(node.ID, chunkCount, lstore)
if err != nil {
return err
}
conf.hashes = append(conf.hashes, hashes...)
mapKeysToNodes(conf)
if _, err := sim.WaitTillHealthy(ctx, 2); err != nil {
return err
}
// File retrieval check is repeated until all uploaded files are retrieved from all nodes
// or until the timeout is reached.
allSuccess := false
var gDir string
var globalStore *mockdb.GlobalStore
if *useMockStore {
gDir, globalStore, err = createGlobalStore()
if err != nil {
return fmt.Errorf("Something went wrong; using mockStore enabled but globalStore is nil")
}
defer func() {
os.RemoveAll(gDir)
err := globalStore.Close()
if err != nil {
log.Error("Error closing global store! %v", "err", err)
}
}()
}
for !allSuccess {
for _, id := range nodeIDs {
//for each expected chunk, check if it is in the local store
localChunks := conf.idToChunksMap[id]
localSuccess := true
for _, ch := range localChunks {
//get the real chunk by the index in the index array
chunk := conf.hashes[ch]
log.Trace(fmt.Sprintf("node has chunk: %s:", chunk))
//check if the expected chunk is indeed in the localstore
var err error
if *useMockStore {
//use the globalStore if the mockStore should be used; in that case,
//the complete localStore stack is bypassed for getting the chunk
_, err = globalStore.Get(common.BytesToAddress(id.Bytes()), chunk)
} else {
//use the actual localstore
item, ok := sim.NodeItem(id, bucketKeyStore)
if !ok {
return fmt.Errorf("Error accessing localstore")
}
lstore := item.(*storage.LocalStore)
_, err = lstore.Get(ctx, chunk)
}
if err != nil {
log.Warn(fmt.Sprintf("Chunk %s NOT found for id %s", chunk, id))
localSuccess = false
} else {
log.Debug(fmt.Sprintf("Chunk %s IS FOUND for id %s", chunk, id))
}
}
allSuccess = localSuccess
}
}
if !allSuccess {
return fmt.Errorf("Not all chunks succeeded!")
}
return nil
})
if result.Error != nil {
t.Fatal(result.Error)
}
} }
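
// The check above spins in a "repeat until every node holds every expected
// chunk" loop and relies on the context passed to sim.Run for its timeout. A
// compact, generic version of that retry pattern (retryUntil is illustrative,
// not a simulation API):
package main

import (
	"context"
	"fmt"
	"time"
)

func retryUntil(ctx context.Context, interval time.Duration, check func() bool) error {
	for {
		if check() {
			return nil
		}
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-time.After(interval):
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	tries := 0
	err := retryUntil(ctx, 100*time.Millisecond, func() bool {
		tries++
		return tries >= 3 // pretend all chunks show up on the third pass
	})
	fmt.Println("done after", tries, "tries, err =", err)
}
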
/* /*
The test generates the given number of chunks The test generates the given number of chunks
The upload depends on the global
`live` and `history` variables;
If `live` is set, first stream subscriptions are established, then
upload to a random node.
If `history` is enabled, first upload then build up subscriptions.
For every chunk generated, the nearest node addresses For every chunk generated, the nearest node addresses
are identified, we verify that the nodes closer to the are identified, we verify that the nodes closer to the
chunk addresses actually do have the chunks in their local stores. chunk addresses actually do have the chunks in their local stores.
@ -190,178 +274,84 @@ chunk addresses actually do have the chunks in their local stores.
The test loads a snapshot file to construct the swarm network, The test loads a snapshot file to construct the swarm network,
assuming that the snapshot file identifies a healthy assuming that the snapshot file identifies a healthy
kademlia network. The snapshot should have 'streamer' in its service list. kademlia network. The snapshot should have 'streamer' in its service list.
For every test run, a series of three tests will be executed:
- a LIVE test first, where first subscriptions are established,
then a file (random chunks) is uploaded
- a HISTORY test, where the file is uploaded first, and then
the subscriptions are established
- a crude LIVE AND HISTORY test last, where (different) chunks
are uploaded twice, once before and once after subscriptions
*/ */
func runSyncTest(chunkCount int, nodeCount int, live bool, history bool) error { func testSyncingViaDirectSubscribe(chunkCount int, nodeCount int) error {
initSyncTest() sim := simulation.New(map[string]simulation.ServiceFunc{
//the ids of the snapshot nodes, initiate only now as we need nodeCount "streamer": func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) {
ids = make([]discover.NodeID, nodeCount)
//initialize the test struct id := ctx.Config.ID
conf = &synctestConfig{} addr := network.NewAddrFromNodeID(id)
store, datadir, err := createTestLocalStorageForID(id, addr)
if err != nil {
return nil, nil, err
}
bucket.Store(bucketKeyStore, store)
cleanup = func() {
os.RemoveAll(datadir)
store.Close()
}
localStore := store.(*storage.LocalStore)
db := storage.NewDBAPI(localStore)
kad := network.NewKademlia(addr.Over(), network.NewKadParams())
delivery := NewDelivery(kad, db)
r := NewRegistry(addr, delivery, db, state.NewInmemoryStore(), nil)
bucket.Store(bucketKeyRegistry, r)
fileStore := storage.NewFileStore(storage.NewNetStore(localStore, nil), storage.NewFileStoreParams())
bucket.Store(bucketKeyFileStore, fileStore)
return r, cleanup, nil
},
})
defer sim.Close()
ctx, cancelSimRun := context.WithTimeout(context.Background(), 1*time.Minute)
defer cancelSimRun()
conf := &synctestConfig{}
//map of discover ID to indexes of chunks expected at that ID //map of discover ID to indexes of chunks expected at that ID
conf.idToChunksMap = make(map[discover.NodeID][]int) conf.idToChunksMap = make(map[discover.NodeID][]int)
//map of overlay address to discover ID //map of overlay address to discover ID
conf.addrToIdMap = make(map[string]discover.NodeID) conf.addrToIDMap = make(map[string]discover.NodeID)
//array where the generated chunk hashes will be stored //array where the generated chunk hashes will be stored
conf.hashes = make([]storage.Address, 0) conf.hashes = make([]storage.Address, 0)
//channel to trigger node checks in the simulation
trigger := make(chan discover.NodeID)
//channel to check for disconnection errors
disconnectC := make(chan error)
//channel to close disconnection watcher routine
quitC := make(chan struct{})
//load nodes from the snapshot file err := sim.UploadSnapshot(fmt.Sprintf("testing/snapshot_%d.json", nodeCount))
net, err := initNetWithSnapshot(nodeCount)
if err != nil { if err != nil {
return err return err
} }
var rpcSubscriptionsWg sync.WaitGroup
//do cleanup after test is terminated
defer func() {
		// close quitC channel to signal all goroutines to clean up
// before calling simulation network shutdown.
close(quitC)
//wait for all rpc subscriptions to unsubscribe
rpcSubscriptionsWg.Wait()
//shutdown the snapshot network
net.Shutdown()
//after the test, clean up local stores initialized with createLocalStoreForId
localStoreCleanup()
//finally clear all data directories
datadirsCleanup()
}()
//get the nodes of the network
nodes := net.GetNodes()
//select one index at random...
idx := rand.Intn(len(nodes))
	//...and get the node at that index
//this is the node selected for upload
node := nodes[idx]
log.Info("Initializing test config") result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) error {
//iterate over all nodes... nodeIDs := sim.UpNodeIDs()
for c := 0; c < len(nodes); c++ { for _, n := range nodeIDs {
//create an array of discovery node IDs
ids[c] = nodes[c].ID()
//get the kademlia overlay address from this ID //get the kademlia overlay address from this ID
a := network.ToOverlayAddr(ids[c].Bytes()) a := network.ToOverlayAddr(n.Bytes())
//append it to the array of all overlay addresses //append it to the array of all overlay addresses
conf.addrs = append(conf.addrs, a) conf.addrs = append(conf.addrs, a)
//the proximity calculation is on overlay addr, //the proximity calculation is on overlay addr,
//the p2p/simulations check func triggers on discover.NodeID, //the p2p/simulations check func triggers on discover.NodeID,
//so we need to know which overlay addr maps to which nodeID //so we need to know which overlay addr maps to which nodeID
conf.addrToIdMap[string(a)] = ids[c] conf.addrToIDMap[string(a)] = n
}
log.Info("Test config successfully initialized")
//only needed for healthy call when debugging
ppmap = network.NewPeerPotMap(testMinProxBinSize, conf.addrs)
//define the action to be performed before the test checks: start syncing
action := func(ctx context.Context) error {
//first run the health check on all nodes,
//wait until nodes are all healthy
ticker := time.NewTicker(200 * time.Millisecond)
defer ticker.Stop()
for range ticker.C {
healthy := true
for _, id := range ids {
r := registries[id]
//PeerPot for this node
addr := common.Bytes2Hex(network.ToOverlayAddr(id.Bytes()))
pp := ppmap[addr]
//call Healthy RPC
h := r.delivery.overlay.Healthy(pp)
//print info
log.Debug(r.delivery.overlay.String())
log.Debug(fmt.Sprintf("IS HEALTHY: %t", h.GotNN && h.KnowNN && h.Full))
if !h.GotNN || !h.Full {
healthy = false
break
}
}
if healthy {
break
}
} }
if history { var subscriptionCount int
log.Info("Uploading for history")
//If testing only history, we upload the chunk(s) first
chunks, err := uploadFileToSingleNodeStore(node.ID(), chunkCount)
if err != nil {
return err
}
conf.hashes = append(conf.hashes, chunks...)
//finally map chunks to the closest addresses
mapKeysToNodes(conf)
}
//variables needed to wait for all subscriptions established before uploading filter := simulation.NewPeerEventsFilter().Type(p2p.PeerEventTypeMsgRecv).Protocol("stream").MsgCode(4)
errc := make(chan error) eventC := sim.PeerEvents(ctx, nodeIDs, filter)
//now setup and start event watching in order to know when we can upload for j, node := range nodeIDs {
ctx, watchCancel := context.WithTimeout(context.Background(), MaxTimeout*time.Second)
defer watchCancel()
log.Info("Setting up stream subscription")
//We need two iterations, one to subscribe to the subscription events
//(so we know when setup phase is finished), and one to
//actually run the stream subscriptions. We can't do it in the same iteration,
//because while the first nodes in the loop are setting up subscriptions,
//the latter ones have not subscribed to listen to peer events yet,
//and then we miss events.
//first iteration: setup disconnection watcher and subscribe to peer events
for j, id := range ids {
log.Trace(fmt.Sprintf("Subscribe to subscription events: %d", j))
client, err := net.GetNode(id).Client()
if err != nil {
return err
}
wsDoneC := watchSubscriptionEvents(ctx, id, client, errc, quitC)
// doneC is nil, the error happened which is sent to errc channel, already
if wsDoneC == nil {
continue
}
rpcSubscriptionsWg.Add(1)
go func() {
<-wsDoneC
rpcSubscriptionsWg.Done()
}()
//watch for peers disconnecting
wdDoneC, err := streamTesting.WatchDisconnections(id, client, disconnectC, quitC)
if err != nil {
return err
}
rpcSubscriptionsWg.Add(1)
go func() {
<-wdDoneC
rpcSubscriptionsWg.Done()
}()
}
//second iteration: start syncing
for j, id := range ids {
log.Trace(fmt.Sprintf("Start syncing subscriptions: %d", j)) log.Trace(fmt.Sprintf("Start syncing subscriptions: %d", j))
client, err := net.GetNode(id).Client()
if err != nil {
return err
}
//start syncing! //start syncing!
item, ok := sim.NodeItem(node, bucketKeyRegistry)
if !ok {
return fmt.Errorf("No registry")
}
registry := item.(*Registry)
var cnt int var cnt int
err = client.CallContext(ctx, &cnt, "stream_startSyncing") cnt, err = startSyncing(registry, conf)
if err != nil { if err != nil {
return err return err
} }
@ -370,57 +360,50 @@ func runSyncTest(chunkCount int, nodeCount int, live bool, history bool) error {
subscriptionCount += cnt subscriptionCount += cnt
} }
//now wait until the number of expected subscriptions has been finished for e := range eventC {
//`watchSubscriptionEvents` will write with a `nil` value to errc if e.Error != nil {
for err := range errc { return e.Error
if err != nil {
return err
} }
//`nil` received, decrement count
subscriptionCount-- subscriptionCount--
//all subscriptions received
if subscriptionCount == 0 { if subscriptionCount == 0 {
break break
} }
} }
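
// The loop above just drains the PeerEvents channel until the expected number
// of subscription messages has been observed. The counting pattern stripped
// to its essentials:
package main

import "fmt"

func main() {
	events := make(chan string, 8)
	expected := 3
	go func(n int) {
		for i := 0; i < n; i++ {
			events <- "SubscribeMsg"
		}
		close(events)
	}(expected)
	for range events {
		expected--
		if expected == 0 {
			break
		}
	}
	fmt.Println("all expected subscriptions received")
}
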
//select a random node for upload
log.Info("Stream subscriptions successfully requested") node := sim.RandomUpNode()
if live { item, ok := sim.NodeItem(node.ID, bucketKeyStore)
//now upload the chunks to the selected random single node if !ok {
hashes, err := uploadFileToSingleNodeStore(node.ID(), chunkCount) return fmt.Errorf("No localstore")
}
lstore := item.(*storage.LocalStore)
hashes, err := uploadFileToSingleNodeStore(node.ID, chunkCount, lstore)
if err != nil { if err != nil {
return err return err
} }
conf.hashes = append(conf.hashes, hashes...) conf.hashes = append(conf.hashes, hashes...)
//finally map chunks to the closest addresses
log.Debug(fmt.Sprintf("Uploaded chunks for live syncing: %v", conf.hashes))
mapKeysToNodes(conf) mapKeysToNodes(conf)
log.Info(fmt.Sprintf("Uploaded %d chunks to random single node", chunkCount))
if _, err := sim.WaitTillHealthy(ctx, 2); err != nil {
return err
} }
log.Info("Action terminated") var gDir string
var globalStore *mockdb.GlobalStore
return nil if *useMockStore {
gDir, globalStore, err = createGlobalStore()
if err != nil {
return fmt.Errorf("Something went wrong; using mockStore enabled but globalStore is nil")
} }
defer os.RemoveAll(gDir)
//check defines what will be checked during the test
check := func(ctx context.Context, id discover.NodeID) (bool, error) {
select {
case <-ctx.Done():
return false, ctx.Err()
case e := <-disconnectC:
log.Error(e.Error())
return false, fmt.Errorf("Disconnect event detected, network unhealthy")
default:
} }
log.Trace(fmt.Sprintf("Checking node: %s", id)) // File retrieval check is repeated until all uploaded files are retrieved from all nodes
//select the local store for the given node // or until the timeout is reached.
		//if there is more than one chunk, the test only succeeds if all expected chunks are found allSuccess := false
allSuccess := true for !allSuccess {
for _, id := range nodeIDs {
//all the chunk indexes which are supposed to be found for this node
localChunks := conf.idToChunksMap[id]
//for each expected chunk, check if it is in the local store //for each expected chunk, check if it is in the local store
localChunks := conf.idToChunksMap[id]
localSuccess := true
for _, ch := range localChunks { for _, ch := range localChunks {
//get the real chunk by the index in the index array //get the real chunk by the index in the index array
chunk := conf.hashes[ch] chunk := conf.hashes[ch]
@ -428,59 +411,38 @@ func runSyncTest(chunkCount int, nodeCount int, live bool, history bool) error {
//check if the expected chunk is indeed in the localstore //check if the expected chunk is indeed in the localstore
var err error var err error
if *useMockStore { if *useMockStore {
if globalStore == nil {
return false, fmt.Errorf("Something went wrong; using mockStore enabled but globalStore is nil")
}
//use the globalStore if the mockStore should be used; in that case, //use the globalStore if the mockStore should be used; in that case,
//the complete localStore stack is bypassed for getting the chunk //the complete localStore stack is bypassed for getting the chunk
_, err = globalStore.Get(common.BytesToAddress(id.Bytes()), chunk) _, err = globalStore.Get(common.BytesToAddress(id.Bytes()), chunk)
} else { } else {
//use the actual localstore //use the actual localstore
lstore := stores[id] item, ok := sim.NodeItem(id, bucketKeyStore)
_, err = lstore.Get(context.TODO(), chunk) if !ok {
return fmt.Errorf("Error accessing localstore")
}
lstore := item.(*storage.LocalStore)
_, err = lstore.Get(ctx, chunk)
} }
if err != nil { if err != nil {
log.Warn(fmt.Sprintf("Chunk %s NOT found for id %s", chunk, id)) log.Warn(fmt.Sprintf("Chunk %s NOT found for id %s", chunk, id))
allSuccess = false localSuccess = false
} else { } else {
log.Debug(fmt.Sprintf("Chunk %s IS FOUND for id %s", chunk, id)) log.Debug(fmt.Sprintf("Chunk %s IS FOUND for id %s", chunk, id))
} }
} }
allSuccess = localSuccess
return allSuccess, nil
}
//for each tick, run the checks on all nodes
timingTicker := time.NewTicker(time.Second * 1)
defer timingTicker.Stop()
go func() {
for range timingTicker.C {
for i := 0; i < len(ids); i++ {
log.Trace(fmt.Sprintf("triggering step %d, id %s", i, ids[i]))
trigger <- ids[i]
} }
} }
}() if !allSuccess {
return fmt.Errorf("Not all chunks succeeded!")
log.Info("Starting simulation run...") }
return nil
timeout := MaxTimeout * time.Second
ctx, cancel := context.WithTimeout(context.Background(), timeout)
defer cancel()
//run the simulation
result := simulations.NewSimulation(net).Run(ctx, &simulations.Step{
Action: action,
Trigger: trigger,
Expect: &simulations.Expectation{
Nodes: ids,
Check: check,
},
}) })
if result.Error != nil { if result.Error != nil {
return result.Error return result.Error
} }
log.Info("Simulation terminated") log.Info("Simulation terminated")
return nil return nil
} }
@ -489,20 +451,9 @@ func runSyncTest(chunkCount int, nodeCount int, live bool, history bool) error {
//issues `RequestSubscriptionMsg` to peers, based on po, by iterating over //issues `RequestSubscriptionMsg` to peers, based on po, by iterating over
//the kademlia's `EachBin` function. //the kademlia's `EachBin` function.
//returns the number of subscriptions requested //returns the number of subscriptions requested
func (r *TestRegistry) StartSyncing(ctx context.Context) (int, error) { func startSyncing(r *Registry, conf *synctestConfig) (int, error) {
var err error var err error
if log.Lvl(*loglevel) == log.LvlDebug {
//PeerPot for this node
addr := common.Bytes2Hex(r.addr.OAddr)
pp := ppmap[addr]
//call Healthy RPC
h := r.delivery.overlay.Healthy(pp)
//print info
log.Debug(r.delivery.overlay.String())
log.Debug(fmt.Sprintf("IS HEALTHY: %t", h.GotNN && h.KnowNN && h.Full))
}
kad, ok := r.delivery.overlay.(*network.Kademlia) kad, ok := r.delivery.overlay.(*network.Kademlia)
if !ok { if !ok {
return 0, fmt.Errorf("Not a Kademlia!") return 0, fmt.Errorf("Not a Kademlia!")
@ -512,14 +463,10 @@ func (r *TestRegistry) StartSyncing(ctx context.Context) (int, error) {
//iterate over each bin and solicit needed subscription to bins //iterate over each bin and solicit needed subscription to bins
kad.EachBin(r.addr.Over(), pof, 0, func(conn network.OverlayConn, po int) bool { kad.EachBin(r.addr.Over(), pof, 0, func(conn network.OverlayConn, po int) bool {
//identify begin and start index of the bin(s) we want to subscribe to //identify begin and start index of the bin(s) we want to subscribe to
log.Debug(fmt.Sprintf("Requesting subscription by: registry %s from peer %s for bin: %d", r.addr.ID(), conf.addrToIdMap[string(conn.Address())], po)) histRange := &Range{}
var histRange *Range
if history {
histRange = &Range{}
}
subCnt++ subCnt++
err = r.RequestSubscription(conf.addrToIdMap[string(conn.Address())], NewStream("SYNC", FormatSyncBinKey(uint8(po)), live), histRange, Top) err = r.RequestSubscription(conf.addrToIDMap[string(conn.Address())], NewStream("SYNC", FormatSyncBinKey(uint8(po)), true), histRange, Top)
if err != nil { if err != nil {
log.Error(fmt.Sprintf("Error in RequestSubsciption! %v", err)) log.Error(fmt.Sprintf("Error in RequestSubsciption! %v", err))
return false return false
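
// startSyncing above walks the kademlia bins it currently has peers in and
// requests one SYNC subscription per bin, counting how many it asked for. A
// self-contained sketch of that shape; eachBin and requestSubscription are
// hypothetical stand-ins for the Kademlia.EachBin / Registry.RequestSubscription calls:
package main

import "fmt"

// eachBin visits the proximity-order bins that currently hold at least one peer.
func eachBin(binsWithPeers []int, visit func(po int) bool) {
	for _, po := range binsWithPeers {
		if !visit(po) {
			return
		}
	}
}

func main() {
	requestSubscription := func(po int) error {
		// the real code builds the stream with NewStream("SYNC", FormatSyncBinKey(uint8(po)), true)
		fmt.Printf("requesting SYNC subscription for bin %d\n", po)
		return nil
	}
	subCnt := 0
	eachBin([]int{0, 1, 4}, func(po int) bool {
		subCnt++
		return requestSubscription(po) == nil
	})
	fmt.Println("requested", subCnt, "subscriptions")
}
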
@ -552,7 +499,7 @@ func mapKeysToNodes(conf *synctestConfig) {
return false
}
if pl == 256 || pl == po {
log.Trace(fmt.Sprintf("appending %s", conf.addrToIdMap[string(a)]))
log.Trace(fmt.Sprintf("appending %s", conf.addrToIDMap[string(a)]))
nns = append(nns, indexmap[string(a)])
nodemap[string(a)] = append(nodemap[string(a)], i)
}
@ -567,26 +514,24 @@ func mapKeysToNodes(conf *synctestConfig) {
}
for addr, chunks := range nodemap {
//this selects which chunks are expected to be found with the given node
conf.idToChunksMap[conf.addrToIdMap[addr]] = chunks
conf.idToChunksMap[conf.addrToIDMap[addr]] = chunks
}
log.Debug(fmt.Sprintf("Map of expected chunks by ID: %v", conf.idToChunksMap))
conf.chunksToNodesMap = kmap
}
//upload a file(chunks) to a single local node store
func uploadFileToSingleNodeStore(id discover.NodeID, chunkCount int) ([]storage.Address, error) {
func uploadFileToSingleNodeStore(id discover.NodeID, chunkCount int, lstore *storage.LocalStore) ([]storage.Address, error) {
log.Debug(fmt.Sprintf("Uploading to node id: %s", id))
lstore := stores[id]
size := chunkSize
fileStore := storage.NewFileStore(lstore, storage.NewFileStoreParams())
size := chunkSize
var rootAddrs []storage.Address
for i := 0; i < chunkCount; i++ {
ctx := context.TODO()
rk, wait, err := fileStore.Store(context.TODO(), io.LimitReader(crand.Reader, int64(size)), int64(size), false)
rk, wait, err := fileStore.Store(ctx, io.LimitReader(crand.Reader, int64(size)), int64(size), false)
if err != nil {
return nil, err
}
err = wait(ctx)
err = wait(context.TODO())
if err != nil {
return nil, err
}
@ -595,129 +540,3 @@ func uploadFileToSingleNodeStore(id discover.NodeID, chunkCount int) ([]storage.
return rootAddrs, nil
}
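uploadFileToSingleNodeStore now takes the LocalStore as an argument instead of reading the old global stores map. A sketch of the same flow with the chunk size passed in explicitly (the test itself uses the package-level chunkSize constant):

	package example

	import (
		"context"
		crand "crypto/rand"
		"io"

		"github.com/ethereum/go-ethereum/swarm/storage"
	)

	// uploadRandomChunks stores chunkCount pieces of random data into lstore and
	// returns their root addresses, waiting for each store operation to complete.
	func uploadRandomChunks(lstore *storage.LocalStore, chunkCount, size int) ([]storage.Address, error) {
		fileStore := storage.NewFileStore(lstore, storage.NewFileStoreParams())
		var rootAddrs []storage.Address
		for i := 0; i < chunkCount; i++ {
			ctx := context.TODO()
			rk, wait, err := fileStore.Store(ctx, io.LimitReader(crand.Reader, int64(size)), int64(size), false)
			if err != nil {
				return nil, err
			}
			// wait blocks until the chunks of this upload are in the local store
			if err := wait(ctx); err != nil {
				return nil, err
			}
			rootAddrs = append(rootAddrs, rk)
		}
		return rootAddrs, nil
	}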
//initialize a network from a snapshot
func initNetWithSnapshot(nodeCount int) (*simulations.Network, error) {
var a adapters.NodeAdapter
//add the streamer service to the node adapter
if *adapter == "exec" {
dirname, err := ioutil.TempDir(".", "")
if err != nil {
return nil, err
}
a = adapters.NewExecAdapter(dirname)
} else if *adapter == "tcp" {
a = adapters.NewTCPAdapter(services)
} else if *adapter == "sim" {
a = adapters.NewSimAdapter(services)
}
log.Info("Setting up Snapshot network")
net := simulations.NewNetwork(a, &simulations.NetworkConfig{
ID: "0",
DefaultService: "streamer",
})
f, err := os.Open(fmt.Sprintf("testing/snapshot_%d.json", nodeCount))
if err != nil {
return nil, err
}
defer f.Close()
jsonbyte, err := ioutil.ReadAll(f)
if err != nil {
return nil, err
}
var snap simulations.Snapshot
err = json.Unmarshal(jsonbyte, &snap)
if err != nil {
return nil, err
}
//the snapshot probably has the property EnableMsgEvents not set
//just in case, set it to true!
//(we need this to wait for messages before uploading)
for _, n := range snap.Nodes {
n.Node.Config.EnableMsgEvents = true
}
log.Info("Waiting for p2p connections to be established...")
//now we can load the snapshot
err = net.Load(&snap)
if err != nil {
return nil, err
}
log.Info("Snapshot loaded")
return net, nil
}
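initNetWithSnapshot above is exactly the boilerplate the netsim package absorbs. Its tail, extracted into a small helper for illustration only (the function name and use of ioutil.ReadFile are not part of the commit):

	package example

	import (
		"encoding/json"
		"io/ioutil"

		"github.com/ethereum/go-ethereum/p2p/simulations"
	)

	// loadSnapshot reads a snapshot file, forces EnableMsgEvents on every node
	// config (the tests rely on message events before uploading), and loads it
	// into an existing simulations network.
	func loadSnapshot(net *simulations.Network, path string) error {
		jsonbyte, err := ioutil.ReadFile(path)
		if err != nil {
			return err
		}
		var snap simulations.Snapshot
		if err := json.Unmarshal(jsonbyte, &snap); err != nil {
			return err
		}
		for _, n := range snap.Nodes {
			n.Node.Config.EnableMsgEvents = true
		}
		return net.Load(&snap)
	}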
//we want to wait for subscriptions to be established before uploading to test
//that live syncing is working correctly
func watchSubscriptionEvents(ctx context.Context, id discover.NodeID, client *rpc.Client, errc chan error, quitC chan struct{}) (doneC <-chan struct{}) {
events := make(chan *p2p.PeerEvent)
sub, err := client.Subscribe(context.Background(), "admin", events, "peerEvents")
if err != nil {
log.Error(err.Error())
errc <- fmt.Errorf("error getting peer events for node %v: %s", id, err)
return
}
c := make(chan struct{})
go func() {
defer func() {
log.Trace("watch subscription events: unsubscribe", "id", id)
sub.Unsubscribe()
close(c)
}()
for {
select {
case <-quitC:
return
case <-ctx.Done():
select {
case errc <- ctx.Err():
case <-quitC:
}
return
case e := <-events:
//just catch SubscribeMsg
if e.Type == p2p.PeerEventTypeMsgRecv && e.Protocol == "stream" && e.MsgCode != nil && *e.MsgCode == 4 {
errc <- nil
}
case err := <-sub.Err():
if err != nil {
select {
case errc <- fmt.Errorf("error getting peer events for node %v: %v", id, err):
case <-quitC:
}
return
}
}
}
}()
return c
}
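watchSubscriptionEvents is the hand-rolled pattern that the simulation package's PeerEvents filters replace: subscribe to admin peerEvents over RPC and match message type, protocol and code yourself. A condensed sketch of that loop, using only the calls shown above:

	package example

	import (
		"context"

		"github.com/ethereum/go-ethereum/p2p"
		"github.com/ethereum/go-ethereum/rpc"
	)

	// waitForStreamMsg returns once a "stream" protocol message with the given
	// code is received by the node behind client, or when ctx expires.
	func waitForStreamMsg(ctx context.Context, client *rpc.Client, msgCode uint64) error {
		events := make(chan *p2p.PeerEvent)
		sub, err := client.Subscribe(context.Background(), "admin", events, "peerEvents")
		if err != nil {
			return err
		}
		defer sub.Unsubscribe()
		for {
			select {
			case <-ctx.Done():
				return ctx.Err()
			case e := <-events:
				if e.Type == p2p.PeerEventTypeMsgRecv && e.Protocol == "stream" && e.MsgCode != nil && *e.MsgCode == msgCode {
					return nil
				}
			case err := <-sub.Err():
				return err
			}
		}
	}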
//create a local store for the given node
func createTestLocalStorageForId(id discover.NodeID, addr *network.BzzAddr) (storage.ChunkStore, error) {
var datadir string
var err error
datadir, err = ioutil.TempDir("", fmt.Sprintf("syncer-test-%s", id.TerminalString()))
if err != nil {
return nil, err
}
datadirs[id] = datadir
var store storage.ChunkStore
params := storage.NewDefaultLocalStoreParams()
params.ChunkDbPath = datadir
params.BaseKey = addr.Over()
store, err = storage.NewTestLocalStoreForAddr(params)
if err != nil {
return nil, err
}
return store, nil
}
View File
@ -23,18 +23,22 @@ import (
"io" "io"
"io/ioutil" "io/ioutil"
"math" "math"
"os"
"sync" "sync"
"testing" "testing"
"time" "time"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/node"
"github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/p2p/discover" "github.com/ethereum/go-ethereum/p2p/discover"
"github.com/ethereum/go-ethereum/p2p/simulations" "github.com/ethereum/go-ethereum/p2p/simulations/adapters"
"github.com/ethereum/go-ethereum/rpc"
"github.com/ethereum/go-ethereum/swarm/log" "github.com/ethereum/go-ethereum/swarm/log"
"github.com/ethereum/go-ethereum/swarm/network" "github.com/ethereum/go-ethereum/swarm/network"
streamTesting "github.com/ethereum/go-ethereum/swarm/network/stream/testing" "github.com/ethereum/go-ethereum/swarm/network/simulation"
"github.com/ethereum/go-ethereum/swarm/state"
"github.com/ethereum/go-ethereum/swarm/storage" "github.com/ethereum/go-ethereum/swarm/storage"
mockdb "github.com/ethereum/go-ethereum/swarm/storage/mock/db"
) )
const dataChunkCount = 200 const dataChunkCount = 200
@ -46,95 +50,139 @@ func TestSyncerSimulation(t *testing.T) {
testSyncBetweenNodes(t, 16, 1, dataChunkCount, true, 1)
}
func createMockStore(id discover.NodeID, addr *network.BzzAddr) (storage.ChunkStore, error) {
func createMockStore(globalStore *mockdb.GlobalStore, id discover.NodeID, addr *network.BzzAddr) (lstore storage.ChunkStore, datadir string, err error) {
var err error
address := common.BytesToAddress(id.Bytes())
mockStore := globalStore.NewNodeStore(address)
params := storage.NewDefaultLocalStoreParams()
datadirs[id], err = ioutil.TempDir("", "localMockStore-"+id.TerminalString())
datadir, err = ioutil.TempDir("", "localMockStore-"+id.TerminalString())
if err != nil {
return nil, err
return nil, "", err
}
params.Init(datadirs[id])
params.Init(datadir)
params.BaseKey = addr.Over()
lstore, err := storage.NewLocalStore(params, mockStore)
lstore, err = storage.NewLocalStore(params, mockStore)
return lstore, nil
return lstore, datadir, nil
}
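createTestLocalStorageForId above (removed) and createTestLocalStorageForID used by the new syncer test do the same job; the new shape returns the datadir so the service cleanup func can remove it. A sketch of that shape, combining the two versions shown in this diff:

	package example

	import (
		"fmt"
		"io/ioutil"

		"github.com/ethereum/go-ethereum/p2p/discover"
		"github.com/ethereum/go-ethereum/swarm/network"
		"github.com/ethereum/go-ethereum/swarm/storage"
	)

	// newTestLocalStore creates a LocalStore in a fresh temp directory, keyed by
	// the node's overlay address; the caller removes datadir during cleanup.
	func newTestLocalStore(id discover.NodeID, addr *network.BzzAddr) (storage.ChunkStore, string, error) {
		datadir, err := ioutil.TempDir("", fmt.Sprintf("syncer-test-%s", id.TerminalString()))
		if err != nil {
			return nil, "", err
		}
		params := storage.NewDefaultLocalStoreParams()
		params.ChunkDbPath = datadir
		params.BaseKey = addr.Over()
		store, err := storage.NewTestLocalStoreForAddr(params)
		if err != nil {
			return nil, "", err
		}
		return store, datadir, nil
	}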
func testSyncBetweenNodes(t *testing.T, nodes, conns, chunkCount int, skipCheck bool, po uint8) {
defer setDefaultSkipCheck(defaultSkipCheck)
sim := simulation.New(map[string]simulation.ServiceFunc{
defaultSkipCheck = skipCheck
"streamer": func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) {
//data directories for each node and store
var store storage.ChunkStore
datadirs = make(map[discover.NodeID]string)
var globalStore *mockdb.GlobalStore
if *useMockStore {
var gDir, datadir string
createStoreFunc = createMockStore
createGlobalStore()
} else {
createStoreFunc = createTestLocalStorageFromSim
}
defer datadirsCleanup()
registries = make(map[discover.NodeID]*TestRegistry)
id := ctx.Config.ID
toAddr = func(id discover.NodeID) *network.BzzAddr {
addr := network.NewAddrFromNodeID(id)
//hack to put addresses in same space
addr.OAddr[0] = byte(0)
return addr
if *useMockStore {
gDir, globalStore, err = createGlobalStore()
if err != nil {
return nil, nil, fmt.Errorf("Something went wrong; using mockStore enabled but globalStore is nil")
}
conf := &streamTesting.RunConfig{
store, datadir, err = createMockStore(globalStore, id, addr)
Adapter: *adapter,
} else {
NodeCount: nodes,
store, datadir, err = createTestLocalStorageForID(id, addr)
ConnLevel: conns,
ToAddr: toAddr,
Services: services,
EnableMsgEvents: false,
}
// HACK: these are global variables in the test so that they are available for
if err != nil {
// the service constructor function
return nil, nil, err
// TODO: will this work with exec/docker adapter?
}
// localstore of nodes made available for action and check calls
bucket.Store(bucketKeyStore, store)
stores = make(map[discover.NodeID]storage.ChunkStore)
cleanup = func() {
deliveries = make(map[discover.NodeID]*Delivery)
store.Close()
os.RemoveAll(datadir)
if *useMockStore {
err := globalStore.Close()
if err != nil {
log.Error("Error closing global store! %v", "err", err)
}
os.RemoveAll(gDir)
}
}
localStore := store.(*storage.LocalStore)
db := storage.NewDBAPI(localStore)
bucket.Store(bucketKeyDB, db)
kad := network.NewKademlia(addr.Over(), network.NewKadParams())
delivery := NewDelivery(kad, db)
bucket.Store(bucketKeyDelivery, delivery)
r := NewRegistry(addr, delivery, db, state.NewInmemoryStore(), &RegistryOptions{
SkipCheck: skipCheck,
})
fileStore := storage.NewFileStore(storage.NewNetStore(localStore, nil), storage.NewFileStoreParams())
bucket.Store(bucketKeyFileStore, fileStore)
return r, cleanup, nil
},
})
defer sim.Close()
// create context for simulation run
timeout := 30 * time.Second
ctx, cancel := context.WithTimeout(context.Background(), timeout)
// defer cancel should come before defer simulation teardown
defer cancel()
// create simulation network with the config
_, err := sim.AddNodesAndConnectChain(nodes)
sim, teardown, err := streamTesting.NewSimulation(conf)
if err != nil {
var rpcSubscriptionsWg sync.WaitGroup
t.Fatal(err)
defer func() {
}
rpcSubscriptionsWg.Wait()
result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) error {
teardown()
nodeIDs := sim.UpNodeIDs()
nodeIndex := make(map[discover.NodeID]int)
for i, id := range nodeIDs {
nodeIndex[id] = i
}
disconnections := sim.PeerEvents(
context.Background(),
sim.NodeIDs(),
simulation.NewPeerEventsFilter().Type(p2p.PeerEventTypeDrop),
)
go func() {
for d := range disconnections {
if d.Error != nil {
log.Error("peer drop", "node", d.NodeID, "peer", d.Event.Peer)
t.Fatal(d.Error)
}
}
}()
// each node Subscribes to each other's swarmChunkServerStreamName
for j := 0; j < nodes-1; j++ {
id := nodeIDs[j]
client, err := sim.Net.GetNode(id).Client()
if err != nil {
t.Fatal(err)
}
sid := nodeIDs[j+1]
client.CallContext(ctx, nil, "stream_subscribeStream", sid, NewStream("SYNC", FormatSyncBinKey(1), false), NewRange(0, 0), Top)
if err != nil {
return err
}
if j > 0 || nodes == 2 {
item, ok := sim.NodeItem(nodeIDs[j], bucketKeyFileStore)
if !ok {
return fmt.Errorf("No filestore")
}
fileStore := item.(*storage.FileStore)
size := chunkCount * chunkSize
_, wait, err := fileStore.Store(ctx, io.LimitReader(crand.Reader, int64(size)), int64(size), false)
if err != nil {
t.Fatal(err.Error())
}
wait(ctx)
nodeIndex := make(map[discover.NodeID]int)
for i, id := range sim.IDs {
nodeIndex[id] = i
if !*useMockStore {
stores[id] = sim.Stores[i]
sim.Stores[i] = stores[id]
}
}
// peerCount function gives the number of peer connections for a nodeID
// here we distribute chunks of a random file into stores 1...nodes
// this is needed for the service run function to wait until
if _, err := sim.WaitTillHealthy(ctx, 2); err != nil {
// each protocol instance runs and the streamer peers are available
return err
peerCount = func(id discover.NodeID) int {
if sim.IDs[0] == id || sim.IDs[nodes-1] == id {
return 1
}
return 2
}
waitPeerErrC = make(chan error)
// create DBAPI-s for all nodes
dbs := make([]*storage.DBAPI, nodes)
for i := 0; i < nodes; i++ {
dbs[i] = storage.NewDBAPI(sim.Stores[i].(*storage.LocalStore))
}
// collect hashes in po 1 bin for each node
@ -145,93 +193,31 @@ func testSyncBetweenNodes(t *testing.T, nodes, conns, chunkCount int, skipCheck
if i < nodes-1 {
hashCounts[i] = hashCounts[i+1]
}
dbs[i].Iterator(0, math.MaxUint64, po, func(addr storage.Address, index uint64) bool {
item, ok := sim.NodeItem(nodeIDs[i], bucketKeyDB)
if !ok {
return fmt.Errorf("No DB")
}
db := item.(*storage.DBAPI)
db.Iterator(0, math.MaxUint64, po, func(addr storage.Address, index uint64) bool {
hashes[i] = append(hashes[i], addr)
totalHashes++
hashCounts[i]++
return true
})
}
// errc is error channel for simulation
errc := make(chan error, 1)
quitC := make(chan struct{})
defer close(quitC)
// action is subscribe
action := func(ctx context.Context) error {
// need to wait till an aynchronous process registers the peers in streamer.peers
// that is used by Subscribe
// the global peerCount function tells how many connections each node has
// TODO: this is to be reimplemented with peerEvent watcher without global var
i := 0
for err := range waitPeerErrC {
if err != nil {
return fmt.Errorf("error waiting for peers: %s", err)
}
i++
if i == nodes {
break
}
}
// each node Subscribes to each other's swarmChunkServerStreamName
for j := 0; j < nodes-1; j++ {
id := sim.IDs[j]
sim.Stores[j] = stores[id]
err := sim.CallClient(id, func(client *rpc.Client) error {
// report disconnect events to the error channel cos peers should not disconnect
doneC, err := streamTesting.WatchDisconnections(id, client, errc, quitC)
if err != nil {
return err
}
rpcSubscriptionsWg.Add(1)
go func() {
<-doneC
rpcSubscriptionsWg.Done()
}()
ctx, cancel := context.WithTimeout(ctx, 1*time.Second)
defer cancel()
// start syncing, i.e., subscribe to upstream peers po 1 bin
sid := sim.IDs[j+1]
return client.CallContext(ctx, nil, "stream_subscribeStream", sid, NewStream("SYNC", FormatSyncBinKey(1), false), NewRange(0, 0), Top)
})
if err != nil {
return err
}
}
// here we distribute chunks of a random file into stores 1...nodes
rrFileStore := storage.NewFileStore(newRoundRobinStore(sim.Stores[1:]...), storage.NewFileStoreParams())
size := chunkCount * chunkSize
_, wait, err := rrFileStore.Store(ctx, io.LimitReader(crand.Reader, int64(size)), int64(size), false)
if err != nil {
t.Fatal(err.Error())
}
// need to wait cos we then immediately collect the relevant bin content
wait(ctx)
if err != nil {
t.Fatal(err.Error())
}
return nil
}
// this makes sure check is not called before the previous call finishes
check := func(ctx context.Context, id discover.NodeID) (bool, error) {
select {
case err := <-errc:
return false, err
case <-ctx.Done():
return false, ctx.Err()
default:
}
i := nodeIndex[id]
var total, found int
for _, node := range nodeIDs {
i := nodeIndex[node]
for j := i; j < nodes; j++ {
total += len(hashes[j])
for _, key := range hashes[j] {
chunk, err := dbs[i].Get(ctx, key)
item, ok := sim.NodeItem(nodeIDs[j], bucketKeyDB)
if !ok {
return fmt.Errorf("No DB")
}
db := item.(*storage.DBAPI)
chunk, err := db.Get(ctx, key)
if err == storage.ErrFetching {
<-chunk.ReqC
} else if err != nil {
@ -242,26 +228,15 @@ func testSyncBetweenNodes(t *testing.T, nodes, conns, chunkCount int, skipCheck
found++
}
}
log.Debug("sync check", "node", id, "index", i, "bin", po, "found", found, "total", total)
log.Debug("sync check", "node", node, "index", i, "bin", po, "found", found, "total", total)
return total == found, nil
}
if total == found && total > 0 {
return nil
}
return fmt.Errorf("Total not equallying found: total is %d", total)
})
conf.Step = &simulations.Step{
Action: action,
Trigger: streamTesting.Trigger(500*time.Millisecond, quitC, sim.IDs[0:nodes-1]...),
Expect: &simulations.Expectation{
Nodes: sim.IDs[0:1],
Check: check,
},
}
startedAt := time.Now()
result, err := sim.Run(ctx, conf)
finishedAt := time.Now()
if err != nil {
t.Fatalf("Setting up simulation failed: %v", err)
}
if result.Error != nil {
t.Fatalf("Simulation failed: %s", result.Error)
t.Fatal(result.Error)
}
streamTesting.CheckResult(t, result, startedAt, finishedAt)
}
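Stripped of the syncing specifics, the migrated test reduces to a small skeleton: construct the Simulation from a ServiceFunc map, connect a chain of nodes, and put the whole test body into the Run closure. newTestService below is a hypothetical placeholder for the "streamer" constructor defined above:

	package example

	import (
		"context"
		"sync"
		"testing"
		"time"

		"github.com/ethereum/go-ethereum/node"
		"github.com/ethereum/go-ethereum/p2p/simulations/adapters"
		"github.com/ethereum/go-ethereum/swarm/network/simulation"
	)

	// TestSkeleton compresses testSyncBetweenNodes to its structure.
	func TestSkeleton(t *testing.T) {
		sim := simulation.New(map[string]simulation.ServiceFunc{
			"streamer": newTestService,
		})
		defer sim.Close()

		ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
		defer cancel()

		if _, err := sim.AddNodesAndConnectChain(4); err != nil {
			t.Fatal(err)
		}
		result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) error {
			// upload data on one node, then verify it is synced to the others
			return nil
		})
		if result.Error != nil {
			t.Fatal(result.Error)
		}
	}

	// newTestService would register the stream Registry and stash per-node items
	// (store, DBAPI, FileStore) in the bucket, as the real test does above.
	func newTestService(ctx *adapters.ServiceContext, bucket *sync.Map) (node.Service, func(), error) {
		return nil, func() {}, nil
	}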
View File
@ -1,293 +0,0 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package testing
import (
"context"
"errors"
"fmt"
"io/ioutil"
"math/rand"
"os"
"sync"
"testing"
"time"
"github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/p2p/discover"
"github.com/ethereum/go-ethereum/p2p/simulations"
"github.com/ethereum/go-ethereum/p2p/simulations/adapters"
"github.com/ethereum/go-ethereum/rpc"
"github.com/ethereum/go-ethereum/swarm/log"
"github.com/ethereum/go-ethereum/swarm/network"
"github.com/ethereum/go-ethereum/swarm/storage"
)
type Simulation struct {
Net *simulations.Network
Stores []storage.ChunkStore
Addrs []network.Addr
IDs []discover.NodeID
}
func SetStores(addrs ...network.Addr) ([]storage.ChunkStore, func(), error) {
var datadirs []string
stores := make([]storage.ChunkStore, len(addrs))
var err error
for i, addr := range addrs {
var datadir string
datadir, err = ioutil.TempDir("", "streamer")
if err != nil {
break
}
var store storage.ChunkStore
params := storage.NewDefaultLocalStoreParams()
params.Init(datadir)
params.BaseKey = addr.Over()
store, err = storage.NewTestLocalStoreForAddr(params)
if err != nil {
break
}
datadirs = append(datadirs, datadir)
stores[i] = store
}
teardown := func() {
for i, datadir := range datadirs {
stores[i].Close()
os.RemoveAll(datadir)
}
}
return stores, teardown, err
}
func NewAdapter(adapterType string, services adapters.Services) (adapter adapters.NodeAdapter, teardown func(), err error) {
teardown = func() {}
switch adapterType {
case "sim":
adapter = adapters.NewSimAdapter(services)
case "exec":
baseDir, err0 := ioutil.TempDir("", "swarm-test")
if err0 != nil {
return nil, teardown, err0
}
teardown = func() { os.RemoveAll(baseDir) }
adapter = adapters.NewExecAdapter(baseDir)
case "docker":
adapter, err = adapters.NewDockerAdapter()
if err != nil {
return nil, teardown, err
}
default:
return nil, teardown, errors.New("adapter needs to be one of sim, exec, docker")
}
return adapter, teardown, nil
}
func CheckResult(t *testing.T, result *simulations.StepResult, startedAt, finishedAt time.Time) {
t.Logf("Simulation passed in %s", result.FinishedAt.Sub(result.StartedAt))
if len(result.Passes) > 1 {
var min, max time.Duration
var sum int
for _, pass := range result.Passes {
duration := pass.Sub(result.StartedAt)
if sum == 0 || duration < min {
min = duration
}
if duration > max {
max = duration
}
sum += int(duration.Nanoseconds())
}
t.Logf("Min: %s, Max: %s, Average: %s", min, max, time.Duration(sum/len(result.Passes))*time.Nanosecond)
}
t.Logf("Setup: %s, Shutdown: %s", result.StartedAt.Sub(startedAt), finishedAt.Sub(result.FinishedAt))
}
type RunConfig struct {
Adapter string
Step *simulations.Step
NodeCount int
ConnLevel int
ToAddr func(discover.NodeID) *network.BzzAddr
Services adapters.Services
DefaultService string
EnableMsgEvents bool
}
func NewSimulation(conf *RunConfig) (*Simulation, func(), error) {
// create network
nodes := conf.NodeCount
adapter, adapterTeardown, err := NewAdapter(conf.Adapter, conf.Services)
if err != nil {
return nil, adapterTeardown, err
}
defaultService := "streamer"
if conf.DefaultService != "" {
defaultService = conf.DefaultService
}
net := simulations.NewNetwork(adapter, &simulations.NetworkConfig{
ID: "0",
DefaultService: defaultService,
})
teardown := func() {
adapterTeardown()
net.Shutdown()
}
ids := make([]discover.NodeID, nodes)
addrs := make([]network.Addr, nodes)
// start nodes
for i := 0; i < nodes; i++ {
nodeconf := adapters.RandomNodeConfig()
nodeconf.EnableMsgEvents = conf.EnableMsgEvents
node, err := net.NewNodeWithConfig(nodeconf)
if err != nil {
return nil, teardown, fmt.Errorf("error creating node: %s", err)
}
ids[i] = node.ID()
addrs[i] = conf.ToAddr(ids[i])
}
// set nodes number of Stores available
stores, storeTeardown, err := SetStores(addrs...)
teardown = func() {
net.Shutdown()
adapterTeardown()
storeTeardown()
}
if err != nil {
return nil, teardown, err
}
s := &Simulation{
Net: net,
Stores: stores,
IDs: ids,
Addrs: addrs,
}
return s, teardown, nil
}
func (s *Simulation) Run(ctx context.Context, conf *RunConfig) (*simulations.StepResult, error) {
// bring up nodes, launch the servive
nodes := conf.NodeCount
conns := conf.ConnLevel
for i := 0; i < nodes; i++ {
if err := s.Net.Start(s.IDs[i]); err != nil {
return nil, fmt.Errorf("error starting node %s: %s", s.IDs[i].TerminalString(), err)
}
}
// run a simulation which connects the 10 nodes in a chain
wg := sync.WaitGroup{}
for i := range s.IDs {
// collect the overlay addresses, to
for j := 0; j < conns; j++ {
var k int
if j == 0 {
k = i - 1
} else {
k = rand.Intn(len(s.IDs))
}
if i > 0 {
wg.Add(1)
go func(i, k int) {
defer wg.Done()
s.Net.Connect(s.IDs[i], s.IDs[k])
}(i, k)
}
}
}
wg.Wait()
log.Info(fmt.Sprintf("simulation with %v nodes", len(s.Addrs)))
// create an only locally retrieving FileStore for the pivot node to test
// if retriee requests have arrived
result := simulations.NewSimulation(s.Net).Run(ctx, conf.Step)
return result, nil
}
// WatchDisconnections subscribes to admin peerEvents and sends peer event drop
// errors to the errc channel. Channel quitC signals the termination of the event loop.
// Returned doneC will be closed after the rpc subscription is unsubscribed,
// signaling that simulations network is safe to shutdown.
func WatchDisconnections(id discover.NodeID, client *rpc.Client, errc chan error, quitC chan struct{}) (doneC <-chan struct{}, err error) {
events := make(chan *p2p.PeerEvent)
sub, err := client.Subscribe(context.Background(), "admin", events, "peerEvents")
if err != nil {
return nil, fmt.Errorf("error getting peer events for node %v: %s", id, err)
}
c := make(chan struct{})
go func() {
defer func() {
log.Trace("watch disconnections: unsubscribe", "id", id)
sub.Unsubscribe()
close(c)
}()
for {
select {
case <-quitC:
return
case e := <-events:
if e.Type == p2p.PeerEventTypeDrop {
select {
case errc <- fmt.Errorf("peerEvent for node %v: %v", id, e):
case <-quitC:
return
}
}
case err := <-sub.Err():
if err != nil {
select {
case errc <- fmt.Errorf("error getting peer events for node %v: %v", id, err):
case <-quitC:
return
}
}
}
}
}()
return c, nil
}
func Trigger(d time.Duration, quitC chan struct{}, ids ...discover.NodeID) chan discover.NodeID {
trigger := make(chan discover.NodeID)
go func() {
defer close(trigger)
ticker := time.NewTicker(d)
defer ticker.Stop()
// we are only testing the pivot node (net.Nodes[0])
for range ticker.C {
for _, id := range ids {
select {
case trigger <- id:
case <-quitC:
return
}
}
}
}()
return trigger
}
func (sim *Simulation) CallClient(id discover.NodeID, f func(*rpc.Client) error) error {
node := sim.Net.GetNode(id)
if node == nil {
return fmt.Errorf("unknown node: %s", id)
}
client, err := node.Client()
if err != nil {
return fmt.Errorf("error getting node client: %s", err)
}
return f(client)
}
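For contrast, this is roughly the per-test plumbing that the package deleted above demanded (CallClient for each node, WatchDisconnections feeding an error channel, a ticker-driven Trigger); the netsim Simulation performs the equivalent internally. Purely illustrative, since the package no longer exists after this commit:

	package example

	import (
		"time"

		"github.com/ethereum/go-ethereum/p2p/discover"
		"github.com/ethereum/go-ethereum/rpc"
		streamTesting "github.com/ethereum/go-ethereum/swarm/network/stream/testing"
	)

	// watchAndTrigger wires the deleted helpers together: a disconnection watcher
	// per node plus a trigger channel driving the step expectation. errc and quitC
	// are owned by the calling test.
	func watchAndTrigger(sim *streamTesting.Simulation, errc chan error, quitC chan struct{}) (chan discover.NodeID, error) {
		for _, id := range sim.IDs {
			err := sim.CallClient(id, func(client *rpc.Client) error {
				doneC, err := streamTesting.WatchDisconnections(id, client, errc, quitC)
				if err != nil {
					return err
				}
				go func() { <-doneC }() // keep the subscription until it unsubscribes
				return nil
			})
			if err != nil {
				return nil, err
			}
		}
		return streamTesting.Trigger(500*time.Millisecond, quitC, sim.IDs...), nil
	}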