swarm: network rewrite merge
This commit is contained in:
449
swarm/network/stream/common_test.go
Normal file
449
swarm/network/stream/common_test.go
Normal file
@@ -0,0 +1,449 @@
|
||||
// Copyright 2018 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package stream
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"flag"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"sync/atomic"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"github.com/ethereum/go-ethereum/node"
|
||||
"github.com/ethereum/go-ethereum/p2p"
|
||||
"github.com/ethereum/go-ethereum/p2p/discover"
|
||||
"github.com/ethereum/go-ethereum/p2p/simulations/adapters"
|
||||
p2ptest "github.com/ethereum/go-ethereum/p2p/testing"
|
||||
"github.com/ethereum/go-ethereum/rpc"
|
||||
"github.com/ethereum/go-ethereum/swarm/network"
|
||||
"github.com/ethereum/go-ethereum/swarm/state"
|
||||
"github.com/ethereum/go-ethereum/swarm/storage"
|
||||
"github.com/ethereum/go-ethereum/swarm/storage/mock"
|
||||
"github.com/ethereum/go-ethereum/swarm/storage/mock/db"
|
||||
colorable "github.com/mattn/go-colorable"
|
||||
)
|
||||
|
||||
var (
|
||||
deliveries map[discover.NodeID]*Delivery
|
||||
stores map[discover.NodeID]storage.ChunkStore
|
||||
toAddr func(discover.NodeID) *network.BzzAddr
|
||||
peerCount func(discover.NodeID) int
|
||||
adapter = flag.String("adapter", "sim", "type of simulation: sim|exec|docker")
|
||||
loglevel = flag.Int("loglevel", 2, "verbosity of logs")
|
||||
nodes = flag.Int("nodes", 0, "number of nodes")
|
||||
chunks = flag.Int("chunks", 0, "number of chunks")
|
||||
useMockStore = flag.Bool("mockstore", false, "disabled mock store (default: enabled)")
|
||||
)
|
||||
|
||||
var (
|
||||
defaultSkipCheck bool
|
||||
waitPeerErrC chan error
|
||||
chunkSize = 4096
|
||||
registries map[discover.NodeID]*TestRegistry
|
||||
createStoreFunc func(id discover.NodeID, addr *network.BzzAddr) (storage.ChunkStore, error)
|
||||
getRetrieveFunc = defaultRetrieveFunc
|
||||
subscriptionCount = 0
|
||||
globalStore mock.GlobalStorer
|
||||
globalStoreDir string
|
||||
)
|
||||
|
||||
var services = adapters.Services{
|
||||
"streamer": NewStreamerService,
|
||||
"intervalsStreamer": newIntervalsStreamerService,
|
||||
}
|
||||
|
||||
func init() {
|
||||
flag.Parse()
|
||||
// register the Delivery service which will run as a devp2p
|
||||
// protocol when using the exec adapter
|
||||
adapters.RegisterServices(services)
|
||||
|
||||
log.PrintOrigins(true)
|
||||
log.Root().SetHandler(log.LvlFilterHandler(log.Lvl(*loglevel), log.StreamHandler(colorable.NewColorableStderr(), log.TerminalFormat(true))))
|
||||
}
|
||||
|
||||
func createGlobalStore() {
|
||||
var err error
|
||||
globalStoreDir, err = ioutil.TempDir("", "global.store")
|
||||
if err != nil {
|
||||
log.Error("Error initiating global store temp directory!", "err", err)
|
||||
return
|
||||
}
|
||||
globalStore, err = db.NewGlobalStore(globalStoreDir)
|
||||
if err != nil {
|
||||
log.Error("Error initiating global store!", "err", err)
|
||||
}
|
||||
}
|
||||
|
||||
// NewStreamerService
|
||||
func NewStreamerService(ctx *adapters.ServiceContext) (node.Service, error) {
|
||||
var err error
|
||||
id := ctx.Config.ID
|
||||
addr := toAddr(id)
|
||||
kad := network.NewKademlia(addr.Over(), network.NewKadParams())
|
||||
stores[id], err = createStoreFunc(id, addr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
store := stores[id].(*storage.LocalStore)
|
||||
db := storage.NewDBAPI(store)
|
||||
delivery := NewDelivery(kad, db)
|
||||
deliveries[id] = delivery
|
||||
r := NewRegistry(addr, delivery, db, state.NewInmemoryStore(), &RegistryOptions{
|
||||
SkipCheck: defaultSkipCheck,
|
||||
DoRetrieve: false,
|
||||
})
|
||||
RegisterSwarmSyncerServer(r, db)
|
||||
RegisterSwarmSyncerClient(r, db)
|
||||
go func() {
|
||||
waitPeerErrC <- waitForPeers(r, 1*time.Second, peerCount(id))
|
||||
}()
|
||||
fileStore := storage.NewFileStore(storage.NewNetStore(store, getRetrieveFunc(id)), storage.NewFileStoreParams())
|
||||
testRegistry := &TestRegistry{Registry: r, fileStore: fileStore}
|
||||
registries[id] = testRegistry
|
||||
return testRegistry, nil
|
||||
}
|
||||
|
||||
func defaultRetrieveFunc(id discover.NodeID) func(chunk *storage.Chunk) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func datadirsCleanup() {
|
||||
for _, id := range ids {
|
||||
os.RemoveAll(datadirs[id])
|
||||
}
|
||||
if globalStoreDir != "" {
|
||||
os.RemoveAll(globalStoreDir)
|
||||
}
|
||||
}
|
||||
|
||||
//local stores need to be cleaned up after the sim is done
|
||||
func localStoreCleanup() {
|
||||
log.Info("Cleaning up...")
|
||||
for _, id := range ids {
|
||||
registries[id].Close()
|
||||
stores[id].Close()
|
||||
}
|
||||
log.Info("Local store cleanup done")
|
||||
}
|
||||
|
||||
func newStreamerTester(t *testing.T) (*p2ptest.ProtocolTester, *Registry, *storage.LocalStore, func(), error) {
|
||||
// setup
|
||||
addr := network.RandomAddr() // tested peers peer address
|
||||
to := network.NewKademlia(addr.OAddr, network.NewKadParams())
|
||||
|
||||
// temp datadir
|
||||
datadir, err := ioutil.TempDir("", "streamer")
|
||||
if err != nil {
|
||||
return nil, nil, nil, func() {}, err
|
||||
}
|
||||
removeDataDir := func() {
|
||||
os.RemoveAll(datadir)
|
||||
}
|
||||
|
||||
params := storage.NewDefaultLocalStoreParams()
|
||||
params.Init(datadir)
|
||||
params.BaseKey = addr.Over()
|
||||
|
||||
localStore, err := storage.NewTestLocalStoreForAddr(params)
|
||||
if err != nil {
|
||||
return nil, nil, nil, removeDataDir, err
|
||||
}
|
||||
|
||||
db := storage.NewDBAPI(localStore)
|
||||
delivery := NewDelivery(to, db)
|
||||
streamer := NewRegistry(addr, delivery, db, state.NewInmemoryStore(), &RegistryOptions{
|
||||
SkipCheck: defaultSkipCheck,
|
||||
})
|
||||
teardown := func() {
|
||||
streamer.Close()
|
||||
removeDataDir()
|
||||
}
|
||||
protocolTester := p2ptest.NewProtocolTester(t, network.NewNodeIDFromAddr(addr), 1, streamer.runProtocol)
|
||||
|
||||
err = waitForPeers(streamer, 1*time.Second, 1)
|
||||
if err != nil {
|
||||
return nil, nil, nil, nil, errors.New("timeout: peer is not created")
|
||||
}
|
||||
|
||||
return protocolTester, streamer, localStore, teardown, nil
|
||||
}
|
||||
|
||||
func waitForPeers(streamer *Registry, timeout time.Duration, expectedPeers int) error {
|
||||
ticker := time.NewTicker(10 * time.Millisecond)
|
||||
timeoutTimer := time.NewTimer(timeout)
|
||||
for {
|
||||
select {
|
||||
case <-ticker.C:
|
||||
if streamer.peersCount() >= expectedPeers {
|
||||
return nil
|
||||
}
|
||||
case <-timeoutTimer.C:
|
||||
return errors.New("timeout")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type roundRobinStore struct {
|
||||
index uint32
|
||||
stores []storage.ChunkStore
|
||||
}
|
||||
|
||||
func newRoundRobinStore(stores ...storage.ChunkStore) *roundRobinStore {
|
||||
return &roundRobinStore{
|
||||
stores: stores,
|
||||
}
|
||||
}
|
||||
|
||||
func (rrs *roundRobinStore) Get(addr storage.Address) (*storage.Chunk, error) {
|
||||
return nil, errors.New("get not well defined on round robin store")
|
||||
}
|
||||
|
||||
func (rrs *roundRobinStore) Put(chunk *storage.Chunk) {
|
||||
i := atomic.AddUint32(&rrs.index, 1)
|
||||
idx := int(i) % len(rrs.stores)
|
||||
rrs.stores[idx].Put(chunk)
|
||||
}
|
||||
|
||||
func (rrs *roundRobinStore) Close() {
|
||||
for _, store := range rrs.stores {
|
||||
store.Close()
|
||||
}
|
||||
}
|
||||
|
||||
type TestRegistry struct {
|
||||
*Registry
|
||||
fileStore *storage.FileStore
|
||||
}
|
||||
|
||||
func (r *TestRegistry) APIs() []rpc.API {
|
||||
a := r.Registry.APIs()
|
||||
a = append(a, rpc.API{
|
||||
Namespace: "stream",
|
||||
Version: "3.0",
|
||||
Service: r,
|
||||
Public: true,
|
||||
})
|
||||
return a
|
||||
}
|
||||
|
||||
func readAll(fileStore *storage.FileStore, hash []byte) (int64, error) {
|
||||
r, _ := fileStore.Retrieve(hash)
|
||||
buf := make([]byte, 1024)
|
||||
var n int
|
||||
var total int64
|
||||
var err error
|
||||
for (total == 0 || n > 0) && err == nil {
|
||||
n, err = r.ReadAt(buf, total)
|
||||
total += int64(n)
|
||||
}
|
||||
if err != nil && err != io.EOF {
|
||||
return total, err
|
||||
}
|
||||
return total, nil
|
||||
}
|
||||
|
||||
func (r *TestRegistry) ReadAll(hash common.Hash) (int64, error) {
|
||||
return readAll(r.fileStore, hash[:])
|
||||
}
|
||||
|
||||
func (r *TestRegistry) Start(server *p2p.Server) error {
|
||||
return r.Registry.Start(server)
|
||||
}
|
||||
|
||||
func (r *TestRegistry) Stop() error {
|
||||
return r.Registry.Stop()
|
||||
}
|
||||
|
||||
type TestExternalRegistry struct {
|
||||
*Registry
|
||||
}
|
||||
|
||||
func (r *TestExternalRegistry) APIs() []rpc.API {
|
||||
a := r.Registry.APIs()
|
||||
a = append(a, rpc.API{
|
||||
Namespace: "stream",
|
||||
Version: "3.0",
|
||||
Service: r,
|
||||
Public: true,
|
||||
})
|
||||
return a
|
||||
}
|
||||
|
||||
func (r *TestExternalRegistry) GetHashes(ctx context.Context, peerId discover.NodeID, s Stream) (*rpc.Subscription, error) {
|
||||
peer := r.getPeer(peerId)
|
||||
|
||||
client, err := peer.getClient(ctx, s)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
c := client.Client.(*testExternalClient)
|
||||
|
||||
notifier, supported := rpc.NotifierFromContext(ctx)
|
||||
if !supported {
|
||||
return nil, fmt.Errorf("Subscribe not supported")
|
||||
}
|
||||
|
||||
sub := notifier.CreateSubscription()
|
||||
|
||||
go func() {
|
||||
// if we begin sending event immediately some events
|
||||
// will probably be dropped since the subscription ID might not be send to
|
||||
// the client.
|
||||
// ref: rpc/subscription_test.go#L65
|
||||
time.Sleep(1 * time.Second)
|
||||
for {
|
||||
select {
|
||||
case h := <-c.hashes:
|
||||
<-c.enableNotificationsC // wait for notification subscription to complete
|
||||
if err := notifier.Notify(sub.ID, h); err != nil {
|
||||
log.Warn(fmt.Sprintf("rpc sub notifier notify stream %s: %v", s, err))
|
||||
}
|
||||
case err := <-sub.Err():
|
||||
if err != nil {
|
||||
log.Warn(fmt.Sprintf("caught subscription error in stream %s: %v", s, err))
|
||||
}
|
||||
case <-notifier.Closed():
|
||||
log.Trace(fmt.Sprintf("rpc sub notifier closed"))
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
return sub, nil
|
||||
}
|
||||
|
||||
func (r *TestExternalRegistry) EnableNotifications(peerId discover.NodeID, s Stream) error {
|
||||
peer := r.getPeer(peerId)
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
|
||||
defer cancel()
|
||||
|
||||
client, err := peer.getClient(ctx, s)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
close(client.Client.(*testExternalClient).enableNotificationsC)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// TODO: merge functionalities of testExternalClient and testExternalServer
|
||||
// with testClient and testServer.
|
||||
|
||||
type testExternalClient struct {
|
||||
hashes chan []byte
|
||||
db *storage.DBAPI
|
||||
enableNotificationsC chan struct{}
|
||||
}
|
||||
|
||||
func newTestExternalClient(db *storage.DBAPI) *testExternalClient {
|
||||
return &testExternalClient{
|
||||
hashes: make(chan []byte),
|
||||
db: db,
|
||||
enableNotificationsC: make(chan struct{}),
|
||||
}
|
||||
}
|
||||
|
||||
func (c *testExternalClient) NeedData(hash []byte) func() {
|
||||
chunk, _ := c.db.GetOrCreateRequest(hash)
|
||||
if chunk.ReqC == nil {
|
||||
return nil
|
||||
}
|
||||
c.hashes <- hash
|
||||
return func() {
|
||||
chunk.WaitToStore()
|
||||
}
|
||||
}
|
||||
|
||||
func (c *testExternalClient) BatchDone(Stream, uint64, []byte, []byte) func() (*TakeoverProof, error) {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *testExternalClient) Close() {}
|
||||
|
||||
const testExternalServerBatchSize = 10
|
||||
|
||||
type testExternalServer struct {
|
||||
t string
|
||||
keyFunc func(key []byte, index uint64)
|
||||
sessionAt uint64
|
||||
maxKeys uint64
|
||||
streamer *TestExternalRegistry
|
||||
}
|
||||
|
||||
func newTestExternalServer(t string, sessionAt, maxKeys uint64, keyFunc func(key []byte, index uint64)) *testExternalServer {
|
||||
if keyFunc == nil {
|
||||
keyFunc = binary.BigEndian.PutUint64
|
||||
}
|
||||
return &testExternalServer{
|
||||
t: t,
|
||||
keyFunc: keyFunc,
|
||||
sessionAt: sessionAt,
|
||||
maxKeys: maxKeys,
|
||||
}
|
||||
}
|
||||
|
||||
func (s *testExternalServer) SetNextBatch(from uint64, to uint64) ([]byte, uint64, uint64, *HandoverProof, error) {
|
||||
if from == 0 && to == 0 {
|
||||
from = s.sessionAt
|
||||
to = s.sessionAt + testExternalServerBatchSize
|
||||
}
|
||||
if to-from > testExternalServerBatchSize {
|
||||
to = from + testExternalServerBatchSize - 1
|
||||
}
|
||||
if from >= s.maxKeys && to > s.maxKeys {
|
||||
return nil, 0, 0, nil, io.EOF
|
||||
}
|
||||
if to > s.maxKeys {
|
||||
to = s.maxKeys
|
||||
}
|
||||
b := make([]byte, HashSize*(to-from+1))
|
||||
for i := from; i <= to; i++ {
|
||||
s.keyFunc(b[(i-from)*HashSize:(i-from+1)*HashSize], i)
|
||||
}
|
||||
return b, from, to, nil, nil
|
||||
}
|
||||
|
||||
func (s *testExternalServer) GetData([]byte) ([]byte, error) {
|
||||
return make([]byte, 4096), nil
|
||||
}
|
||||
|
||||
func (s *testExternalServer) Close() {}
|
||||
|
||||
// Sets the global value defaultSkipCheck.
|
||||
// It should be used in test function defer to reset the global value
|
||||
// to the original value.
|
||||
//
|
||||
// defer setDefaultSkipCheck(defaultSkipCheck)
|
||||
// defaultSkipCheck = skipCheck
|
||||
//
|
||||
// This works as defer function arguments evaluations are evaluated as ususal,
|
||||
// but only the function body invocation is deferred.
|
||||
func setDefaultSkipCheck(skipCheck bool) {
|
||||
defaultSkipCheck = skipCheck
|
||||
}
|
272
swarm/network/stream/delivery.go
Normal file
272
swarm/network/stream/delivery.go
Normal file
@@ -0,0 +1,272 @@
|
||||
// Copyright 2018 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package stream
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/metrics"
|
||||
"github.com/ethereum/go-ethereum/p2p/discover"
|
||||
"github.com/ethereum/go-ethereum/swarm/log"
|
||||
"github.com/ethereum/go-ethereum/swarm/network"
|
||||
"github.com/ethereum/go-ethereum/swarm/storage"
|
||||
)
|
||||
|
||||
const (
|
||||
swarmChunkServerStreamName = "RETRIEVE_REQUEST"
|
||||
deliveryCap = 32
|
||||
)
|
||||
|
||||
var (
|
||||
processReceivedChunksCount = metrics.NewRegisteredCounter("network.stream.received_chunks.count", nil)
|
||||
handleRetrieveRequestMsgCount = metrics.NewRegisteredCounter("network.stream.handle_retrieve_request_msg.count", nil)
|
||||
|
||||
requestFromPeersCount = metrics.NewRegisteredCounter("network.stream.request_from_peers.count", nil)
|
||||
requestFromPeersEachCount = metrics.NewRegisteredCounter("network.stream.request_from_peers_each.count", nil)
|
||||
)
|
||||
|
||||
type Delivery struct {
|
||||
db *storage.DBAPI
|
||||
overlay network.Overlay
|
||||
receiveC chan *ChunkDeliveryMsg
|
||||
getPeer func(discover.NodeID) *Peer
|
||||
}
|
||||
|
||||
func NewDelivery(overlay network.Overlay, db *storage.DBAPI) *Delivery {
|
||||
d := &Delivery{
|
||||
db: db,
|
||||
overlay: overlay,
|
||||
receiveC: make(chan *ChunkDeliveryMsg, deliveryCap),
|
||||
}
|
||||
|
||||
go d.processReceivedChunks()
|
||||
return d
|
||||
}
|
||||
|
||||
// SwarmChunkServer implements Server
|
||||
type SwarmChunkServer struct {
|
||||
deliveryC chan []byte
|
||||
batchC chan []byte
|
||||
db *storage.DBAPI
|
||||
currentLen uint64
|
||||
quit chan struct{}
|
||||
}
|
||||
|
||||
// NewSwarmChunkServer is SwarmChunkServer constructor
|
||||
func NewSwarmChunkServer(db *storage.DBAPI) *SwarmChunkServer {
|
||||
s := &SwarmChunkServer{
|
||||
deliveryC: make(chan []byte, deliveryCap),
|
||||
batchC: make(chan []byte),
|
||||
db: db,
|
||||
quit: make(chan struct{}),
|
||||
}
|
||||
go s.processDeliveries()
|
||||
return s
|
||||
}
|
||||
|
||||
// processDeliveries handles delivered chunk hashes
|
||||
func (s *SwarmChunkServer) processDeliveries() {
|
||||
var hashes []byte
|
||||
var batchC chan []byte
|
||||
for {
|
||||
select {
|
||||
case <-s.quit:
|
||||
return
|
||||
case hash := <-s.deliveryC:
|
||||
hashes = append(hashes, hash...)
|
||||
batchC = s.batchC
|
||||
case batchC <- hashes:
|
||||
hashes = nil
|
||||
batchC = nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// SetNextBatch
|
||||
func (s *SwarmChunkServer) SetNextBatch(_, _ uint64) (hashes []byte, from uint64, to uint64, proof *HandoverProof, err error) {
|
||||
select {
|
||||
case hashes = <-s.batchC:
|
||||
case <-s.quit:
|
||||
return
|
||||
}
|
||||
|
||||
from = s.currentLen
|
||||
s.currentLen += uint64(len(hashes))
|
||||
to = s.currentLen
|
||||
return
|
||||
}
|
||||
|
||||
// Close needs to be called on a stream server
|
||||
func (s *SwarmChunkServer) Close() {
|
||||
close(s.quit)
|
||||
}
|
||||
|
||||
// GetData retrives chunk data from db store
|
||||
func (s *SwarmChunkServer) GetData(key []byte) ([]byte, error) {
|
||||
chunk, err := s.db.Get(storage.Address(key))
|
||||
if err == storage.ErrFetching {
|
||||
<-chunk.ReqC
|
||||
} else if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return chunk.SData, nil
|
||||
}
|
||||
|
||||
// RetrieveRequestMsg is the protocol msg for chunk retrieve requests
|
||||
type RetrieveRequestMsg struct {
|
||||
Addr storage.Address
|
||||
SkipCheck bool
|
||||
}
|
||||
|
||||
func (d *Delivery) handleRetrieveRequestMsg(sp *Peer, req *RetrieveRequestMsg) error {
|
||||
log.Trace("received request", "peer", sp.ID(), "hash", req.Addr)
|
||||
handleRetrieveRequestMsgCount.Inc(1)
|
||||
|
||||
s, err := sp.getServer(NewStream(swarmChunkServerStreamName, "", false))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
streamer := s.Server.(*SwarmChunkServer)
|
||||
chunk, created := d.db.GetOrCreateRequest(req.Addr)
|
||||
if chunk.ReqC != nil {
|
||||
if created {
|
||||
if err := d.RequestFromPeers(chunk.Addr[:], true, sp.ID()); err != nil {
|
||||
log.Warn("unable to forward chunk request", "peer", sp.ID(), "key", chunk.Addr, "err", err)
|
||||
chunk.SetErrored(storage.ErrChunkForward)
|
||||
return nil
|
||||
}
|
||||
}
|
||||
go func() {
|
||||
t := time.NewTimer(10 * time.Minute)
|
||||
defer t.Stop()
|
||||
|
||||
log.Debug("waiting delivery", "peer", sp.ID(), "hash", req.Addr, "node", common.Bytes2Hex(d.overlay.BaseAddr()), "created", created)
|
||||
start := time.Now()
|
||||
select {
|
||||
case <-chunk.ReqC:
|
||||
log.Debug("retrieve request ReqC closed", "peer", sp.ID(), "hash", req.Addr, "time", time.Since(start))
|
||||
case <-t.C:
|
||||
log.Debug("retrieve request timeout", "peer", sp.ID(), "hash", req.Addr)
|
||||
chunk.SetErrored(storage.ErrChunkTimeout)
|
||||
return
|
||||
}
|
||||
chunk.SetErrored(nil)
|
||||
|
||||
if req.SkipCheck {
|
||||
err := sp.Deliver(chunk, s.priority)
|
||||
if err != nil {
|
||||
log.Warn("ERROR in handleRetrieveRequestMsg, DROPPING peer!", "err", err)
|
||||
sp.Drop(err)
|
||||
}
|
||||
}
|
||||
streamer.deliveryC <- chunk.Addr[:]
|
||||
}()
|
||||
return nil
|
||||
}
|
||||
// TODO: call the retrieve function of the outgoing syncer
|
||||
if req.SkipCheck {
|
||||
log.Trace("deliver", "peer", sp.ID(), "hash", chunk.Addr)
|
||||
if length := len(chunk.SData); length < 9 {
|
||||
log.Error("Chunk.SData to deliver is too short", "len(chunk.SData)", length, "address", chunk.Addr)
|
||||
}
|
||||
return sp.Deliver(chunk, s.priority)
|
||||
}
|
||||
streamer.deliveryC <- chunk.Addr[:]
|
||||
return nil
|
||||
}
|
||||
|
||||
type ChunkDeliveryMsg struct {
|
||||
Addr storage.Address
|
||||
SData []byte // the stored chunk Data (incl size)
|
||||
peer *Peer // set in handleChunkDeliveryMsg
|
||||
}
|
||||
|
||||
func (d *Delivery) handleChunkDeliveryMsg(sp *Peer, req *ChunkDeliveryMsg) error {
|
||||
req.peer = sp
|
||||
d.receiveC <- req
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *Delivery) processReceivedChunks() {
|
||||
R:
|
||||
for req := range d.receiveC {
|
||||
processReceivedChunksCount.Inc(1)
|
||||
|
||||
// this should be has locally
|
||||
chunk, err := d.db.Get(req.Addr)
|
||||
if err == nil {
|
||||
continue R
|
||||
}
|
||||
if err != storage.ErrFetching {
|
||||
log.Error("processReceivedChunks db error", "addr", req.Addr, "err", err, "chunk", chunk)
|
||||
continue R
|
||||
}
|
||||
select {
|
||||
case <-chunk.ReqC:
|
||||
log.Error("someone else delivered?", "hash", chunk.Addr.Hex())
|
||||
continue R
|
||||
default:
|
||||
}
|
||||
chunk.SData = req.SData
|
||||
d.db.Put(chunk)
|
||||
|
||||
go func(req *ChunkDeliveryMsg) {
|
||||
err := chunk.WaitToStore()
|
||||
if err == storage.ErrChunkInvalid {
|
||||
req.peer.Drop(err)
|
||||
}
|
||||
}(req)
|
||||
}
|
||||
}
|
||||
|
||||
// RequestFromPeers sends a chunk retrieve request to
|
||||
func (d *Delivery) RequestFromPeers(hash []byte, skipCheck bool, peersToSkip ...discover.NodeID) error {
|
||||
var success bool
|
||||
var err error
|
||||
requestFromPeersCount.Inc(1)
|
||||
d.overlay.EachConn(hash, 255, func(p network.OverlayConn, po int, nn bool) bool {
|
||||
spId := p.(network.Peer).ID()
|
||||
for _, p := range peersToSkip {
|
||||
if p == spId {
|
||||
log.Trace("Delivery.RequestFromPeers: skip peer", "peer", spId)
|
||||
return true
|
||||
}
|
||||
}
|
||||
sp := d.getPeer(spId)
|
||||
if sp == nil {
|
||||
log.Warn("Delivery.RequestFromPeers: peer not found", "id", spId)
|
||||
return true
|
||||
}
|
||||
// TODO: skip light nodes that do not accept retrieve requests
|
||||
err = sp.SendPriority(&RetrieveRequestMsg{
|
||||
Addr: hash,
|
||||
SkipCheck: skipCheck,
|
||||
}, Top)
|
||||
if err != nil {
|
||||
return true
|
||||
}
|
||||
requestFromPeersEachCount.Inc(1)
|
||||
success = true
|
||||
return false
|
||||
})
|
||||
if success {
|
||||
return nil
|
||||
}
|
||||
return errors.New("no peer found")
|
||||
}
|
699
swarm/network/stream/delivery_test.go
Normal file
699
swarm/network/stream/delivery_test.go
Normal file
@@ -0,0 +1,699 @@
|
||||
// Copyright 2018 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package stream
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
crand "crypto/rand"
|
||||
"fmt"
|
||||
"io"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/p2p/discover"
|
||||
"github.com/ethereum/go-ethereum/p2p/simulations"
|
||||
p2ptest "github.com/ethereum/go-ethereum/p2p/testing"
|
||||
"github.com/ethereum/go-ethereum/rpc"
|
||||
"github.com/ethereum/go-ethereum/swarm/log"
|
||||
"github.com/ethereum/go-ethereum/swarm/network"
|
||||
streamTesting "github.com/ethereum/go-ethereum/swarm/network/stream/testing"
|
||||
"github.com/ethereum/go-ethereum/swarm/storage"
|
||||
)
|
||||
|
||||
func TestStreamerRetrieveRequest(t *testing.T) {
|
||||
tester, streamer, _, teardown, err := newStreamerTester(t)
|
||||
defer teardown()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
peerID := tester.IDs[0]
|
||||
|
||||
streamer.delivery.RequestFromPeers(hash0[:], true)
|
||||
|
||||
err = tester.TestExchanges(p2ptest.Exchange{
|
||||
Label: "RetrieveRequestMsg",
|
||||
Expects: []p2ptest.Expect{
|
||||
{
|
||||
Code: 5,
|
||||
Msg: &RetrieveRequestMsg{
|
||||
Addr: hash0[:],
|
||||
SkipCheck: true,
|
||||
},
|
||||
Peer: peerID,
|
||||
},
|
||||
},
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
t.Fatalf("Expected no error, got %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestStreamerUpstreamRetrieveRequestMsgExchangeWithoutStore(t *testing.T) {
|
||||
tester, streamer, _, teardown, err := newStreamerTester(t)
|
||||
defer teardown()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
peerID := tester.IDs[0]
|
||||
|
||||
chunk := storage.NewChunk(storage.Address(hash0[:]), nil)
|
||||
|
||||
peer := streamer.getPeer(peerID)
|
||||
|
||||
peer.handleSubscribeMsg(&SubscribeMsg{
|
||||
Stream: NewStream(swarmChunkServerStreamName, "", false),
|
||||
History: nil,
|
||||
Priority: Top,
|
||||
})
|
||||
|
||||
err = tester.TestExchanges(p2ptest.Exchange{
|
||||
Label: "RetrieveRequestMsg",
|
||||
Triggers: []p2ptest.Trigger{
|
||||
{
|
||||
Code: 5,
|
||||
Msg: &RetrieveRequestMsg{
|
||||
Addr: chunk.Addr[:],
|
||||
},
|
||||
Peer: peerID,
|
||||
},
|
||||
},
|
||||
Expects: []p2ptest.Expect{
|
||||
{
|
||||
Code: 1,
|
||||
Msg: &OfferedHashesMsg{
|
||||
HandoverProof: nil,
|
||||
Hashes: nil,
|
||||
From: 0,
|
||||
To: 0,
|
||||
},
|
||||
Peer: peerID,
|
||||
},
|
||||
},
|
||||
})
|
||||
|
||||
expectedError := `exchange #0 "RetrieveRequestMsg": timed out`
|
||||
if err == nil || err.Error() != expectedError {
|
||||
t.Fatalf("Expected error %v, got %v", expectedError, err)
|
||||
}
|
||||
}
|
||||
|
||||
// upstream request server receives a retrieve Request and responds with
|
||||
// offered hashes or delivery if skipHash is set to true
|
||||
func TestStreamerUpstreamRetrieveRequestMsgExchange(t *testing.T) {
|
||||
tester, streamer, localStore, teardown, err := newStreamerTester(t)
|
||||
defer teardown()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
peerID := tester.IDs[0]
|
||||
peer := streamer.getPeer(peerID)
|
||||
|
||||
stream := NewStream(swarmChunkServerStreamName, "", false)
|
||||
|
||||
peer.handleSubscribeMsg(&SubscribeMsg{
|
||||
Stream: stream,
|
||||
History: nil,
|
||||
Priority: Top,
|
||||
})
|
||||
|
||||
hash := storage.Address(hash0[:])
|
||||
chunk := storage.NewChunk(hash, nil)
|
||||
chunk.SData = hash
|
||||
localStore.Put(chunk)
|
||||
chunk.WaitToStore()
|
||||
|
||||
err = tester.TestExchanges(p2ptest.Exchange{
|
||||
Label: "RetrieveRequestMsg",
|
||||
Triggers: []p2ptest.Trigger{
|
||||
{
|
||||
Code: 5,
|
||||
Msg: &RetrieveRequestMsg{
|
||||
Addr: hash,
|
||||
},
|
||||
Peer: peerID,
|
||||
},
|
||||
},
|
||||
Expects: []p2ptest.Expect{
|
||||
{
|
||||
Code: 1,
|
||||
Msg: &OfferedHashesMsg{
|
||||
HandoverProof: &HandoverProof{
|
||||
Handover: &Handover{},
|
||||
},
|
||||
Hashes: hash,
|
||||
From: 0,
|
||||
// TODO: why is this 32???
|
||||
To: 32,
|
||||
Stream: stream,
|
||||
},
|
||||
Peer: peerID,
|
||||
},
|
||||
},
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
hash = storage.Address(hash1[:])
|
||||
chunk = storage.NewChunk(hash, nil)
|
||||
chunk.SData = hash1[:]
|
||||
localStore.Put(chunk)
|
||||
chunk.WaitToStore()
|
||||
|
||||
err = tester.TestExchanges(p2ptest.Exchange{
|
||||
Label: "RetrieveRequestMsg",
|
||||
Triggers: []p2ptest.Trigger{
|
||||
{
|
||||
Code: 5,
|
||||
Msg: &RetrieveRequestMsg{
|
||||
Addr: hash,
|
||||
SkipCheck: true,
|
||||
},
|
||||
Peer: peerID,
|
||||
},
|
||||
},
|
||||
Expects: []p2ptest.Expect{
|
||||
{
|
||||
Code: 6,
|
||||
Msg: &ChunkDeliveryMsg{
|
||||
Addr: hash,
|
||||
SData: hash,
|
||||
},
|
||||
Peer: peerID,
|
||||
},
|
||||
},
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestStreamerDownstreamChunkDeliveryMsgExchange(t *testing.T) {
|
||||
tester, streamer, localStore, teardown, err := newStreamerTester(t)
|
||||
defer teardown()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
streamer.RegisterClientFunc("foo", func(p *Peer, t string, live bool) (Client, error) {
|
||||
return &testClient{
|
||||
t: t,
|
||||
}, nil
|
||||
})
|
||||
|
||||
peerID := tester.IDs[0]
|
||||
|
||||
stream := NewStream("foo", "", true)
|
||||
err = streamer.Subscribe(peerID, stream, NewRange(5, 8), Top)
|
||||
if err != nil {
|
||||
t.Fatalf("Expected no error, got %v", err)
|
||||
}
|
||||
|
||||
chunkKey := hash0[:]
|
||||
chunkData := hash1[:]
|
||||
chunk, created := localStore.GetOrCreateRequest(chunkKey)
|
||||
|
||||
if !created {
|
||||
t.Fatal("chunk already exists")
|
||||
}
|
||||
select {
|
||||
case <-chunk.ReqC:
|
||||
t.Fatal("chunk is already received")
|
||||
default:
|
||||
}
|
||||
|
||||
err = tester.TestExchanges(p2ptest.Exchange{
|
||||
Label: "Subscribe message",
|
||||
Expects: []p2ptest.Expect{
|
||||
{
|
||||
Code: 4,
|
||||
Msg: &SubscribeMsg{
|
||||
Stream: stream,
|
||||
History: NewRange(5, 8),
|
||||
Priority: Top,
|
||||
},
|
||||
Peer: peerID,
|
||||
},
|
||||
},
|
||||
},
|
||||
p2ptest.Exchange{
|
||||
Label: "ChunkDeliveryRequest message",
|
||||
Triggers: []p2ptest.Trigger{
|
||||
{
|
||||
Code: 6,
|
||||
Msg: &ChunkDeliveryMsg{
|
||||
Addr: chunkKey,
|
||||
SData: chunkData,
|
||||
},
|
||||
Peer: peerID,
|
||||
},
|
||||
},
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
t.Fatalf("Expected no error, got %v", err)
|
||||
}
|
||||
|
||||
timeout := time.NewTimer(1 * time.Second)
|
||||
|
||||
select {
|
||||
case <-timeout.C:
|
||||
t.Fatal("timeout receiving chunk")
|
||||
case <-chunk.ReqC:
|
||||
}
|
||||
|
||||
storedChunk, err := localStore.Get(chunkKey)
|
||||
if err != nil {
|
||||
t.Fatalf("Expected no error, got %v", err)
|
||||
}
|
||||
|
||||
if !bytes.Equal(storedChunk.SData, chunkData) {
|
||||
t.Fatal("Retrieved chunk has different data than original")
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func TestDeliveryFromNodes(t *testing.T) {
|
||||
testDeliveryFromNodes(t, 2, 1, dataChunkCount, true)
|
||||
testDeliveryFromNodes(t, 2, 1, dataChunkCount, false)
|
||||
testDeliveryFromNodes(t, 4, 1, dataChunkCount, true)
|
||||
testDeliveryFromNodes(t, 4, 1, dataChunkCount, false)
|
||||
testDeliveryFromNodes(t, 8, 1, dataChunkCount, true)
|
||||
testDeliveryFromNodes(t, 8, 1, dataChunkCount, false)
|
||||
testDeliveryFromNodes(t, 16, 1, dataChunkCount, true)
|
||||
testDeliveryFromNodes(t, 16, 1, dataChunkCount, false)
|
||||
}
|
||||
|
||||
func testDeliveryFromNodes(t *testing.T, nodes, conns, chunkCount int, skipCheck bool) {
|
||||
defaultSkipCheck = skipCheck
|
||||
toAddr = network.NewAddrFromNodeID
|
||||
createStoreFunc = createTestLocalStorageFromSim
|
||||
conf := &streamTesting.RunConfig{
|
||||
Adapter: *adapter,
|
||||
NodeCount: nodes,
|
||||
ConnLevel: conns,
|
||||
ToAddr: toAddr,
|
||||
Services: services,
|
||||
EnableMsgEvents: false,
|
||||
}
|
||||
|
||||
sim, teardown, err := streamTesting.NewSimulation(conf)
|
||||
var rpcSubscriptionsWg sync.WaitGroup
|
||||
defer func() {
|
||||
rpcSubscriptionsWg.Wait()
|
||||
teardown()
|
||||
}()
|
||||
if err != nil {
|
||||
t.Fatal(err.Error())
|
||||
}
|
||||
stores = make(map[discover.NodeID]storage.ChunkStore)
|
||||
for i, id := range sim.IDs {
|
||||
stores[id] = sim.Stores[i]
|
||||
}
|
||||
registries = make(map[discover.NodeID]*TestRegistry)
|
||||
deliveries = make(map[discover.NodeID]*Delivery)
|
||||
peerCount = func(id discover.NodeID) int {
|
||||
if sim.IDs[0] == id || sim.IDs[nodes-1] == id {
|
||||
return 1
|
||||
}
|
||||
return 2
|
||||
}
|
||||
|
||||
// here we distribute chunks of a random file into Stores of nodes 1 to nodes
|
||||
rrFileStore := storage.NewFileStore(newRoundRobinStore(sim.Stores[1:]...), storage.NewFileStoreParams())
|
||||
size := chunkCount * chunkSize
|
||||
fileHash, wait, err := rrFileStore.Store(io.LimitReader(crand.Reader, int64(size)), int64(size), false)
|
||||
// wait until all chunks stored
|
||||
wait()
|
||||
if err != nil {
|
||||
t.Fatal(err.Error())
|
||||
}
|
||||
errc := make(chan error, 1)
|
||||
waitPeerErrC = make(chan error)
|
||||
quitC := make(chan struct{})
|
||||
defer close(quitC)
|
||||
|
||||
action := func(ctx context.Context) error {
|
||||
// each node Subscribes to each other's swarmChunkServerStreamName
|
||||
// need to wait till an aynchronous process registers the peers in streamer.peers
|
||||
// that is used by Subscribe
|
||||
// using a global err channel to share betweem action and node service
|
||||
i := 0
|
||||
for err := range waitPeerErrC {
|
||||
if err != nil {
|
||||
return fmt.Errorf("error waiting for peers: %s", err)
|
||||
}
|
||||
i++
|
||||
if i == nodes {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
// each node subscribes to the upstream swarm chunk server stream
|
||||
// which responds to chunk retrieve requests all but the last node in the chain does not
|
||||
for j := 0; j < nodes-1; j++ {
|
||||
id := sim.IDs[j]
|
||||
err := sim.CallClient(id, func(client *rpc.Client) error {
|
||||
doneC, err := streamTesting.WatchDisconnections(id, client, errc, quitC)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
rpcSubscriptionsWg.Add(1)
|
||||
go func() {
|
||||
<-doneC
|
||||
rpcSubscriptionsWg.Done()
|
||||
}()
|
||||
ctx, cancel := context.WithTimeout(ctx, 1*time.Second)
|
||||
defer cancel()
|
||||
sid := sim.IDs[j+1]
|
||||
return client.CallContext(ctx, nil, "stream_subscribeStream", sid, NewStream(swarmChunkServerStreamName, "", false), NewRange(0, 0), Top)
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
// create a retriever FileStore for the pivot node
|
||||
delivery := deliveries[sim.IDs[0]]
|
||||
retrieveFunc := func(chunk *storage.Chunk) error {
|
||||
return delivery.RequestFromPeers(chunk.Addr[:], skipCheck)
|
||||
}
|
||||
netStore := storage.NewNetStore(sim.Stores[0].(*storage.LocalStore), retrieveFunc)
|
||||
fileStore := storage.NewFileStore(netStore, storage.NewFileStoreParams())
|
||||
|
||||
go func() {
|
||||
// start the retrieval on the pivot node - this will spawn retrieve requests for missing chunks
|
||||
// we must wait for the peer connections to have started before requesting
|
||||
n, err := readAll(fileStore, fileHash)
|
||||
log.Info(fmt.Sprintf("retrieved %v", fileHash), "read", n, "err", err)
|
||||
if err != nil {
|
||||
errc <- fmt.Errorf("requesting chunks action error: %v", err)
|
||||
}
|
||||
}()
|
||||
return nil
|
||||
}
|
||||
check := func(ctx context.Context, id discover.NodeID) (bool, error) {
|
||||
select {
|
||||
case err := <-errc:
|
||||
return false, err
|
||||
case <-ctx.Done():
|
||||
return false, ctx.Err()
|
||||
default:
|
||||
}
|
||||
var total int64
|
||||
err := sim.CallClient(id, func(client *rpc.Client) error {
|
||||
ctx, cancel := context.WithTimeout(ctx, 5*time.Second)
|
||||
defer cancel()
|
||||
return client.CallContext(ctx, &total, "stream_readAll", common.BytesToHash(fileHash))
|
||||
})
|
||||
log.Info(fmt.Sprintf("check if %08x is available locally: number of bytes read %v/%v (error: %v)", fileHash, total, size, err))
|
||||
if err != nil || total != int64(size) {
|
||||
return false, nil
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
|
||||
conf.Step = &simulations.Step{
|
||||
Action: action,
|
||||
Trigger: streamTesting.Trigger(10*time.Millisecond, quitC, sim.IDs[0]),
|
||||
// we are only testing the pivot node (net.Nodes[0])
|
||||
Expect: &simulations.Expectation{
|
||||
Nodes: sim.IDs[0:1],
|
||||
Check: check,
|
||||
},
|
||||
}
|
||||
startedAt := time.Now()
|
||||
timeout := 300 * time.Second
|
||||
ctx, cancel := context.WithTimeout(context.Background(), timeout)
|
||||
defer cancel()
|
||||
result, err := sim.Run(ctx, conf)
|
||||
finishedAt := time.Now()
|
||||
if err != nil {
|
||||
t.Fatalf("Setting up simulation failed: %v", err)
|
||||
}
|
||||
if result.Error != nil {
|
||||
t.Fatalf("Simulation failed: %s", result.Error)
|
||||
}
|
||||
streamTesting.CheckResult(t, result, startedAt, finishedAt)
|
||||
}
|
||||
|
||||
func BenchmarkDeliveryFromNodesWithoutCheck(b *testing.B) {
|
||||
for chunks := 32; chunks <= 128; chunks *= 2 {
|
||||
for i := 2; i < 32; i *= 2 {
|
||||
b.Run(
|
||||
fmt.Sprintf("nodes=%v,chunks=%v", i, chunks),
|
||||
func(b *testing.B) {
|
||||
benchmarkDeliveryFromNodes(b, i, 1, chunks, true)
|
||||
},
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkDeliveryFromNodesWithCheck(b *testing.B) {
|
||||
for chunks := 32; chunks <= 128; chunks *= 2 {
|
||||
for i := 2; i < 32; i *= 2 {
|
||||
b.Run(
|
||||
fmt.Sprintf("nodes=%v,chunks=%v", i, chunks),
|
||||
func(b *testing.B) {
|
||||
benchmarkDeliveryFromNodes(b, i, 1, chunks, false)
|
||||
},
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func benchmarkDeliveryFromNodes(b *testing.B, nodes, conns, chunkCount int, skipCheck bool) {
|
||||
defaultSkipCheck = skipCheck
|
||||
toAddr = network.NewAddrFromNodeID
|
||||
createStoreFunc = createTestLocalStorageFromSim
|
||||
registries = make(map[discover.NodeID]*TestRegistry)
|
||||
|
||||
timeout := 300 * time.Second
|
||||
ctx, cancel := context.WithTimeout(context.Background(), timeout)
|
||||
defer cancel()
|
||||
|
||||
conf := &streamTesting.RunConfig{
|
||||
Adapter: *adapter,
|
||||
NodeCount: nodes,
|
||||
ConnLevel: conns,
|
||||
ToAddr: toAddr,
|
||||
Services: services,
|
||||
EnableMsgEvents: false,
|
||||
}
|
||||
sim, teardown, err := streamTesting.NewSimulation(conf)
|
||||
var rpcSubscriptionsWg sync.WaitGroup
|
||||
defer func() {
|
||||
rpcSubscriptionsWg.Wait()
|
||||
teardown()
|
||||
}()
|
||||
if err != nil {
|
||||
b.Fatal(err.Error())
|
||||
}
|
||||
|
||||
stores = make(map[discover.NodeID]storage.ChunkStore)
|
||||
deliveries = make(map[discover.NodeID]*Delivery)
|
||||
for i, id := range sim.IDs {
|
||||
stores[id] = sim.Stores[i]
|
||||
}
|
||||
peerCount = func(id discover.NodeID) int {
|
||||
if sim.IDs[0] == id || sim.IDs[nodes-1] == id {
|
||||
return 1
|
||||
}
|
||||
return 2
|
||||
}
|
||||
// wait channel for all nodes all peer connections to set up
|
||||
waitPeerErrC = make(chan error)
|
||||
|
||||
// create a FileStore for the last node in the chain which we are gonna write to
|
||||
remoteFileStore := storage.NewFileStore(sim.Stores[nodes-1], storage.NewFileStoreParams())
|
||||
|
||||
// channel to signal simulation initialisation with action call complete
|
||||
// or node disconnections
|
||||
disconnectC := make(chan error)
|
||||
quitC := make(chan struct{})
|
||||
|
||||
initC := make(chan error)
|
||||
|
||||
action := func(ctx context.Context) error {
|
||||
// each node Subscribes to each other's swarmChunkServerStreamName
|
||||
// need to wait till an aynchronous process registers the peers in streamer.peers
|
||||
// that is used by Subscribe
|
||||
// waitPeerErrC using a global err channel to share betweem action and node service
|
||||
i := 0
|
||||
for err := range waitPeerErrC {
|
||||
if err != nil {
|
||||
return fmt.Errorf("error waiting for peers: %s", err)
|
||||
}
|
||||
i++
|
||||
if i == nodes {
|
||||
break
|
||||
}
|
||||
}
|
||||
var err error
|
||||
// each node except the last one subscribes to the upstream swarm chunk server stream
|
||||
// which responds to chunk retrieve requests
|
||||
for j := 0; j < nodes-1; j++ {
|
||||
id := sim.IDs[j]
|
||||
err = sim.CallClient(id, func(client *rpc.Client) error {
|
||||
doneC, err := streamTesting.WatchDisconnections(id, client, disconnectC, quitC)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
rpcSubscriptionsWg.Add(1)
|
||||
go func() {
|
||||
<-doneC
|
||||
rpcSubscriptionsWg.Done()
|
||||
}()
|
||||
ctx, cancel := context.WithTimeout(ctx, 1*time.Second)
|
||||
defer cancel()
|
||||
sid := sim.IDs[j+1] // the upstream peer's id
|
||||
return client.CallContext(ctx, nil, "stream_subscribeStream", sid, NewStream(swarmChunkServerStreamName, "", false), NewRange(0, 0), Top)
|
||||
})
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
}
|
||||
initC <- err
|
||||
return nil
|
||||
}
|
||||
|
||||
// the check function is only triggered when the benchmark finishes
|
||||
trigger := make(chan discover.NodeID)
|
||||
check := func(ctx context.Context, id discover.NodeID) (_ bool, err error) {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
conf.Step = &simulations.Step{
|
||||
Action: action,
|
||||
Trigger: trigger,
|
||||
// we are only testing the pivot node (net.Nodes[0])
|
||||
Expect: &simulations.Expectation{
|
||||
Nodes: sim.IDs[0:1],
|
||||
Check: check,
|
||||
},
|
||||
}
|
||||
|
||||
// run the simulation in the background
|
||||
errc := make(chan error)
|
||||
go func() {
|
||||
_, err := sim.Run(ctx, conf)
|
||||
close(quitC)
|
||||
errc <- err
|
||||
}()
|
||||
|
||||
// wait for simulation action to complete stream subscriptions
|
||||
err = <-initC
|
||||
if err != nil {
|
||||
b.Fatalf("simulation failed to initialise. expected no error. got %v", err)
|
||||
}
|
||||
|
||||
// create a retriever FileStore for the pivot node
|
||||
// by now deliveries are set for each node by the streamer service
|
||||
delivery := deliveries[sim.IDs[0]]
|
||||
retrieveFunc := func(chunk *storage.Chunk) error {
|
||||
return delivery.RequestFromPeers(chunk.Addr[:], skipCheck)
|
||||
}
|
||||
netStore := storage.NewNetStore(sim.Stores[0].(*storage.LocalStore), retrieveFunc)
|
||||
|
||||
// benchmark loop
|
||||
b.ResetTimer()
|
||||
b.StopTimer()
|
||||
Loop:
|
||||
for i := 0; i < b.N; i++ {
|
||||
// uploading chunkCount random chunks to the last node
|
||||
hashes := make([]storage.Address, chunkCount)
|
||||
for i := 0; i < chunkCount; i++ {
|
||||
// create actual size real chunks
|
||||
hash, wait, err := remoteFileStore.Store(io.LimitReader(crand.Reader, int64(chunkSize)), int64(chunkSize), false)
|
||||
// wait until all chunks stored
|
||||
wait()
|
||||
if err != nil {
|
||||
b.Fatalf("expected no error. got %v", err)
|
||||
}
|
||||
// collect the hashes
|
||||
hashes[i] = hash
|
||||
}
|
||||
// now benchmark the actual retrieval
|
||||
// netstore.Get is called for each hash in a go routine and errors are collected
|
||||
b.StartTimer()
|
||||
errs := make(chan error)
|
||||
for _, hash := range hashes {
|
||||
go func(h storage.Address) {
|
||||
_, err := netStore.Get(h)
|
||||
log.Warn("test check netstore get", "hash", h, "err", err)
|
||||
errs <- err
|
||||
}(hash)
|
||||
}
|
||||
// count and report retrieval errors
|
||||
// if there are misses then chunk timeout is too low for the distance and volume (?)
|
||||
var total, misses int
|
||||
for err := range errs {
|
||||
if err != nil {
|
||||
log.Warn(err.Error())
|
||||
misses++
|
||||
}
|
||||
total++
|
||||
if total == chunkCount {
|
||||
break
|
||||
}
|
||||
}
|
||||
b.StopTimer()
|
||||
|
||||
select {
|
||||
case err = <-disconnectC:
|
||||
if err != nil {
|
||||
break Loop
|
||||
}
|
||||
default:
|
||||
}
|
||||
|
||||
if misses > 0 {
|
||||
err = fmt.Errorf("%v chunk not found out of %v", misses, total)
|
||||
break Loop
|
||||
}
|
||||
}
|
||||
|
||||
select {
|
||||
case <-quitC:
|
||||
case trigger <- sim.IDs[0]:
|
||||
}
|
||||
if err == nil {
|
||||
err = <-errc
|
||||
} else {
|
||||
if e := <-errc; e != nil {
|
||||
b.Errorf("sim.Run function error: %v", e)
|
||||
}
|
||||
}
|
||||
|
||||
// benchmark over, trigger the check function to conclude the simulation
|
||||
if err != nil {
|
||||
b.Fatalf("expected no error. got %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func createTestLocalStorageFromSim(id discover.NodeID, addr *network.BzzAddr) (storage.ChunkStore, error) {
|
||||
return stores[id], nil
|
||||
}
|
42
swarm/network/stream/intervals/dbstore_test.go
Normal file
42
swarm/network/stream/intervals/dbstore_test.go
Normal file
@@ -0,0 +1,42 @@
|
||||
// Copyright 2018 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package intervals
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/ethereum/go-ethereum/swarm/state"
|
||||
)
|
||||
|
||||
// TestDBStore tests basic functionality of DBStore.
|
||||
func TestDBStore(t *testing.T) {
|
||||
dir, err := ioutil.TempDir("", "intervals_test_db_store")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
defer os.RemoveAll(dir)
|
||||
|
||||
store, err := state.NewDBStore(dir)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer store.Close()
|
||||
|
||||
testStore(t, store)
|
||||
}
|
206
swarm/network/stream/intervals/intervals.go
Normal file
206
swarm/network/stream/intervals/intervals.go
Normal file
@@ -0,0 +1,206 @@
|
||||
// Copyright 2018 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package intervals
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// Intervals store a list of intervals. Its purpose is to provide
|
||||
// methods to add new intervals and retrieve missing intervals that
|
||||
// need to be added.
|
||||
// It may be used in synchronization of streaming data to persist
|
||||
// retrieved data ranges between sessions.
|
||||
type Intervals struct {
|
||||
start uint64
|
||||
ranges [][2]uint64
|
||||
mu sync.RWMutex
|
||||
}
|
||||
|
||||
// New creates a new instance of Intervals.
|
||||
// Start argument limits the lower bound of intervals.
|
||||
// No range bellow start bound will be added by Add method or
|
||||
// returned by Next method. This limit may be used for
|
||||
// tracking "live" synchronization, where the sync session
|
||||
// starts from a specific value, and if "live" sync intervals
|
||||
// need to be merged with historical ones, it can be safely done.
|
||||
func NewIntervals(start uint64) *Intervals {
|
||||
return &Intervals{
|
||||
start: start,
|
||||
}
|
||||
}
|
||||
|
||||
// Add adds a new range to intervals. Range start and end are values
|
||||
// are both inclusive.
|
||||
func (i *Intervals) Add(start, end uint64) {
|
||||
i.mu.Lock()
|
||||
defer i.mu.Unlock()
|
||||
|
||||
i.add(start, end)
|
||||
}
|
||||
|
||||
func (i *Intervals) add(start, end uint64) {
|
||||
if start < i.start {
|
||||
start = i.start
|
||||
}
|
||||
if end < i.start {
|
||||
return
|
||||
}
|
||||
minStartJ := -1
|
||||
maxEndJ := -1
|
||||
j := 0
|
||||
for ; j < len(i.ranges); j++ {
|
||||
if minStartJ < 0 {
|
||||
if (start <= i.ranges[j][0] && end+1 >= i.ranges[j][0]) || (start <= i.ranges[j][1]+1 && end+1 >= i.ranges[j][1]) {
|
||||
if i.ranges[j][0] < start {
|
||||
start = i.ranges[j][0]
|
||||
}
|
||||
minStartJ = j
|
||||
}
|
||||
}
|
||||
if (start <= i.ranges[j][1] && end+1 >= i.ranges[j][1]) || (start <= i.ranges[j][0] && end+1 >= i.ranges[j][0]) {
|
||||
if i.ranges[j][1] > end {
|
||||
end = i.ranges[j][1]
|
||||
}
|
||||
maxEndJ = j
|
||||
}
|
||||
if end+1 <= i.ranges[j][0] {
|
||||
break
|
||||
}
|
||||
}
|
||||
if minStartJ < 0 && maxEndJ < 0 {
|
||||
i.ranges = append(i.ranges[:j], append([][2]uint64{{start, end}}, i.ranges[j:]...)...)
|
||||
return
|
||||
}
|
||||
if minStartJ >= 0 {
|
||||
i.ranges[minStartJ][0] = start
|
||||
}
|
||||
if maxEndJ >= 0 {
|
||||
i.ranges[maxEndJ][1] = end
|
||||
}
|
||||
if minStartJ >= 0 && maxEndJ >= 0 && minStartJ != maxEndJ {
|
||||
i.ranges[maxEndJ][0] = start
|
||||
i.ranges = append(i.ranges[:minStartJ], i.ranges[maxEndJ:]...)
|
||||
}
|
||||
}
|
||||
|
||||
// Merge adds all the intervals from the the m Interval to current one.
|
||||
func (i *Intervals) Merge(m *Intervals) {
|
||||
m.mu.RLock()
|
||||
defer m.mu.RUnlock()
|
||||
i.mu.Lock()
|
||||
defer i.mu.Unlock()
|
||||
|
||||
for _, r := range m.ranges {
|
||||
i.add(r[0], r[1])
|
||||
}
|
||||
}
|
||||
|
||||
// Next returns the first range interval that is not fulfilled. Returned
|
||||
// start and end values are both inclusive, meaning that the whole range
|
||||
// including start and end need to be added in order to full the gap
|
||||
// in intervals.
|
||||
// Returned value for end is 0 if the next interval is after the whole
|
||||
// range that is stored in Intervals. Zero end value represents no limit
|
||||
// on the next interval length.
|
||||
func (i *Intervals) Next() (start, end uint64) {
|
||||
i.mu.RLock()
|
||||
defer i.mu.RUnlock()
|
||||
|
||||
l := len(i.ranges)
|
||||
if l == 0 {
|
||||
return i.start, 0
|
||||
}
|
||||
if i.ranges[0][0] != i.start {
|
||||
return i.start, i.ranges[0][0] - 1
|
||||
}
|
||||
if l == 1 {
|
||||
return i.ranges[0][1] + 1, 0
|
||||
}
|
||||
return i.ranges[0][1] + 1, i.ranges[1][0] - 1
|
||||
}
|
||||
|
||||
// Last returns the value that is at the end of the last interval.
|
||||
func (i *Intervals) Last() (end uint64) {
|
||||
i.mu.RLock()
|
||||
defer i.mu.RUnlock()
|
||||
|
||||
l := len(i.ranges)
|
||||
if l == 0 {
|
||||
return 0
|
||||
}
|
||||
return i.ranges[l-1][1]
|
||||
}
|
||||
|
||||
// String returns a descriptive representation of range intervals
|
||||
// in [] notation, as a list of two element vectors.
|
||||
func (i *Intervals) String() string {
|
||||
return fmt.Sprint(i.ranges)
|
||||
}
|
||||
|
||||
// MarshalBinary encodes Intervals parameters into a semicolon separated list.
|
||||
// The first element in the list is base36-encoded start value. The following
|
||||
// elements are two base36-encoded value ranges separated by comma.
|
||||
func (i *Intervals) MarshalBinary() (data []byte, err error) {
|
||||
d := make([][]byte, len(i.ranges)+1)
|
||||
d[0] = []byte(strconv.FormatUint(i.start, 36))
|
||||
for j := range i.ranges {
|
||||
r := i.ranges[j]
|
||||
d[j+1] = []byte(strconv.FormatUint(r[0], 36) + "," + strconv.FormatUint(r[1], 36))
|
||||
}
|
||||
return bytes.Join(d, []byte(";")), nil
|
||||
}
|
||||
|
||||
// UnmarshalBinary decodes data according to the Intervals.MarshalBinary format.
|
||||
func (i *Intervals) UnmarshalBinary(data []byte) (err error) {
|
||||
d := bytes.Split(data, []byte(";"))
|
||||
l := len(d)
|
||||
if l == 0 {
|
||||
return nil
|
||||
}
|
||||
if l >= 1 {
|
||||
i.start, err = strconv.ParseUint(string(d[0]), 36, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if l == 1 {
|
||||
return nil
|
||||
}
|
||||
|
||||
i.ranges = make([][2]uint64, 0, l-1)
|
||||
for j := 1; j < l; j++ {
|
||||
r := bytes.SplitN(d[j], []byte(","), 2)
|
||||
if len(r) < 2 {
|
||||
return fmt.Errorf("range %d has less then 2 elements", j)
|
||||
}
|
||||
start, err := strconv.ParseUint(string(r[0]), 36, 64)
|
||||
if err != nil {
|
||||
return fmt.Errorf("parsing the first element in range %d: %v", j, err)
|
||||
}
|
||||
end, err := strconv.ParseUint(string(r[1]), 36, 64)
|
||||
if err != nil {
|
||||
return fmt.Errorf("parsing the second element in range %d: %v", j, err)
|
||||
}
|
||||
i.ranges = append(i.ranges, [2]uint64{start, end})
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
395
swarm/network/stream/intervals/intervals_test.go
Normal file
395
swarm/network/stream/intervals/intervals_test.go
Normal file
@@ -0,0 +1,395 @@
|
||||
// Copyright 2018 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package intervals
|
||||
|
||||
import "testing"
|
||||
|
||||
// Test tests Interval methods Add, Next and Last for various
|
||||
// initial state.
|
||||
func Test(t *testing.T) {
|
||||
for i, tc := range []struct {
|
||||
startLimit uint64
|
||||
initial [][2]uint64
|
||||
start uint64
|
||||
end uint64
|
||||
expected string
|
||||
nextStart uint64
|
||||
nextEnd uint64
|
||||
last uint64
|
||||
}{
|
||||
{
|
||||
initial: nil,
|
||||
start: 0,
|
||||
end: 0,
|
||||
expected: "[[0 0]]",
|
||||
nextStart: 1,
|
||||
nextEnd: 0,
|
||||
last: 0,
|
||||
},
|
||||
{
|
||||
initial: nil,
|
||||
start: 0,
|
||||
end: 10,
|
||||
expected: "[[0 10]]",
|
||||
nextStart: 11,
|
||||
nextEnd: 0,
|
||||
last: 10,
|
||||
},
|
||||
{
|
||||
initial: nil,
|
||||
start: 5,
|
||||
end: 15,
|
||||
expected: "[[5 15]]",
|
||||
nextStart: 0,
|
||||
nextEnd: 4,
|
||||
last: 15,
|
||||
},
|
||||
{
|
||||
initial: [][2]uint64{{0, 0}},
|
||||
start: 0,
|
||||
end: 0,
|
||||
expected: "[[0 0]]",
|
||||
nextStart: 1,
|
||||
nextEnd: 0,
|
||||
last: 0,
|
||||
},
|
||||
{
|
||||
initial: [][2]uint64{{0, 0}},
|
||||
start: 5,
|
||||
end: 15,
|
||||
expected: "[[0 0] [5 15]]",
|
||||
nextStart: 1,
|
||||
nextEnd: 4,
|
||||
last: 15,
|
||||
},
|
||||
{
|
||||
initial: [][2]uint64{{5, 15}},
|
||||
start: 5,
|
||||
end: 15,
|
||||
expected: "[[5 15]]",
|
||||
nextStart: 0,
|
||||
nextEnd: 4,
|
||||
last: 15,
|
||||
},
|
||||
{
|
||||
initial: [][2]uint64{{5, 15}},
|
||||
start: 5,
|
||||
end: 20,
|
||||
expected: "[[5 20]]",
|
||||
nextStart: 0,
|
||||
nextEnd: 4,
|
||||
last: 20,
|
||||
},
|
||||
{
|
||||
initial: [][2]uint64{{5, 15}},
|
||||
start: 10,
|
||||
end: 20,
|
||||
expected: "[[5 20]]",
|
||||
nextStart: 0,
|
||||
nextEnd: 4,
|
||||
last: 20,
|
||||
},
|
||||
{
|
||||
initial: [][2]uint64{{5, 15}},
|
||||
start: 0,
|
||||
end: 20,
|
||||
expected: "[[0 20]]",
|
||||
nextStart: 21,
|
||||
nextEnd: 0,
|
||||
last: 20,
|
||||
},
|
||||
{
|
||||
initial: [][2]uint64{{5, 15}},
|
||||
start: 2,
|
||||
end: 10,
|
||||
expected: "[[2 15]]",
|
||||
nextStart: 0,
|
||||
nextEnd: 1,
|
||||
last: 15,
|
||||
},
|
||||
{
|
||||
initial: [][2]uint64{{5, 15}},
|
||||
start: 2,
|
||||
end: 4,
|
||||
expected: "[[2 15]]",
|
||||
nextStart: 0,
|
||||
nextEnd: 1,
|
||||
last: 15,
|
||||
},
|
||||
{
|
||||
initial: [][2]uint64{{5, 15}},
|
||||
start: 2,
|
||||
end: 5,
|
||||
expected: "[[2 15]]",
|
||||
nextStart: 0,
|
||||
nextEnd: 1,
|
||||
last: 15,
|
||||
},
|
||||
{
|
||||
initial: [][2]uint64{{5, 15}},
|
||||
start: 2,
|
||||
end: 3,
|
||||
expected: "[[2 3] [5 15]]",
|
||||
nextStart: 0,
|
||||
nextEnd: 1,
|
||||
last: 15,
|
||||
},
|
||||
{
|
||||
initial: [][2]uint64{{5, 15}},
|
||||
start: 2,
|
||||
end: 4,
|
||||
expected: "[[2 15]]",
|
||||
nextStart: 0,
|
||||
nextEnd: 1,
|
||||
last: 15,
|
||||
},
|
||||
{
|
||||
initial: [][2]uint64{{0, 1}, {5, 15}},
|
||||
start: 2,
|
||||
end: 4,
|
||||
expected: "[[0 15]]",
|
||||
nextStart: 16,
|
||||
nextEnd: 0,
|
||||
last: 15,
|
||||
},
|
||||
{
|
||||
initial: [][2]uint64{{0, 5}, {15, 20}},
|
||||
start: 2,
|
||||
end: 10,
|
||||
expected: "[[0 10] [15 20]]",
|
||||
nextStart: 11,
|
||||
nextEnd: 14,
|
||||
last: 20,
|
||||
},
|
||||
{
|
||||
initial: [][2]uint64{{0, 5}, {15, 20}},
|
||||
start: 8,
|
||||
end: 18,
|
||||
expected: "[[0 5] [8 20]]",
|
||||
nextStart: 6,
|
||||
nextEnd: 7,
|
||||
last: 20,
|
||||
},
|
||||
{
|
||||
initial: [][2]uint64{{0, 5}, {15, 20}},
|
||||
start: 2,
|
||||
end: 17,
|
||||
expected: "[[0 20]]",
|
||||
nextStart: 21,
|
||||
nextEnd: 0,
|
||||
last: 20,
|
||||
},
|
||||
{
|
||||
initial: [][2]uint64{{0, 5}, {15, 20}},
|
||||
start: 2,
|
||||
end: 25,
|
||||
expected: "[[0 25]]",
|
||||
nextStart: 26,
|
||||
nextEnd: 0,
|
||||
last: 25,
|
||||
},
|
||||
{
|
||||
initial: [][2]uint64{{0, 5}, {15, 20}},
|
||||
start: 5,
|
||||
end: 14,
|
||||
expected: "[[0 20]]",
|
||||
nextStart: 21,
|
||||
nextEnd: 0,
|
||||
last: 20,
|
||||
},
|
||||
{
|
||||
initial: [][2]uint64{{0, 5}, {15, 20}},
|
||||
start: 6,
|
||||
end: 14,
|
||||
expected: "[[0 20]]",
|
||||
nextStart: 21,
|
||||
nextEnd: 0,
|
||||
last: 20,
|
||||
},
|
||||
{
|
||||
initial: [][2]uint64{{0, 5}, {15, 20}, {30, 40}},
|
||||
start: 6,
|
||||
end: 29,
|
||||
expected: "[[0 40]]",
|
||||
nextStart: 41,
|
||||
nextEnd: 0,
|
||||
last: 40,
|
||||
},
|
||||
{
|
||||
initial: [][2]uint64{{0, 5}, {15, 20}, {30, 40}, {50, 60}},
|
||||
start: 3,
|
||||
end: 55,
|
||||
expected: "[[0 60]]",
|
||||
nextStart: 61,
|
||||
nextEnd: 0,
|
||||
last: 60,
|
||||
},
|
||||
{
|
||||
initial: [][2]uint64{{0, 5}, {15, 20}, {30, 40}, {50, 60}},
|
||||
start: 21,
|
||||
end: 49,
|
||||
expected: "[[0 5] [15 60]]",
|
||||
nextStart: 6,
|
||||
nextEnd: 14,
|
||||
last: 60,
|
||||
},
|
||||
{
|
||||
initial: [][2]uint64{{0, 5}, {15, 20}, {30, 40}, {50, 60}},
|
||||
start: 0,
|
||||
end: 100,
|
||||
expected: "[[0 100]]",
|
||||
nextStart: 101,
|
||||
nextEnd: 0,
|
||||
last: 100,
|
||||
},
|
||||
{
|
||||
startLimit: 100,
|
||||
initial: nil,
|
||||
start: 0,
|
||||
end: 0,
|
||||
expected: "[]",
|
||||
nextStart: 100,
|
||||
nextEnd: 0,
|
||||
last: 0,
|
||||
},
|
||||
{
|
||||
startLimit: 100,
|
||||
initial: nil,
|
||||
start: 20,
|
||||
end: 30,
|
||||
expected: "[]",
|
||||
nextStart: 100,
|
||||
nextEnd: 0,
|
||||
last: 0,
|
||||
},
|
||||
{
|
||||
startLimit: 100,
|
||||
initial: nil,
|
||||
start: 50,
|
||||
end: 100,
|
||||
expected: "[[100 100]]",
|
||||
nextStart: 101,
|
||||
nextEnd: 0,
|
||||
last: 100,
|
||||
},
|
||||
{
|
||||
startLimit: 100,
|
||||
initial: nil,
|
||||
start: 50,
|
||||
end: 110,
|
||||
expected: "[[100 110]]",
|
||||
nextStart: 111,
|
||||
nextEnd: 0,
|
||||
last: 110,
|
||||
},
|
||||
{
|
||||
startLimit: 100,
|
||||
initial: nil,
|
||||
start: 120,
|
||||
end: 130,
|
||||
expected: "[[120 130]]",
|
||||
nextStart: 100,
|
||||
nextEnd: 119,
|
||||
last: 130,
|
||||
},
|
||||
{
|
||||
startLimit: 100,
|
||||
initial: nil,
|
||||
start: 120,
|
||||
end: 130,
|
||||
expected: "[[120 130]]",
|
||||
nextStart: 100,
|
||||
nextEnd: 119,
|
||||
last: 130,
|
||||
},
|
||||
} {
|
||||
intervals := NewIntervals(tc.startLimit)
|
||||
intervals.ranges = tc.initial
|
||||
intervals.Add(tc.start, tc.end)
|
||||
got := intervals.String()
|
||||
if got != tc.expected {
|
||||
t.Errorf("interval #%d: expected %s, got %s", i, tc.expected, got)
|
||||
}
|
||||
nextStart, nextEnd := intervals.Next()
|
||||
if nextStart != tc.nextStart {
|
||||
t.Errorf("interval #%d, expected next start %d, got %d", i, tc.nextStart, nextStart)
|
||||
}
|
||||
if nextEnd != tc.nextEnd {
|
||||
t.Errorf("interval #%d, expected next end %d, got %d", i, tc.nextEnd, nextEnd)
|
||||
}
|
||||
last := intervals.Last()
|
||||
if last != tc.last {
|
||||
t.Errorf("interval #%d, expected last %d, got %d", i, tc.last, last)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestMerge(t *testing.T) {
|
||||
for i, tc := range []struct {
|
||||
initial [][2]uint64
|
||||
merge [][2]uint64
|
||||
expected string
|
||||
}{
|
||||
{
|
||||
initial: nil,
|
||||
merge: nil,
|
||||
expected: "[]",
|
||||
},
|
||||
{
|
||||
initial: [][2]uint64{{10, 20}},
|
||||
merge: nil,
|
||||
expected: "[[10 20]]",
|
||||
},
|
||||
{
|
||||
initial: nil,
|
||||
merge: [][2]uint64{{15, 25}},
|
||||
expected: "[[15 25]]",
|
||||
},
|
||||
{
|
||||
initial: [][2]uint64{{0, 100}},
|
||||
merge: [][2]uint64{{150, 250}},
|
||||
expected: "[[0 100] [150 250]]",
|
||||
},
|
||||
{
|
||||
initial: [][2]uint64{{0, 100}},
|
||||
merge: [][2]uint64{{101, 250}},
|
||||
expected: "[[0 250]]",
|
||||
},
|
||||
{
|
||||
initial: [][2]uint64{{0, 10}, {30, 40}},
|
||||
merge: [][2]uint64{{20, 25}, {41, 50}},
|
||||
expected: "[[0 10] [20 25] [30 50]]",
|
||||
},
|
||||
{
|
||||
initial: [][2]uint64{{0, 5}, {15, 20}, {30, 40}, {50, 60}},
|
||||
merge: [][2]uint64{{6, 25}},
|
||||
expected: "[[0 25] [30 40] [50 60]]",
|
||||
},
|
||||
} {
|
||||
intervals := NewIntervals(0)
|
||||
intervals.ranges = tc.initial
|
||||
m := NewIntervals(0)
|
||||
m.ranges = tc.merge
|
||||
|
||||
intervals.Merge(m)
|
||||
|
||||
got := intervals.String()
|
||||
if got != tc.expected {
|
||||
t.Errorf("interval #%d: expected %s, got %s", i, tc.expected, got)
|
||||
}
|
||||
}
|
||||
}
|
80
swarm/network/stream/intervals/store_test.go
Normal file
80
swarm/network/stream/intervals/store_test.go
Normal file
@@ -0,0 +1,80 @@
|
||||
// Copyright 2018 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package intervals
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"testing"
|
||||
|
||||
"github.com/ethereum/go-ethereum/swarm/state"
|
||||
)
|
||||
|
||||
var ErrNotFound = errors.New("not found")
|
||||
|
||||
// TestInmemoryStore tests basic functionality of InmemoryStore.
|
||||
func TestInmemoryStore(t *testing.T) {
|
||||
testStore(t, state.NewInmemoryStore())
|
||||
}
|
||||
|
||||
// testStore is a helper function to test various Store implementations.
|
||||
func testStore(t *testing.T, s state.Store) {
|
||||
key1 := "key1"
|
||||
i1 := NewIntervals(0)
|
||||
i1.Add(10, 20)
|
||||
if err := s.Put(key1, i1); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
i := &Intervals{}
|
||||
err := s.Get(key1, i)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if i.String() != i1.String() {
|
||||
t.Errorf("expected interval %s, got %s", i1, i)
|
||||
}
|
||||
|
||||
key2 := "key2"
|
||||
i2 := NewIntervals(0)
|
||||
i2.Add(10, 20)
|
||||
if err := s.Put(key2, i2); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
err = s.Get(key2, i)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if i.String() != i2.String() {
|
||||
t.Errorf("expected interval %s, got %s", i2, i)
|
||||
}
|
||||
|
||||
if err := s.Delete(key1); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := s.Get(key1, i); err != state.ErrNotFound {
|
||||
t.Errorf("expected error %v, got %s", state.ErrNotFound, err)
|
||||
}
|
||||
if err := s.Get(key2, i); err != nil {
|
||||
t.Errorf("expected error %v, got %s", nil, err)
|
||||
}
|
||||
|
||||
if err := s.Delete(key2); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := s.Get(key2, i); err != state.ErrNotFound {
|
||||
t.Errorf("expected error %v, got %s", state.ErrNotFound, err)
|
||||
}
|
||||
}
|
313
swarm/network/stream/intervals_test.go
Normal file
313
swarm/network/stream/intervals_test.go
Normal file
@@ -0,0 +1,313 @@
|
||||
// Copyright 2018 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package stream
|
||||
|
||||
import (
|
||||
"context"
|
||||
crand "crypto/rand"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"io"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/node"
|
||||
"github.com/ethereum/go-ethereum/p2p/discover"
|
||||
"github.com/ethereum/go-ethereum/p2p/simulations"
|
||||
"github.com/ethereum/go-ethereum/p2p/simulations/adapters"
|
||||
"github.com/ethereum/go-ethereum/rpc"
|
||||
"github.com/ethereum/go-ethereum/swarm/network"
|
||||
streamTesting "github.com/ethereum/go-ethereum/swarm/network/stream/testing"
|
||||
"github.com/ethereum/go-ethereum/swarm/state"
|
||||
"github.com/ethereum/go-ethereum/swarm/storage"
|
||||
)
|
||||
|
||||
var (
|
||||
externalStreamName = "externalStream"
|
||||
externalStreamSessionAt uint64 = 50
|
||||
externalStreamMaxKeys uint64 = 100
|
||||
)
|
||||
|
||||
func newIntervalsStreamerService(ctx *adapters.ServiceContext) (node.Service, error) {
|
||||
id := ctx.Config.ID
|
||||
addr := toAddr(id)
|
||||
kad := network.NewKademlia(addr.Over(), network.NewKadParams())
|
||||
store := stores[id].(*storage.LocalStore)
|
||||
db := storage.NewDBAPI(store)
|
||||
delivery := NewDelivery(kad, db)
|
||||
deliveries[id] = delivery
|
||||
r := NewRegistry(addr, delivery, db, state.NewInmemoryStore(), &RegistryOptions{
|
||||
SkipCheck: defaultSkipCheck,
|
||||
})
|
||||
|
||||
r.RegisterClientFunc(externalStreamName, func(p *Peer, t string, live bool) (Client, error) {
|
||||
return newTestExternalClient(db), nil
|
||||
})
|
||||
r.RegisterServerFunc(externalStreamName, func(p *Peer, t string, live bool) (Server, error) {
|
||||
return newTestExternalServer(t, externalStreamSessionAt, externalStreamMaxKeys, nil), nil
|
||||
})
|
||||
|
||||
go func() {
|
||||
waitPeerErrC <- waitForPeers(r, 1*time.Second, peerCount(id))
|
||||
}()
|
||||
return &TestExternalRegistry{r}, nil
|
||||
}
|
||||
|
||||
func TestIntervals(t *testing.T) {
|
||||
testIntervals(t, true, nil, false)
|
||||
testIntervals(t, false, NewRange(9, 26), false)
|
||||
testIntervals(t, true, NewRange(9, 26), false)
|
||||
|
||||
testIntervals(t, true, nil, true)
|
||||
testIntervals(t, false, NewRange(9, 26), true)
|
||||
testIntervals(t, true, NewRange(9, 26), true)
|
||||
}
|
||||
|
||||
func testIntervals(t *testing.T, live bool, history *Range, skipCheck bool) {
|
||||
nodes := 2
|
||||
chunkCount := dataChunkCount
|
||||
|
||||
defer setDefaultSkipCheck(defaultSkipCheck)
|
||||
defaultSkipCheck = skipCheck
|
||||
|
||||
toAddr = network.NewAddrFromNodeID
|
||||
conf := &streamTesting.RunConfig{
|
||||
Adapter: *adapter,
|
||||
NodeCount: nodes,
|
||||
ConnLevel: 1,
|
||||
ToAddr: toAddr,
|
||||
Services: services,
|
||||
DefaultService: "intervalsStreamer",
|
||||
}
|
||||
|
||||
sim, teardown, err := streamTesting.NewSimulation(conf)
|
||||
var rpcSubscriptionsWg sync.WaitGroup
|
||||
defer func() {
|
||||
rpcSubscriptionsWg.Wait()
|
||||
teardown()
|
||||
}()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
stores = make(map[discover.NodeID]storage.ChunkStore)
|
||||
deliveries = make(map[discover.NodeID]*Delivery)
|
||||
for i, id := range sim.IDs {
|
||||
stores[id] = sim.Stores[i]
|
||||
}
|
||||
|
||||
peerCount = func(id discover.NodeID) int {
|
||||
return 1
|
||||
}
|
||||
|
||||
fileStore := storage.NewFileStore(sim.Stores[0], storage.NewFileStoreParams())
|
||||
size := chunkCount * chunkSize
|
||||
_, wait, err := fileStore.Store(io.LimitReader(crand.Reader, int64(size)), int64(size), false)
|
||||
wait()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
errc := make(chan error, 1)
|
||||
waitPeerErrC = make(chan error)
|
||||
quitC := make(chan struct{})
|
||||
defer close(quitC)
|
||||
|
||||
action := func(ctx context.Context) error {
|
||||
i := 0
|
||||
for err := range waitPeerErrC {
|
||||
if err != nil {
|
||||
return fmt.Errorf("error waiting for peers: %s", err)
|
||||
}
|
||||
i++
|
||||
if i == nodes {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
id := sim.IDs[1]
|
||||
|
||||
err := sim.CallClient(id, func(client *rpc.Client) error {
|
||||
|
||||
sid := sim.IDs[0]
|
||||
|
||||
doneC, err := streamTesting.WatchDisconnections(id, client, errc, quitC)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
rpcSubscriptionsWg.Add(1)
|
||||
go func() {
|
||||
<-doneC
|
||||
rpcSubscriptionsWg.Done()
|
||||
}()
|
||||
ctx, cancel := context.WithTimeout(ctx, 100*time.Second)
|
||||
defer cancel()
|
||||
|
||||
err = client.CallContext(ctx, nil, "stream_subscribeStream", sid, NewStream(externalStreamName, "", live), history, Top)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
liveErrC := make(chan error)
|
||||
historyErrC := make(chan error)
|
||||
|
||||
go func() {
|
||||
if !live {
|
||||
close(liveErrC)
|
||||
return
|
||||
}
|
||||
|
||||
var err error
|
||||
defer func() {
|
||||
liveErrC <- err
|
||||
}()
|
||||
|
||||
// live stream
|
||||
liveHashesChan := make(chan []byte)
|
||||
liveSubscription, err := client.Subscribe(ctx, "stream", liveHashesChan, "getHashes", sid, NewStream(externalStreamName, "", true))
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
defer liveSubscription.Unsubscribe()
|
||||
|
||||
i := externalStreamSessionAt
|
||||
|
||||
// we have subscribed, enable notifications
|
||||
err = client.CallContext(ctx, nil, "stream_enableNotifications", sid, NewStream(externalStreamName, "", true))
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
for {
|
||||
select {
|
||||
case hash := <-liveHashesChan:
|
||||
h := binary.BigEndian.Uint64(hash)
|
||||
if h != i {
|
||||
err = fmt.Errorf("expected live hash %d, got %d", i, h)
|
||||
return
|
||||
}
|
||||
i++
|
||||
if i > externalStreamMaxKeys {
|
||||
return
|
||||
}
|
||||
case err = <-liveSubscription.Err():
|
||||
return
|
||||
case <-ctx.Done():
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
go func() {
|
||||
if live && history == nil {
|
||||
close(historyErrC)
|
||||
return
|
||||
}
|
||||
|
||||
var err error
|
||||
defer func() {
|
||||
historyErrC <- err
|
||||
}()
|
||||
|
||||
// history stream
|
||||
historyHashesChan := make(chan []byte)
|
||||
historySubscription, err := client.Subscribe(ctx, "stream", historyHashesChan, "getHashes", sid, NewStream(externalStreamName, "", false))
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
defer historySubscription.Unsubscribe()
|
||||
|
||||
var i uint64
|
||||
historyTo := externalStreamMaxKeys
|
||||
if history != nil {
|
||||
i = history.From
|
||||
if history.To != 0 {
|
||||
historyTo = history.To
|
||||
}
|
||||
}
|
||||
|
||||
// we have subscribed, enable notifications
|
||||
err = client.CallContext(ctx, nil, "stream_enableNotifications", sid, NewStream(externalStreamName, "", false))
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
for {
|
||||
select {
|
||||
case hash := <-historyHashesChan:
|
||||
h := binary.BigEndian.Uint64(hash)
|
||||
if h != i {
|
||||
err = fmt.Errorf("expected history hash %d, got %d", i, h)
|
||||
return
|
||||
}
|
||||
i++
|
||||
if i > historyTo {
|
||||
return
|
||||
}
|
||||
case err = <-historySubscription.Err():
|
||||
return
|
||||
case <-ctx.Done():
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
if err := <-liveErrC; err != nil {
|
||||
return err
|
||||
}
|
||||
if err := <-historyErrC; err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
return err
|
||||
}
|
||||
check := func(ctx context.Context, id discover.NodeID) (bool, error) {
|
||||
select {
|
||||
case err := <-errc:
|
||||
return false, err
|
||||
case <-ctx.Done():
|
||||
return false, ctx.Err()
|
||||
default:
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
|
||||
conf.Step = &simulations.Step{
|
||||
Action: action,
|
||||
Trigger: streamTesting.Trigger(10*time.Millisecond, quitC, sim.IDs[0]),
|
||||
Expect: &simulations.Expectation{
|
||||
Nodes: sim.IDs[1:1],
|
||||
Check: check,
|
||||
},
|
||||
}
|
||||
startedAt := time.Now()
|
||||
timeout := 300 * time.Second
|
||||
ctx, cancel := context.WithTimeout(context.Background(), timeout)
|
||||
defer cancel()
|
||||
result, err := sim.Run(ctx, conf)
|
||||
finishedAt := time.Now()
|
||||
if err != nil {
|
||||
t.Fatalf("Setting up simulation failed: %v", err)
|
||||
}
|
||||
if result.Error != nil {
|
||||
t.Fatalf("Simulation failed: %s", result.Error)
|
||||
}
|
||||
streamTesting.CheckResult(t, result, startedAt, finishedAt)
|
||||
}
|
370
swarm/network/stream/messages.go
Normal file
370
swarm/network/stream/messages.go
Normal file
@@ -0,0 +1,370 @@
|
||||
// Copyright 2018 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package stream
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/metrics"
|
||||
"github.com/ethereum/go-ethereum/swarm/log"
|
||||
bv "github.com/ethereum/go-ethereum/swarm/network/bitvector"
|
||||
"github.com/ethereum/go-ethereum/swarm/storage"
|
||||
)
|
||||
|
||||
// Stream defines a unique stream identifier.
|
||||
type Stream struct {
|
||||
// Name is used for Client and Server functions identification.
|
||||
Name string
|
||||
// Key is the name of specific stream data.
|
||||
Key string
|
||||
// Live defines whether the stream delivers only new data
|
||||
// for the specific stream.
|
||||
Live bool
|
||||
}
|
||||
|
||||
func NewStream(name string, key string, live bool) Stream {
|
||||
return Stream{
|
||||
Name: name,
|
||||
Key: key,
|
||||
Live: live,
|
||||
}
|
||||
}
|
||||
|
||||
// String returns a stream id based on all Stream fields.
|
||||
func (s Stream) String() string {
|
||||
t := "h"
|
||||
if s.Live {
|
||||
t = "l"
|
||||
}
|
||||
return fmt.Sprintf("%s|%s|%s", s.Name, s.Key, t)
|
||||
}
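A quick illustration, not in the original source: with the format string above, only the trailing letter distinguishes a live stream id from its history counterpart. exampleStreamIDs is a hypothetical helper name.

// exampleStreamIDs is a hypothetical helper, for illustration only.
func exampleStreamIDs() {
	live := NewStream("externalStream", "", true)
	history := NewStream("externalStream", "", false)
	fmt.Println(live)    // externalStream||l
	fmt.Println(history) // externalStream||h
}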
|
||||
|
||||
// SubscribeMsg is the protocol msg for requesting a stream (section)
|
||||
type SubscribeMsg struct {
|
||||
Stream Stream
|
||||
History *Range `rlp:"nil"`
|
||||
Priority uint8 // delivered on priority channel
|
||||
}
|
||||
|
||||
// RequestSubscriptionMsg is the protocol msg for a node to request subscription to a
|
||||
// specific stream
|
||||
type RequestSubscriptionMsg struct {
|
||||
Stream Stream
|
||||
History *Range `rlp:"nil"`
|
||||
Priority uint8 // delivered on priority channel
|
||||
}
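A hedged sketch, not part of this commit, of how a bounded history subscription request might be assembled; SubscribeMsg has the same field layout. NewRange and Top come from elsewhere in this change set, and treating Top as a plain uint8 priority constant is an assumption here.

// exampleSubscriptionRequest is a hypothetical helper, for illustration only.
func exampleSubscriptionRequest() RequestSubscriptionMsg {
	// Ask the peer for keys 0..99 of the history stream, delivered on the
	// Top priority channel (assumed to be a uint8 constant in this package).
	return RequestSubscriptionMsg{
		Stream:   NewStream("externalStream", "", false),
		History:  NewRange(0, 99),
		Priority: Top,
	}
}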
|
||||
|
||||
func (p *Peer) handleRequestSubscription(req *RequestSubscriptionMsg) (err error) {
|
||||
log.Debug(fmt.Sprintf("handleRequestSubscription: streamer %s to subscribe to %s with stream %s", p.streamer.addr.ID(), p.ID(), req.Stream))
|
||||
return p.streamer.Subscribe(p.ID(), req.Stream, req.History, req.Priority)
|
||||
}
|
||||
|
||||
func (p *Peer) handleSubscribeMsg(req *SubscribeMsg) (err error) {
|
||||
metrics.GetOrRegisterCounter("peer.handlesubscribemsg", nil).Inc(1)
|
||||
|
||||
defer func() {
|
||||
if err != nil {
|
||||
if e := p.Send(SubscribeErrorMsg{
|
||||
Error: err.Error(),
|
||||
}); e != nil {
|
||||
log.Error("send stream subscribe error message", "err", err)
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
log.Debug("received subscription", "from", p.streamer.addr.ID(), "peer", p.ID(), "stream", req.Stream, "history", req.History)
|
||||
|
||||
f, err := p.streamer.GetServerFunc(req.Stream.Name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
s, err := f(p, req.Stream.Key, req.Stream.Live)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
os, err := p.setServer(req.Stream, s, req.Priority)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var from uint64
|
||||
var to uint64
|
||||
if !req.Stream.Live && req.History != nil {
|
||||
from = req.History.From
|
||||
to = req.History.To
|
||||
}
|
||||
|
||||
go func() {
|
||||
if err := p.SendOfferedHashes(os, from, to); err != nil {
|
||||
log.Warn("SendOfferedHashes dropping peer", "err", err)
|
||||
p.Drop(err)
|
||||
}
|
||||
}()
|
||||
|
||||
if req.Stream.Live && req.History != nil {
|
||||
// subscribe to the history stream
|
||||
s, err := f(p, req.Stream.Key, false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
os, err := p.setServer(getHistoryStream(req.Stream), s, getHistoryPriority(req.Priority))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
go func() {
|
||||
if err := p.SendOfferedHashes(os, req.History.From, req.History.To); err != nil {
|
||||
log.Warn("SendOfferedHashes dropping peer", "err", err)
|
||||
p.Drop(err)
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
type SubscribeErrorMsg struct {
|
||||
Error string
|
||||
}
|
||||
|
||||
func (p *Peer) handleSubscribeErrorMsg(req *SubscribeErrorMsg) (err error) {
|
||||
return fmt.Errorf("subscribe to peer %s: %v", p.ID(), req.Error)
|
||||
}
|
||||
|
||||
type UnsubscribeMsg struct {
|
||||
Stream Stream
|
||||
}
|
||||
|
||||
func (p *Peer) handleUnsubscribeMsg(req *UnsubscribeMsg) error {
|
||||
return p.removeServer(req.Stream)
|
||||
}
|
||||
|
||||
type QuitMsg struct {
|
||||
Stream Stream
|
||||
}
|
||||
|
||||
func (p *Peer) handleQuitMsg(req *QuitMsg) error {
|
||||
return p.removeClient(req.Stream)
|
||||
}
|
||||
|
||||
// OfferedHashesMsg is the protocol msg for offering to hand over a
|
||||
// stream section
|
||||
type OfferedHashesMsg struct {
|
||||
Stream Stream // name of Stream
|
||||
From, To uint64 // peer and db-specific entry count
|
||||
Hashes []byte // stream of hashes (128)
|
||||
*HandoverProof // HandoverProof
|
||||
}
|
||||
|
||||
// String pretty prints OfferedHashesMsg
|
||||
func (m OfferedHashesMsg) String() string {
|
||||
return fmt.Sprintf("Stream '%v' [%v-%v] (%v)", m.Stream, m.From, m.To, len(m.Hashes)/HashSize)
|
||||
}
|
||||
|
||||
// handleOfferedHashesMsg protocol msg handler calls the incoming streamer interface
|
||||
// Filter method
|
||||
func (p *Peer) handleOfferedHashesMsg(req *OfferedHashesMsg) error {
|
||||
metrics.GetOrRegisterCounter("peer.handleofferedhashes", nil).Inc(1)
|
||||
|
||||
c, _, err := p.getOrSetClient(req.Stream, req.From, req.To)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
hashes := req.Hashes
|
||||
want, err := bv.New(len(hashes) / HashSize)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error initiaising bitvector of length %v: %v", len(hashes)/HashSize, err)
|
||||
}
|
||||
wg := sync.WaitGroup{}
|
||||
for i := 0; i < len(hashes); i += HashSize {
|
||||
hash := hashes[i : i+HashSize]
|
||||
|
||||
if wait := c.NeedData(hash); wait != nil {
|
||||
want.Set(i/HashSize, true)
|
||||
wg.Add(1)
|
||||
// create request and wait until the chunk data arrives and is stored
|
||||
go func(w func()) {
|
||||
w()
|
||||
wg.Done()
|
||||
}(wait)
|
||||
}
|
||||
}
|
||||
// done := make(chan bool)
|
||||
// go func() {
|
||||
// wg.Wait()
|
||||
// close(done)
|
||||
// }()
|
||||
// go func() {
|
||||
// select {
|
||||
// case <-done:
|
||||
// s.next <- s.batchDone(p, req, hashes)
|
||||
// case <-time.After(1 * time.Second):
|
||||
// p.Drop(errors.New("timeout waiting for batch to be delivered"))
|
||||
// }
|
||||
// }()
|
||||
go func() {
|
||||
wg.Wait()
|
||||
select {
|
||||
case c.next <- c.batchDone(p, req, hashes):
|
||||
case <-c.quit:
|
||||
}
|
||||
}()
|
||||
// only send wantedKeysMsg if all missing chunks of the previous batch arrived
|
||||
// except for the very first batch, which is unblocked by the initial nil sent on c.next
|
||||
if c.stream.Live {
|
||||
c.sessionAt = req.From
|
||||
}
|
||||
from, to := c.nextBatch(req.To + 1)
|
||||
log.Trace("received offered batch", "peer", p.ID(), "stream", req.Stream, "from", req.From, "to", req.To)
|
||||
if from == to {
|
||||
return nil
|
||||
}
|
||||
|
||||
msg := &WantedHashesMsg{
|
||||
Stream: req.Stream,
|
||||
Want: want.Bytes(),
|
||||
From: from,
|
||||
To: to,
|
||||
}
|
||||
go func() {
|
||||
select {
|
||||
case <-time.After(120 * time.Second):
|
||||
log.Warn("handleOfferedHashesMsg timeout, so dropping peer")
|
||||
p.Drop(errors.New("handle offered hashes timeout"))
|
||||
return
|
||||
case err := <-c.next:
|
||||
if err != nil {
|
||||
log.Warn("c.next dropping peer", "err", err)
|
||||
p.Drop(err)
|
||||
return
|
||||
}
|
||||
case <-c.quit:
|
||||
return
|
||||
}
|
||||
log.Trace("sending want batch", "peer", p.ID(), "stream", msg.Stream, "from", msg.From, "to", msg.To)
|
||||
err := p.SendPriority(msg, c.priority)
|
||||
if err != nil {
|
||||
log.Warn("SendPriority err, so dropping peer", "err", err)
|
||||
p.Drop(err)
|
||||
}
|
||||
}()
|
||||
return nil
|
||||
}
|
||||
|
||||
// WantedHashesMsg is the protocol msg data for signaling which hashes
|
||||
// offered in OfferedHashesMsg the downstream peer actually wants sent over
|
||||
type WantedHashesMsg struct {
|
||||
Stream Stream
|
||||
Want []byte // bitvector indicating which keys of the batch needed
|
||||
From, To uint64 // next interval offset - empty if not to be continued
|
||||
}
|
||||
|
||||
// String pretty prints WantedHashesMsg
|
||||
func (m WantedHashesMsg) String() string {
|
||||
return fmt.Sprintf("Stream '%v', Want: %x, Next: [%v-%v]", m.Stream, m.Want, m.From, m.To)
|
||||
}
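A small sketch, not part of this commit, of how the Want bitvector travels between the two handlers: the downstream peer sets one bit per offered hash it still needs, and the upstream side rebuilds the vector with bv.NewFromBytes to serve only those chunks. exampleWantBitvector and the batch size are hypothetical.

// exampleWantBitvector is a hypothetical helper, for illustration only.
// batch is the number of hashes offered in one OfferedHashesMsg.
func exampleWantBitvector(batch int) ([]byte, error) {
	// Downstream: mark which offered hashes are still missing locally.
	want, err := bv.New(batch)
	if err != nil {
		return nil, err
	}
	want.Set(0, true) // need the first offered hash
	// ... one Set call per missing chunk ...

	// The compact byte form is what travels in WantedHashesMsg.Want; the
	// upstream peer rebuilds it with bv.NewFromBytes(wantBytes, batch) and
	// checks Get(i) before delivering hashes[i*HashSize:(i+1)*HashSize].
	return want.Bytes(), nil
}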
|
||||
|
||||
// handleWantedHashesMsg protocol msg handler
|
||||
// * sends the next batch of unsynced keys
|
||||
// * sends the actual data chunks as per WantedHashesMsg
|
||||
func (p *Peer) handleWantedHashesMsg(req *WantedHashesMsg) error {
|
||||
metrics.GetOrRegisterCounter("peer.handlewantedhashesmsg", nil).Inc(1)
|
||||
|
||||
log.Trace("received wanted batch", "peer", p.ID(), "stream", req.Stream, "from", req.From, "to", req.To)
|
||||
s, err := p.getServer(req.Stream)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
hashes := s.currentBatch
|
||||
// launch in go routine since GetBatch blocks until new hashes arrive
|
||||
go func() {
|
||||
if err := p.SendOfferedHashes(s, req.From, req.To); err != nil {
|
||||
log.Warn("SendOfferedHashes dropping peer", "err", err)
|
||||
p.Drop(err)
|
||||
}
|
||||
}()
|
||||
// go p.SendOfferedHashes(s, req.From, req.To)
|
||||
l := len(hashes) / HashSize
|
||||
|
||||
log.Trace("wanted batch length", "peer", p.ID(), "stream", req.Stream, "from", req.From, "to", req.To, "lenhashes", len(hashes), "l", l)
|
||||
want, err := bv.NewFromBytes(req.Want, l)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error initiaising bitvector of length %v: %v", l, err)
|
||||
}
|
||||
for i := 0; i < l; i++ {
|
||||
if want.Get(i) {
|
||||
metrics.GetOrRegisterCounter("peer.handlewantedhashesmsg.actualget", nil).Inc(1)
|
||||
|
||||
hash := hashes[i*HashSize : (i+1)*HashSize]
|
||||
data, err := s.GetData(hash)
|
||||
if err != nil {
|
||||
return fmt.Errorf("handleWantedHashesMsg get data %x: %v", hash, err)
|
||||
}
|
||||
chunk := storage.NewChunk(hash, nil)
|
||||
chunk.SData = data
|
||||
if length := len(chunk.SData); length < 9 {
|
||||
log.Error("Chunk.SData to sync is too short", "len(chunk.SData)", length, "address", chunk.Addr)
|
||||
}
|
||||
if err := p.Deliver(chunk, s.priority); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Handover represents a statement that the upstream peer hands over the stream section
|
||||
type Handover struct {
|
||||
Stream Stream // name of stream
|
||||
Start, End uint64 // index of hashes
|
||||
Root []byte // Root hash for indexed segment inclusion proofs
|
||||
}
|
||||
|
||||
// HandoverProof represents a signed statement that the upstream peer handed over the stream section
|
||||
type HandoverProof struct {
|
||||
Sig []byte // Sign(Hash(Serialisation(Handover)))
|
||||
*Handover
|
||||
}
|
||||
|
||||
// Takeover represents a statement that downstream peer took over (stored all data)
|
||||
// the stream section that was handed over
|
||||
type Takeover Handover
|
||||
|
||||
// TakeoverProof represents a signed statement that the downstream peer took over
|
||||
// the stream section
|
||||
type TakeoverProof struct {
|
||||
Sig []byte // Sign(Hash(Serialisation(Takeover)))
|
||||
*Takeover
|
||||
}
|
||||
|
||||
// TakeoverProofMsg is the protocol msg sent by downstream peer
|
||||
type TakeoverProofMsg TakeoverProof
|
||||
|
||||
// String pretty prints TakeoverProofMsg
|
||||
func (m TakeoverProofMsg) String() string {
|
||||
return fmt.Sprintf("Stream: '%v' [%v-%v], Root: %x, Sig: %x", m.Stream, m.Start, m.End, m.Root, m.Sig)
|
||||
}
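Illustrative only, not part of this commit: because TakeoverProofMsg embeds *Takeover, the Stream, Start, End and Root fields used by String above are promoted from the embedded struct. The helper name and the signature/root bytes below are placeholders.

// exampleTakeoverProofMsg is a hypothetical helper, for illustration only.
func exampleTakeoverProofMsg() TakeoverProofMsg {
	return TakeoverProofMsg{
		Sig: []byte{0x01, 0x02}, // placeholder signature bytes
		Takeover: &Takeover{
			Stream: NewStream("externalStream", "", false),
			Start:  0,
			End:    99,
			Root:   []byte{0xaa, 0xbb}, // placeholder root hash
		},
	}
}
// fmt.Println(exampleTakeoverProofMsg()) would print something like:
//   Stream: 'externalStream||h' [0-99], Root: aabb, Sig: 0102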
|
||||
|
||||
func (p *Peer) handleTakeoverProofMsg(req *TakeoverProofMsg) error {
|
||||
_, err := p.getServer(req.Stream)
|
||||
// store the strongest takeoverproof for the stream in streamer
|
||||
return err
|
||||
}
|
328
swarm/network/stream/peer.go
Normal file
328
swarm/network/stream/peer.go
Normal file
@@ -0,0 +1,328 @@
|
||||
// Copyright 2018 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package stream
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/metrics"
|
||||
"github.com/ethereum/go-ethereum/p2p/protocols"
|
||||
"github.com/ethereum/go-ethereum/swarm/log"
|
||||
pq "github.com/ethereum/go-ethereum/swarm/network/priorityqueue"
|
||||
"github.com/ethereum/go-ethereum/swarm/network/stream/intervals"
|
||||
"github.com/ethereum/go-ethereum/swarm/state"
|
||||
"github.com/ethereum/go-ethereum/swarm/storage"
|
||||
)
|
||||
|
||||
var sendTimeout = 30 * time.Second
|
||||
|
||||
type notFoundError struct {
|
||||
t string
|
||||
s Stream
|
||||
}
|
||||
|
||||
func newNotFoundError(t string, s Stream) *notFoundError {
|
||||
return ¬FoundError{t: t, s: s}
|
||||
}
|
||||
|
||||
func (e *notFoundError) Error() string {
|
||||
return fmt.Sprintf("%s not found for stream %q", e.t, e.s)
|
||||
}
|
||||
|
||||
// Peer is the Peer extension for the streaming protocol
|
||||
type Peer struct {
|
||||
*protocols.Peer
|
||||
streamer *Registry
|
||||
pq *pq.PriorityQueue
|
||||
serverMu sync.RWMutex
|
||||
clientMu sync.RWMutex // protects both clients and clientParams
|
||||
servers map[Stream]*server
|
||||
clients map[Stream]*client
|
||||
// clientParams map keeps required client arguments
|
||||
// that are set on Registry.Subscribe and used
|
||||
// on creating a new client in offered hashes handler.
|
||||
clientParams map[Stream]*clientParams
|
||||
quit chan struct{}
|
||||
}
|
||||
|
||||
// NewPeer is the constructor for Peer
|
||||
func NewPeer(peer *protocols.Peer, streamer *Registry) *Peer {
|
||||
p := &Peer{
|
||||
Peer: peer,
|
||||
pq: pq.New(int(PriorityQueue), PriorityQueueCap),
|
||||
streamer: streamer,
|
||||
servers: make(map[Stream]*server),
|
||||
clients: make(map[Stream]*client),
|
||||
clientParams: make(map[Stream]*clientParams),
|
||||
quit: make(chan struct{}),
|
||||
}
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
go p.pq.Run(ctx, func(i interface{}) { p.Send(i) })
|
||||
go func() {
|
||||
<-p.quit
|
||||
cancel()
|
||||
}()
|
||||
return p
|
||||
}
|
||||
|
||||
// Deliver sends a ChunkDeliveryMsg protocol message to the peer
|
||||
func (p *Peer) Deliver(chunk *storage.Chunk, priority uint8) error {
|
||||
msg := &ChunkDeliveryMsg{
|
||||
Addr: chunk.Addr,
|
||||
SData: chunk.SData,
|
||||
}
|
||||
return p.SendPriority(msg, priority)
|
||||
}
|
||||
|
||||
// SendPriority sends message to the peer using the outgoing priority queue
|
||||
func (p *Peer) SendPriority(msg interface{}, priority uint8) error {
|
||||
defer metrics.GetOrRegisterResettingTimer(fmt.Sprintf("peer.sendpriority_t.%d", priority), nil).UpdateSince(time.Now())
|
||||
metrics.GetOrRegisterCounter(fmt.Sprintf("peer.sendpriority.%d", priority), nil).Inc(1)
|
||||
ctx, cancel := context.WithTimeout(context.Background(), sendTimeout)
|
||||
defer cancel()
|
||||
return p.pq.Push(ctx, msg, int(priority))
|
||||
}
|
||||
|
||||
// SendOfferedHashes sends OfferedHashesMsg protocol msg
|
||||
func (p *Peer) SendOfferedHashes(s *server, f, t uint64) error {
|
||||
hashes, from, to, proof, err := s.SetNextBatch(f, t)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// true only when quitting
|
||||
if len(hashes) == 0 {
|
||||
return nil
|
||||
}
|
||||
if proof == nil {
|
||||
proof = &HandoverProof{
|
||||
Handover: &Handover{},
|
||||
}
|
||||
}
|
||||
s.currentBatch = hashes
|
||||
msg := &OfferedHashesMsg{
|
||||
HandoverProof: proof,
|
||||
Hashes: hashes,
|
||||
From: from,
|
||||
To: to,
|
||||
Stream: s.stream,
|
||||
}
|
||||
log.Trace("Swarm syncer offer batch", "peer", p.ID(), "stream", s.stream, "len", len(hashes), "from", from, "to", to)
|
||||
return p.SendPriority(msg, s.priority)
|
||||
}
|
||||
|
||||
func (p *Peer) getServer(s Stream) (*server, error) {
|
||||
p.serverMu.RLock()
|
||||
defer p.serverMu.RUnlock()
|
||||
|
||||
server := p.servers[s]
|
||||
if server == nil {
|
||||
return nil, newNotFoundError("server", s)
|
||||
}
|
||||
return server, nil
|
||||
}
|
||||
|
||||
func (p *Peer) setServer(s Stream, o Server, priority uint8) (*server, error) {
|
||||
p.serverMu.Lock()
|
||||
defer p.serverMu.Unlock()
|
||||
|
||||
if p.servers[s] != nil {
|
||||
return nil, fmt.Errorf("server %s already registered", s)
|
||||
}
|
||||
os := &server{
|
||||
Server: o,
|
||||
stream: s,
|
||||
priority: priority,
|
||||
}
|
||||
p.servers[s] = os
|
||||
return os, nil
|
||||
}
|
||||
|
||||
func (p *Peer) removeServer(s Stream) error {
|
||||
p.serverMu.Lock()
|
||||
defer p.serverMu.Unlock()
|
||||
|
||||
server, ok := p.servers[s]
|
||||
if !ok {
|
||||
return newNotFoundError("server", s)
|
||||
}
|
||||
server.Close()
|
||||
delete(p.servers, s)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *Peer) getClient(ctx context.Context, s Stream) (c *client, err error) {
|
||||
var params *clientParams
|
||||
func() {
|
||||
p.clientMu.RLock()
|
||||
defer p.clientMu.RUnlock()
|
||||
|
||||
c = p.clients[s]
|
||||
if c != nil {
|
||||
return
|
||||
}
|
||||
params = p.clientParams[s]
|
||||
}()
|
||||
if c != nil {
|
||||
return c, nil
|
||||
}
|
||||
|
||||
if params != nil {
|
||||
//debug.PrintStack()
|
||||
if err := params.waitClient(ctx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
p.clientMu.RLock()
|
||||
defer p.clientMu.RUnlock()
|
||||
|
||||
c = p.clients[s]
|
||||
if c != nil {
|
||||
return c, nil
|
||||
}
|
||||
return nil, newNotFoundError("client", s)
|
||||
}
|
||||
|
||||
func (p *Peer) getOrSetClient(s Stream, from, to uint64) (c *client, created bool, err error) {
|
||||
p.clientMu.Lock()
|
||||
defer p.clientMu.Unlock()
|
||||
|
||||
c = p.clients[s]
|
||||
if c != nil {
|
||||
return c, false, nil
|
||||
}
|
||||
|
||||
f, err := p.streamer.GetClientFunc(s.Name)
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
|
||||
is, err := f(p, s.Key, s.Live)
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
|
||||
cp, err := p.getClientParams(s)
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
defer func() {
|
||||
if err == nil {
|
||||
if err := p.removeClientParams(s); err != nil {
|
||||
log.Error("stream set client: remove client params", "stream", s, "peer", p, "err", err)
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
intervalsKey := peerStreamIntervalsKey(p, s)
|
||||
if s.Live {
|
||||
// try to find previous history and live intervals and merge live into history
|
||||
historyKey := peerStreamIntervalsKey(p, NewStream(s.Name, s.Key, false))
|
||||
historyIntervals := &intervals.Intervals{}
|
||||
err := p.streamer.intervalsStore.Get(historyKey, historyIntervals)
|
||||
switch err {
|
||||
case nil:
|
||||
liveIntervals := &intervals.Intervals{}
|
||||
err := p.streamer.intervalsStore.Get(intervalsKey, liveIntervals)
|
||||
switch err {
|
||||
case nil:
|
||||
historyIntervals.Merge(liveIntervals)
|
||||
if err := p.streamer.intervalsStore.Put(historyKey, historyIntervals); err != nil {
|
||||
log.Error("stream set client: put history intervals", "stream", s, "peer", p, "err", err)
|
||||
}
|
||||
case state.ErrNotFound:
|
||||
default:
|
||||
log.Error("stream set client: get live intervals", "stream", s, "peer", p, "err", err)
|
||||
}
|
||||
case state.ErrNotFound:
|
||||
default:
|
||||
log.Error("stream set client: get history intervals", "stream", s, "peer", p, "err", err)
|
||||
}
|
||||
}
|
||||
|
||||
if err := p.streamer.intervalsStore.Put(intervalsKey, intervals.NewIntervals(from)); err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
|
||||
next := make(chan error, 1)
|
||||
c = &client{
|
||||
Client: is,
|
||||
stream: s,
|
||||
priority: cp.priority,
|
||||
to: cp.to,
|
||||
next: next,
|
||||
quit: make(chan struct{}),
|
||||
intervalsStore: p.streamer.intervalsStore,
|
||||
intervalsKey: intervalsKey,
|
||||
}
|
||||
p.clients[s] = c
|
||||
cp.clientCreated() // unblock all possible getClient calls that are waiting
|
||||
next <- nil // this is to allow wantedKeysMsg before first batch arrives
|
||||
return c, true, nil
|
||||
}
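A hedged standalone sketch, not part of this commit, of the merge performed above for live subscriptions: previously stored history intervals absorb the live intervals before the live key is reset. state.NewInmemoryStore and the "history-key" name are used only for illustration.

package main

import (
	"fmt"
	"log"

	"github.com/ethereum/go-ethereum/swarm/network/stream/intervals"
	"github.com/ethereum/go-ethereum/swarm/state"
)

func main() {
	store := state.NewInmemoryStore()

	// Previously persisted history coverage: keys 0..9.
	history := intervals.NewIntervals(0)
	history.Add(0, 9)
	if err := store.Put("history-key", history); err != nil {
		log.Fatal(err)
	}

	// Live coverage collected since the session started: keys 10..14.
	live := intervals.NewIntervals(10)
	live.Add(10, 14)

	// Merge live into history and persist the result, mirroring getOrSetClient.
	history.Merge(live)
	if err := store.Put("history-key", history); err != nil {
		log.Fatal(err)
	}
	fmt.Println(history) // expected "[[0 14]]": 9 and 10 are adjacent, so the ranges join
}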
|
||||
|
||||
func (p *Peer) removeClient(s Stream) error {
|
||||
p.clientMu.Lock()
|
||||
defer p.clientMu.Unlock()
|
||||
|
||||
client, ok := p.clients[s]
|
||||
if !ok {
|
||||
return newNotFoundError("client", s)
|
||||
}
|
||||
client.close()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *Peer) setClientParams(s Stream, params *clientParams) error {
|
||||
p.clientMu.Lock()
|
||||
defer p.clientMu.Unlock()
|
||||
|
||||
if p.clients[s] != nil {
|
||||
return fmt.Errorf("client %s already exists", s)
|
||||
}
|
||||
if p.clientParams[s] != nil {
|
||||
return fmt.Errorf("client params %s already set", s)
|
||||
}
|
||||
p.clientParams[s] = params
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *Peer) getClientParams(s Stream) (*clientParams, error) {
|
||||
params := p.clientParams[s]
|
||||
if params == nil {
|
||||
return nil, fmt.Errorf("client params '%v' not provided to peer %v", s, p.ID())
|
||||
}
|
||||
return params, nil
|
||||
}
|
||||
|
||||
func (p *Peer) removeClientParams(s Stream) error {
|
||||
_, ok := p.clientParams[s]
|
||||
if !ok {
|
||||
return newNotFoundError("client params", s)
|
||||
}
|
||||
delete(p.clientParams, s)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *Peer) close() {
|
||||
for _, s := range p.servers {
|
||||
s.Close()
|
||||
}
|
||||
}
|
791
swarm/network/stream/snapshot_retrieval_test.go
Normal file
791
swarm/network/stream/snapshot_retrieval_test.go
Normal file
@@ -0,0 +1,791 @@
|
||||
// Copyright 2018 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
package stream
|
||||
|
||||
import (
|
||||
"context"
|
||||
crand "crypto/rand"
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"strings"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/p2p/discover"
|
||||
"github.com/ethereum/go-ethereum/p2p/simulations"
|
||||
"github.com/ethereum/go-ethereum/swarm/log"
|
||||
"github.com/ethereum/go-ethereum/swarm/network"
|
||||
streamTesting "github.com/ethereum/go-ethereum/swarm/network/stream/testing"
|
||||
"github.com/ethereum/go-ethereum/swarm/storage"
|
||||
)
|
||||
|
||||
//constants for random file generation
|
||||
const (
|
||||
minFileSize = 2
|
||||
maxFileSize = 40
|
||||
)
|
||||
|
||||
func initRetrievalTest() {
|
||||
//global func to get overlay address from discover ID
|
||||
toAddr = func(id discover.NodeID) *network.BzzAddr {
|
||||
addr := network.NewAddrFromNodeID(id)
|
||||
return addr
|
||||
}
|
||||
//global func to create local store
|
||||
createStoreFunc = createTestLocalStorageForId
|
||||
//local stores
|
||||
stores = make(map[discover.NodeID]storage.ChunkStore)
|
||||
//data directories for each node and store
|
||||
datadirs = make(map[discover.NodeID]string)
|
||||
//deliveries for each node
|
||||
deliveries = make(map[discover.NodeID]*Delivery)
|
||||
//global retrieve func
|
||||
getRetrieveFunc = func(id discover.NodeID) func(chunk *storage.Chunk) error {
|
||||
return func(chunk *storage.Chunk) error {
|
||||
skipCheck := true
|
||||
return deliveries[id].RequestFromPeers(chunk.Addr[:], skipCheck)
|
||||
}
|
||||
}
|
||||
//registries, map of discover.NodeID to its streamer
|
||||
registries = make(map[discover.NodeID]*TestRegistry)
|
||||
//not needed for this test but required from common_test for NewStreamService
|
||||
waitPeerErrC = make(chan error)
|
||||
//also not needed for this test but required for NewStreamService
|
||||
peerCount = func(id discover.NodeID) int {
|
||||
if ids[0] == id || ids[len(ids)-1] == id {
|
||||
return 1
|
||||
}
|
||||
return 2
|
||||
}
|
||||
}
|
||||
|
||||
//This test is a retrieval test for nodes.
|
||||
//A configurable number of nodes can be
|
||||
//provided to the test.
|
||||
//Files are uploaded to nodes; other nodes try to retrieve the files.
|
||||
//The number of nodes can also be provided via the command line.
|
||||
func TestFileRetrieval(t *testing.T) {
|
||||
if *nodes != 0 {
|
||||
fileRetrievalTest(t, *nodes)
|
||||
} else {
|
||||
nodeCnt := []int{16}
|
||||
//if the `longrunning` flag has been provided
|
||||
//run more test combinations
|
||||
if *longrunning {
|
||||
nodeCnt = append(nodeCnt, 32, 64, 128)
|
||||
}
|
||||
for _, n := range nodeCnt {
|
||||
fileRetrievalTest(t, n)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
//This test is a retrieval test for nodes.
|
||||
//One node is randomly selected to be the pivot node.
|
||||
//A configurable number of chunks and nodes can be
|
||||
//provided to the test; that number of chunks is uploaded
|
||||
//to the pivot node and other nodes try to retrieve the chunk(s).
|
||||
//The number of chunks and nodes can also be provided via the command line.
|
||||
func TestRetrieval(t *testing.T) {
|
||||
//if nodes/chunks have been provided via commandline,
|
||||
//run the tests with these values
|
||||
if *nodes != 0 && *chunks != 0 {
|
||||
retrievalTest(t, *chunks, *nodes)
|
||||
} else {
|
||||
var nodeCnt []int
|
||||
var chnkCnt []int
|
||||
//if the `longrunning` flag has been provided
|
||||
//run more test combinations
|
||||
if *longrunning {
|
||||
nodeCnt = []int{16, 32, 128}
|
||||
chnkCnt = []int{4, 32, 256}
|
||||
} else {
|
||||
//default test
|
||||
nodeCnt = []int{16}
|
||||
chnkCnt = []int{32}
|
||||
}
|
||||
for _, n := range nodeCnt {
|
||||
for _, c := range chnkCnt {
|
||||
retrievalTest(t, c, n)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
//Every test runs 3 times: live only, history only, and live AND history
|
||||
func fileRetrievalTest(t *testing.T, nodeCount int) {
|
||||
//test live and NO history
|
||||
log.Info("Testing live and no history", "nodeCount", nodeCount)
|
||||
live = true
|
||||
history = false
|
||||
err := runFileRetrievalTest(nodeCount)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
//test history only
|
||||
log.Info("Testing history only", "nodeCount", nodeCount)
|
||||
live = false
|
||||
history = true
|
||||
err = runFileRetrievalTest(nodeCount)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
//finally test live and history
|
||||
log.Info("Testing live and history", "nodeCount", nodeCount)
|
||||
live = true
|
||||
err = runFileRetrievalTest(nodeCount)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
//Every test runs 3 times: live only, history only, and live AND history
|
||||
func retrievalTest(t *testing.T, chunkCount int, nodeCount int) {
|
||||
//test live and NO history
|
||||
log.Info("Testing live and no history", "chunkCount", chunkCount, "nodeCount", nodeCount)
|
||||
live = true
|
||||
history = false
|
||||
err := runRetrievalTest(chunkCount, nodeCount)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
//test history only
|
||||
log.Info("Testing history only", "chunkCount", chunkCount, "nodeCount", nodeCount)
|
||||
live = false
|
||||
history = true
|
||||
err = runRetrievalTest(chunkCount, nodeCount)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
//finally test live and history
|
||||
log.Info("Testing live and history", "chunkCount", chunkCount, "nodeCount", nodeCount)
|
||||
live = true
|
||||
err = runRetrievalTest(chunkCount, nodeCount)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
|
||||
The upload depends on the global
|
||||
`live` and `history` variables;
|
||||
|
||||
If `live` is set, first stream subscriptions are established,
|
||||
then files are uploaded to nodes.
|
||||
|
||||
If `history` is enabled, first upload files, then build up subscriptions.
|
||||
|
||||
The test loads a snapshot file to construct the swarm network,
|
||||
assuming that the snapshot file identifies a healthy
|
||||
kademlia network. Nevertheless a health check runs in the
|
||||
simulation's `action` function.
|
||||
|
||||
The snapshot should have 'streamer' in its service list.
|
||||
*/
|
||||
func runFileRetrievalTest(nodeCount int) error {
|
||||
//for every run (live, history), init the variables
|
||||
initRetrievalTest()
|
||||
//the ids of the snapshot nodes, initiate only now as we need nodeCount
|
||||
ids = make([]discover.NodeID, nodeCount)
|
||||
//channel to check for disconnection errors
|
||||
disconnectC := make(chan error)
|
||||
//channel to close disconnection watcher routine
|
||||
quitC := make(chan struct{})
|
||||
//the test conf (using the same as in `snapshot_sync_test`)
|
||||
conf = &synctestConfig{}
|
||||
//map of overlay address to discover ID
|
||||
conf.addrToIdMap = make(map[string]discover.NodeID)
|
||||
//array where the generated chunk hashes will be stored
|
||||
conf.hashes = make([]storage.Address, 0)
|
||||
//load nodes from the snapshot file
|
||||
net, err := initNetWithSnapshot(nodeCount)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var rpcSubscriptionsWg sync.WaitGroup
|
||||
//do cleanup after test is terminated
|
||||
defer func() {
|
||||
//shutdown the snapshot network
|
||||
net.Shutdown()
|
||||
//after the test, clean up local stores initialized with createLocalStoreForId
|
||||
localStoreCleanup()
|
||||
//finally clear all data directories
|
||||
datadirsCleanup()
|
||||
}()
|
||||
//get the nodes of the network
|
||||
nodes := net.GetNodes()
|
||||
//iterate over all nodes...
|
||||
for c := 0; c < len(nodes); c++ {
|
||||
//create an array of discovery nodeIDS
|
||||
ids[c] = nodes[c].ID()
|
||||
a := network.ToOverlayAddr(ids[c].Bytes())
|
||||
//append it to the array of all overlay addresses
|
||||
conf.addrs = append(conf.addrs, a)
|
||||
conf.addrToIdMap[string(a)] = ids[c]
|
||||
}
|
||||
|
||||
//needed for healthy call
|
||||
ppmap = network.NewPeerPotMap(testMinProxBinSize, conf.addrs)
|
||||
|
||||
//an array for the random files
|
||||
var randomFiles []string
|
||||
//channel to signal when the upload has finished
|
||||
uploadFinished := make(chan struct{})
|
||||
//channel to trigger new node checks
|
||||
trigger := make(chan discover.NodeID)
|
||||
//simulation action
|
||||
action := func(ctx context.Context) error {
|
||||
//first run the health check on all nodes,
|
||||
//wait until nodes are all healthy
|
||||
ticker := time.NewTicker(200 * time.Millisecond)
|
||||
defer ticker.Stop()
|
||||
for range ticker.C {
|
||||
healthy := true
|
||||
for _, id := range ids {
|
||||
r := registries[id]
|
||||
//PeerPot for this node
|
||||
addr := common.Bytes2Hex(r.addr.OAddr)
|
||||
pp := ppmap[addr]
|
||||
//call Healthy RPC
|
||||
h := r.delivery.overlay.Healthy(pp)
|
||||
//print info
|
||||
log.Debug(r.delivery.overlay.String())
|
||||
log.Debug(fmt.Sprintf("IS HEALTHY: %t", h.GotNN && h.KnowNN && h.Full))
|
||||
if !h.GotNN || !h.Full {
|
||||
healthy = false
|
||||
break
|
||||
}
|
||||
}
|
||||
if healthy {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if history {
|
||||
log.Info("Uploading for history")
|
||||
//If testing only history, we upload the chunk(s) first
|
||||
conf.hashes, randomFiles, err = uploadFilesToNodes(nodes)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
//variables needed to wait for all subscriptions established before uploading
|
||||
errc := make(chan error)
|
||||
|
||||
//now setup and start event watching in order to know when we can upload
|
||||
ctx, watchCancel := context.WithTimeout(context.Background(), MaxTimeout*time.Second)
|
||||
defer watchCancel()
|
||||
|
||||
log.Info("Setting up stream subscription")
|
||||
//We need two iterations, one to subscribe to the subscription events
|
||||
//(so we know when setup phase is finished), and one to
|
||||
//actually run the stream subscriptions. We can't do it in the same iteration,
|
||||
//because while the first nodes in the loop are setting up subscriptions,
|
||||
//the latter ones have not subscribed to listen to peer events yet,
|
||||
//and then we miss events.
|
||||
|
||||
//first iteration: setup disconnection watcher and subscribe to peer events
|
||||
for j, id := range ids {
|
||||
log.Trace(fmt.Sprintf("Subscribe to subscription events: %d", j))
|
||||
client, err := net.GetNode(id).Client()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
wsDoneC := watchSubscriptionEvents(ctx, id, client, errc, quitC)
|
||||
// wsDoneC is nil if an error occurred; that error has already been sent to the errc channel
|
||||
if wsDoneC == nil {
|
||||
continue
|
||||
}
|
||||
rpcSubscriptionsWg.Add(1)
|
||||
go func() {
|
||||
<-wsDoneC
|
||||
rpcSubscriptionsWg.Done()
|
||||
}()
|
||||
|
||||
//watch for peers disconnecting
|
||||
wdDoneC, err := streamTesting.WatchDisconnections(id, client, disconnectC, quitC)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
rpcSubscriptionsWg.Add(1)
|
||||
go func() {
|
||||
<-wdDoneC
|
||||
rpcSubscriptionsWg.Done()
|
||||
}()
|
||||
}
|
||||
|
||||
//second iteration: start syncing and setup stream subscriptions
|
||||
for j, id := range ids {
|
||||
log.Trace(fmt.Sprintf("Start syncing and stream subscriptions: %d", j))
|
||||
client, err := net.GetNode(id).Client()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
//start syncing!
|
||||
var cnt int
|
||||
err = client.CallContext(ctx, &cnt, "stream_startSyncing")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
//increment the number of subscriptions we need to wait for
|
||||
//by the count returned from startSyncing (SYNC subscriptions)
|
||||
subscriptionCount += cnt
|
||||
//now also add the number of RETRIEVAL_REQUEST subscriptions
|
||||
for snid := range registries[id].peers {
|
||||
subscriptionCount++
|
||||
err = client.CallContext(ctx, nil, "stream_subscribeStream", snid, NewStream(swarmChunkServerStreamName, "", false), nil, Top)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
//now wait until the number of expected subscriptions has been finished
|
||||
//`watchSubscriptionEvents` will write a `nil` value to errc
|
||||
//every time a `SubscriptionMsg` has been received
|
||||
for err := range errc {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
//`nil` received, decrement count
|
||||
subscriptionCount--
|
||||
//all subscriptions received
|
||||
if subscriptionCount == 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
log.Info("Stream subscriptions successfully requested, action terminated")
|
||||
|
||||
if live {
|
||||
//upload generated files to nodes
|
||||
var hashes []storage.Address
|
||||
var rfiles []string
|
||||
hashes, rfiles, err = uploadFilesToNodes(nodes)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
conf.hashes = append(conf.hashes, hashes...)
|
||||
randomFiles = append(randomFiles, rfiles...)
|
||||
//signal to the trigger loop that the upload has finished
|
||||
uploadFinished <- struct{}{}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
//check defines what will be checked during the test
|
||||
check := func(ctx context.Context, id discover.NodeID) (bool, error) {
|
||||
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return false, ctx.Err()
|
||||
case e := <-disconnectC:
|
||||
log.Error(e.Error())
|
||||
return false, fmt.Errorf("Disconnect event detected, network unhealthy")
|
||||
default:
|
||||
}
|
||||
log.Trace(fmt.Sprintf("Checking node: %s", id))
|
||||
//if there is more than one chunk, the test only succeeds if all expected chunks are found
|
||||
allSuccess := true
|
||||
|
||||
//check on the node's FileStore (netstore)
|
||||
fileStore := registries[id].fileStore
|
||||
//check all chunks
|
||||
for i, hash := range conf.hashes {
|
||||
reader, _ := fileStore.Retrieve(hash)
|
||||
//check that we can read the file size and that it corresponds to the generated file size
|
||||
if s, err := reader.Size(nil); err != nil || s != int64(len(randomFiles[i])) {
|
||||
allSuccess = false
|
||||
log.Warn("Retrieve error", "err", err, "hash", hash, "nodeId", id)
|
||||
} else {
|
||||
log.Debug(fmt.Sprintf("File with root hash %x successfully retrieved", hash))
|
||||
}
|
||||
}
|
||||
|
||||
return allSuccess, nil
|
||||
}
|
||||
|
||||
//for each tick, run the checks on all nodes
|
||||
timingTicker := time.NewTicker(5 * time.Second)
|
||||
defer timingTicker.Stop()
|
||||
go func() {
|
||||
//for live upload, we should wait for uploads to have finished
|
||||
//before starting to trigger the checks, due to file size
|
||||
if live {
|
||||
<-uploadFinished
|
||||
}
|
||||
for range timingTicker.C {
|
||||
for i := 0; i < len(ids); i++ {
|
||||
log.Trace(fmt.Sprintf("triggering step %d, id %s", i, ids[i]))
|
||||
trigger <- ids[i]
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
log.Info("Starting simulation run...")
|
||||
|
||||
timeout := MaxTimeout * time.Second
|
||||
ctx, cancel := context.WithTimeout(context.Background(), timeout)
|
||||
defer cancel()
|
||||
|
||||
//run the simulation
|
||||
result := simulations.NewSimulation(net).Run(ctx, &simulations.Step{
|
||||
Action: action,
|
||||
Trigger: trigger,
|
||||
Expect: &simulations.Expectation{
|
||||
Nodes: ids,
|
||||
Check: check,
|
||||
},
|
||||
})
|
||||
|
||||
if result.Error != nil {
|
||||
return result.Error
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
/*
|
||||
The test generates the given number of chunks.
|
||||
|
||||
The upload depends on the global
`live` and `history` variables:

If `live` is set, stream subscriptions are established first,
then chunks are uploaded to a random node.

If `history` is enabled, chunks are uploaded first, then subscriptions are built up.
|
||||
|
||||
The test loads a snapshot file to construct the swarm network,
|
||||
assuming that the snapshot file identifies a healthy
|
||||
kademlia network. Nevertheless a health check runs in the
|
||||
simulation's `action` function.
|
||||
|
||||
The snapshot should have 'streamer' in its service list.
|
||||
*/
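//A usage sketch for clarity (the exact wrapper Test function name and its
//default values are assumptions, not shown here): the retrieval test is
//normally driven through the flags declared in common_test.go, e.g.
//
//	go test -run Retrieval -nodes 16 -chunks 32 -loglevel 3
//
//with runRetrievalTest(chunkCount, nodeCount) invoked per combination.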
|
||||
func runRetrievalTest(chunkCount int, nodeCount int) error {
|
||||
//for every run (live, history), init the variables
|
||||
initRetrievalTest()
|
||||
//the ids of the snapshot nodes, initialized only now as we need nodeCount
|
||||
ids = make([]discover.NodeID, nodeCount)
|
||||
//channel to check for disconnection errors
|
||||
disconnectC := make(chan error)
|
||||
//channel to close disconnection watcher routine
|
||||
quitC := make(chan struct{})
|
||||
//the test conf (using the same struct as in `snapshot_sync_test`)
|
||||
conf = &synctestConfig{}
|
||||
//map of overlay address to discover ID
|
||||
conf.addrToIdMap = make(map[string]discover.NodeID)
|
||||
//array where the generated chunk hashes will be stored
|
||||
conf.hashes = make([]storage.Address, 0)
|
||||
//load nodes from the snapshot file
|
||||
net, err := initNetWithSnapshot(nodeCount)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var rpcSubscriptionsWg sync.WaitGroup
|
||||
//do cleanup after test is terminated
|
||||
defer func() {
|
||||
//shutdown the snapshot network
|
||||
net.Shutdown()
|
||||
//after the test, clean up local stores initialized with createLocalStoreForId
|
||||
localStoreCleanup()
|
||||
//finally clear all data directories
|
||||
datadirsCleanup()
|
||||
}()
|
||||
//get the nodes of the network
|
||||
nodes := net.GetNodes()
|
||||
//select one index at random...
|
||||
idx := rand.Intn(len(nodes))
|
||||
//...and get the node at that index
|
||||
//this is the node selected for upload
|
||||
uploadNode := nodes[idx]
|
||||
//iterate over all nodes...
|
||||
for c := 0; c < len(nodes); c++ {
|
||||
//create an array of discovery node IDs
|
||||
ids[c] = nodes[c].ID()
|
||||
a := network.ToOverlayAddr(ids[c].Bytes())
|
||||
//append it to the array of all overlay addresses
|
||||
conf.addrs = append(conf.addrs, a)
|
||||
conf.addrToIdMap[string(a)] = ids[c]
|
||||
}
|
||||
|
||||
//needed for healthy call
|
||||
ppmap = network.NewPeerPotMap(testMinProxBinSize, conf.addrs)
|
||||
|
||||
trigger := make(chan discover.NodeID)
|
||||
//simulation action
|
||||
action := func(ctx context.Context) error {
|
||||
//first run the health check on all nodes,
|
||||
//wait until nodes are all healthy
|
||||
ticker := time.NewTicker(200 * time.Millisecond)
|
||||
defer ticker.Stop()
|
||||
for range ticker.C {
|
||||
healthy := true
|
||||
for _, id := range ids {
|
||||
r := registries[id]
|
||||
//PeerPot for this node
|
||||
addr := common.Bytes2Hex(network.ToOverlayAddr(id.Bytes()))
|
||||
pp := ppmap[addr]
|
||||
//call Healthy RPC
|
||||
h := r.delivery.overlay.Healthy(pp)
|
||||
//print info
|
||||
log.Debug(r.delivery.overlay.String())
|
||||
log.Debug(fmt.Sprintf("IS HEALTHY: %t", h.GotNN && h.KnowNN && h.Full))
|
||||
if !h.GotNN || !h.Full {
|
||||
healthy = false
|
||||
break
|
||||
}
|
||||
}
|
||||
if healthy {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if history {
|
||||
log.Info("Uploading for history")
|
||||
//If testing only history, we upload the chunk(s) first
|
||||
conf.hashes, err = uploadFileToSingleNodeStore(uploadNode.ID(), chunkCount)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
//variables needed to wait for all subscriptions established before uploading
|
||||
errc := make(chan error)
|
||||
|
||||
//now setup and start event watching in order to know when we can upload
|
||||
ctx, watchCancel := context.WithTimeout(context.Background(), MaxTimeout*time.Second)
|
||||
defer watchCancel()
|
||||
|
||||
log.Info("Setting up stream subscription")
|
||||
//We need two iterations, one to subscribe to the subscription events
|
||||
//(so we know when setup phase is finished), and one to
|
||||
//actually run the stream subscriptions. We can't do it in the same iteration,
|
||||
//because while the first nodes in the loop are setting up subscriptions,
|
||||
//the latter ones have not subscribed to listen to peer events yet,
|
||||
//and then we miss events.
|
||||
|
||||
//first iteration: setup disconnection watcher and subscribe to peer events
|
||||
for j, id := range ids {
|
||||
log.Trace(fmt.Sprintf("Subscribe to subscription events: %d", j))
|
||||
client, err := net.GetNode(id).Client()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
//check for `SubscribeMsg` events to know when setup phase is complete
|
||||
wsDoneC := watchSubscriptionEvents(ctx, id, client, errc, quitC)
|
||||
// if doneC is nil, an error has already been sent to the errc channel
|
||||
if wsDoneC == nil {
|
||||
continue
|
||||
}
|
||||
rpcSubscriptionsWg.Add(1)
|
||||
go func() {
|
||||
<-wsDoneC
|
||||
rpcSubscriptionsWg.Done()
|
||||
}()
|
||||
|
||||
//watch for peers disconnecting
|
||||
wdDoneC, err := streamTesting.WatchDisconnections(id, client, disconnectC, quitC)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
rpcSubscriptionsWg.Add(1)
|
||||
go func() {
|
||||
<-wdDoneC
|
||||
rpcSubscriptionsWg.Done()
|
||||
}()
|
||||
}
|
||||
|
||||
//second iteration: start syncing and setup stream subscriptions
|
||||
for j, id := range ids {
|
||||
log.Trace(fmt.Sprintf("Start syncing and stream subscriptions: %d", j))
|
||||
client, err := net.GetNode(id).Client()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
//start syncing!
|
||||
var cnt int
|
||||
err = client.CallContext(ctx, &cnt, "stream_startSyncing")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
//increment the number of subscriptions we need to wait for
|
||||
//by the count returned from startSyncing (SYNC subscriptions)
|
||||
subscriptionCount += cnt
|
||||
//now also add the number of RETRIEVAL_REQUEST subscriptions
|
||||
for snid := range registries[id].peers {
|
||||
subscriptionCount++
|
||||
err = client.CallContext(ctx, nil, "stream_subscribeStream", snid, NewStream(swarmChunkServerStreamName, "", false), nil, Top)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
//now wait until all expected subscriptions have been established;
//`watchSubscriptionEvents` will write a `nil` value to errc
//every time a `SubscribeMsg` has been received
|
||||
for err := range errc {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
//`nil` received, decrement count
|
||||
subscriptionCount--
|
||||
//all subscriptions received
|
||||
if subscriptionCount == 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
log.Info("Stream subscriptions successfully requested, action terminated")
|
||||
|
||||
if live {
|
||||
//now upload the chunks to the selected random single node
|
||||
chnks, err := uploadFileToSingleNodeStore(uploadNode.ID(), chunkCount)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
conf.hashes = append(conf.hashes, chnks...)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
chunkSize := storage.DefaultChunkSize
|
||||
|
||||
//check defines what will be checked during the test
|
||||
check := func(ctx context.Context, id discover.NodeID) (bool, error) {
|
||||
|
||||
//don't check the uploader node
|
||||
if id == uploadNode.ID() {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return false, ctx.Err()
|
||||
case e := <-disconnectC:
|
||||
log.Error(e.Error())
|
||||
return false, fmt.Errorf("Disconnect event detected, network unhealthy")
|
||||
default:
|
||||
}
|
||||
log.Trace(fmt.Sprintf("Checking node: %s", id))
|
||||
//if there is more than one chunk, the test only succeeds if all expected chunks are found
|
||||
allSuccess := true
|
||||
|
||||
//check on the node's FileStore (netstore)
|
||||
fileStore := registries[id].fileStore
|
||||
//check all chunks
|
||||
for _, chnk := range conf.hashes {
|
||||
reader, _ := fileStore.Retrieve(chnk)
|
||||
//assuming that reading the Size of the chunk is enough to know we found it
|
||||
if s, err := reader.Size(nil); err != nil || s != chunkSize {
|
||||
allSuccess = false
|
||||
log.Warn("Retrieve error", "err", err, "chunk", chnk, "nodeId", id)
|
||||
} else {
|
||||
log.Debug(fmt.Sprintf("Chunk %x found", chnk))
|
||||
}
|
||||
}
|
||||
return allSuccess, nil
|
||||
}
|
||||
|
||||
//for each tick, run the checks on all nodes
|
||||
timingTicker := time.NewTicker(5 * time.Second)
|
||||
defer timingTicker.Stop()
|
||||
go func() {
|
||||
for range timingTicker.C {
|
||||
for i := 0; i < len(ids); i++ {
|
||||
log.Trace(fmt.Sprintf("triggering step %d, id %s", i, ids[i]))
|
||||
trigger <- ids[i]
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
log.Info("Starting simulation run...")
|
||||
|
||||
timeout := MaxTimeout * time.Second
|
||||
ctx, cancel := context.WithTimeout(context.Background(), timeout)
|
||||
defer cancel()
|
||||
|
||||
//run the simulation
|
||||
result := simulations.NewSimulation(net).Run(ctx, &simulations.Step{
|
||||
Action: action,
|
||||
Trigger: trigger,
|
||||
Expect: &simulations.Expectation{
|
||||
Nodes: ids,
|
||||
Check: check,
|
||||
},
|
||||
})
|
||||
|
||||
if result.Error != nil {
|
||||
return result.Error
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
//upload generated files to nodes
|
||||
//every node gets one file uploaded
|
||||
func uploadFilesToNodes(nodes []*simulations.Node) ([]storage.Address, []string, error) {
|
||||
nodeCnt := len(nodes)
|
||||
log.Debug(fmt.Sprintf("Uploading %d files to nodes", nodeCnt))
|
||||
//array holding generated files
|
||||
rfiles := make([]string, nodeCnt)
|
||||
//array holding the root hashes of the files
|
||||
rootAddrs := make([]storage.Address, nodeCnt)
|
||||
|
||||
var err error
|
||||
//for every node, generate a file and upload
|
||||
for i, n := range nodes {
|
||||
id := n.ID()
|
||||
fileStore := registries[id].fileStore
|
||||
//generate a file
|
||||
rfiles[i], err = generateRandomFile()
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
//store it (upload it) on the FileStore
|
||||
rk, wait, err := fileStore.Store(strings.NewReader(rfiles[i]), int64(len(rfiles[i])), false)
//check the Store error before waiting, so a failed store is reported immediately
if err != nil {
return nil, nil, err
}
wait()
log.Debug("Uploaded random string file to node")
|
||||
rootAddrs[i] = rk
|
||||
}
|
||||
return rootAddrs, rfiles, nil
|
||||
}
|
||||
|
||||
//generate a random file (string)
|
||||
func generateRandomFile() (string, error) {
|
||||
//generate a random file size between minFileSize and maxFileSize
|
||||
fileSize := rand.Intn(maxFileSize-minFileSize) + minFileSize
|
||||
log.Debug(fmt.Sprintf("Generated file with filesize %d kB", fileSize))
|
||||
b := make([]byte, fileSize*1024)
|
||||
_, err := crand.Read(b)
|
||||
if err != nil {
|
||||
log.Error("Error generating random file.", "err", err)
|
||||
return "", err
|
||||
}
|
||||
return string(b), nil
|
||||
}
|
719
swarm/network/stream/snapshot_sync_test.go
Normal file
@@ -0,0 +1,719 @@
|
||||
// Copyright 2018 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
package stream
|
||||
|
||||
import (
|
||||
"context"
|
||||
crand "crypto/rand"
|
||||
"encoding/json"
|
||||
"flag"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"math/rand"
|
||||
"os"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"github.com/ethereum/go-ethereum/p2p"
|
||||
"github.com/ethereum/go-ethereum/p2p/discover"
|
||||
"github.com/ethereum/go-ethereum/p2p/simulations"
|
||||
"github.com/ethereum/go-ethereum/p2p/simulations/adapters"
|
||||
"github.com/ethereum/go-ethereum/rpc"
|
||||
"github.com/ethereum/go-ethereum/swarm/network"
|
||||
streamTesting "github.com/ethereum/go-ethereum/swarm/network/stream/testing"
|
||||
"github.com/ethereum/go-ethereum/swarm/pot"
|
||||
"github.com/ethereum/go-ethereum/swarm/storage"
|
||||
)
|
||||
|
||||
const testMinProxBinSize = 2
|
||||
const MaxTimeout = 600
|
||||
|
||||
var (
|
||||
pof = pot.DefaultPof(256)
|
||||
|
||||
conf *synctestConfig
|
||||
ids []discover.NodeID
|
||||
datadirs map[discover.NodeID]string
|
||||
ppmap map[string]*network.PeerPot
|
||||
|
||||
live bool
|
||||
history bool
|
||||
|
||||
longrunning = flag.Bool("longrunning", false, "do run long-running tests")
|
||||
)
|
||||
|
||||
type synctestConfig struct {
|
||||
addrs [][]byte
|
||||
hashes []storage.Address
|
||||
idToChunksMap map[discover.NodeID][]int
|
||||
chunksToNodesMap map[string][]int
|
||||
addrToIdMap map[string]discover.NodeID
|
||||
}
|
||||
|
||||
func init() {
|
||||
rand.Seed(time.Now().Unix())
|
||||
}
|
||||
|
||||
//common_test needs to initialize the test in an init() func
//in order for adapters to register the NewStreamerService;
//this service depends on some global variables,
//so we need to initialize them here in an init() as well.
|
||||
func initSyncTest() {
|
||||
//assign the toAddr func so NewStreamerService can build the addr
|
||||
toAddr = func(id discover.NodeID) *network.BzzAddr {
|
||||
addr := network.NewAddrFromNodeID(id)
|
||||
return addr
|
||||
}
|
||||
//global func to create local store
|
||||
if *useMockStore {
|
||||
createStoreFunc = createMockStore
|
||||
} else {
|
||||
createStoreFunc = createTestLocalStorageForId
|
||||
}
|
||||
//local stores
|
||||
stores = make(map[discover.NodeID]storage.ChunkStore)
|
||||
//data directories for each node and store
|
||||
datadirs = make(map[discover.NodeID]string)
|
||||
//deliveries for each node
|
||||
deliveries = make(map[discover.NodeID]*Delivery)
|
||||
//registries, map of discover.NodeID to its streamer
|
||||
registries = make(map[discover.NodeID]*TestRegistry)
|
||||
//not needed for this test but required from common_test for NewStreamService
|
||||
waitPeerErrC = make(chan error)
|
||||
//also not needed for this test but required for NewStreamService
|
||||
peerCount = func(id discover.NodeID) int {
|
||||
if ids[0] == id || ids[len(ids)-1] == id {
|
||||
return 1
|
||||
}
|
||||
return 2
|
||||
}
|
||||
if *useMockStore {
|
||||
createGlobalStore()
|
||||
}
|
||||
}
|
||||
|
||||
//This test is a syncing test for nodes.
|
||||
//One node is randomly selected to be the pivot node.
|
||||
//A configurable number of chunks and nodes can be
|
||||
//provided to the test, the number of chunks is uploaded
|
||||
//to the pivot node, and we check that nodes get the chunks
|
||||
//they are expected to store based on the syncing protocol.
|
||||
//Number of chunks and nodes can be provided via commandline too.
|
||||
func TestSyncing(t *testing.T) {
|
||||
//if nodes/chunks have been provided via commandline,
|
||||
//run the tests with these values
|
||||
if *nodes != 0 && *chunks != 0 {
|
||||
log.Info(fmt.Sprintf("Running test with %d chunks and %d nodes...", *chunks, *nodes))
|
||||
testSyncing(t, *chunks, *nodes)
|
||||
} else {
|
||||
var nodeCnt []int
|
||||
var chnkCnt []int
|
||||
//if the `longrunning` flag has been provided
|
||||
//run more test combinations
|
||||
if *longrunning {
|
||||
chnkCnt = []int{1, 8, 32, 256, 1024}
|
||||
nodeCnt = []int{16, 32, 64, 128, 256}
|
||||
} else {
|
||||
//default test
|
||||
chnkCnt = []int{4, 32}
|
||||
nodeCnt = []int{32, 16}
|
||||
}
|
||||
for _, chnk := range chnkCnt {
|
||||
for _, n := range nodeCnt {
|
||||
log.Info(fmt.Sprintf("Long running test with %d chunks and %d nodes...", chnk, n))
|
||||
testSyncing(t, chnk, n)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
//Do run the tests
|
||||
//Every test runs 3 times, a live, a history, and a live AND history
|
||||
func testSyncing(t *testing.T, chunkCount int, nodeCount int) {
|
||||
//test live and NO history
|
||||
log.Info("Testing live and no history")
|
||||
live = true
|
||||
history = false
|
||||
err := runSyncTest(chunkCount, nodeCount, live, history)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
//test history only
|
||||
log.Info("Testing history only")
|
||||
live = false
|
||||
history = true
|
||||
err = runSyncTest(chunkCount, nodeCount, live, history)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
//finally test live and history
|
||||
log.Info("Testing live and history")
|
||||
live = true
|
||||
err = runSyncTest(chunkCount, nodeCount, live, history)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
The test generates the given number of chunks
|
||||
|
||||
The upload depends on the global
`live` and `history` variables:

If `live` is set, stream subscriptions are established first,
then chunks are uploaded to a random node.

If `history` is enabled, chunks are uploaded first, then subscriptions are built up.
|
||||
|
||||
For every chunk generated, the nearest node addresses
are identified, and we verify that the nodes closest to the
chunk addresses actually do have the chunks in their local stores.
|
||||
|
||||
The test loads a snapshot file to construct the swarm network,
|
||||
assuming that the snapshot file identifies a healthy
|
||||
kademlia network. The snapshot should have 'streamer' in its service list.
|
||||
|
||||
For every test run, a series of three tests will be executed:
|
||||
- a LIVE test first, where first subscriptions are established,
|
||||
then a file (random chunks) is uploaded
|
||||
- a HISTORY test, where the file is uploaded first, and then
|
||||
the subscriptions are established
|
||||
- a crude LIVE AND HISTORY test last, where (different) chunks
|
||||
are uploaded twice, once before and once after subscriptions
|
||||
*/
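//Note: initNetWithSnapshot below expects the snapshot under
//testing/snapshot_<nodeCount>.json, so a snapshot file must exist for every
//node count used by the test.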
|
||||
func runSyncTest(chunkCount int, nodeCount int, live bool, history bool) error {
|
||||
initSyncTest()
|
||||
//the ids of the snapshot nodes, initialized only now as we need nodeCount
|
||||
ids = make([]discover.NodeID, nodeCount)
|
||||
//initialize the test struct
|
||||
conf = &synctestConfig{}
|
||||
//map of discover ID to indexes of chunks expected at that ID
|
||||
conf.idToChunksMap = make(map[discover.NodeID][]int)
|
||||
//map of overlay address to discover ID
|
||||
conf.addrToIdMap = make(map[string]discover.NodeID)
|
||||
//array where the generated chunk hashes will be stored
|
||||
conf.hashes = make([]storage.Address, 0)
|
||||
//channel to trigger node checks in the simulation
|
||||
trigger := make(chan discover.NodeID)
|
||||
//channel to check for disconnection errors
|
||||
disconnectC := make(chan error)
|
||||
//channel to close disconnection watcher routine
|
||||
quitC := make(chan struct{})
|
||||
|
||||
//load nodes from the snapshot file
|
||||
net, err := initNetWithSnapshot(nodeCount)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var rpcSubscriptionsWg sync.WaitGroup
|
||||
//do cleanup after test is terminated
|
||||
defer func() {
|
||||
// close quitC channel to signal all goroutines to clean up
|
||||
// before calling simulation network shutdown.
|
||||
close(quitC)
|
||||
//wait for all rpc subscriptions to unsubscribe
|
||||
rpcSubscriptionsWg.Wait()
|
||||
//shutdown the snapshot network
|
||||
net.Shutdown()
|
||||
//after the test, clean up local stores initialized with createLocalStoreForId
|
||||
localStoreCleanup()
|
||||
//finally clear all data directories
|
||||
datadirsCleanup()
|
||||
}()
|
||||
//get the nodes of the network
|
||||
nodes := net.GetNodes()
|
||||
//select one index at random...
|
||||
idx := rand.Intn(len(nodes))
|
||||
//...and get the node at that index
|
||||
//this is the node selected for upload
|
||||
node := nodes[idx]
|
||||
|
||||
log.Info("Initializing test config")
|
||||
//iterate over all nodes...
|
||||
for c := 0; c < len(nodes); c++ {
|
||||
//create an array of discovery node IDs
|
||||
ids[c] = nodes[c].ID()
|
||||
//get the kademlia overlay address from this ID
|
||||
a := network.ToOverlayAddr(ids[c].Bytes())
|
||||
//append it to the array of all overlay addresses
|
||||
conf.addrs = append(conf.addrs, a)
|
||||
//the proximity calculation is on overlay addr,
|
||||
//the p2p/simulations check func triggers on discover.NodeID,
|
||||
//so we need to know which overlay addr maps to which nodeID
|
||||
conf.addrToIdMap[string(a)] = ids[c]
|
||||
}
|
||||
log.Info("Test config successfully initialized")
|
||||
|
||||
//only needed for healthy call when debugging
|
||||
ppmap = network.NewPeerPotMap(testMinProxBinSize, conf.addrs)
|
||||
|
||||
//define the action to be performed before the test checks: start syncing
|
||||
action := func(ctx context.Context) error {
|
||||
//first run the health check on all nodes,
|
||||
//wait until nodes are all healthy
|
||||
ticker := time.NewTicker(200 * time.Millisecond)
|
||||
defer ticker.Stop()
|
||||
for range ticker.C {
|
||||
healthy := true
|
||||
for _, id := range ids {
|
||||
r := registries[id]
|
||||
//PeerPot for this node
|
||||
addr := common.Bytes2Hex(network.ToOverlayAddr(id.Bytes()))
|
||||
pp := ppmap[addr]
|
||||
//call Healthy RPC
|
||||
h := r.delivery.overlay.Healthy(pp)
|
||||
//print info
|
||||
log.Debug(r.delivery.overlay.String())
|
||||
log.Debug(fmt.Sprintf("IS HEALTHY: %t", h.GotNN && h.KnowNN && h.Full))
|
||||
if !h.GotNN || !h.Full {
|
||||
healthy = false
|
||||
break
|
||||
}
|
||||
}
|
||||
if healthy {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if history {
|
||||
log.Info("Uploading for history")
|
||||
//If testing only history, we upload the chunk(s) first
|
||||
chunks, err := uploadFileToSingleNodeStore(node.ID(), chunkCount)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
conf.hashes = append(conf.hashes, chunks...)
|
||||
//finally map chunks to the closest addresses
|
||||
mapKeysToNodes(conf)
|
||||
}
|
||||
|
||||
//variables needed to wait for all subscriptions established before uploading
|
||||
errc := make(chan error)
|
||||
|
||||
//now setup and start event watching in order to know when we can upload
|
||||
ctx, watchCancel := context.WithTimeout(context.Background(), MaxTimeout*time.Second)
|
||||
defer watchCancel()
|
||||
|
||||
log.Info("Setting up stream subscription")
|
||||
|
||||
//We need two iterations, one to subscribe to the subscription events
|
||||
//(so we know when setup phase is finished), and one to
|
||||
//actually run the stream subscriptions. We can't do it in the same iteration,
|
||||
//because while the first nodes in the loop are setting up subscriptions,
|
||||
//the latter ones have not subscribed to listen to peer events yet,
|
||||
//and then we miss events.
|
||||
|
||||
//first iteration: setup disconnection watcher and subscribe to peer events
|
||||
for j, id := range ids {
|
||||
log.Trace(fmt.Sprintf("Subscribe to subscription events: %d", j))
|
||||
client, err := net.GetNode(id).Client()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
wsDoneC := watchSubscriptionEvents(ctx, id, client, errc, quitC)
|
||||
// if doneC is nil, an error has already been sent to the errc channel
|
||||
if wsDoneC == nil {
|
||||
continue
|
||||
}
|
||||
rpcSubscriptionsWg.Add(1)
|
||||
go func() {
|
||||
<-wsDoneC
|
||||
rpcSubscriptionsWg.Done()
|
||||
}()
|
||||
|
||||
//watch for peers disconnecting
|
||||
wdDoneC, err := streamTesting.WatchDisconnections(id, client, disconnectC, quitC)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
rpcSubscriptionsWg.Add(1)
|
||||
go func() {
|
||||
<-wdDoneC
|
||||
rpcSubscriptionsWg.Done()
|
||||
}()
|
||||
}
|
||||
|
||||
//second iteration: start syncing
|
||||
for j, id := range ids {
|
||||
log.Trace(fmt.Sprintf("Start syncing subscriptions: %d", j))
|
||||
client, err := net.GetNode(id).Client()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
//start syncing!
|
||||
var cnt int
|
||||
err = client.CallContext(ctx, &cnt, "stream_startSyncing")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
//increment the number of subscriptions we need to wait for
|
||||
//by the count returned from startSyncing (SYNC subscriptions)
|
||||
subscriptionCount += cnt
|
||||
}
|
||||
|
||||
//now wait until all expected subscriptions have been established;
//`watchSubscriptionEvents` will write a `nil` value to errc
|
||||
for err := range errc {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
//`nil` received, decrement count
|
||||
subscriptionCount--
|
||||
//all subscriptions received
|
||||
if subscriptionCount == 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
log.Info("Stream subscriptions successfully requested")
|
||||
if live {
|
||||
//now upload the chunks to the selected random single node
|
||||
hashes, err := uploadFileToSingleNodeStore(node.ID(), chunkCount)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
conf.hashes = append(conf.hashes, hashes...)
|
||||
//finally map chunks to the closest addresses
|
||||
log.Debug(fmt.Sprintf("Uploaded chunks for live syncing: %v", conf.hashes))
|
||||
mapKeysToNodes(conf)
|
||||
log.Info(fmt.Sprintf("Uploaded %d chunks to random single node", chunkCount))
|
||||
}
|
||||
|
||||
log.Info("Action terminated")
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
//check defines what will be checked during the test
|
||||
check := func(ctx context.Context, id discover.NodeID) (bool, error) {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return false, ctx.Err()
|
||||
case e := <-disconnectC:
|
||||
log.Error(e.Error())
|
||||
return false, fmt.Errorf("Disconnect event detected, network unhealthy")
|
||||
default:
|
||||
}
|
||||
log.Trace(fmt.Sprintf("Checking node: %s", id))
|
||||
//select the local store for the given node
|
||||
//if there is more than one chunk, the test only succeeds if all expected chunks are found
|
||||
allSuccess := true
|
||||
|
||||
//all the chunk indexes which are supposed to be found for this node
|
||||
localChunks := conf.idToChunksMap[id]
|
||||
//for each expected chunk, check if it is in the local store
|
||||
for _, ch := range localChunks {
|
||||
//get the real chunk by the index in the index array
|
||||
chunk := conf.hashes[ch]
|
||||
log.Trace(fmt.Sprintf("node has chunk: %s:", chunk))
|
||||
//check if the expected chunk is indeed in the localstore
|
||||
var err error
|
||||
if *useMockStore {
|
||||
if globalStore == nil {
|
||||
return false, fmt.Errorf("Something went wrong; using mockStore enabled but globalStore is nil")
|
||||
}
|
||||
//use the globalStore if the mockStore should be used; in that case,
|
||||
//the complete localStore stack is bypassed for getting the chunk
|
||||
_, err = globalStore.Get(common.BytesToAddress(id.Bytes()), chunk)
|
||||
} else {
|
||||
//use the actual localstore
|
||||
lstore := stores[id]
|
||||
_, err = lstore.Get(chunk)
|
||||
}
|
||||
if err != nil {
|
||||
log.Warn(fmt.Sprintf("Chunk %s NOT found for id %s", chunk, id))
|
||||
allSuccess = false
|
||||
} else {
|
||||
log.Debug(fmt.Sprintf("Chunk %s IS FOUND for id %s", chunk, id))
|
||||
}
|
||||
}
|
||||
|
||||
return allSuccess, nil
|
||||
}
|
||||
|
||||
//for each tick, run the checks on all nodes
|
||||
timingTicker := time.NewTicker(time.Second * 1)
|
||||
defer timingTicker.Stop()
|
||||
go func() {
|
||||
for range timingTicker.C {
|
||||
for i := 0; i < len(ids); i++ {
|
||||
log.Trace(fmt.Sprintf("triggering step %d, id %s", i, ids[i]))
|
||||
trigger <- ids[i]
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
log.Info("Starting simulation run...")
|
||||
|
||||
timeout := MaxTimeout * time.Second
|
||||
ctx, cancel := context.WithTimeout(context.Background(), timeout)
|
||||
defer cancel()
|
||||
|
||||
//run the simulation
|
||||
result := simulations.NewSimulation(net).Run(ctx, &simulations.Step{
|
||||
Action: action,
|
||||
Trigger: trigger,
|
||||
Expect: &simulations.Expectation{
|
||||
Nodes: ids,
|
||||
Check: check,
|
||||
},
|
||||
})
|
||||
|
||||
if result.Error != nil {
|
||||
return result.Error
|
||||
}
|
||||
log.Info("Simulation terminated")
|
||||
return nil
|
||||
}
|
||||
|
||||
//the server func to start syncing
|
||||
//issues `RequestSubscriptionMsg` to peers, based on po, by iterating over
|
||||
//the kademlia's `EachBin` function.
|
||||
//returns the number of subscriptions requested
|
||||
func (r *TestRegistry) StartSyncing(ctx context.Context) (int, error) {
|
||||
var err error
|
||||
|
||||
if log.Lvl(*loglevel) == log.LvlDebug {
|
||||
//PeerPot for this node
|
||||
addr := common.Bytes2Hex(r.addr.OAddr)
|
||||
pp := ppmap[addr]
|
||||
//call Healthy RPC
|
||||
h := r.delivery.overlay.Healthy(pp)
|
||||
//print info
|
||||
log.Debug(r.delivery.overlay.String())
|
||||
log.Debug(fmt.Sprintf("IS HEALTHY: %t", h.GotNN && h.KnowNN && h.Full))
|
||||
}
|
||||
|
||||
kad, ok := r.delivery.overlay.(*network.Kademlia)
|
||||
if !ok {
|
||||
return 0, fmt.Errorf("Not a Kademlia!")
|
||||
}
|
||||
|
||||
subCnt := 0
|
||||
//iterate over each bin and solicit needed subscription to bins
|
||||
kad.EachBin(r.addr.Over(), pof, 0, func(conn network.OverlayConn, po int) bool {
|
||||
//identify the begin and end index of the bin(s) we want to subscribe to
|
||||
log.Debug(fmt.Sprintf("Requesting subscription by: registry %s from peer %s for bin: %d", r.addr.ID(), conf.addrToIdMap[string(conn.Address())], po))
|
||||
var histRange *Range
|
||||
if history {
|
||||
histRange = &Range{}
|
||||
}
|
||||
|
||||
subCnt++
|
||||
err = r.RequestSubscription(conf.addrToIdMap[string(conn.Address())], NewStream("SYNC", FormatSyncBinKey(uint8(po)), live), histRange, Top)
|
||||
if err != nil {
|
||||
log.Error(fmt.Sprintf("Error in RequestSubsciption! %v", err))
|
||||
return false
|
||||
}
|
||||
return true
|
||||
|
||||
})
|
||||
return subCnt, nil
|
||||
}
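//StartSyncing is exposed over RPC and driven from the simulation action above as
//
//	var cnt int
//	err = client.CallContext(ctx, &cnt, "stream_startSyncing")
//
//the returned count is added to subscriptionCount so the test knows how many
//SubscribeMsg events to wait for.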
|
||||
|
||||
//map chunk keys to addresses which are responsible
|
||||
func mapKeysToNodes(conf *synctestConfig) {
|
||||
kmap := make(map[string][]int)
|
||||
nodemap := make(map[string][]int)
|
||||
//build a pot for chunk hashes
|
||||
np := pot.NewPot(nil, 0)
|
||||
indexmap := make(map[string]int)
|
||||
for i, a := range conf.addrs {
|
||||
indexmap[string(a)] = i
|
||||
np, _, _ = pot.Add(np, a, pof)
|
||||
}
|
||||
//for each address, run EachNeighbour on the chunk hashes pot to identify closest nodes
|
||||
log.Trace(fmt.Sprintf("Generated hash chunk(s): %v", conf.hashes))
|
||||
for i := 0; i < len(conf.hashes); i++ {
|
||||
pl := 256 //highest possible proximity
|
||||
var nns []int
|
||||
np.EachNeighbour([]byte(conf.hashes[i]), pof, func(val pot.Val, po int) bool {
|
||||
a := val.([]byte)
|
||||
if pl < 256 && pl != po {
|
||||
return false
|
||||
}
|
||||
if pl == 256 || pl == po {
|
||||
log.Trace(fmt.Sprintf("appending %s", conf.addrToIdMap[string(a)]))
|
||||
nns = append(nns, indexmap[string(a)])
|
||||
nodemap[string(a)] = append(nodemap[string(a)], i)
|
||||
}
|
||||
if pl == 256 && len(nns) >= testMinProxBinSize {
|
||||
//testMinProxBinSize has been reached at this po, so save it
|
||||
//we will add all other nodes at the same po
|
||||
pl = po
|
||||
}
|
||||
return true
|
||||
})
|
||||
kmap[string(conf.hashes[i])] = nns
|
||||
}
|
||||
for addr, chunks := range nodemap {
|
||||
//this selects which chunks are expected to be found with the given node
|
||||
conf.idToChunksMap[conf.addrToIdMap[addr]] = chunks
|
||||
}
|
||||
log.Debug(fmt.Sprintf("Map of expected chunks by ID: %v", conf.idToChunksMap))
|
||||
conf.chunksToNodesMap = kmap
|
||||
}
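//Example of the resulting expectation (a sketch, not actual test data): with
//testMinProxBinSize = 2, each chunk hash is mapped to at least its two
//nearest overlay addresses (plus any others at the same proximity order), and
//the check function later expects exactly those nodes to hold the chunk.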
|
||||
|
||||
//upload a file(chunks) to a single local node store
|
||||
func uploadFileToSingleNodeStore(id discover.NodeID, chunkCount int) ([]storage.Address, error) {
|
||||
log.Debug(fmt.Sprintf("Uploading to node id: %s", id))
|
||||
lstore := stores[id]
|
||||
size := chunkSize
|
||||
fileStore := storage.NewFileStore(lstore, storage.NewFileStoreParams())
|
||||
var rootAddrs []storage.Address
|
||||
for i := 0; i < chunkCount; i++ {
|
||||
rk, wait, err := fileStore.Store(io.LimitReader(crand.Reader, int64(size)), int64(size), false)
//check the Store error before waiting, so a failed store is reported immediately
if err != nil {
return nil, err
}
wait()
|
||||
rootAddrs = append(rootAddrs, rk)
|
||||
}
|
||||
|
||||
return rootAddrs, nil
|
||||
}
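//Note: each iteration stores exactly chunkSize bytes of random data, so every
//returned root address effectively identifies a single chunk, which is what
//the per-node chunk checks above rely on.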
|
||||
|
||||
//initialize a network from a snapshot
|
||||
func initNetWithSnapshot(nodeCount int) (*simulations.Network, error) {
|
||||
|
||||
var a adapters.NodeAdapter
|
||||
//add the streamer service to the node adapter
|
||||
|
||||
if *adapter == "exec" {
|
||||
dirname, err := ioutil.TempDir(".", "")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
a = adapters.NewExecAdapter(dirname)
|
||||
} else if *adapter == "tcp" {
|
||||
a = adapters.NewTCPAdapter(services)
|
||||
} else if *adapter == "sim" {
|
||||
a = adapters.NewSimAdapter(services)
|
||||
}
|
||||
|
||||
log.Info("Setting up Snapshot network")
|
||||
|
||||
net := simulations.NewNetwork(a, &simulations.NetworkConfig{
|
||||
ID: "0",
|
||||
DefaultService: "streamer",
|
||||
})
|
||||
|
||||
f, err := os.Open(fmt.Sprintf("testing/snapshot_%d.json", nodeCount))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer f.Close()
|
||||
jsonbyte, err := ioutil.ReadAll(f)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var snap simulations.Snapshot
|
||||
err = json.Unmarshal(jsonbyte, &snap)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
//the snapshot probably has the property EnableMsgEvents not set
|
||||
//just in case, set it to true!
|
||||
//(we need this to wait for messages before uploading)
|
||||
for _, n := range snap.Nodes {
|
||||
n.Node.Config.EnableMsgEvents = true
|
||||
}
|
||||
|
||||
log.Info("Waiting for p2p connections to be established...")
|
||||
|
||||
//now we can load the snapshot
|
||||
err = net.Load(&snap)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
log.Info("Snapshot loaded")
|
||||
return net, nil
|
||||
}
|
||||
|
||||
//we want to wait for subscriptions to be established before uploading to test
|
||||
//that live syncing is working correctly
|
||||
func watchSubscriptionEvents(ctx context.Context, id discover.NodeID, client *rpc.Client, errc chan error, quitC chan struct{}) (doneC <-chan struct{}) {
|
||||
events := make(chan *p2p.PeerEvent)
|
||||
sub, err := client.Subscribe(context.Background(), "admin", events, "peerEvents")
|
||||
if err != nil {
|
||||
log.Error(err.Error())
|
||||
errc <- fmt.Errorf("error getting peer events for node %v: %s", id, err)
|
||||
return
|
||||
}
|
||||
c := make(chan struct{})
|
||||
|
||||
go func() {
|
||||
defer func() {
|
||||
log.Trace("watch subscription events: unsubscribe", "id", id)
|
||||
sub.Unsubscribe()
|
||||
close(c)
|
||||
}()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-quitC:
|
||||
return
|
||||
case <-ctx.Done():
|
||||
select {
|
||||
case errc <- ctx.Err():
|
||||
case <-quitC:
|
||||
}
|
||||
return
|
||||
case e := <-events:
|
||||
//just catch SubscribeMsg
|
||||
if e.Type == p2p.PeerEventTypeMsgRecv && e.Protocol == "stream" && e.MsgCode != nil && *e.MsgCode == 4 {
|
||||
errc <- nil
|
||||
}
|
||||
case err := <-sub.Err():
|
||||
if err != nil {
|
||||
select {
|
||||
case errc <- fmt.Errorf("error getting peer events for node %v: %v", id, err):
|
||||
case <-quitC:
|
||||
}
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
}()
|
||||
return c
|
||||
}
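//Typical usage, as in the tests above: the returned channel is drained in a
//goroutine tied to a WaitGroup so the test can wait for the subscription to
//be torn down before shutting the network down:
//
//	doneC := watchSubscriptionEvents(ctx, id, client, errc, quitC)
//	if doneC != nil {
//		rpcSubscriptionsWg.Add(1)
//		go func() {
//			<-doneC
//			rpcSubscriptionsWg.Done()
//		}()
//	}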
|
||||
|
||||
//create a local store for the given node
|
||||
func createTestLocalStorageForId(id discover.NodeID, addr *network.BzzAddr) (storage.ChunkStore, error) {
|
||||
var datadir string
|
||||
var err error
|
||||
datadir, err = ioutil.TempDir("", fmt.Sprintf("syncer-test-%s", id.TerminalString()))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
datadirs[id] = datadir
|
||||
var store storage.ChunkStore
|
||||
params := storage.NewDefaultLocalStoreParams()
|
||||
params.ChunkDbPath = datadir
|
||||
params.BaseKey = addr.Over()
|
||||
store, err = storage.NewTestLocalStoreForAddr(params)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return store, nil
|
||||
}
|
739
swarm/network/stream/stream.go
Normal file
@@ -0,0 +1,739 @@
|
||||
// Copyright 2018 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package stream
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"math"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/metrics"
|
||||
"github.com/ethereum/go-ethereum/p2p"
|
||||
"github.com/ethereum/go-ethereum/p2p/discover"
|
||||
"github.com/ethereum/go-ethereum/p2p/protocols"
|
||||
"github.com/ethereum/go-ethereum/rpc"
|
||||
"github.com/ethereum/go-ethereum/swarm/log"
|
||||
"github.com/ethereum/go-ethereum/swarm/network"
|
||||
"github.com/ethereum/go-ethereum/swarm/network/stream/intervals"
|
||||
"github.com/ethereum/go-ethereum/swarm/pot"
|
||||
"github.com/ethereum/go-ethereum/swarm/state"
|
||||
"github.com/ethereum/go-ethereum/swarm/storage"
|
||||
)
|
||||
|
||||
const (
|
||||
Low uint8 = iota
|
||||
Mid
|
||||
High
|
||||
Top
|
||||
PriorityQueue // number of queues
|
||||
PriorityQueueCap = 32 // queue capacity
|
||||
HashSize = 32
|
||||
)
|
||||
|
||||
// Registry registry for outgoing and incoming streamer constructors
|
||||
type Registry struct {
|
||||
api *API
|
||||
addr *network.BzzAddr
|
||||
skipCheck bool
|
||||
clientMu sync.RWMutex
|
||||
serverMu sync.RWMutex
|
||||
peersMu sync.RWMutex
|
||||
serverFuncs map[string]func(*Peer, string, bool) (Server, error)
|
||||
clientFuncs map[string]func(*Peer, string, bool) (Client, error)
|
||||
peers map[discover.NodeID]*Peer
|
||||
delivery *Delivery
|
||||
intervalsStore state.Store
|
||||
doRetrieve bool
|
||||
}
|
||||
|
||||
// RegistryOptions holds optional values for NewRegistry constructor.
|
||||
type RegistryOptions struct {
|
||||
SkipCheck bool
|
||||
DoSync bool
|
||||
DoRetrieve bool
|
||||
SyncUpdateDelay time.Duration
|
||||
}
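// A minimal construction sketch (addr, delivery, db and intervalsStore are
// assumed to be prepared by the caller, as the test helpers do):
//
//	r := NewRegistry(addr, delivery, db, intervalsStore, &RegistryOptions{
//		DoSync:          true,
//		SyncUpdateDelay: 15 * time.Second,
//	})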
|
||||
|
||||
// NewRegistry is Streamer constructor
|
||||
func NewRegistry(addr *network.BzzAddr, delivery *Delivery, db *storage.DBAPI, intervalsStore state.Store, options *RegistryOptions) *Registry {
|
||||
if options == nil {
|
||||
options = &RegistryOptions{}
|
||||
}
|
||||
if options.SyncUpdateDelay <= 0 {
|
||||
options.SyncUpdateDelay = 15 * time.Second
|
||||
}
|
||||
streamer := &Registry{
|
||||
addr: addr,
|
||||
skipCheck: options.SkipCheck,
|
||||
serverFuncs: make(map[string]func(*Peer, string, bool) (Server, error)),
|
||||
clientFuncs: make(map[string]func(*Peer, string, bool) (Client, error)),
|
||||
peers: make(map[discover.NodeID]*Peer),
|
||||
delivery: delivery,
|
||||
intervalsStore: intervalsStore,
|
||||
doRetrieve: options.DoRetrieve,
|
||||
}
|
||||
streamer.api = NewAPI(streamer)
|
||||
delivery.getPeer = streamer.getPeer
|
||||
streamer.RegisterServerFunc(swarmChunkServerStreamName, func(_ *Peer, _ string, _ bool) (Server, error) {
|
||||
return NewSwarmChunkServer(delivery.db), nil
|
||||
})
|
||||
streamer.RegisterClientFunc(swarmChunkServerStreamName, func(p *Peer, t string, live bool) (Client, error) {
|
||||
return NewSwarmSyncerClient(p, delivery.db, false, NewStream(swarmChunkServerStreamName, t, live))
|
||||
})
|
||||
RegisterSwarmSyncerServer(streamer, db)
|
||||
RegisterSwarmSyncerClient(streamer, db)
|
||||
|
||||
if options.DoSync {
|
||||
// latestIntC function ensures that
|
||||
// - receiving from the in chan is not blocked by processing inside the for loop
|
||||
// - the latest int value is delivered to the loop after the processing is done
|
||||
// In context of NeighbourhoodDepthC:
|
||||
// after the syncing is done updating inside the loop, we do not need to update on the intermediate
|
||||
// depth changes, only to the latest one
|
||||
latestIntC := func(in <-chan int) <-chan int {
|
||||
out := make(chan int, 1)
|
||||
|
||||
go func() {
|
||||
defer close(out)
|
||||
|
||||
for i := range in {
|
||||
select {
|
||||
case <-out:
|
||||
default:
|
||||
}
|
||||
out <- i
|
||||
}
|
||||
}()
|
||||
|
||||
return out
|
||||
}
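// For example, if several depth values arrive while the loop body below is
// still busy, only the most recent one remains buffered in out and is seen
// on the next receive.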
|
||||
|
||||
go func() {
|
||||
// wait for kademlia table to be healthy
|
||||
time.Sleep(options.SyncUpdateDelay)
|
||||
|
||||
kad := streamer.delivery.overlay.(*network.Kademlia)
|
||||
depthC := latestIntC(kad.NeighbourhoodDepthC())
|
||||
addressBookSizeC := latestIntC(kad.AddrCountC())
|
||||
|
||||
// initial requests for syncing subscription to peers
|
||||
streamer.updateSyncing()
|
||||
|
||||
for depth := range depthC {
|
||||
log.Debug("Kademlia neighbourhood depth change", "depth", depth)
|
||||
|
||||
// Prevent too early sync subscriptions by waiting until there are no
|
||||
// new peers connecting. Sync streams updating will be done after no
|
||||
// new peers have connected for at least the SyncUpdateDelay period.
|
||||
timer := time.NewTimer(options.SyncUpdateDelay)
|
||||
// Hard limit to sync update delay, preventing long delays
|
||||
// on a very dynamic network
|
||||
maxTimer := time.NewTimer(3 * time.Minute)
|
||||
loop:
|
||||
for {
|
||||
select {
|
||||
case <-maxTimer.C:
|
||||
// force syncing update when a hard timeout is reached
|
||||
log.Trace("Sync subscriptions update on hard timeout")
|
||||
// request for syncing subscription to new peers
|
||||
streamer.updateSyncing()
|
||||
break loop
|
||||
case <-timer.C:
|
||||
// start syncing as no new peers have been added to kademlia
|
||||
// for some time
|
||||
log.Trace("Sync subscriptions update")
|
||||
// request for syncing subscription to new peers
|
||||
streamer.updateSyncing()
|
||||
break loop
|
||||
case size := <-addressBookSizeC:
|
||||
log.Trace("Kademlia address book size changed on depth change", "size", size)
|
||||
// new peers have been added to kademlia,
|
||||
// reset the timer to prevent early sync subscriptions
|
||||
if !timer.Stop() {
|
||||
<-timer.C
|
||||
}
|
||||
timer.Reset(options.SyncUpdateDelay)
|
||||
}
|
||||
}
|
||||
timer.Stop()
|
||||
maxTimer.Stop()
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
return streamer
|
||||
}
|
||||
|
||||
// RegisterClientFunc registers an incoming streamer constructor
|
||||
func (r *Registry) RegisterClientFunc(stream string, f func(*Peer, string, bool) (Client, error)) {
|
||||
r.clientMu.Lock()
|
||||
defer r.clientMu.Unlock()
|
||||
|
||||
r.clientFuncs[stream] = f
|
||||
}
|
||||
|
||||
// RegisterServerFunc registers an outgoing streamer constructor
|
||||
func (r *Registry) RegisterServerFunc(stream string, f func(*Peer, string, bool) (Server, error)) {
|
||||
r.serverMu.Lock()
|
||||
defer r.serverMu.Unlock()
|
||||
|
||||
r.serverFuncs[stream] = f
|
||||
}
|
||||
|
||||
// GetClientFunc is the accessor for incoming streamer constructors
|
||||
func (r *Registry) GetClientFunc(stream string) (func(*Peer, string, bool) (Client, error), error) {
|
||||
r.clientMu.RLock()
|
||||
defer r.clientMu.RUnlock()
|
||||
|
||||
f := r.clientFuncs[stream]
|
||||
if f == nil {
|
||||
return nil, fmt.Errorf("stream %v not registered", stream)
|
||||
}
|
||||
return f, nil
|
||||
}
|
||||
|
||||
// GetServerFunc is the accessor for outgoing streamer constructors
|
||||
func (r *Registry) GetServerFunc(stream string) (func(*Peer, string, bool) (Server, error), error) {
|
||||
r.serverMu.RLock()
|
||||
defer r.serverMu.RUnlock()
|
||||
|
||||
f := r.serverFuncs[stream]
|
||||
if f == nil {
|
||||
return nil, fmt.Errorf("stream %v not registered", stream)
|
||||
}
|
||||
return f, nil
|
||||
}
|
||||
|
||||
func (r *Registry) RequestSubscription(peerId discover.NodeID, s Stream, h *Range, prio uint8) error {
|
||||
// check if the stream is registered
|
||||
if _, err := r.GetServerFunc(s.Name); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
peer := r.getPeer(peerId)
|
||||
if peer == nil {
|
||||
return fmt.Errorf("peer not found %v", peerId)
|
||||
}
|
||||
|
||||
if _, err := peer.getServer(s); err != nil {
|
||||
if e, ok := err.(*notFoundError); ok && e.t == "server" {
|
||||
// request subscription only if the server for this stream is not created
|
||||
log.Debug("RequestSubscription ", "peer", peerId, "stream", s, "history", h)
|
||||
return peer.Send(&RequestSubscriptionMsg{
|
||||
Stream: s,
|
||||
History: h,
|
||||
Priority: prio,
|
||||
})
|
||||
}
|
||||
return err
|
||||
}
|
||||
log.Trace("RequestSubscription: already subscribed", "peer", peerId, "stream", s, "history", h)
|
||||
return nil
|
||||
}
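// A typical call, as made by updateSyncing below, requests a live SYNC stream
// for one kademlia bin:
//
//	stream := NewStream("SYNC", FormatSyncBinKey(uint8(bin)), true)
//	err := r.RequestSubscription(p.ID(), stream, NewRange(0, 0), High)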
|
||||
|
||||
// Subscribe initiates the streamer
|
||||
func (r *Registry) Subscribe(peerId discover.NodeID, s Stream, h *Range, priority uint8) error {
|
||||
// check if the stream is registered
|
||||
if _, err := r.GetClientFunc(s.Name); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
peer := r.getPeer(peerId)
|
||||
if peer == nil {
|
||||
return fmt.Errorf("peer not found %v", peerId)
|
||||
}
|
||||
|
||||
var to uint64
|
||||
if !s.Live && h != nil {
|
||||
to = h.To
|
||||
}
|
||||
|
||||
err := peer.setClientParams(s, newClientParams(priority, to))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if s.Live && h != nil {
|
||||
if err := peer.setClientParams(
|
||||
getHistoryStream(s),
|
||||
newClientParams(getHistoryPriority(priority), h.To),
|
||||
); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
msg := &SubscribeMsg{
|
||||
Stream: s,
|
||||
History: h,
|
||||
Priority: priority,
|
||||
}
|
||||
log.Debug("Subscribe ", "peer", peerId, "stream", s, "history", h)
|
||||
|
||||
return peer.SendPriority(msg, priority)
|
||||
}
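// For example, Run below subscribes every peer to the retrieval request stream
// when doRetrieve is set:
//
//	err := r.Subscribe(p.ID(), NewStream(swarmChunkServerStreamName, "", false), nil, Top)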
|
||||
|
||||
func (r *Registry) Unsubscribe(peerId discover.NodeID, s Stream) error {
|
||||
peer := r.getPeer(peerId)
|
||||
if peer == nil {
|
||||
return fmt.Errorf("peer not found %v", peerId)
|
||||
}
|
||||
|
||||
msg := &UnsubscribeMsg{
|
||||
Stream: s,
|
||||
}
|
||||
log.Debug("Unsubscribe ", "peer", peerId, "stream", s)
|
||||
|
||||
if err := peer.Send(msg); err != nil {
|
||||
return err
|
||||
}
|
||||
return peer.removeClient(s)
|
||||
}
|
||||
|
||||
// Quit sends the QuitMsg to the peer to remove the
|
||||
// stream peer client and terminate the streaming.
|
||||
func (r *Registry) Quit(peerId discover.NodeID, s Stream) error {
|
||||
peer := r.getPeer(peerId)
|
||||
if peer == nil {
|
||||
log.Debug("stream quit: peer not found", "peer", peerId, "stream", s)
|
||||
// if the peer is not found, abort the request
|
||||
return nil
|
||||
}
|
||||
|
||||
msg := &QuitMsg{
|
||||
Stream: s,
|
||||
}
|
||||
log.Debug("Quit ", "peer", peerId, "stream", s)
|
||||
|
||||
return peer.Send(msg)
|
||||
}
|
||||
|
||||
func (r *Registry) Retrieve(chunk *storage.Chunk) error {
|
||||
return r.delivery.RequestFromPeers(chunk.Addr[:], r.skipCheck)
|
||||
}
|
||||
|
||||
func (r *Registry) NodeInfo() interface{} {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *Registry) PeerInfo(id discover.NodeID) interface{} {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *Registry) Close() error {
|
||||
return r.intervalsStore.Close()
|
||||
}
|
||||
|
||||
func (r *Registry) getPeer(peerId discover.NodeID) *Peer {
|
||||
r.peersMu.RLock()
|
||||
defer r.peersMu.RUnlock()
|
||||
|
||||
return r.peers[peerId]
|
||||
}
|
||||
|
||||
func (r *Registry) setPeer(peer *Peer) {
|
||||
r.peersMu.Lock()
|
||||
r.peers[peer.ID()] = peer
|
||||
metrics.GetOrRegisterGauge("registry.peers", nil).Update(int64(len(r.peers)))
|
||||
r.peersMu.Unlock()
|
||||
}
|
||||
|
||||
func (r *Registry) deletePeer(peer *Peer) {
|
||||
r.peersMu.Lock()
|
||||
delete(r.peers, peer.ID())
|
||||
metrics.GetOrRegisterGauge("registry.peers", nil).Update(int64(len(r.peers)))
|
||||
r.peersMu.Unlock()
|
||||
}
|
||||
|
||||
func (r *Registry) peersCount() (c int) {
|
||||
r.peersMu.Lock()
|
||||
c = len(r.peers)
|
||||
r.peersMu.Unlock()
|
||||
return
|
||||
}
|
||||
|
||||
// Run protocol run function
|
||||
func (r *Registry) Run(p *network.BzzPeer) error {
|
||||
sp := NewPeer(p.Peer, r)
|
||||
r.setPeer(sp)
|
||||
defer r.deletePeer(sp)
|
||||
defer close(sp.quit)
|
||||
defer sp.close()
|
||||
|
||||
if r.doRetrieve {
|
||||
err := r.Subscribe(p.ID(), NewStream(swarmChunkServerStreamName, "", false), nil, Top)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return sp.Run(sp.HandleMsg)
|
||||
}
|
||||
|
||||
// updateSyncing subscribes to SYNC streams by iterating over the
|
||||
// kademlia connections and bins. If there are existing SYNC streams
|
||||
// and they are no longer required after iteration, requests to Quit
// them will be sent to the appropriate peers.
|
||||
func (r *Registry) updateSyncing() {
|
||||
// if the overlay is not a Kademlia, this will panic
|
||||
kad := r.delivery.overlay.(*network.Kademlia)
|
||||
|
||||
// map of all SYNC streams for all peers
|
||||
// used at the end of the function to remove servers
|
||||
// that are not needed anymore
|
||||
subs := make(map[discover.NodeID]map[Stream]struct{})
|
||||
r.peersMu.RLock()
|
||||
for id, peer := range r.peers {
|
||||
peer.serverMu.RLock()
|
||||
for stream := range peer.servers {
|
||||
if stream.Name == "SYNC" {
|
||||
if _, ok := subs[id]; !ok {
|
||||
subs[id] = make(map[Stream]struct{})
|
||||
}
|
||||
subs[id][stream] = struct{}{}
|
||||
}
|
||||
}
|
||||
peer.serverMu.RUnlock()
|
||||
}
|
||||
r.peersMu.RUnlock()
|
||||
|
||||
// request subscriptions for all nodes and bins
|
||||
kad.EachBin(r.addr.Over(), pot.DefaultPof(256), 0, func(conn network.OverlayConn, bin int) bool {
|
||||
p := conn.(network.Peer)
|
||||
log.Debug(fmt.Sprintf("Requesting subscription by: registry %s from peer %s for bin: %d", r.addr.ID(), p.ID(), bin))
|
||||
|
||||
// bin is always less than 256 and it is safe to convert it to type uint8
|
||||
stream := NewStream("SYNC", FormatSyncBinKey(uint8(bin)), true)
|
||||
if streams, ok := subs[p.ID()]; ok {
|
||||
// delete live and history streams from the map, so that it won't be removed with a Quit request
|
||||
delete(streams, stream)
|
||||
delete(streams, getHistoryStream(stream))
|
||||
}
|
||||
err := r.RequestSubscription(p.ID(), stream, NewRange(0, 0), High)
|
||||
if err != nil {
|
||||
log.Debug("Request subscription", "err", err, "peer", p.ID(), "stream", stream)
|
||||
return false
|
||||
}
|
||||
return true
|
||||
})
|
||||
|
||||
// remove SYNC servers that do not need to be subscribed
|
||||
for id, streams := range subs {
|
||||
if len(streams) == 0 {
|
||||
continue
|
||||
}
|
||||
peer := r.getPeer(id)
|
||||
if peer == nil {
|
||||
continue
|
||||
}
|
||||
for stream := range streams {
|
||||
log.Debug("Remove sync server", "peer", id, "stream", stream)
|
||||
err := r.Quit(peer.ID(), stream)
|
||||
if err != nil && err != p2p.ErrShuttingDown {
|
||||
log.Error("quit", "err", err, "peer", peer.ID(), "stream", stream)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (r *Registry) runProtocol(p *p2p.Peer, rw p2p.MsgReadWriter) error {
|
||||
peer := protocols.NewPeer(p, rw, Spec)
|
||||
bzzPeer := network.NewBzzTestPeer(peer, r.addr)
|
||||
r.delivery.overlay.On(bzzPeer)
|
||||
defer r.delivery.overlay.Off(bzzPeer)
|
||||
return r.Run(bzzPeer)
|
||||
}
|
||||
|
||||
// HandleMsg is the message handler that delegates incoming messages
|
||||
func (p *Peer) HandleMsg(msg interface{}) error {
|
||||
switch msg := msg.(type) {
|
||||
|
||||
case *SubscribeMsg:
|
||||
return p.handleSubscribeMsg(msg)
|
||||
|
||||
case *SubscribeErrorMsg:
|
||||
return p.handleSubscribeErrorMsg(msg)
|
||||
|
||||
case *UnsubscribeMsg:
|
||||
return p.handleUnsubscribeMsg(msg)
|
||||
|
||||
case *OfferedHashesMsg:
|
||||
return p.handleOfferedHashesMsg(msg)
|
||||
|
||||
case *TakeoverProofMsg:
|
||||
return p.handleTakeoverProofMsg(msg)
|
||||
|
||||
case *WantedHashesMsg:
|
||||
return p.handleWantedHashesMsg(msg)
|
||||
|
||||
case *ChunkDeliveryMsg:
|
||||
return p.streamer.delivery.handleChunkDeliveryMsg(p, msg)
|
||||
|
||||
case *RetrieveRequestMsg:
|
||||
return p.streamer.delivery.handleRetrieveRequestMsg(p, msg)
|
||||
|
||||
case *RequestSubscriptionMsg:
|
||||
return p.handleRequestSubscription(msg)
|
||||
|
||||
case *QuitMsg:
|
||||
return p.handleQuitMsg(msg)
|
||||
|
||||
default:
|
||||
return fmt.Errorf("unknown message type: %T", msg)
|
||||
}
|
||||
}
|
||||
|
||||
type server struct {
|
||||
Server
|
||||
stream Stream
|
||||
priority uint8
|
||||
currentBatch []byte
|
||||
}
|
||||
|
||||
// Server interface for outgoing peer Streamer
|
||||
type Server interface {
|
||||
SetNextBatch(uint64, uint64) (hashes []byte, from uint64, to uint64, proof *HandoverProof, err error)
|
||||
GetData([]byte) ([]byte, error)
|
||||
Close()
|
||||
}
|
||||
|
||||
type client struct {
|
||||
Client
|
||||
stream Stream
|
||||
priority uint8
|
||||
sessionAt uint64
|
||||
to uint64
|
||||
next chan error
|
||||
quit chan struct{}
|
||||
|
||||
intervalsKey string
|
||||
intervalsStore state.Store
|
||||
}
|
||||
|
||||
func peerStreamIntervalsKey(p *Peer, s Stream) string {
|
||||
return p.ID().String() + s.String()
|
||||
}
|
||||
|
||||
func (c client) AddInterval(start, end uint64) (err error) {
|
||||
i := &intervals.Intervals{}
|
||||
err = c.intervalsStore.Get(c.intervalsKey, i)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
i.Add(start, end)
|
||||
return c.intervalsStore.Put(c.intervalsKey, i)
|
||||
}
|
||||
|
||||
func (c client) NextInterval() (start, end uint64, err error) {
|
||||
i := &intervals.Intervals{}
|
||||
err = c.intervalsStore.Get(c.intervalsKey, i)
|
||||
if err != nil {
|
||||
return 0, 0, err
|
||||
}
|
||||
start, end = i.Next()
|
||||
return start, end, nil
|
||||
}
|
||||
|
||||
// Client interface for incoming peer Streamer
|
||||
type Client interface {
|
||||
NeedData([]byte) func()
|
||||
BatchDone(Stream, uint64, []byte, []byte) func() (*TakeoverProof, error)
|
||||
Close()
|
||||
}
|
||||
|
||||
func (c *client) nextBatch(from uint64) (nextFrom uint64, nextTo uint64) {
|
||||
if c.to > 0 && from >= c.to {
|
||||
return 0, 0
|
||||
}
|
||||
if c.stream.Live {
|
||||
return from, 0
|
||||
} else if from >= c.sessionAt {
|
||||
if c.to > 0 {
|
||||
return from, c.to
|
||||
}
|
||||
return from, math.MaxUint64
|
||||
}
|
||||
nextFrom, nextTo, err := c.NextInterval()
|
||||
if err != nil {
|
||||
log.Error("next intervals", "stream", c.stream)
|
||||
return
|
||||
}
|
||||
if nextTo > c.to {
|
||||
nextTo = c.to
|
||||
}
|
||||
if nextTo == 0 {
|
||||
nextTo = c.sessionAt
|
||||
}
|
||||
return
|
||||
}
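For readability, here is a standalone mirror of the nextBatch branching above; it is a sketch only, and it substitutes the sessionAt cap for the intervals-store lookup the real method performs for historical gaps:

package main

import (
	"fmt"
	"math"
)

func nextRange(live bool, from, sessionAt, to uint64) (uint64, uint64) {
	if to > 0 && from >= to {
		return 0, 0 // nothing left to request
	}
	if live {
		return from, 0 // live streams are open-ended
	}
	if from >= sessionAt {
		if to > 0 {
			return from, to
		}
		return from, math.MaxUint64
	}
	return from, sessionAt // history: catch up to the session start
}

func main() {
	fmt.Println(nextRange(true, 42, 100, 0))   // live: 42 0
	fmt.Println(nextRange(false, 150, 100, 0)) // history past session start: 150 MaxUint64
	fmt.Println(nextRange(false, 10, 100, 0))  // history before session start: 10 100
}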
|
||||
|
||||
func (c *client) batchDone(p *Peer, req *OfferedHashesMsg, hashes []byte) error {
|
||||
if tf := c.BatchDone(req.Stream, req.From, hashes, req.Root); tf != nil {
|
||||
tp, err := tf()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := p.SendPriority(tp, c.priority); err != nil {
|
||||
return err
|
||||
}
|
||||
if c.to > 0 && tp.Takeover.End >= c.to {
|
||||
return p.streamer.Unsubscribe(p.Peer.ID(), req.Stream)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
// TODO: make a test case for testing if the interval is added when the batch is done
|
||||
if err := c.AddInterval(req.From, req.To); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *client) close() {
|
||||
select {
|
||||
case <-c.quit:
|
||||
default:
|
||||
close(c.quit)
|
||||
}
|
||||
c.Close()
|
||||
}
|
||||
|
||||
// clientParams stores parameters for the new client
|
||||
// between a subscription and initial offered hashes request handling.
|
||||
type clientParams struct {
|
||||
priority uint8
|
||||
to uint64
|
||||
// signal when the client is created
|
||||
clientCreatedC chan struct{}
|
||||
}
|
||||
|
||||
func newClientParams(priority uint8, to uint64) *clientParams {
|
||||
return &clientParams{
|
||||
priority: priority,
|
||||
to: to,
|
||||
clientCreatedC: make(chan struct{}),
|
||||
}
|
||||
}
|
||||
|
||||
func (c *clientParams) waitClient(ctx context.Context) error {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
case <-c.clientCreatedC:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func (c *clientParams) clientCreated() {
|
||||
close(c.clientCreatedC)
|
||||
}
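clientParams is a small wait-until-signalled helper: clientCreated closes the channel once, and waitClient blocks until that happens or the context expires. A self-contained illustration of the same pattern:

package main

import (
	"context"
	"fmt"
	"time"
)

func main() {
	created := make(chan struct{})
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()

	go func() {
		time.Sleep(10 * time.Millisecond) // stand-in for client construction
		close(created)                    // what clientCreated() does
	}()

	select { // what waitClient(ctx) does
	case <-ctx.Done():
		fmt.Println("gave up:", ctx.Err())
	case <-created:
		fmt.Println("client created")
	}
}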
|
||||
|
||||
// Spec is the spec of the streamer protocol
|
||||
var Spec = &protocols.Spec{
|
||||
Name: "stream",
|
||||
Version: 4,
|
||||
MaxMsgSize: 10 * 1024 * 1024,
|
||||
Messages: []interface{}{
|
||||
UnsubscribeMsg{},
|
||||
OfferedHashesMsg{},
|
||||
WantedHashesMsg{},
|
||||
TakeoverProofMsg{},
|
||||
SubscribeMsg{},
|
||||
RetrieveRequestMsg{},
|
||||
ChunkDeliveryMsg{},
|
||||
SubscribeErrorMsg{},
|
||||
RequestSubscriptionMsg{},
|
||||
QuitMsg{},
|
||||
},
|
||||
}
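The message codes that appear in the tests later in this commit (Code: 4 for SubscribeMsg, 1 for OfferedHashesMsg, and so on) are the indices of the corresponding types in Spec.Messages above. A throwaway listing of that mapping:

package main

import "fmt"

func main() {
	for code, name := range []string{
		"UnsubscribeMsg",         // 0
		"OfferedHashesMsg",       // 1
		"WantedHashesMsg",        // 2
		"TakeoverProofMsg",       // 3
		"SubscribeMsg",           // 4
		"RetrieveRequestMsg",     // 5
		"ChunkDeliveryMsg",       // 6
		"SubscribeErrorMsg",      // 7
		"RequestSubscriptionMsg", // 8
		"QuitMsg",                // 9
	} {
		fmt.Printf("code %d -> %s\n", code, name)
	}
}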
|
||||
|
||||
func (r *Registry) Protocols() []p2p.Protocol {
|
||||
return []p2p.Protocol{
|
||||
{
|
||||
Name: Spec.Name,
|
||||
Version: Spec.Version,
|
||||
Length: Spec.Length(),
|
||||
Run: r.runProtocol,
|
||||
// NodeInfo: ,
|
||||
// PeerInfo: ,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func (r *Registry) APIs() []rpc.API {
|
||||
return []rpc.API{
|
||||
{
|
||||
Namespace: "stream",
|
||||
Version: "3.0",
|
||||
Service: r.api,
|
||||
Public: true,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func (r *Registry) Start(server *p2p.Server) error {
|
||||
log.Info("Streamer started")
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *Registry) Stop() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
type Range struct {
|
||||
From, To uint64
|
||||
}
|
||||
|
||||
func NewRange(from, to uint64) *Range {
|
||||
return &Range{
|
||||
From: from,
|
||||
To: to,
|
||||
}
|
||||
}
|
||||
|
||||
func (r *Range) String() string {
|
||||
return fmt.Sprintf("%v-%v", r.From, r.To)
|
||||
}
|
||||
|
||||
func getHistoryPriority(priority uint8) uint8 {
|
||||
if priority == 0 {
|
||||
return 0
|
||||
}
|
||||
return priority - 1
|
||||
}
|
||||
|
||||
func getHistoryStream(s Stream) Stream {
|
||||
return NewStream(s.Name, s.Key, false)
|
||||
}
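A sketch of an in-package test for the two helpers above; it assumes the Stream fields (Name, Key, Live) shown earlier and the non-zero Top priority constant used elsewhere in this package:

package stream

import "testing"

func TestHistoryHelpersSketch(t *testing.T) {
	live := NewStream("SYNC", FormatSyncBinKey(3), true)
	hist := getHistoryStream(live)
	// the history counterpart keeps the same name and key, but Live is false
	if hist.Name != live.Name || hist.Key != live.Key || hist.Live {
		t.Fatalf("unexpected history stream: %v", hist)
	}
	// history runs one priority level lower, unless already at the lowest level
	if getHistoryPriority(Top) != Top-1 || getHistoryPriority(0) != 0 {
		t.Fatalf("unexpected history priority")
	}
}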
|
||||
|
||||
type API struct {
|
||||
streamer *Registry
|
||||
}
|
||||
|
||||
func NewAPI(r *Registry) *API {
|
||||
return &API{
|
||||
streamer: r,
|
||||
}
|
||||
}
|
||||
|
||||
func (api *API) SubscribeStream(peerId discover.NodeID, s Stream, history *Range, priority uint8) error {
|
||||
return api.streamer.Subscribe(peerId, s, history, priority)
|
||||
}
|
||||
|
||||
func (api *API) UnsubscribeStream(peerId discover.NodeID, s Stream) error {
|
||||
return api.streamer.Unsubscribe(peerId, s)
|
||||
}
|
684
swarm/network/stream/streamer_test.go
Normal file
@@ -0,0 +1,684 @@
|
||||
// Copyright 2018 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package stream
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/crypto/sha3"
|
||||
p2ptest "github.com/ethereum/go-ethereum/p2p/testing"
|
||||
)
|
||||
|
||||
func TestStreamerSubscribe(t *testing.T) {
|
||||
tester, streamer, _, teardown, err := newStreamerTester(t)
|
||||
defer teardown()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
stream := NewStream("foo", "", true)
|
||||
err = streamer.Subscribe(tester.IDs[0], stream, NewRange(0, 0), Top)
|
||||
if err == nil || err.Error() != "stream foo not registered" {
|
||||
t.Fatalf("Expected error %v, got %v", "stream foo not registered", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestStreamerRequestSubscription(t *testing.T) {
|
||||
tester, streamer, _, teardown, err := newStreamerTester(t)
|
||||
defer teardown()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
stream := NewStream("foo", "", false)
|
||||
err = streamer.RequestSubscription(tester.IDs[0], stream, &Range{}, Top)
|
||||
if err == nil || err.Error() != "stream foo not registered" {
|
||||
t.Fatalf("Expected error %v, got %v", "stream foo not registered", err)
|
||||
}
|
||||
}
|
||||
|
||||
var (
|
||||
hash0 = sha3.Sum256([]byte{0})
|
||||
hash1 = sha3.Sum256([]byte{1})
|
||||
hash2 = sha3.Sum256([]byte{2})
|
||||
hashesTmp = append(hash0[:], hash1[:]...)
|
||||
hashes = append(hashesTmp, hash2[:]...)
|
||||
)
|
||||
|
||||
type testClient struct {
|
||||
t string
|
||||
wait0 chan bool
|
||||
wait2 chan bool
|
||||
batchDone chan bool
|
||||
receivedHashes map[string][]byte
|
||||
}
|
||||
|
||||
func newTestClient(t string) *testClient {
|
||||
return &testClient{
|
||||
t: t,
|
||||
wait0: make(chan bool),
|
||||
wait2: make(chan bool),
|
||||
batchDone: make(chan bool),
|
||||
receivedHashes: make(map[string][]byte),
|
||||
}
|
||||
}
|
||||
|
||||
func (self *testClient) NeedData(hash []byte) func() {
|
||||
self.receivedHashes[string(hash)] = hash
|
||||
if bytes.Equal(hash, hash0[:]) {
|
||||
return func() {
|
||||
<-self.wait0
|
||||
}
|
||||
} else if bytes.Equal(hash, hash2[:]) {
|
||||
return func() {
|
||||
<-self.wait2
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (self *testClient) BatchDone(Stream, uint64, []byte, []byte) func() (*TakeoverProof, error) {
|
||||
close(self.batchDone)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (self *testClient) Close() {}
|
||||
|
||||
type testServer struct {
|
||||
t string
|
||||
}
|
||||
|
||||
func newTestServer(t string) *testServer {
|
||||
return &testServer{
|
||||
t: t,
|
||||
}
|
||||
}
|
||||
|
||||
func (self *testServer) SetNextBatch(from uint64, to uint64) ([]byte, uint64, uint64, *HandoverProof, error) {
|
||||
return make([]byte, HashSize), from + 1, to + 1, nil, nil
|
||||
}
|
||||
|
||||
func (self *testServer) GetData([]byte) ([]byte, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (self *testServer) Close() {
|
||||
}
|
||||
|
||||
func TestStreamerDownstreamSubscribeUnsubscribeMsgExchange(t *testing.T) {
|
||||
tester, streamer, _, teardown, err := newStreamerTester(t)
|
||||
defer teardown()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
streamer.RegisterClientFunc("foo", func(p *Peer, t string, live bool) (Client, error) {
|
||||
return newTestClient(t), nil
|
||||
})
|
||||
|
||||
peerID := tester.IDs[0]
|
||||
|
||||
stream := NewStream("foo", "", true)
|
||||
err = streamer.Subscribe(peerID, stream, NewRange(5, 8), Top)
|
||||
if err != nil {
|
||||
t.Fatalf("Expected no error, got %v", err)
|
||||
}
|
||||
|
||||
err = tester.TestExchanges(
|
||||
p2ptest.Exchange{
|
||||
Label: "Subscribe message",
|
||||
Expects: []p2ptest.Expect{
|
||||
{
|
||||
Code: 4,
|
||||
Msg: &SubscribeMsg{
|
||||
Stream: stream,
|
||||
History: NewRange(5, 8),
|
||||
Priority: Top,
|
||||
},
|
||||
Peer: peerID,
|
||||
},
|
||||
},
|
||||
},
|
||||
// trigger OfferedHashesMsg to actually create the client
|
||||
p2ptest.Exchange{
|
||||
Label: "OfferedHashes message",
|
||||
Triggers: []p2ptest.Trigger{
|
||||
{
|
||||
Code: 1,
|
||||
Msg: &OfferedHashesMsg{
|
||||
HandoverProof: &HandoverProof{
|
||||
Handover: &Handover{},
|
||||
},
|
||||
Hashes: hashes,
|
||||
From: 5,
|
||||
To: 8,
|
||||
Stream: stream,
|
||||
},
|
||||
Peer: peerID,
|
||||
},
|
||||
},
|
||||
Expects: []p2ptest.Expect{
|
||||
{
|
||||
Code: 2,
|
||||
Msg: &WantedHashesMsg{
|
||||
Stream: stream,
|
||||
Want: []byte{5},
|
||||
From: 9,
|
||||
To: 0,
|
||||
},
|
||||
Peer: peerID,
|
||||
},
|
||||
},
|
||||
},
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
err = streamer.Unsubscribe(peerID, stream)
|
||||
if err != nil {
|
||||
t.Fatalf("Expected no error, got %v", err)
|
||||
}
|
||||
|
||||
err = tester.TestExchanges(p2ptest.Exchange{
|
||||
Label: "Unsubscribe message",
|
||||
Expects: []p2ptest.Expect{
|
||||
{
|
||||
Code: 0,
|
||||
Msg: &UnsubscribeMsg{
|
||||
Stream: stream,
|
||||
},
|
||||
Peer: peerID,
|
||||
},
|
||||
},
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestStreamerUpstreamSubscribeUnsubscribeMsgExchange(t *testing.T) {
|
||||
tester, streamer, _, teardown, err := newStreamerTester(t)
|
||||
defer teardown()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
stream := NewStream("foo", "", false)
|
||||
|
||||
streamer.RegisterServerFunc("foo", func(p *Peer, t string, live bool) (Server, error) {
|
||||
return newTestServer(t), nil
|
||||
})
|
||||
|
||||
peerID := tester.IDs[0]
|
||||
|
||||
err = tester.TestExchanges(p2ptest.Exchange{
|
||||
Label: "Subscribe message",
|
||||
Triggers: []p2ptest.Trigger{
|
||||
{
|
||||
Code: 4,
|
||||
Msg: &SubscribeMsg{
|
||||
Stream: stream,
|
||||
History: NewRange(5, 8),
|
||||
Priority: Top,
|
||||
},
|
||||
Peer: peerID,
|
||||
},
|
||||
},
|
||||
Expects: []p2ptest.Expect{
|
||||
{
|
||||
Code: 1,
|
||||
Msg: &OfferedHashesMsg{
|
||||
Stream: stream,
|
||||
HandoverProof: &HandoverProof{
|
||||
Handover: &Handover{},
|
||||
},
|
||||
Hashes: make([]byte, HashSize),
|
||||
From: 6,
|
||||
To: 9,
|
||||
},
|
||||
Peer: peerID,
|
||||
},
|
||||
},
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
err = tester.TestExchanges(p2ptest.Exchange{
|
||||
Label: "unsubscribe message",
|
||||
Triggers: []p2ptest.Trigger{
|
||||
{
|
||||
Code: 0,
|
||||
Msg: &UnsubscribeMsg{
|
||||
Stream: stream,
|
||||
},
|
||||
Peer: peerID,
|
||||
},
|
||||
},
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestStreamerUpstreamSubscribeUnsubscribeMsgExchangeLive(t *testing.T) {
|
||||
tester, streamer, _, teardown, err := newStreamerTester(t)
|
||||
defer teardown()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
stream := NewStream("foo", "", true)
|
||||
|
||||
streamer.RegisterServerFunc("foo", func(p *Peer, t string, live bool) (Server, error) {
|
||||
return newTestServer(t), nil
|
||||
})
|
||||
|
||||
peerID := tester.IDs[0]
|
||||
|
||||
err = tester.TestExchanges(p2ptest.Exchange{
|
||||
Label: "Subscribe message",
|
||||
Triggers: []p2ptest.Trigger{
|
||||
{
|
||||
Code: 4,
|
||||
Msg: &SubscribeMsg{
|
||||
Stream: stream,
|
||||
Priority: Top,
|
||||
},
|
||||
Peer: peerID,
|
||||
},
|
||||
},
|
||||
Expects: []p2ptest.Expect{
|
||||
{
|
||||
Code: 1,
|
||||
Msg: &OfferedHashesMsg{
|
||||
Stream: stream,
|
||||
HandoverProof: &HandoverProof{
|
||||
Handover: &Handover{},
|
||||
},
|
||||
Hashes: make([]byte, HashSize),
|
||||
From: 1,
|
||||
To: 1,
|
||||
},
|
||||
Peer: peerID,
|
||||
},
|
||||
},
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
err = tester.TestExchanges(p2ptest.Exchange{
|
||||
Label: "unsubscribe message",
|
||||
Triggers: []p2ptest.Trigger{
|
||||
{
|
||||
Code: 0,
|
||||
Msg: &UnsubscribeMsg{
|
||||
Stream: stream,
|
||||
},
|
||||
Peer: peerID,
|
||||
},
|
||||
},
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestStreamerUpstreamSubscribeErrorMsgExchange(t *testing.T) {
|
||||
tester, streamer, _, teardown, err := newStreamerTester(t)
|
||||
defer teardown()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
streamer.RegisterServerFunc("foo", func(p *Peer, t string, live bool) (Server, error) {
|
||||
return newTestServer(t), nil
|
||||
})
|
||||
|
||||
stream := NewStream("bar", "", true)
|
||||
|
||||
peerID := tester.IDs[0]
|
||||
|
||||
err = tester.TestExchanges(p2ptest.Exchange{
|
||||
Label: "Subscribe message",
|
||||
Triggers: []p2ptest.Trigger{
|
||||
{
|
||||
Code: 4,
|
||||
Msg: &SubscribeMsg{
|
||||
Stream: stream,
|
||||
History: NewRange(5, 8),
|
||||
Priority: Top,
|
||||
},
|
||||
Peer: peerID,
|
||||
},
|
||||
},
|
||||
Expects: []p2ptest.Expect{
|
||||
{
|
||||
Code: 7,
|
||||
Msg: &SubscribeErrorMsg{
|
||||
Error: "stream bar not registered",
|
||||
},
|
||||
Peer: peerID,
|
||||
},
|
||||
},
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestStreamerUpstreamSubscribeLiveAndHistory(t *testing.T) {
|
||||
tester, streamer, _, teardown, err := newStreamerTester(t)
|
||||
defer teardown()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
stream := NewStream("foo", "", true)
|
||||
|
||||
streamer.RegisterServerFunc("foo", func(p *Peer, t string, live bool) (Server, error) {
|
||||
return &testServer{
|
||||
t: t,
|
||||
}, nil
|
||||
})
|
||||
|
||||
peerID := tester.IDs[0]
|
||||
|
||||
err = tester.TestExchanges(p2ptest.Exchange{
|
||||
Label: "Subscribe message",
|
||||
Triggers: []p2ptest.Trigger{
|
||||
{
|
||||
Code: 4,
|
||||
Msg: &SubscribeMsg{
|
||||
Stream: stream,
|
||||
History: NewRange(5, 8),
|
||||
Priority: Top,
|
||||
},
|
||||
Peer: peerID,
|
||||
},
|
||||
},
|
||||
Expects: []p2ptest.Expect{
|
||||
{
|
||||
Code: 1,
|
||||
Msg: &OfferedHashesMsg{
|
||||
Stream: NewStream("foo", "", false),
|
||||
HandoverProof: &HandoverProof{
|
||||
Handover: &Handover{},
|
||||
},
|
||||
Hashes: make([]byte, HashSize),
|
||||
From: 6,
|
||||
To: 9,
|
||||
},
|
||||
Peer: peerID,
|
||||
},
|
||||
{
|
||||
Code: 1,
|
||||
Msg: &OfferedHashesMsg{
|
||||
Stream: stream,
|
||||
HandoverProof: &HandoverProof{
|
||||
Handover: &Handover{},
|
||||
},
|
||||
From: 1,
|
||||
To: 1,
|
||||
Hashes: make([]byte, HashSize),
|
||||
},
|
||||
Peer: peerID,
|
||||
},
|
||||
},
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestStreamerDownstreamOfferedHashesMsgExchange(t *testing.T) {
|
||||
tester, streamer, _, teardown, err := newStreamerTester(t)
|
||||
defer teardown()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
stream := NewStream("foo", "", true)
|
||||
|
||||
var tc *testClient
|
||||
|
||||
streamer.RegisterClientFunc("foo", func(p *Peer, t string, live bool) (Client, error) {
|
||||
tc = newTestClient(t)
|
||||
return tc, nil
|
||||
})
|
||||
|
||||
peerID := tester.IDs[0]
|
||||
|
||||
err = streamer.Subscribe(peerID, stream, NewRange(5, 8), Top)
|
||||
if err != nil {
|
||||
t.Fatalf("Expected no error, got %v", err)
|
||||
}
|
||||
|
||||
err = tester.TestExchanges(p2ptest.Exchange{
|
||||
Label: "Subscribe message",
|
||||
Expects: []p2ptest.Expect{
|
||||
{
|
||||
Code: 4,
|
||||
Msg: &SubscribeMsg{
|
||||
Stream: stream,
|
||||
History: NewRange(5, 8),
|
||||
Priority: Top,
|
||||
},
|
||||
Peer: peerID,
|
||||
},
|
||||
},
|
||||
},
|
||||
p2ptest.Exchange{
|
||||
Label: "WantedHashes message",
|
||||
Triggers: []p2ptest.Trigger{
|
||||
{
|
||||
Code: 1,
|
||||
Msg: &OfferedHashesMsg{
|
||||
HandoverProof: &HandoverProof{
|
||||
Handover: &Handover{},
|
||||
},
|
||||
Hashes: hashes,
|
||||
From: 5,
|
||||
To: 8,
|
||||
Stream: stream,
|
||||
},
|
||||
Peer: peerID,
|
||||
},
|
||||
},
|
||||
Expects: []p2ptest.Expect{
|
||||
{
|
||||
Code: 2,
|
||||
Msg: &WantedHashesMsg{
|
||||
Stream: stream,
|
||||
Want: []byte{5},
|
||||
From: 9,
|
||||
To: 0,
|
||||
},
|
||||
Peer: peerID,
|
||||
},
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if len(tc.receivedHashes) != 3 {
|
||||
t.Fatalf("Expected number of received hashes %v, got %v", 3, len(tc.receivedHashes))
|
||||
}
|
||||
|
||||
close(tc.wait0)
|
||||
|
||||
timeout := time.NewTimer(100 * time.Millisecond)
|
||||
defer timeout.Stop()
|
||||
|
||||
select {
|
||||
case <-tc.batchDone:
|
||||
t.Fatal("batch done early")
|
||||
case <-timeout.C:
|
||||
}
|
||||
|
||||
close(tc.wait2)
|
||||
|
||||
timeout2 := time.NewTimer(10000 * time.Millisecond)
|
||||
defer timeout2.Stop()
|
||||
|
||||
select {
|
||||
case <-tc.batchDone:
|
||||
case <-timeout2.C:
|
||||
t.Fatal("timeout waiting batchdone call")
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func TestStreamerRequestSubscriptionQuitMsgExchange(t *testing.T) {
|
||||
tester, streamer, _, teardown, err := newStreamerTester(t)
|
||||
defer teardown()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
streamer.RegisterServerFunc("foo", func(p *Peer, t string, live bool) (Server, error) {
|
||||
return newTestServer(t), nil
|
||||
})
|
||||
|
||||
peerID := tester.IDs[0]
|
||||
|
||||
stream := NewStream("foo", "", true)
|
||||
err = streamer.RequestSubscription(peerID, stream, NewRange(5, 8), Top)
|
||||
if err != nil {
|
||||
t.Fatalf("Expected no error, got %v", err)
|
||||
}
|
||||
|
||||
err = tester.TestExchanges(
|
||||
p2ptest.Exchange{
|
||||
Label: "RequestSubscription message",
|
||||
Expects: []p2ptest.Expect{
|
||||
{
|
||||
Code: 8,
|
||||
Msg: &RequestSubscriptionMsg{
|
||||
Stream: stream,
|
||||
History: NewRange(5, 8),
|
||||
Priority: Top,
|
||||
},
|
||||
Peer: peerID,
|
||||
},
|
||||
},
|
||||
},
|
||||
p2ptest.Exchange{
|
||||
Label: "Subscribe message",
|
||||
Triggers: []p2ptest.Trigger{
|
||||
{
|
||||
Code: 4,
|
||||
Msg: &SubscribeMsg{
|
||||
Stream: stream,
|
||||
History: NewRange(5, 8),
|
||||
Priority: Top,
|
||||
},
|
||||
Peer: peerID,
|
||||
},
|
||||
},
|
||||
Expects: []p2ptest.Expect{
|
||||
{
|
||||
Code: 1,
|
||||
Msg: &OfferedHashesMsg{
|
||||
Stream: NewStream("foo", "", false),
|
||||
HandoverProof: &HandoverProof{
|
||||
Handover: &Handover{},
|
||||
},
|
||||
Hashes: make([]byte, HashSize),
|
||||
From: 6,
|
||||
To: 9,
|
||||
},
|
||||
Peer: peerID,
|
||||
},
|
||||
{
|
||||
Code: 1,
|
||||
Msg: &OfferedHashesMsg{
|
||||
Stream: stream,
|
||||
HandoverProof: &HandoverProof{
|
||||
Handover: &Handover{},
|
||||
},
|
||||
From: 1,
|
||||
To: 1,
|
||||
Hashes: make([]byte, HashSize),
|
||||
},
|
||||
Peer: peerID,
|
||||
},
|
||||
},
|
||||
},
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
err = streamer.Quit(peerID, stream)
|
||||
if err != nil {
|
||||
t.Fatalf("Expected no error, got %v", err)
|
||||
}
|
||||
|
||||
err = tester.TestExchanges(p2ptest.Exchange{
|
||||
Label: "Quit message",
|
||||
Expects: []p2ptest.Expect{
|
||||
{
|
||||
Code: 9,
|
||||
Msg: &QuitMsg{
|
||||
Stream: stream,
|
||||
},
|
||||
Peer: peerID,
|
||||
},
|
||||
},
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
historyStream := getHistoryStream(stream)
|
||||
|
||||
err = streamer.Quit(peerID, historyStream)
|
||||
if err != nil {
|
||||
t.Fatalf("Expected no error, got %v", err)
|
||||
}
|
||||
|
||||
err = tester.TestExchanges(p2ptest.Exchange{
|
||||
Label: "Quit message",
|
||||
Expects: []p2ptest.Expect{
|
||||
{
|
||||
Code: 9,
|
||||
Msg: &QuitMsg{
|
||||
Stream: historyStream,
|
||||
},
|
||||
Peer: peerID,
|
||||
},
|
||||
},
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
297
swarm/network/stream/syncer.go
Normal file
@@ -0,0 +1,297 @@
|
||||
// Copyright 2018 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package stream
|
||||
|
||||
import (
|
||||
"math"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/metrics"
|
||||
"github.com/ethereum/go-ethereum/swarm/log"
|
||||
"github.com/ethereum/go-ethereum/swarm/storage"
|
||||
)
|
||||
|
||||
const (
|
||||
// BatchSize = 2
|
||||
BatchSize = 128
|
||||
)
|
||||
|
||||
// SwarmSyncerServer implements a Server for history syncing on bins
|
||||
// offered streams:
|
||||
// * live request delivery with or without checkback
|
||||
// * (live/non-live historical) chunk syncing per proximity bin
|
||||
type SwarmSyncerServer struct {
|
||||
po uint8
|
||||
db *storage.DBAPI
|
||||
sessionAt uint64
|
||||
start uint64
|
||||
quit chan struct{}
|
||||
}
|
||||
|
||||
// NewSwarmSyncerServer is a constructor for SwarmSyncerServer
|
||||
func NewSwarmSyncerServer(live bool, po uint8, db *storage.DBAPI) (*SwarmSyncerServer, error) {
|
||||
sessionAt := db.CurrentBucketStorageIndex(po)
|
||||
var start uint64
|
||||
if live {
|
||||
start = sessionAt
|
||||
}
|
||||
return &SwarmSyncerServer{
|
||||
po: po,
|
||||
db: db,
|
||||
sessionAt: sessionAt,
|
||||
start: start,
|
||||
quit: make(chan struct{}),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func RegisterSwarmSyncerServer(streamer *Registry, db *storage.DBAPI) {
|
||||
streamer.RegisterServerFunc("SYNC", func(p *Peer, t string, live bool) (Server, error) {
|
||||
po, err := ParseSyncBinKey(t)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return NewSwarmSyncerServer(live, po, db)
|
||||
})
|
||||
// streamer.RegisterServerFunc(stream, func(p *Peer) (Server, error) {
|
||||
// return NewOutgoingProvableSwarmSyncer(po, db)
|
||||
// })
|
||||
}
|
||||
|
||||
// Close needs to be called on a stream server
|
||||
func (s *SwarmSyncerServer) Close() {
|
||||
close(s.quit)
|
||||
}
|
||||
|
||||
// GetData retrieves the actual chunk from the localstore
|
||||
func (s *SwarmSyncerServer) GetData(key []byte) ([]byte, error) {
|
||||
chunk, err := s.db.Get(storage.Address(key))
|
||||
if err == storage.ErrFetching {
|
||||
<-chunk.ReqC
|
||||
} else if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return chunk.SData, nil
|
||||
}
|
||||
|
||||
// SetNextBatch retrieves the next batch of hashes from the dbstore
|
||||
func (s *SwarmSyncerServer) SetNextBatch(from, to uint64) ([]byte, uint64, uint64, *HandoverProof, error) {
|
||||
var batch []byte
|
||||
i := 0
|
||||
if from == 0 {
|
||||
from = s.start
|
||||
}
|
||||
if to <= from || from >= s.sessionAt {
|
||||
to = math.MaxUint64
|
||||
}
|
||||
var ticker *time.Ticker
|
||||
defer func() {
|
||||
if ticker != nil {
|
||||
ticker.Stop()
|
||||
}
|
||||
}()
|
||||
var wait bool
|
||||
for {
|
||||
if wait {
|
||||
if ticker == nil {
|
||||
ticker = time.NewTicker(1000 * time.Millisecond)
|
||||
}
|
||||
select {
|
||||
case <-ticker.C:
|
||||
case <-s.quit:
|
||||
return nil, 0, 0, nil, nil
|
||||
}
|
||||
}
|
||||
|
||||
metrics.GetOrRegisterCounter("syncer.setnextbatch.iterator", nil).Inc(1)
|
||||
err := s.db.Iterator(from, to, s.po, func(addr storage.Address, idx uint64) bool {
|
||||
batch = append(batch, addr[:]...)
|
||||
i++
|
||||
to = idx
|
||||
return i < BatchSize
|
||||
})
|
||||
if err != nil {
|
||||
return nil, 0, 0, nil, err
|
||||
}
|
||||
if len(batch) > 0 {
|
||||
break
|
||||
}
|
||||
wait = true
|
||||
}
|
||||
|
||||
log.Trace("Swarm syncer offer batch", "po", s.po, "len", i, "from", from, "to", to, "current store count", s.db.CurrentBucketStorageIndex(s.po))
|
||||
return batch, from, to, nil, nil
|
||||
}
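The loop above accumulates at most BatchSize addresses into a flat byte slice through an iterator callback, remembering the index of the last item as the new upper bound. A dependency-free sketch of that accumulation pattern (the hash size and iterator here are stand-ins, not the storage API):

package main

import "fmt"

const (
	hashSize  = 32  // stand-in for HashSize
	batchSize = 128 // same value as BatchSize above
)

// iterate is a stand-in for the db iterator: it calls f for each key until f returns false.
func iterate(keys [][hashSize]byte, f func(k [hashSize]byte, idx uint64) bool) {
	for idx, k := range keys {
		if !f(k, uint64(idx)) {
			return
		}
	}
}

func main() {
	keys := make([][hashSize]byte, 300) // more keys than fit in one batch
	var batch []byte
	i := 0
	var to uint64
	iterate(keys, func(k [hashSize]byte, idx uint64) bool {
		batch = append(batch, k[:]...) // append the raw key bytes
		i++
		to = idx // remember the last index as the new upper bound
		return i < batchSize
	})
	fmt.Printf("collected %d hashes (%d bytes), last index %d\n", i, len(batch), to)
}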
|
||||
|
||||
// SwarmSyncerClient
|
||||
type SwarmSyncerClient struct {
|
||||
sessionAt uint64
|
||||
nextC chan struct{}
|
||||
sessionRoot storage.Address
|
||||
sessionReader storage.LazySectionReader
|
||||
retrieveC chan *storage.Chunk
|
||||
storeC chan *storage.Chunk
|
||||
db *storage.DBAPI
|
||||
// chunker storage.Chunker
|
||||
currentRoot storage.Address
|
||||
requestFunc func(chunk *storage.Chunk)
|
||||
end, start uint64
|
||||
peer *Peer
|
||||
ignoreExistingRequest bool
|
||||
stream Stream
|
||||
}
|
||||
|
||||
// NewSwarmSyncerClient is a constructor for a provable data exchange syncer
|
||||
func NewSwarmSyncerClient(p *Peer, db *storage.DBAPI, ignoreExistingRequest bool, stream Stream) (*SwarmSyncerClient, error) {
|
||||
return &SwarmSyncerClient{
|
||||
db: db,
|
||||
peer: p,
|
||||
ignoreExistingRequest: ignoreExistingRequest,
|
||||
stream: stream,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// // NewIncomingProvableSwarmSyncer is a constructor for a provable data exchange syncer
|
||||
// func NewIncomingProvableSwarmSyncer(po int, priority int, index uint64, sessionAt uint64, intervals []uint64, sessionRoot storage.Key, chunker *storage.PyramidChunker, store storage.ChunkStore, p Peer) *SwarmSyncerClient {
|
||||
// retrieveC := make(storage.Chunk, chunksCap)
|
||||
// RunChunkRequestor(p, retrieveC)
|
||||
// storeC := make(storage.Chunk, chunksCap)
|
||||
// RunChunkStorer(store, storeC)
|
||||
// s := &SwarmSyncerClient{
|
||||
// po: po,
|
||||
// priority: priority,
|
||||
// sessionAt: sessionAt,
|
||||
// start: index,
|
||||
// end: index,
|
||||
// nextC: make(chan struct{}, 1),
|
||||
// intervals: intervals,
|
||||
// sessionRoot: sessionRoot,
|
||||
// sessionReader: chunker.Join(sessionRoot, retrieveC),
|
||||
// retrieveC: retrieveC,
|
||||
// storeC: storeC,
|
||||
// }
|
||||
// return s
|
||||
// }
|
||||
|
||||
// // StartSyncing is called on the Peer to start the syncing process
|
||||
// // the idea is that it is called only after kademlia is close to healthy
|
||||
// func StartSyncing(s *Streamer, peerId discover.NodeID, po uint8, nn bool) {
|
||||
// lastPO := po
|
||||
// if nn {
|
||||
// lastPO = maxPO
|
||||
// }
|
||||
//
|
||||
// for i := po; i <= lastPO; i++ {
|
||||
// s.Subscribe(peerId, "SYNC", newSyncLabel("LIVE", po), 0, 0, High, true)
|
||||
// s.Subscribe(peerId, "SYNC", newSyncLabel("HISTORY", po), 0, 0, Mid, false)
|
||||
// }
|
||||
// }
|
||||
|
||||
// RegisterSwarmSyncerClient registers the client constructor function
|
||||
// to handle incoming sync streams
|
||||
func RegisterSwarmSyncerClient(streamer *Registry, db *storage.DBAPI) {
|
||||
streamer.RegisterClientFunc("SYNC", func(p *Peer, t string, live bool) (Client, error) {
|
||||
return NewSwarmSyncerClient(p, db, true, NewStream("SYNC", t, live))
|
||||
})
|
||||
}
|
||||
|
||||
// NeedData
|
||||
func (s *SwarmSyncerClient) NeedData(key []byte) (wait func()) {
|
||||
chunk, _ := s.db.GetOrCreateRequest(key)
|
||||
// TODO: we may want to request from this peer anyway even if the request exists
|
||||
|
||||
// ignoreExistingRequest is temporarily commented out until its functionality is verified.
|
||||
// For now, this optimization can be disabled.
|
||||
if chunk.ReqC == nil { //|| (s.ignoreExistingRequest && !created) {
|
||||
return nil
|
||||
}
|
||||
// create request and wait until the chunk data arrives and is stored
|
||||
return func() {
|
||||
chunk.WaitToStore()
|
||||
}
|
||||
}
|
||||
|
||||
// BatchDone
|
||||
func (s *SwarmSyncerClient) BatchDone(stream Stream, from uint64, hashes []byte, root []byte) func() (*TakeoverProof, error) {
|
||||
// TODO: reenable this with putter/getter refactored code
|
||||
// if s.chunker != nil {
|
||||
// return func() (*TakeoverProof, error) { return s.TakeoverProof(stream, from, hashes, root) }
|
||||
// }
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *SwarmSyncerClient) TakeoverProof(stream Stream, from uint64, hashes []byte, root storage.Address) (*TakeoverProof, error) {
|
||||
// for provable syncer currentRoot is non-zero length
|
||||
// TODO: reenable this with putter/getter
|
||||
// if s.chunker != nil {
|
||||
// if from > s.sessionAt { // for live syncing currentRoot is always updated
|
||||
// //expRoot, err := s.chunker.Append(s.currentRoot, bytes.NewReader(hashes), s.retrieveC, s.storeC)
|
||||
// expRoot, _, err := s.chunker.Append(s.currentRoot, bytes.NewReader(hashes), s.retrieveC)
|
||||
// if err != nil {
|
||||
// return nil, err
|
||||
// }
|
||||
// if !bytes.Equal(root, expRoot) {
|
||||
// return nil, fmt.Errorf("HandoverProof mismatch")
|
||||
// }
|
||||
// s.currentRoot = root
|
||||
// } else {
|
||||
// expHashes := make([]byte, len(hashes))
|
||||
// _, err := s.sessionReader.ReadAt(expHashes, int64(s.end*HashSize))
|
||||
// if err != nil && err != io.EOF {
|
||||
// return nil, err
|
||||
// }
|
||||
// if !bytes.Equal(expHashes, hashes) {
|
||||
// return nil, errors.New("invalid proof")
|
||||
// }
|
||||
// }
|
||||
// return nil, nil
|
||||
// }
|
||||
s.end += uint64(len(hashes)) / HashSize
|
||||
takeover := &Takeover{
|
||||
Stream: stream,
|
||||
Start: s.start,
|
||||
End: s.end,
|
||||
Root: root,
|
||||
}
|
||||
// serialise and sign
|
||||
return &TakeoverProof{
|
||||
Takeover: takeover,
|
||||
Sig: nil,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (s *SwarmSyncerClient) Close() {}
|
||||
|
||||
// base for parsing and formatting the sync bin key
|
||||
// it must be 2 <= base <= 36
|
||||
const syncBinKeyBase = 36
|
||||
|
||||
// FormatSyncBinKey returns a string representation of
|
||||
// a Kademlia bin number to be used as the key for a SYNC stream.
|
||||
func FormatSyncBinKey(bin uint8) string {
|
||||
return strconv.FormatUint(uint64(bin), syncBinKeyBase)
|
||||
}
|
||||
|
||||
// ParseSyncBinKey parses the string representation
|
||||
// and returns the Kademlia bin number.
|
||||
func ParseSyncBinKey(s string) (uint8, error) {
|
||||
bin, err := strconv.ParseUint(s, syncBinKeyBase, 8)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return uint8(bin), nil
|
||||
}
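Since a sync bin key is just the bin number rendered in base 36, the round trip can be checked with strconv alone; a small standalone example:

package main

import (
	"fmt"
	"strconv"
)

func main() {
	for _, bin := range []uint8{0, 1, 15, 16, 35, 36, 255} {
		key := strconv.FormatUint(uint64(bin), 36) // what FormatSyncBinKey does
		back, err := strconv.ParseUint(key, 36, 8) // what ParseSyncBinKey does
		fmt.Printf("bin %3d -> key %q -> %3d (err: %v)\n", bin, key, back, err)
	}
}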
|
264
swarm/network/stream/syncer_test.go
Normal file
@@ -0,0 +1,264 @@
|
||||
// Copyright 2018 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package stream
|
||||
|
||||
import (
|
||||
"context"
|
||||
crand "crypto/rand"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"math"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/p2p/discover"
|
||||
"github.com/ethereum/go-ethereum/p2p/simulations"
|
||||
"github.com/ethereum/go-ethereum/rpc"
|
||||
"github.com/ethereum/go-ethereum/swarm/log"
|
||||
"github.com/ethereum/go-ethereum/swarm/network"
|
||||
streamTesting "github.com/ethereum/go-ethereum/swarm/network/stream/testing"
|
||||
"github.com/ethereum/go-ethereum/swarm/storage"
|
||||
)
|
||||
|
||||
const dataChunkCount = 200
|
||||
|
||||
func TestSyncerSimulation(t *testing.T) {
|
||||
testSyncBetweenNodes(t, 2, 1, dataChunkCount, true, 1)
|
||||
testSyncBetweenNodes(t, 4, 1, dataChunkCount, true, 1)
|
||||
testSyncBetweenNodes(t, 8, 1, dataChunkCount, true, 1)
|
||||
testSyncBetweenNodes(t, 16, 1, dataChunkCount, true, 1)
|
||||
}
|
||||
|
||||
func createMockStore(id discover.NodeID, addr *network.BzzAddr) (storage.ChunkStore, error) {
|
||||
var err error
|
||||
address := common.BytesToAddress(id.Bytes())
|
||||
mockStore := globalStore.NewNodeStore(address)
|
||||
params := storage.NewDefaultLocalStoreParams()
|
||||
datadirs[id], err = ioutil.TempDir("", "localMockStore-"+id.TerminalString())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
params.Init(datadirs[id])
|
||||
params.BaseKey = addr.Over()
|
||||
lstore, err := storage.NewLocalStore(params, mockStore)
|
||||
return lstore, nil
|
||||
}
|
||||
|
||||
func testSyncBetweenNodes(t *testing.T, nodes, conns, chunkCount int, skipCheck bool, po uint8) {
|
||||
defer setDefaultSkipCheck(defaultSkipCheck)
|
||||
defaultSkipCheck = skipCheck
|
||||
//data directories for each node and store
|
||||
datadirs = make(map[discover.NodeID]string)
|
||||
if *useMockStore {
|
||||
createStoreFunc = createMockStore
|
||||
createGlobalStore()
|
||||
} else {
|
||||
createStoreFunc = createTestLocalStorageFromSim
|
||||
}
|
||||
defer datadirsCleanup()
|
||||
|
||||
registries = make(map[discover.NodeID]*TestRegistry)
|
||||
toAddr = func(id discover.NodeID) *network.BzzAddr {
|
||||
addr := network.NewAddrFromNodeID(id)
|
||||
//hack to put addresses in same space
|
||||
addr.OAddr[0] = byte(0)
|
||||
return addr
|
||||
}
|
||||
conf := &streamTesting.RunConfig{
|
||||
Adapter: *adapter,
|
||||
NodeCount: nodes,
|
||||
ConnLevel: conns,
|
||||
ToAddr: toAddr,
|
||||
Services: services,
|
||||
EnableMsgEvents: false,
|
||||
}
|
||||
// HACK: these are global variables in the test so that they are available for
|
||||
// the service constructor function
|
||||
// TODO: will this work with exec/docker adapter?
|
||||
// localstore of nodes made available for action and check calls
|
||||
stores = make(map[discover.NodeID]storage.ChunkStore)
|
||||
deliveries = make(map[discover.NodeID]*Delivery)
|
||||
// create context for simulation run
|
||||
timeout := 30 * time.Second
|
||||
ctx, cancel := context.WithTimeout(context.Background(), timeout)
|
||||
// defer cancel should come before defer simulation teardown
|
||||
defer cancel()
|
||||
|
||||
// create simulation network with the config
|
||||
sim, teardown, err := streamTesting.NewSimulation(conf)
|
||||
var rpcSubscriptionsWg sync.WaitGroup
|
||||
defer func() {
|
||||
rpcSubscriptionsWg.Wait()
|
||||
teardown()
|
||||
}()
|
||||
if err != nil {
|
||||
t.Fatal(err.Error())
|
||||
}
|
||||
|
||||
nodeIndex := make(map[discover.NodeID]int)
|
||||
for i, id := range sim.IDs {
|
||||
nodeIndex[id] = i
|
||||
if !*useMockStore {
|
||||
stores[id] = sim.Stores[i]
|
||||
sim.Stores[i] = stores[id]
|
||||
}
|
||||
}
|
||||
// peerCount function gives the number of peer connections for a nodeID
|
||||
// this is needed for the service run function to wait until
|
||||
// each protocol instance runs and the streamer peers are available
|
||||
peerCount = func(id discover.NodeID) int {
|
||||
if sim.IDs[0] == id || sim.IDs[nodes-1] == id {
|
||||
return 1
|
||||
}
|
||||
return 2
|
||||
}
|
||||
waitPeerErrC = make(chan error)
|
||||
|
||||
// create DBAPI-s for all nodes
|
||||
dbs := make([]*storage.DBAPI, nodes)
|
||||
for i := 0; i < nodes; i++ {
|
||||
dbs[i] = storage.NewDBAPI(sim.Stores[i].(*storage.LocalStore))
|
||||
}
|
||||
|
||||
// collect hashes in po 1 bin for each node
|
||||
hashes := make([][]storage.Address, nodes)
|
||||
totalHashes := 0
|
||||
hashCounts := make([]int, nodes)
|
||||
for i := nodes - 1; i >= 0; i-- {
|
||||
if i < nodes-1 {
|
||||
hashCounts[i] = hashCounts[i+1]
|
||||
}
|
||||
dbs[i].Iterator(0, math.MaxUint64, po, func(addr storage.Address, index uint64) bool {
|
||||
hashes[i] = append(hashes[i], addr)
|
||||
totalHashes++
|
||||
hashCounts[i]++
|
||||
return true
|
||||
})
|
||||
}
|
||||
|
||||
// errc is error channel for simulation
|
||||
errc := make(chan error, 1)
|
||||
quitC := make(chan struct{})
|
||||
defer close(quitC)
|
||||
|
||||
// action is subscribe
|
||||
action := func(ctx context.Context) error {
|
||||
// need to wait until an asynchronous process registers the peers in streamer.peers
|
||||
// that is used by Subscribe
|
||||
// the global peerCount function tells how many connections each node has
|
||||
// TODO: this is to be reimplemented with peerEvent watcher without global var
|
||||
i := 0
|
||||
for err := range waitPeerErrC {
|
||||
if err != nil {
|
||||
return fmt.Errorf("error waiting for peers: %s", err)
|
||||
}
|
||||
i++
|
||||
if i == nodes {
|
||||
break
|
||||
}
|
||||
}
|
||||
// each node Subscribes to each other's swarmChunkServerStreamName
|
||||
for j := 0; j < nodes-1; j++ {
|
||||
id := sim.IDs[j]
|
||||
sim.Stores[j] = stores[id]
|
||||
err := sim.CallClient(id, func(client *rpc.Client) error {
|
||||
// report disconnect events to the error channel because peers should not disconnect
|
||||
doneC, err := streamTesting.WatchDisconnections(id, client, errc, quitC)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
rpcSubscriptionsWg.Add(1)
|
||||
go func() {
|
||||
<-doneC
|
||||
rpcSubscriptionsWg.Done()
|
||||
}()
|
||||
ctx, cancel := context.WithTimeout(ctx, 1*time.Second)
|
||||
defer cancel()
|
||||
// start syncing, i.e., subscribe to the upstream peer's po 1 bin
|
||||
sid := sim.IDs[j+1]
|
||||
return client.CallContext(ctx, nil, "stream_subscribeStream", sid, NewStream("SYNC", FormatSyncBinKey(1), false), NewRange(0, 0), Top)
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
// here we distribute chunks of a random file into stores 1...nodes
|
||||
rrFileStore := storage.NewFileStore(newRoundRobinStore(sim.Stores[1:]...), storage.NewFileStoreParams())
|
||||
size := chunkCount * chunkSize
|
||||
_, wait, err := rrFileStore.Store(io.LimitReader(crand.Reader, int64(size)), int64(size), false)
|
||||
// need to wait because we then immediately collect the relevant bin content
|
||||
wait()
|
||||
if err != nil {
|
||||
t.Fatal(err.Error())
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// this makes sure check is not called before the previous call finishes
|
||||
check := func(ctx context.Context, id discover.NodeID) (bool, error) {
|
||||
select {
|
||||
case err := <-errc:
|
||||
return false, err
|
||||
case <-ctx.Done():
|
||||
return false, ctx.Err()
|
||||
default:
|
||||
}
|
||||
|
||||
i := nodeIndex[id]
|
||||
var total, found int
|
||||
|
||||
for j := i; j < nodes; j++ {
|
||||
total += len(hashes[j])
|
||||
for _, key := range hashes[j] {
|
||||
chunk, err := dbs[i].Get(key)
|
||||
if err == storage.ErrFetching {
|
||||
<-chunk.ReqC
|
||||
} else if err != nil {
|
||||
continue
|
||||
}
|
||||
// needed for leveldb not to be closed?
|
||||
// chunk.WaitToStore()
|
||||
found++
|
||||
}
|
||||
}
|
||||
log.Debug("sync check", "node", id, "index", i, "bin", po, "found", found, "total", total)
|
||||
return total == found, nil
|
||||
}
|
||||
|
||||
conf.Step = &simulations.Step{
|
||||
Action: action,
|
||||
Trigger: streamTesting.Trigger(500*time.Millisecond, quitC, sim.IDs[0:nodes-1]...),
|
||||
Expect: &simulations.Expectation{
|
||||
Nodes: sim.IDs[0:1],
|
||||
Check: check,
|
||||
},
|
||||
}
|
||||
startedAt := time.Now()
|
||||
result, err := sim.Run(ctx, conf)
|
||||
finishedAt := time.Now()
|
||||
if err != nil {
|
||||
t.Fatalf("Setting up simulation failed: %v", err)
|
||||
}
|
||||
if result.Error != nil {
|
||||
t.Fatalf("Simulation failed: %s", result.Error)
|
||||
}
|
||||
streamTesting.CheckResult(t, result, startedAt, finishedAt)
|
||||
}
|
1
swarm/network/stream/testing/snapshot_128.json
Normal file
File diff suppressed because one or more lines are too long
1
swarm/network/stream/testing/snapshot_16.json
Normal file
File diff suppressed because one or more lines are too long
1
swarm/network/stream/testing/snapshot_256.json
Normal file
File diff suppressed because one or more lines are too long
1
swarm/network/stream/testing/snapshot_32.json
Normal file
File diff suppressed because one or more lines are too long
1
swarm/network/stream/testing/snapshot_64.json
Normal file
File diff suppressed because one or more lines are too long
293
swarm/network/stream/testing/testing.go
Normal file
@@ -0,0 +1,293 @@
|
||||
// Copyright 2018 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package testing
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"math/rand"
|
||||
"os"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/p2p"
|
||||
"github.com/ethereum/go-ethereum/p2p/discover"
|
||||
"github.com/ethereum/go-ethereum/p2p/simulations"
|
||||
"github.com/ethereum/go-ethereum/p2p/simulations/adapters"
|
||||
"github.com/ethereum/go-ethereum/rpc"
|
||||
"github.com/ethereum/go-ethereum/swarm/log"
|
||||
"github.com/ethereum/go-ethereum/swarm/network"
|
||||
"github.com/ethereum/go-ethereum/swarm/storage"
|
||||
)
|
||||
|
||||
type Simulation struct {
|
||||
Net *simulations.Network
|
||||
Stores []storage.ChunkStore
|
||||
Addrs []network.Addr
|
||||
IDs []discover.NodeID
|
||||
}
|
||||
|
||||
func SetStores(addrs ...network.Addr) ([]storage.ChunkStore, func(), error) {
|
||||
var datadirs []string
|
||||
stores := make([]storage.ChunkStore, len(addrs))
|
||||
var err error
|
||||
for i, addr := range addrs {
|
||||
var datadir string
|
||||
datadir, err = ioutil.TempDir("", "streamer")
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
var store storage.ChunkStore
|
||||
params := storage.NewDefaultLocalStoreParams()
|
||||
params.Init(datadir)
|
||||
params.BaseKey = addr.Over()
|
||||
store, err = storage.NewTestLocalStoreForAddr(params)
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
datadirs = append(datadirs, datadir)
|
||||
stores[i] = store
|
||||
}
|
||||
teardown := func() {
|
||||
for i, datadir := range datadirs {
|
||||
stores[i].Close()
|
||||
os.RemoveAll(datadir)
|
||||
}
|
||||
}
|
||||
return stores, teardown, err
|
||||
}
|
||||
|
||||
func NewAdapter(adapterType string, services adapters.Services) (adapter adapters.NodeAdapter, teardown func(), err error) {
|
||||
teardown = func() {}
|
||||
switch adapterType {
|
||||
case "sim":
|
||||
adapter = adapters.NewSimAdapter(services)
|
||||
case "exec":
|
||||
baseDir, err0 := ioutil.TempDir("", "swarm-test")
|
||||
if err0 != nil {
|
||||
return nil, teardown, err0
|
||||
}
|
||||
teardown = func() { os.RemoveAll(baseDir) }
|
||||
adapter = adapters.NewExecAdapter(baseDir)
|
||||
case "docker":
|
||||
adapter, err = adapters.NewDockerAdapter()
|
||||
if err != nil {
|
||||
return nil, teardown, err
|
||||
}
|
||||
default:
|
||||
return nil, teardown, errors.New("adapter needs to be one of sim, exec, docker")
|
||||
}
|
||||
return adapter, teardown, nil
|
||||
}
|
||||
|
||||
func CheckResult(t *testing.T, result *simulations.StepResult, startedAt, finishedAt time.Time) {
|
||||
t.Logf("Simulation passed in %s", result.FinishedAt.Sub(result.StartedAt))
|
||||
if len(result.Passes) > 1 {
|
||||
var min, max time.Duration
|
||||
var sum int
|
||||
for _, pass := range result.Passes {
|
||||
duration := pass.Sub(result.StartedAt)
|
||||
if sum == 0 || duration < min {
|
||||
min = duration
|
||||
}
|
||||
if duration > max {
|
||||
max = duration
|
||||
}
|
||||
sum += int(duration.Nanoseconds())
|
||||
}
|
||||
t.Logf("Min: %s, Max: %s, Average: %s", min, max, time.Duration(sum/len(result.Passes))*time.Nanosecond)
|
||||
}
|
||||
t.Logf("Setup: %s, Shutdown: %s", result.StartedAt.Sub(startedAt), finishedAt.Sub(result.FinishedAt))
|
||||
}
|
||||
|
||||
type RunConfig struct {
|
||||
Adapter string
|
||||
Step *simulations.Step
|
||||
NodeCount int
|
||||
ConnLevel int
|
||||
ToAddr func(discover.NodeID) *network.BzzAddr
|
||||
Services adapters.Services
|
||||
DefaultService string
|
||||
EnableMsgEvents bool
|
||||
}
|
||||
|
||||
func NewSimulation(conf *RunConfig) (*Simulation, func(), error) {
|
||||
// create network
|
||||
nodes := conf.NodeCount
|
||||
adapter, adapterTeardown, err := NewAdapter(conf.Adapter, conf.Services)
|
||||
if err != nil {
|
||||
return nil, adapterTeardown, err
|
||||
}
|
||||
defaultService := "streamer"
|
||||
if conf.DefaultService != "" {
|
||||
defaultService = conf.DefaultService
|
||||
}
|
||||
net := simulations.NewNetwork(adapter, &simulations.NetworkConfig{
|
||||
ID: "0",
|
||||
DefaultService: defaultService,
|
||||
})
|
||||
teardown := func() {
|
||||
adapterTeardown()
|
||||
net.Shutdown()
|
||||
}
|
||||
ids := make([]discover.NodeID, nodes)
|
||||
addrs := make([]network.Addr, nodes)
|
||||
// create nodes
|
||||
for i := 0; i < nodes; i++ {
|
||||
nodeconf := adapters.RandomNodeConfig()
|
||||
nodeconf.EnableMsgEvents = conf.EnableMsgEvents
|
||||
node, err := net.NewNodeWithConfig(nodeconf)
|
||||
if err != nil {
|
||||
return nil, teardown, fmt.Errorf("error creating node: %s", err)
|
||||
}
|
||||
ids[i] = node.ID()
|
||||
addrs[i] = conf.ToAddr(ids[i])
|
||||
}
|
||||
// set up a Store for each of the nodes
|
||||
stores, storeTeardown, err := SetStores(addrs...)
|
||||
teardown = func() {
|
||||
net.Shutdown()
|
||||
adapterTeardown()
|
||||
storeTeardown()
|
||||
}
|
||||
if err != nil {
|
||||
return nil, teardown, err
|
||||
}
|
||||
s := &Simulation{
|
||||
Net: net,
|
||||
Stores: stores,
|
||||
IDs: ids,
|
||||
Addrs: addrs,
|
||||
}
|
||||
return s, teardown, nil
|
||||
}
|
||||
|
||||
func (s *Simulation) Run(ctx context.Context, conf *RunConfig) (*simulations.StepResult, error) {
|
||||
// bring up nodes, launch the service
|
||||
nodes := conf.NodeCount
|
||||
conns := conf.ConnLevel
|
||||
for i := 0; i < nodes; i++ {
|
||||
if err := s.Net.Start(s.IDs[i]); err != nil {
|
||||
return nil, fmt.Errorf("error starting node %s: %s", s.IDs[i].TerminalString(), err)
|
||||
}
|
||||
}
|
||||
// run a simulation which connects the nodes in a chain
|
||||
wg := sync.WaitGroup{}
|
||||
for i := range s.IDs {
|
||||
// connect each node to its predecessor and, for additional connections, to random peers
|
||||
for j := 0; j < conns; j++ {
|
||||
var k int
|
||||
if j == 0 {
|
||||
k = i - 1
|
||||
} else {
|
||||
k = rand.Intn(len(s.IDs))
|
||||
}
|
||||
if i > 0 {
|
||||
wg.Add(1)
|
||||
go func(i, k int) {
|
||||
defer wg.Done()
|
||||
s.Net.Connect(s.IDs[i], s.IDs[k])
|
||||
}(i, k)
|
||||
}
|
||||
}
|
||||
}
|
||||
wg.Wait()
|
||||
log.Info(fmt.Sprintf("simulation with %v nodes", len(s.Addrs)))
|
||||
|
||||
// create a FileStore for the pivot node that retrieves only locally, to test
|
||||
// whether retrieve requests have arrived
|
||||
result := simulations.NewSimulation(s.Net).Run(ctx, conf.Step)
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// WatchDisconnections subscribes to admin peerEvents and sends peer event drop
|
||||
// errors to the errc channel. Channel quitC signals the termination of the event loop.
|
||||
// Returned doneC will be closed after the rpc subscription is unsubscribed,
|
||||
// signaling that the simulation network is safe to shut down.
|
||||
func WatchDisconnections(id discover.NodeID, client *rpc.Client, errc chan error, quitC chan struct{}) (doneC <-chan struct{}, err error) {
|
||||
events := make(chan *p2p.PeerEvent)
|
||||
sub, err := client.Subscribe(context.Background(), "admin", events, "peerEvents")
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error getting peer events for node %v: %s", id, err)
|
||||
}
|
||||
c := make(chan struct{})
|
||||
go func() {
|
||||
defer func() {
|
||||
log.Trace("watch disconnections: unsubscribe", "id", id)
|
||||
sub.Unsubscribe()
|
||||
close(c)
|
||||
}()
|
||||
for {
|
||||
select {
|
||||
case <-quitC:
|
||||
return
|
||||
case e := <-events:
|
||||
if e.Type == p2p.PeerEventTypeDrop {
|
||||
select {
|
||||
case errc <- fmt.Errorf("peerEvent for node %v: %v", id, e):
|
||||
case <-quitC:
|
||||
return
|
||||
}
|
||||
}
|
||||
case err := <-sub.Err():
|
||||
if err != nil {
|
||||
select {
|
||||
case errc <- fmt.Errorf("error getting peer events for node %v: %v", id, err):
|
||||
case <-quitC:
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}()
|
||||
return c, nil
|
||||
}
|
||||
|
||||
func Trigger(d time.Duration, quitC chan struct{}, ids ...discover.NodeID) chan discover.NodeID {
|
||||
trigger := make(chan discover.NodeID)
|
||||
go func() {
|
||||
defer close(trigger)
|
||||
ticker := time.NewTicker(d)
|
||||
defer ticker.Stop()
|
||||
// we are only testing the pivot node (net.Nodes[0])
|
||||
for range ticker.C {
|
||||
for _, id := range ids {
|
||||
select {
|
||||
case trigger <- id:
|
||||
case <-quitC:
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
}()
|
||||
return trigger
|
||||
}
|
||||
|
||||
func (sim *Simulation) CallClient(id discover.NodeID, f func(*rpc.Client) error) error {
|
||||
node := sim.Net.GetNode(id)
|
||||
if node == nil {
|
||||
return fmt.Errorf("unknown node: %s", id)
|
||||
}
|
||||
client, err := node.Client()
|
||||
if err != nil {
|
||||
return fmt.Errorf("error getting node client: %s", err)
|
||||
}
|
||||
return f(client)
|
||||
}
|