swarm: codebase split from go-ethereum (#1405)

This commit is contained in:
Rafael Matias
2019-06-03 12:28:18 +02:00
committed by Anton Evangelatov
parent 7a22da98b9
commit b046760db1
1540 changed files with 4654 additions and 129393 deletions

View File

@ -0,0 +1,17 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package discovery

View File

@ -0,0 +1,536 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package discovery
import (
"context"
"flag"
"fmt"
"io/ioutil"
"os"
"path"
"strings"
"testing"
"time"
"github.com/ethersphere/swarm/testutil"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/node"
"github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/p2p/simulations"
"github.com/ethereum/go-ethereum/p2p/simulations/adapters"
"github.com/ethersphere/swarm/network"
"github.com/ethersphere/swarm/state"
colorable "github.com/mattn/go-colorable"
)
// serviceName is used with the exec adapter so the exec'd binary knows which
// service to execute
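// (the exec adapter runs each simulation node as a separate OS process, whereas the
// sim adapter used elsewhere in these tests runs nodes in-process)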
const serviceName = "discovery"
const testNeighbourhoodSize = 2
const discoveryPersistenceDatadir = "discovery_persistence_test_store"
var discoveryPersistencePath = path.Join(os.TempDir(), discoveryPersistenceDatadir)
var discoveryEnabled = true
var persistenceEnabled = false
var services = adapters.Services{
serviceName: newService,
}
func cleanDbStores() error {
entries, err := ioutil.ReadDir(os.TempDir())
if err != nil {
return err
}
for _, f := range entries {
if strings.HasPrefix(f.Name(), discoveryPersistenceDatadir) {
os.RemoveAll(path.Join(os.TempDir(), f.Name()))
}
}
return nil
}
func getDbStore(nodeID string) (*state.DBStore, error) {
if _, err := os.Stat(discoveryPersistencePath + "_" + nodeID); os.IsNotExist(err) {
log.Info(fmt.Sprintf("directory for nodeID %s does not exist. creating...", nodeID))
if err := os.MkdirAll(discoveryPersistencePath+"_"+nodeID, 0755); err != nil {
return nil, err
}
}
log.Info(fmt.Sprintf("opening storage directory for nodeID %s", nodeID))
store, err := state.NewDBStore(discoveryPersistencePath + "_" + nodeID)
if err != nil {
return nil, err
}
return store, nil
}
var (
nodeCount = flag.Int("nodes", defaultNodeCount(), "number of nodes to create (default 32)")
initCount = flag.Int("conns", 1, "number of originally connected peers (default 1)")
loglevel = flag.Int("loglevel", 3, "verbosity of logs")
rawlog = flag.Bool("rawlog", false, "remove terminal formatting from logs")
)
func defaultNodeCount() int {
if testutil.RaceEnabled {
return 8
}
return 32
}
func init() {
flag.Parse()
// register the discovery service which will run as a devp2p
// protocol when using the exec adapter
adapters.RegisterServices(services)
log.PrintOrigins(true)
log.Root().SetHandler(log.LvlFilterHandler(log.Lvl(*loglevel), log.StreamHandler(colorable.NewColorableStderr(), log.TerminalFormat(!*rawlog))))
}
// Benchmarks to test the average time it takes for an N-node ring
// to form a healthy kademlia topology
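// (to run only these benchmarks, something like `go test -run NONE -bench BenchmarkDiscovery`
// from this package's directory should work)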
func BenchmarkDiscovery_8_1(b *testing.B) { benchmarkDiscovery(b, 8, 1) }
func BenchmarkDiscovery_16_1(b *testing.B) { benchmarkDiscovery(b, 16, 1) }
func BenchmarkDiscovery_32_1(b *testing.B) { benchmarkDiscovery(b, 32, 1) }
func BenchmarkDiscovery_64_1(b *testing.B) { benchmarkDiscovery(b, 64, 1) }
func BenchmarkDiscovery_128_1(b *testing.B) { benchmarkDiscovery(b, 128, 1) }
func BenchmarkDiscovery_256_1(b *testing.B) { benchmarkDiscovery(b, 256, 1) }
func BenchmarkDiscovery_8_2(b *testing.B) { benchmarkDiscovery(b, 8, 2) }
func BenchmarkDiscovery_16_2(b *testing.B) { benchmarkDiscovery(b, 16, 2) }
func BenchmarkDiscovery_32_2(b *testing.B) { benchmarkDiscovery(b, 32, 2) }
func BenchmarkDiscovery_64_2(b *testing.B) { benchmarkDiscovery(b, 64, 2) }
func BenchmarkDiscovery_128_2(b *testing.B) { benchmarkDiscovery(b, 128, 2) }
func BenchmarkDiscovery_256_2(b *testing.B) { benchmarkDiscovery(b, 256, 2) }
func BenchmarkDiscovery_8_4(b *testing.B) { benchmarkDiscovery(b, 8, 4) }
func BenchmarkDiscovery_16_4(b *testing.B) { benchmarkDiscovery(b, 16, 4) }
func BenchmarkDiscovery_32_4(b *testing.B) { benchmarkDiscovery(b, 32, 4) }
func BenchmarkDiscovery_64_4(b *testing.B) { benchmarkDiscovery(b, 64, 4) }
func BenchmarkDiscovery_128_4(b *testing.B) { benchmarkDiscovery(b, 128, 4) }
func BenchmarkDiscovery_256_4(b *testing.B) { benchmarkDiscovery(b, 256, 4) }
func TestDiscoverySimulationExecAdapter(t *testing.T) {
testDiscoverySimulationExecAdapter(t, *nodeCount, *initCount)
}
func testDiscoverySimulationExecAdapter(t *testing.T, nodes, conns int) {
baseDir, err := ioutil.TempDir("", "swarm-test")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(baseDir)
testDiscoverySimulation(t, nodes, conns, adapters.NewExecAdapter(baseDir))
}
func TestDiscoverySimulationSimAdapter(t *testing.T) {
testDiscoverySimulationSimAdapter(t, *nodeCount, *initCount)
}
func TestDiscoveryPersistenceSimulationSimAdapter(t *testing.T) {
testDiscoveryPersistenceSimulationSimAdapter(t, *nodeCount, *initCount)
}
func testDiscoveryPersistenceSimulationSimAdapter(t *testing.T, nodes, conns int) {
testDiscoveryPersistenceSimulation(t, nodes, conns, adapters.NewSimAdapter(services))
}
func testDiscoverySimulationSimAdapter(t *testing.T, nodes, conns int) {
testDiscoverySimulation(t, nodes, conns, adapters.NewSimAdapter(services))
}
func testDiscoverySimulation(t *testing.T, nodes, conns int, adapter adapters.NodeAdapter) {
startedAt := time.Now()
result, err := discoverySimulation(nodes, conns, adapter)
if err != nil {
t.Fatalf("Setting up simulation failed: %v", err)
}
if result.Error != nil {
t.Fatalf("Simulation failed: %s", result.Error)
}
t.Logf("Simulation with %d nodes passed in %s", nodes, result.FinishedAt.Sub(result.StartedAt))
var min, max time.Duration
var sum int
for _, pass := range result.Passes {
duration := pass.Sub(result.StartedAt)
if sum == 0 || duration < min {
min = duration
}
if duration > max {
max = duration
}
sum += int(duration.Nanoseconds())
}
t.Logf("Min: %s, Max: %s, Average: %s", min, max, time.Duration(sum/len(result.Passes))*time.Nanosecond)
finishedAt := time.Now()
t.Logf("Setup: %s, shutdown: %s", result.StartedAt.Sub(startedAt), finishedAt.Sub(result.FinishedAt))
}
func testDiscoveryPersistenceSimulation(t *testing.T, nodes, conns int, adapter adapters.NodeAdapter) map[int][]byte {
persistenceEnabled = true
discoveryEnabled = true
result, err := discoveryPersistenceSimulation(nodes, conns, adapter)
if err != nil {
t.Fatalf("Setting up simulation failed: %v", err)
}
if result.Error != nil {
t.Fatalf("Simulation failed: %s", result.Error)
}
t.Logf("Simulation with %d nodes passed in %s", nodes, result.FinishedAt.Sub(result.StartedAt))
// set the discovery and persistence flags again to default so other
// tests will not be affected
discoveryEnabled = true
persistenceEnabled = false
return nil
}
func benchmarkDiscovery(b *testing.B, nodes, conns int) {
for i := 0; i < b.N; i++ {
result, err := discoverySimulation(nodes, conns, adapters.NewSimAdapter(services))
if err != nil {
b.Fatalf("setting up simulation failed: %v", err)
}
if result.Error != nil {
b.Logf("simulation failed: %s", result.Error)
}
}
}
func discoverySimulation(nodes, conns int, adapter adapters.NodeAdapter) (*simulations.StepResult, error) {
// create network
net := simulations.NewNetwork(adapter, &simulations.NetworkConfig{
ID: "0",
DefaultService: serviceName,
})
defer net.Shutdown()
trigger := make(chan enode.ID)
ids := make([]enode.ID, nodes)
for i := 0; i < nodes; i++ {
conf := adapters.RandomNodeConfig()
node, err := net.NewNodeWithConfig(conf)
if err != nil {
return nil, fmt.Errorf("error starting node: %s", err)
}
if err := net.Start(node.ID()); err != nil {
return nil, fmt.Errorf("error starting node %s: %s", node.ID().TerminalString(), err)
}
if err := triggerChecks(trigger, net, node.ID()); err != nil {
return nil, fmt.Errorf("error triggering checks for node %s: %s", node.ID().TerminalString(), err)
}
ids[i] = node.ID()
}
// run a simulation which connects the nodes in a chain and waits
// for full peer discovery
var addrs [][]byte
action := func(ctx context.Context) error {
return nil
}
for i := range ids {
// collect the overlay addresses, used below to construct the peer pot
addrs = append(addrs, ids[i].Bytes())
}
err := net.ConnectNodesChain(nil)
if err != nil {
return nil, err
}
log.Debug(fmt.Sprintf("nodes: %v", len(addrs)))
// construct the peer pot, so that kademlia health can be checked
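// the peer pot maps each node's overlay address (hex-encoded) to the set of nearest
// neighbours that node is expected to know of and connect to, computed from all addresses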
ppmap := network.NewPeerPotMap(network.NewKadParams().NeighbourhoodSize, addrs)
check := func(ctx context.Context, id enode.ID) (bool, error) {
select {
case <-ctx.Done():
return false, ctx.Err()
default:
}
node := net.GetNode(id)
if node == nil {
return false, fmt.Errorf("unknown node: %s", id)
}
client, err := node.Client()
if err != nil {
return false, fmt.Errorf("error getting node client: %s", err)
}
healthy := &network.Health{}
if err := client.Call(&healthy, "hive_getHealthInfo", ppmap[common.Bytes2Hex(id.Bytes())]); err != nil {
return false, fmt.Errorf("error getting node health: %s", err)
}
log.Debug(fmt.Sprintf("node %4s healthy: connected nearest neighbours: %v, know nearest neighbours: %v,\n\n%v", id, healthy.ConnectNN, healthy.KnowNN, healthy.Hive))
return healthy.KnowNN && healthy.ConnectNN, nil
}
// 64 nodes ~ 1min
// 128 nodes ~
timeout := 300 * time.Second
ctx, cancel := context.WithTimeout(context.Background(), timeout)
defer cancel()
result := simulations.NewSimulation(net).Run(ctx, &simulations.Step{
Action: action,
Trigger: trigger,
Expect: &simulations.Expectation{
Nodes: ids,
Check: check,
},
})
return result, nil
}
func discoveryPersistenceSimulation(nodes, conns int, adapter adapters.NodeAdapter) (*simulations.StepResult, error) {
cleanDbStores()
defer cleanDbStores()
// create network
net := simulations.NewNetwork(adapter, &simulations.NetworkConfig{
ID: "0",
DefaultService: serviceName,
})
defer net.Shutdown()
trigger := make(chan enode.ID)
ids := make([]enode.ID, nodes)
var addrs [][]byte
for i := 0; i < nodes; i++ {
conf := adapters.RandomNodeConfig()
node, err := net.NewNodeWithConfig(conf)
if err != nil {
return nil, fmt.Errorf("error starting node: %s", err)
}
if err := net.Start(node.ID()); err != nil {
return nil, fmt.Errorf("error starting node %s: %s", node.ID().TerminalString(), err)
}
if err := triggerChecks(trigger, net, node.ID()); err != nil {
return nil, fmt.Errorf("error triggering checks for node %s: %s", node.ID().TerminalString(), err)
}
// TODO we shouldn't be equating underaddr and overaddr like this, as they are not the same in production
ids[i] = node.ID()
a := ids[i].Bytes()
addrs = append(addrs, a)
}
// run a simulation which connects the nodes in a chain and waits
// for full peer discovery
var restartTime time.Time
action := func(ctx context.Context) error {
ticker := time.NewTicker(500 * time.Millisecond)
for range ticker.C {
isHealthy := true
for _, id := range ids {
//call Healthy RPC
node := net.GetNode(id)
if node == nil {
return fmt.Errorf("unknown node: %s", id)
}
client, err := node.Client()
if err != nil {
return fmt.Errorf("error getting node client: %s", err)
}
healthy := &network.Health{}
addr := id.String()
ppmap := network.NewPeerPotMap(network.NewKadParams().NeighbourhoodSize, addrs)
if err := client.Call(&healthy, "hive_getHealthInfo", ppmap[common.Bytes2Hex(id.Bytes())]); err != nil {
return fmt.Errorf("error getting node health: %s", err)
}
log.Info(fmt.Sprintf("NODE: %s, IS HEALTHY: %t", addr, healthy.ConnectNN && healthy.KnowNN && healthy.CountKnowNN > 0))
var nodeStr string
if err := client.Call(&nodeStr, "hive_string"); err != nil {
return fmt.Errorf("error getting node string %s", err)
}
log.Info(nodeStr)
if !healthy.ConnectNN || healthy.CountKnowNN == 0 {
isHealthy = false
break
}
}
if isHealthy {
break
}
}
ticker.Stop()
log.Info("reached healthy kademlia. starting to shutdown nodes.")
shutdownStarted := time.Now()
// stop all ids, then start them again
for _, id := range ids {
node := net.GetNode(id)
if err := net.Stop(node.ID()); err != nil {
return fmt.Errorf("error stopping node %s: %s", node.ID().TerminalString(), err)
}
}
log.Info(fmt.Sprintf("shutting down nodes took: %s", time.Since(shutdownStarted)))
persistenceEnabled = true
discoveryEnabled = false
restartTime = time.Now()
for _, id := range ids {
node := net.GetNode(id)
if err := net.Start(node.ID()); err != nil {
return fmt.Errorf("error starting node %s: %s", node.ID().TerminalString(), err)
}
if err := triggerChecks(trigger, net, node.ID()); err != nil {
return fmt.Errorf("error triggering checks for node %s: %s", node.ID().TerminalString(), err)
}
}
log.Info(fmt.Sprintf("restarting nodes took: %s", time.Since(restartTime)))
return nil
}
if err := net.ConnectNodesChain(nil); err != nil {
return nil, err
}
log.Debug(fmt.Sprintf("nodes: %v", len(addrs)))
// construct the peer pot, so that kademlia health can be checked
check := func(ctx context.Context, id enode.ID) (bool, error) {
select {
case <-ctx.Done():
return false, ctx.Err()
default:
}
node := net.GetNode(id)
if node == nil {
return false, fmt.Errorf("unknown node: %s", id)
}
client, err := node.Client()
if err != nil {
return false, fmt.Errorf("error getting node client: %s", err)
}
healthy := &network.Health{}
ppmap := network.NewPeerPotMap(network.NewKadParams().NeighbourhoodSize, addrs)
if err := client.Call(&healthy, "hive_getHealthInfo", ppmap[common.Bytes2Hex(id.Bytes())]); err != nil {
return false, fmt.Errorf("error getting node health: %s", err)
}
log.Info(fmt.Sprintf("node %4s healthy: got nearest neighbours: %v, know nearest neighbours: %v", id, healthy.ConnectNN, healthy.KnowNN))
return healthy.KnowNN && healthy.ConnectNN, nil
}
// 64 nodes ~ 1min
// 128 nodes ~
timeout := 300 * time.Second
ctx, cancel := context.WithTimeout(context.Background(), timeout)
defer cancel()
result := simulations.NewSimulation(net).Run(ctx, &simulations.Step{
Action: action,
Trigger: trigger,
Expect: &simulations.Expectation{
Nodes: ids,
Check: check,
},
})
return result, nil
}
// triggerChecks triggers a simulation step check whenever a peer is added or
// removed from the given node, and also every second to avoid a race between
// peer events and kademlia becoming healthy
func triggerChecks(trigger chan enode.ID, net *simulations.Network, id enode.ID) error {
node := net.GetNode(id)
if node == nil {
return fmt.Errorf("unknown node: %s", id)
}
client, err := node.Client()
if err != nil {
return err
}
events := make(chan *p2p.PeerEvent)
sub, err := client.Subscribe(context.Background(), "admin", events, "peerEvents")
if err != nil {
return fmt.Errorf("error getting peer events for node %v: %s", id, err)
}
go func() {
defer sub.Unsubscribe()
tick := time.NewTicker(time.Second)
defer tick.Stop()
for {
select {
case <-events:
trigger <- id
case <-tick.C:
trigger <- id
case err := <-sub.Err():
if err != nil {
log.Error(fmt.Sprintf("error getting peer events for node %v", id), "err", err)
}
return
}
}
}()
return nil
}
func newService(ctx *adapters.ServiceContext) (node.Service, error) {
addr := network.NewAddr(ctx.Config.Node())
kp := network.NewKadParams()
kp.NeighbourhoodSize = testNeighbourhoodSize
if ctx.Config.Reachable != nil {
kp.Reachable = func(o *network.BzzAddr) bool {
return ctx.Config.Reachable(o.ID())
}
}
kad := network.NewKademlia(addr.Over(), kp)
hp := network.NewHiveParams()
hp.KeepAliveInterval = time.Duration(200) * time.Millisecond
hp.Discovery = discoveryEnabled
log.Info(fmt.Sprintf("discovery for nodeID %s is %t", ctx.Config.ID.String(), hp.Discovery))
config := &network.BzzConfig{
OverlayAddr: addr.Over(),
UnderlayAddr: addr.Under(),
HiveParams: hp,
}
if persistenceEnabled {
log.Info(fmt.Sprintf("persistence enabled for nodeID %s", ctx.Config.ID.String()))
store, err := getDbStore(ctx.Config.ID.String())
if err != nil {
return nil, err
}
return network.NewBzz(config, kad, store, nil, nil), nil
}
return network.NewBzz(config, kad, nil, nil, nil), nil
}

View File

@ -0,0 +1 @@
{"nodes":[{"node":{"config":null,"up":false}},{"node":{"config":null,"up":false}},{"node":{"config":null,"up":false}},{"node":{"config":null,"up":false}},{"node":{"config":null,"up":false}},{"node":{"config":null,"up":false}},{"node":{"config":null,"up":false}},{"node":{"config":null,"up":false}},{"node":{"config":null,"up":false}},{"node":{"config":null,"up":false}}],"conns":[{"one":"c04a0c47cb0c522ecf28d8841e93721e73f58790b30e92382816a4b453be2988","other":"d9283e5247a18d6564b3581217e9f4d9c93a4359944894c00bb2b22c690faadc","up":true},{"one":"dd99c11abe2abae112d64d902b96fe0c75243ea67eca759a2769058a30cc0e77","other":"c04a0c47cb0c522ecf28d8841e93721e73f58790b30e92382816a4b453be2988","up":true},{"one":"4f5dad2aa4f26ac5a23d4fbcc807296b474eab77761db6594debd60ef4287aed","other":"dd99c11abe2abae112d64d902b96fe0c75243ea67eca759a2769058a30cc0e77","up":true},{"one":"4f47f4e176d1c9f78d9a7e19723689ffe2a0603004a3d4506a2349e55a56fc17","other":"4f5dad2aa4f26ac5a23d4fbcc807296b474eab77761db6594debd60ef4287aed","up":true},{"one":"20b6a1be2cb8f966151682350e029d4f8da8ee92de10a2a1cb1727d110acebfa","other":"4f47f4e176d1c9f78d9a7e19723689ffe2a0603004a3d4506a2349e55a56fc17","up":true},{"one":"50cb92e77710582fa9cbee7a54cf25c95fd27d8d54b13ba5520a50139c309a22","other":"20b6a1be2cb8f966151682350e029d4f8da8ee92de10a2a1cb1727d110acebfa","up":true},{"one":"319dc901f99940f1339c540bc36fbabb10a96d326b13b9d7f53e7496980e2996","other":"50cb92e77710582fa9cbee7a54cf25c95fd27d8d54b13ba5520a50139c309a22","up":true},{"one":"dc285b6436a8bfd4d2e586d478b18d3fe7b705ce0b4fb27a651adcf6d27984f1","other":"319dc901f99940f1339c540bc36fbabb10a96d326b13b9d7f53e7496980e2996","up":true},{"one":"974dbe511377280f945a53a194b4bb397875b10b1ecb119a92425bbb16db68f1","other":"dc285b6436a8bfd4d2e586d478b18d3fe7b705ce0b4fb27a651adcf6d27984f1","up":true},{"one":"d9283e5247a18d6564b3581217e9f4d9c93a4359944894c00bb2b22c690faadc","other":"974dbe511377280f945a53a194b4bb397875b10b1ecb119a92425bbb16db68f1","up":true}]}

View File

@ -0,0 +1,144 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
// You can run this simulation using
//
// go run ./swarm/network/simulations/overlay.go
package main
import (
"flag"
"fmt"
"net/http"
"runtime"
"sync"
"time"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/node"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/p2p/simulations"
"github.com/ethereum/go-ethereum/p2p/simulations/adapters"
"github.com/ethersphere/swarm/network"
"github.com/ethersphere/swarm/state"
colorable "github.com/mattn/go-colorable"
)
var (
noDiscovery = flag.Bool("no-discovery", false, "disable discovery (useful if you want to load a snapshot)")
vmodule = flag.String("vmodule", "", "log filters for logger via Vmodule")
verbosity = flag.Int("verbosity", 0, "verbosity of logs")
httpSimPort = 8888
)
func init() {
flag.Parse()
//initialize the logger
//this is a demonstration of how to use Vmodule for filtering logs
//provide -vmodule as a parameter with comma-separated values, e.g.:
//-vmodule overlay_test.go=4,simulations=3
//the above example sets overlay_test.go logs to level 4 and packages ending with "simulations" to level 3
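//an example invocation, using the path from the header comment and the flags defined above:
//  go run ./swarm/network/simulations/overlay.go -verbosity 3 -vmodule overlay.go=4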
if *vmodule != "" {
//only enable the pattern matching handler if the flag has been provided
glogger := log.NewGlogHandler(log.StreamHandler(colorable.NewColorableStderr(), log.TerminalFormat(true)))
if *verbosity > 0 {
glogger.Verbosity(log.Lvl(*verbosity))
}
glogger.Vmodule(*vmodule)
log.Root().SetHandler(glogger)
}
}
type Simulation struct {
mtx sync.Mutex
stores map[enode.ID]state.Store
}
func NewSimulation() *Simulation {
return &Simulation{
stores: make(map[enode.ID]state.Store),
}
}
func (s *Simulation) NewService(ctx *adapters.ServiceContext) (node.Service, error) {
node := ctx.Config.Node()
s.mtx.Lock()
store, ok := s.stores[node.ID()]
if !ok {
store = state.NewInmemoryStore()
s.stores[node.ID()] = store
}
s.mtx.Unlock()
addr := network.NewAddr(node)
kp := network.NewKadParams()
kp.NeighbourhoodSize = 2
kp.MaxBinSize = 4
kp.MinBinSize = 1
kp.MaxRetries = 1000
kp.RetryExponent = 2
kp.RetryInterval = 1000000
kad := network.NewKademlia(addr.Over(), kp)
hp := network.NewHiveParams()
hp.Discovery = !*noDiscovery
hp.KeepAliveInterval = 300 * time.Millisecond
config := &network.BzzConfig{
OverlayAddr: addr.Over(),
UnderlayAddr: addr.Under(),
HiveParams: hp,
}
return network.NewBzz(config, kad, store, nil, nil), nil
}
//create the simulation network
func newSimulationNetwork() *simulations.Network {
s := NewSimulation()
services := adapters.Services{
"overlay": s.NewService,
}
adapter := adapters.NewSimAdapter(services)
simNetwork := simulations.NewNetwork(adapter, &simulations.NetworkConfig{
DefaultService: "overlay",
})
return simNetwork
}
//return a new http server
func newOverlaySim(sim *simulations.Network) *simulations.Server {
return simulations.NewServer(sim)
}
// var server
func main() {
//cpu optimization
runtime.GOMAXPROCS(runtime.NumCPU())
//run the sim
runOverlaySim()
}
func runOverlaySim() {
//create the simulation network
net := newSimulationNetwork()
//create a http server with it
sim := newOverlaySim(net)
log.Info(fmt.Sprintf("starting simulation server on 0.0.0.0:%d...", httpSimPort))
//start the HTTP server
http.ListenAndServe(fmt.Sprintf(":%d", httpSimPort), sim)
}

View File

@ -0,0 +1,194 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package main
import (
"context"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"net/http/httptest"
"net/url"
"testing"
"time"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/p2p/simulations"
"github.com/ethersphere/swarm/log"
)
var (
nodeCount = 10
)
//This test exercises the overlay simulation.
//As the simulation is executed via a main function, breakage is easily missed on changes;
//an automated test prevents that.
//The test just connects to the simulation, starts the network,
//starts the mocker, gets the number of nodes, and stops it again.
//It also documents the steps needed by frontends
//to use the simulation API.
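//The HTTP endpoints exercised below, in order: POST /start, POST /mocker/start
//(with form values node-count and mocker-type), GET /nodes, POST /stop, POST /reset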
func TestOverlaySim(t *testing.T) {
//start the simulation
log.Info("Start simulation backend")
//get the simulation network; needed to subscribe to node up events
net := newSimulationNetwork()
//create the overlay simulation
sim := newOverlaySim(net)
//create a http test server with it
srv := httptest.NewServer(sim)
defer srv.Close()
log.Debug("Http simulation server started. Start simulation network")
//start the simulation network (initialization of simulation)
resp, err := http.Post(srv.URL+"/start", "application/json", nil)
if err != nil {
t.Fatal(err)
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
t.Fatalf("Expected Status Code %d, got %d", http.StatusOK, resp.StatusCode)
}
log.Debug("Start mocker")
//start the mocker, needs a node count and an ID
resp, err = http.PostForm(srv.URL+"/mocker/start",
url.Values{
"node-count": {fmt.Sprintf("%d", nodeCount)},
"mocker-type": {simulations.GetMockerList()[0]},
})
if err != nil {
t.Fatal(err)
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
reason, err := ioutil.ReadAll(resp.Body)
if err != nil {
t.Fatal(err)
}
t.Fatalf("Expected Status Code %d, got %d, response body %s", http.StatusOK, resp.StatusCode, string(reason))
}
//variables needed to wait for nodes being up
var upCount int
trigger := make(chan enode.ID)
//wait for all nodes to be up
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
//start watching node up events...
go watchSimEvents(net, ctx, trigger)
//...and wait until all expected up events (nodeCount) have been received
LOOP:
for {
select {
case <-trigger:
//new node up event received, increase counter
upCount++
//all expected node up events received
if upCount == nodeCount {
break LOOP
}
case <-ctx.Done():
t.Fatalf("Timed out waiting for up events")
}
}
//at this point we can query the server
log.Info("Get number of nodes")
//get the number of nodes
resp, err = http.Get(srv.URL + "/nodes")
if err != nil {
t.Fatal(err)
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
t.Fatalf("err %s", resp.Status)
}
b, err := ioutil.ReadAll(resp.Body)
if err != nil {
t.Fatal(err)
}
//unmarshal number of nodes from JSON response
var nodesArr []simulations.Node
err = json.Unmarshal(b, &nodesArr)
if err != nil {
t.Fatal(err)
}
//check if number of nodes received is same as sent
if len(nodesArr) != nodeCount {
t.Fatal(fmt.Errorf("Expected %d number of nodes, got %d", nodeCount, len(nodesArr)))
}
//need to let it run for a little while, otherwise stopping it immediately can crash
//due to running nodes trying to connect to already stopped nodes
time.Sleep(1 * time.Second)
log.Info("Stop the network")
//stop the network
resp, err = http.Post(srv.URL+"/stop", "application/json", nil)
if err != nil {
t.Fatal(err)
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
t.Fatalf("err %s", resp.Status)
}
log.Info("Reset the network")
//reset the network (removes all nodes and connections)
resp, err = http.Post(srv.URL+"/reset", "application/json", nil)
if err != nil {
t.Fatal(err)
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
t.Fatalf("err %s", resp.Status)
}
}
//watch for events so we know when all nodes are up
func watchSimEvents(net *simulations.Network, ctx context.Context, trigger chan enode.ID) {
events := make(chan *simulations.Event)
sub := net.Events().Subscribe(events)
defer sub.Unsubscribe()
for {
select {
case ev := <-events:
//only catch node up events
if ev.Type == simulations.EventTypeNode {
if ev.Node.Up() {
log.Debug("got node up event", "event", ev, "node", ev.Node.Config.ID)
select {
case trigger <- ev.Node.Config.ID:
case <-ctx.Done():
return
}
}
}
case <-ctx.Done():
return
}
}
}