package cluster

import (
	"encoding/json"
	"flag"
	"fmt"
	"io/ioutil"
	"os"
	"testing"

	"github.com/ethereum/go-ethereum/log"
	"github.com/ethersphere/swarm/simulation"
	colorable "github.com/mattn/go-colorable"
)

var (
	nodes    = flag.Int("nodes", 20, "number of nodes to create")
	loglevel = flag.Int("loglevel", 3, "verbosity of logs")
	rawlog   = flag.Bool("rawlog", false, "remove terminal formatting from logs")
)
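
// Note: the -nodes, -loglevel and -rawlog flags defined above can be set on
// the `go test` command line to tune a run; an illustrative invocation of a
// single subtest would be:
//
//	go test -run "TestCluster/docker" -args -nodes 10 -loglevel 4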

func init() {
	flag.Parse()
	log.PrintOrigins(true)
	log.Root().SetHandler(log.LvlFilterHandler(log.Lvl(*loglevel), log.StreamHandler(colorable.NewColorableStderr(), log.TerminalFormat(!*rawlog))))
}

func TestCluster(t *testing.T) {
	nodeCount := *nodes

	// Test exec adapter
	t.Run("exec", func(t *testing.T) {
		execPath := "../../../build/bin/swarm"

		if _, err := os.Stat(execPath); err != nil {
			if os.IsNotExist(err) {
				t.Skip("swarm binary not found. build it before running the test")
			}
		}

		tmpdir, err := ioutil.TempDir("", "test-sim-exec")
		if err != nil {
			t.Fatal(err)
		}
		defer os.RemoveAll(tmpdir)
		adapter, err := simulation.NewExecAdapter(simulation.ExecAdapterConfig{
			ExecutablePath:    execPath,
			BaseDataDirectory: tmpdir,
		})
		if err != nil {
			t.Fatalf("could not create exec adapter: %v", err)
		}
		startSimulation(t, adapter, nodeCount)
	})

	// Test docker adapter
	t.Run("docker", func(t *testing.T) {
		config := simulation.DefaultDockerAdapterConfig()
		if !simulation.IsDockerAvailable(config.DaemonAddr) {
			t.Skip("docker is not available, skipping test")
		}
		config.DockerImage = "ethersphere/swarm:edge"
		adapter, err := simulation.NewDockerAdapter(config)
		if err != nil {
			t.Fatalf("could not create docker adapter: %v", err)
		}
		startSimulation(t, adapter, nodeCount)
	})

	// Test kubernetes adapter
	t.Run("kubernetes", func(t *testing.T) {
		config := simulation.DefaultKubernetesAdapterConfig()
		if !simulation.IsKubernetesAvailable(config.KubeConfigPath) {
			t.Skip("kubernetes is not available, skipping test")
		}
		config.Namespace = "simulation-test"
		config.DockerImage = "ethersphere/swarm:edge"
		adapter, err := simulation.NewKubernetesAdapter(config)
		if err != nil {
			t.Fatalf("could not create kubernetes adapter: %v", err)
		}
		startSimulation(t, adapter, nodeCount)
	})
}

func startSimulation(t *testing.T, adapter simulation.Adapter, count int) {
	sim := simulation.NewSimulation(adapter)

	defer sim.StopAll()

	// Common args used by all nodes
	commonArgs := []string{
		"--bzznetworkid", "599",
	}

	// Start a cluster with 'count' nodes and a bootnode
	nodes, err := sim.CreateClusterWithBootnode("test", count, commonArgs)
	if err != nil {
		t.Fatal(err)
	}

	// Wait for all nodes to be considered healthy
	err = sim.WaitForHealthyNetwork()
	if err != nil {
		t.Errorf("Failed to get healthy network: %v", err)
	}

	// Check hive output on the first node
	client, err := sim.RPCClient(nodes[0].Info().ID)
	if err != nil {
		t.Errorf("Failed to get rpc client: %v", err)
	}

	var hive string
	err = client.Call(&hive, "bzz_hive")
	if err != nil {
		t.Errorf("could not get hive info: %v", err)
	}

	snap, err := sim.Snapshot()
	if err != nil {
		t.Error(err)
	}

	b, err := json.Marshal(snap)
	if err != nil {
		t.Error(err)
	}
	fmt.Println(string(b))

	fmt.Println(hive)
}
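
// writeSnapshotFile is an illustrative sketch and is not called by the test
// above: it shows how the JSON-encoded snapshot that startSimulation prints
// to stdout could instead be persisted for later inspection. It uses only
// packages already imported in this file; the path argument and file mode
// are arbitrary example values.
func writeSnapshotFile(path string, snap interface{}) error {
	// Encode the snapshot (e.g. the value returned by sim.Snapshot()) as
	// JSON, the same encoding printed by startSimulation.
	b, err := json.Marshal(snap)
	if err != nil {
		return fmt.Errorf("could not marshal snapshot: %v", err)
	}
	// Write the encoded snapshot to disk.
	return ioutil.WriteFile(path, b, 0644)
}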