swarm: ctx propagation; bmt fixes; pss generic notification framework (#17150)

* cmd/swarm: minor cli flag text adjustments

* swarm/api/http: sticky footer for swarm landing page using flex

* swarm/api/http: sticky footer for error pages and fix for multiple choices

* cmd/swarm, swarm/storage, swarm: fix MinGW on Windows test issues

* cmd/swarm: update description of swarm cmd

* swarm: added network ID test

* cmd/swarm: support for smoke tests on the production swarm cluster

* cmd/swarm/swarm-smoke: simplify cluster logic as per suggestion

* swarm: propagate ctx to internal APIs (#754)

* swarm/metrics: collect disk measurements

* swarm/bmt: fix io.Writer interface (a generic sketch of the required buffering pattern follows this commit list)

  * Write now tolerates buffers of arbitrary, variable size
  * added variable-buffer tests
  * Write loop and finalise optimisation
  * refactor / rename
  * added tests for empty input

* swarm/pss: (UPDATE) Generic notifications package (#744)

swarm/pss: Generic package for creating pss notification services

* swarm: Adding context to more functions

* swarm/api: change colour of landing page in templates

* swarm/api: change landing page to react to enter keypress
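
Regarding the bmt Write fix listed above: the io.Writer contract lets callers split their input into slices of any length, so a hashing Writer has to buffer partial sections itself rather than assume fixed-size writes. A minimal sketch of that buffering pattern, with made-up names (sectionWriter, process), not the actual swarm/bmt hasher:

	package example

	// sectionWriter illustrates an io.Writer that tolerates buffers of
	// arbitrary size: it accumulates bytes into fixed-size sections and
	// hands each full section to process (a stand-in for hashing it).
	type sectionWriter struct {
		section     []byte       // partially filled section
		sectionSize int          // fixed section length
		process     func([]byte) // called once per completed section
	}

	// Write implements io.Writer; p may be of any length, including zero.
	func (w *sectionWriter) Write(p []byte) (int, error) {
		written := len(p)
		for len(p) > 0 {
			need := w.sectionSize - len(w.section)
			if need > len(p) {
				need = len(p)
			}
			w.section = append(w.section, p[:need]...)
			p = p[need:]
			if len(w.section) == w.sectionSize {
				w.process(w.section)
				w.section = w.section[:0]
			}
		}
		return written, nil
	}

A real hasher would additionally have to finalise a trailing partial section when the digest is requested, which this sketch omits.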
Authored by Anton Evangelatov on 2018-07-09 14:11:49 +02:00, committed by Balint Gabor
parent 30bdf817a0
commit b3711af051
49 changed files with 1630 additions and 488 deletions

@@ -250,7 +250,7 @@ func (r *TestRegistry) APIs() []rpc.API {
 }
 func readAll(fileStore *storage.FileStore, hash []byte) (int64, error) {
-	r, _ := fileStore.Retrieve(hash)
+	r, _ := fileStore.Retrieve(context.TODO(), hash)
 	buf := make([]byte, 1024)
 	var n int
 	var total int64
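
The hunk above is representative of the ctx-propagation part of this change: internal storage APIs now take a context.Context as their first argument (still context.TODO() at the call sites for now), so callers can later bound or cancel the work. A self-contained illustration of the pattern, using a made-up fetch helper rather than a swarm API:

	package main

	import (
		"context"
		"fmt"
		"time"
	)

	// fetch stands in for an internal API that now accepts a context as its
	// first argument, mirroring fileStore.Retrieve(ctx, hash) above.
	func fetch(ctx context.Context, key string) (string, error) {
		select {
		case <-time.After(50 * time.Millisecond): // simulated retrieval work
			return "value-for-" + key, nil
		case <-ctx.Done(): // caller cancelled or the deadline passed
			return "", ctx.Err()
		}
	}

	func main() {
		ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond)
		defer cancel()

		if _, err := fetch(ctx, "some-hash"); err != nil {
			fmt.Println("retrieval aborted:", err) // context.DeadlineExceeded
		}
	}

The point of threading the argument through now is that the TODO contexts can later be replaced with request-scoped ones without further signature changes.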

@@ -345,9 +345,13 @@ func testDeliveryFromNodes(t *testing.T, nodes, conns, chunkCount int, skipCheck
 	// here we distribute chunks of a random file into Stores of nodes 1 to nodes
 	rrFileStore := storage.NewFileStore(newRoundRobinStore(sim.Stores[1:]...), storage.NewFileStoreParams())
 	size := chunkCount * chunkSize
-	fileHash, wait, err := rrFileStore.Store(io.LimitReader(crand.Reader, int64(size)), int64(size), false)
+	ctx := context.TODO()
+	fileHash, wait, err := rrFileStore.Store(ctx, io.LimitReader(crand.Reader, int64(size)), int64(size), false)
 	// wait until all chunks stored
-	wait()
 	if err != nil {
 		t.Fatal(err.Error())
 	}
+	err = wait(ctx)
+	if err != nil {
+		t.Fatal(err.Error())
+	}
@@ -627,9 +631,13 @@ Loop:
 	hashes := make([]storage.Address, chunkCount)
 	for i := 0; i < chunkCount; i++ {
 		// create actual size real chunks
-		hash, wait, err := remoteFileStore.Store(io.LimitReader(crand.Reader, int64(chunkSize)), int64(chunkSize), false)
+		ctx := context.TODO()
+		hash, wait, err := remoteFileStore.Store(ctx, io.LimitReader(crand.Reader, int64(chunkSize)), int64(chunkSize), false)
 		if err != nil {
 			b.Fatalf("expected no error. got %v", err)
 		}
 		// wait until all chunks stored
-		wait()
+		err = wait(ctx)
+		if err != nil {
+			b.Fatalf("expected no error. got %v", err)
+		}
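
The storage-side pattern repeated in the hunks above and below: Store takes the context up front, its error is checked before waiting, and the returned wait closure now takes a context and returns an error. A small sketch of the resulting upload flow, assuming only the signatures visible in these diffs (storeAndWait and the example package name are illustrative):

	package example

	import (
		"context"
		"io"

		"github.com/ethereum/go-ethereum/swarm/storage"
	)

	// storeAndWait sketches the post-change upload flow: Store takes the
	// context up front, its error is checked first, and the returned wait
	// closure is itself context-aware and returns an error.
	func storeAndWait(ctx context.Context, fs *storage.FileStore, r io.Reader, size int64) (storage.Address, error) {
		hash, wait, err := fs.Store(ctx, r, size, false)
		if err != nil {
			return hash, err
		}
		// block until all chunks are stored, or until ctx is cancelled
		return hash, wait(ctx)
	}

Making wait context-aware is what lets a caller abandon an upload that never finishes syncing instead of blocking forever.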

@@ -117,8 +117,12 @@ func testIntervals(t *testing.T, live bool, history *Range, skipCheck bool) {
 	fileStore := storage.NewFileStore(sim.Stores[0], storage.NewFileStoreParams())
 	size := chunkCount * chunkSize
-	_, wait, err := fileStore.Store(io.LimitReader(crand.Reader, int64(size)), int64(size), false)
-	wait()
+	ctx := context.TODO()
+	_, wait, err := fileStore.Store(ctx, io.LimitReader(crand.Reader, int64(size)), int64(size), false)
 	if err != nil {
 		t.Fatal(err)
 	}
+	err = wait(ctx)
+	if err != nil {
+		t.Fatal(err)
+	}

@@ -410,7 +410,7 @@ func runFileRetrievalTest(nodeCount int) error {
 	fileStore := registries[id].fileStore
 	//check all chunks
 	for i, hash := range conf.hashes {
-		reader, _ := fileStore.Retrieve(hash)
+		reader, _ := fileStore.Retrieve(context.TODO(), hash)
 		//check that we can read the file size and that it corresponds to the generated file size
 		if s, err := reader.Size(nil); err != nil || s != int64(len(randomFiles[i])) {
 			allSuccess = false
@@ -697,7 +697,7 @@ func runRetrievalTest(chunkCount int, nodeCount int) error {
 	fileStore := registries[id].fileStore
 	//check all chunks
 	for _, chnk := range conf.hashes {
-		reader, _ := fileStore.Retrieve(chnk)
+		reader, _ := fileStore.Retrieve(context.TODO(), chnk)
 		//assuming that reading the Size of the chunk is enough to know we found it
 		if s, err := reader.Size(nil); err != nil || s != chunkSize {
 			allSuccess = false
@@ -765,9 +765,13 @@ func uploadFilesToNodes(nodes []*simulations.Node) ([]storage.Address, []string,
 			return nil, nil, err
 		}
 		//store it (upload it) on the FileStore
-		rk, wait, err := fileStore.Store(strings.NewReader(rfiles[i]), int64(len(rfiles[i])), false)
+		ctx := context.TODO()
+		rk, wait, err := fileStore.Store(ctx, strings.NewReader(rfiles[i]), int64(len(rfiles[i])), false)
 		log.Debug("Uploaded random string file to node")
-		wait()
 		if err != nil {
 			return nil, nil, err
 		}
+		err = wait(ctx)
+		if err != nil {
+			return nil, nil, err
+		}

@@ -581,8 +581,12 @@ func uploadFileToSingleNodeStore(id discover.NodeID, chunkCount int) ([]storage.
 	fileStore := storage.NewFileStore(lstore, storage.NewFileStoreParams())
 	var rootAddrs []storage.Address
 	for i := 0; i < chunkCount; i++ {
-		rk, wait, err := fileStore.Store(io.LimitReader(crand.Reader, int64(size)), int64(size), false)
-		wait()
+		ctx := context.TODO()
+		rk, wait, err := fileStore.Store(ctx, io.LimitReader(crand.Reader, int64(size)), int64(size), false)
 		if err != nil {
 			return nil, err
 		}
+		err = wait(ctx)
+		if err != nil {
+			return nil, err
+		}

@@ -202,9 +202,12 @@ func testSyncBetweenNodes(t *testing.T, nodes, conns, chunkCount int, skipCheck
 	// here we distribute chunks of a random file into stores 1...nodes
 	rrFileStore := storage.NewFileStore(newRoundRobinStore(sim.Stores[1:]...), storage.NewFileStoreParams())
 	size := chunkCount * chunkSize
-	_, wait, err := rrFileStore.Store(io.LimitReader(crand.Reader, int64(size)), int64(size), false)
+	_, wait, err := rrFileStore.Store(ctx, io.LimitReader(crand.Reader, int64(size)), int64(size), false)
 	if err != nil {
 		t.Fatal(err.Error())
 	}
 	// need to wait cos we then immediately collect the relevant bin content
-	wait()
+	wait(ctx)
+	if err != nil {
+		t.Fatal(err.Error())
+	}