cmd/swarm: fix resource leaks in tests (#19443)

* swarm/api: fix file descriptor leak in NewTestSwarmServer

Swarm storage (localstore) was not closed. That resulted in a
"too many open files" error if `TestClientUploadDownloadRawEncrypted`
was run with `-count 1000`.
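The fix boils down to closing the backing localstore when the test server is torn down. A minimal sketch of the idea; the type and field names below are illustrative placeholders, not the exact swarm/api API:

    // Sketch only: ties the store's lifetime to the test server's Close.
    package api

    import (
        "log"
        "net/http/httptest"
    )

    type TestSwarmServer struct {
        *httptest.Server
        closeStore func() error // closes the underlying localstore (LevelDB files)
    }

    func (s *TestSwarmServer) Close() {
        s.Server.Close()
        // Without this, every NewTestSwarmServer call leaks the store's open
        // file descriptors, which adds up under `go test -count 1000`.
        if err := s.closeStore(); err != nil {
            log.Println("closing localstore:", err)
        }
    }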

* cmd/swarm: speed up StartNewNodes() by parallelization

Reduce cluster startup time from 13s to 7s.

* swarm/api: disable flaky TestClientUploadDownloadRawEncrypted with -race

* swarm/storage: disable flaky TestLDBStoreCollectGarbage (-race)

With race detection turned on, the disabled cases often fail with:
"ldbstore_test.go:535: expected surplus chunk 150 to be missing, but got no error"

* cmd/swarm: fix process leak in TestACT and TestSwarmUp

On each test run we start 3 nodes, but we did not terminate them. So
those 3 nodes kept consuming 1.2GB of memory (3.4GB with -race) after
test completion.

Commit 6b6c4d1c27 changed how we start clusters
to speed up tests. The changeset merged test cases together
and introduced a global cluster, but "forgot" about termination.

Let's get rid of the "global cluster" so that termination has a clear
owner (at the cost of some test time), while still letting subtests
share the same cluster.
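In practice that means each top-level test now constructs its own cluster and defers the shutdown, while its subtests keep sharing that one instance. Roughly; the subtest names and the testUpload helper below are placeholders, not the real TestACT/TestSwarmUp bodies:

    func TestSwarmUp(t *testing.T) {
        // The test is the sole owner of the cluster, so Shutdown reliably
        // runs when the whole test (including all subtests) finishes.
        cluster := newTestCluster(t, clusterSize)
        defer cluster.Shutdown()

        // Subtests reuse the same cluster instead of a package-level global.
        t.Run("NoEncryption", func(t *testing.T) { testUpload(t, cluster, false) }) // placeholder helper
        t.Run("Encrypted", func(t *testing.T) { testUpload(t, cluster, true) })     // placeholder helper
    }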
Author: Ferenc Szabo, 2019-04-11 12:44:15 +02:00
Committed by: Viktor Trón
Parent: 54dfce8af7
Commit: 26b50e3ebe
6 changed files with 97 additions and 66 deletions

@@ -59,15 +59,6 @@ func init() {
 
 const clusterSize = 3
 
-var clusteronce sync.Once
-var cluster *testCluster
-
-func initCluster(t *testing.T) {
-	clusteronce.Do(func() {
-		cluster = newTestCluster(t, clusterSize)
-	})
-}
-
 func serverFunc(api *api.API) swarmhttp.TestServer {
 	return swarmhttp.NewServer(api, "")
 }
@@ -165,10 +156,8 @@ outer:
 }
 
 func (c *testCluster) Shutdown() {
-	for _, node := range c.Nodes {
-		node.Shutdown()
-	}
-	os.RemoveAll(c.TmpDir)
+	c.Stop()
+	c.Cleanup()
 }
 
 func (c *testCluster) Stop() {
@@ -179,16 +168,35 @@ func (c *testCluster) Stop() {
 func (c *testCluster) StartNewNodes(t *testing.T, size int) {
 	c.Nodes = make([]*testNode, 0, size)
+
+	errors := make(chan error, size)
+	nodes := make(chan *testNode, size)
 	for i := 0; i < size; i++ {
-		dir := filepath.Join(c.TmpDir, fmt.Sprintf("swarm%02d", i))
-		if err := os.Mkdir(dir, 0700); err != nil {
-			t.Fatal(err)
-		}
-
-		node := newTestNode(t, dir)
-		node.Name = fmt.Sprintf("swarm%02d", i)
-		c.Nodes = append(c.Nodes, node)
+		go func(nodeIndex int) {
+			dir := filepath.Join(c.TmpDir, fmt.Sprintf("swarm%02d", nodeIndex))
+			if err := os.Mkdir(dir, 0700); err != nil {
+				errors <- err
+				return
+			}
+
+			node := newTestNode(t, dir)
+			node.Name = fmt.Sprintf("swarm%02d", nodeIndex)
+			nodes <- node
+		}(i)
+	}
+
+	for i := 0; i < size; i++ {
+		select {
+		case node := <-nodes:
+			c.Nodes = append(c.Nodes, node)
+		case err := <-errors:
+			t.Error(err)
+		}
+	}
+
+	if t.Failed() {
+		c.Shutdown()
+		t.FailNow()
 	}
 }
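The StartNewNodes change above is a plain fan-out/fan-in: one goroutine per node, results and errors funneled back over channels buffered to the node count, and a collection loop that drains exactly size messages. The same pattern, stripped of the swarm specifics into a standalone runnable sketch:

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        const size = 3
        nodes := make(chan string, size) // buffered so no sender ever blocks
        errors := make(chan error, size)

        // Fan-out: start every "node" concurrently.
        for i := 0; i < size; i++ {
            go func(n int) {
                time.Sleep(10 * time.Millisecond) // stand-in for node startup work
                if n == 1 {
                    errors <- fmt.Errorf("node %d failed to start", n)
                    return
                }
                nodes <- fmt.Sprintf("swarm%02d", n)
            }(i)
        }

        // Fan-in: each goroutine sends exactly one message, so draining size
        // messages (from either channel) joins them all before moving on.
        for i := 0; i < size; i++ {
            select {
            case name := <-nodes:
                fmt.Println("started", name)
            case err := <-errors:
                fmt.Println("error:", err)
            }
        }
    }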