all: gofmt -w -s (#15419)

committed by Felix Lange
parent bfdc0fa362
commit 9619a61024
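
A note on the change itself: `gofmt -s` applies code simplifications on top of standard formatting, and the dominant rewrite in this diff is dropping the redundant element type from composite literals when the surrounding literal already implies it. A minimal before/after sketch (illustrative snippet, not taken from this commit):

package main

import "fmt"

func main() {
	// Before `gofmt -s`: the inner element type is spelled out redundantly.
	verbose := map[string][]string{
		"dir": []string{"dir1/", "dir2/"},
	}
	// After `gofmt -s`: the inner []string is inferred from the map type.
	simplified := map[string][]string{
		"dir": {"dir1/", "dir2/"},
	}
	fmt.Println(verbose, simplified)
}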
@@ -244,25 +244,25 @@ func TestClientFileList(t *testing.T) {
 	}
 
 	tests := map[string][]string{
-		"": []string{"dir1/", "dir2/", "file1.txt", "file2.txt"},
-		"file": []string{"file1.txt", "file2.txt"},
-		"file1": []string{"file1.txt"},
-		"file2.txt": []string{"file2.txt"},
-		"file12": []string{},
-		"dir": []string{"dir1/", "dir2/"},
-		"dir1": []string{"dir1/"},
-		"dir1/": []string{"dir1/file3.txt", "dir1/file4.txt"},
-		"dir1/file": []string{"dir1/file3.txt", "dir1/file4.txt"},
-		"dir1/file3.txt": []string{"dir1/file3.txt"},
-		"dir1/file34": []string{},
-		"dir2/": []string{"dir2/dir3/", "dir2/dir4/", "dir2/file5.txt"},
-		"dir2/file": []string{"dir2/file5.txt"},
-		"dir2/dir": []string{"dir2/dir3/", "dir2/dir4/"},
-		"dir2/dir3/": []string{"dir2/dir3/file6.txt"},
-		"dir2/dir4/": []string{"dir2/dir4/file7.txt", "dir2/dir4/file8.txt"},
-		"dir2/dir4/file": []string{"dir2/dir4/file7.txt", "dir2/dir4/file8.txt"},
-		"dir2/dir4/file7.txt": []string{"dir2/dir4/file7.txt"},
-		"dir2/dir4/file78": []string{},
+		"": {"dir1/", "dir2/", "file1.txt", "file2.txt"},
+		"file": {"file1.txt", "file2.txt"},
+		"file1": {"file1.txt"},
+		"file2.txt": {"file2.txt"},
+		"file12": {},
+		"dir": {"dir1/", "dir2/"},
+		"dir1": {"dir1/"},
+		"dir1/": {"dir1/file3.txt", "dir1/file4.txt"},
+		"dir1/file": {"dir1/file3.txt", "dir1/file4.txt"},
+		"dir1/file3.txt": {"dir1/file3.txt"},
+		"dir1/file34": {},
+		"dir2/": {"dir2/dir3/", "dir2/dir4/", "dir2/file5.txt"},
+		"dir2/file": {"dir2/file5.txt"},
+		"dir2/dir": {"dir2/dir3/", "dir2/dir4/"},
+		"dir2/dir3/": {"dir2/dir3/file6.txt"},
+		"dir2/dir4/": {"dir2/dir4/file7.txt", "dir2/dir4/file8.txt"},
+		"dir2/dir4/file": {"dir2/dir4/file7.txt", "dir2/dir4/file8.txt"},
+		"dir2/dir4/file7.txt": {"dir2/dir4/file7.txt"},
+		"dir2/dir4/file78": {},
 	}
 	for prefix, expected := range tests {
 		actual := ls(prefix)
@@ -50,7 +50,6 @@ data_{i} := size(subtree_{i}) || key_{j} || key_{j+1} .... || key_{j+n-1}
 The underlying hash function is configurable
 */
 
-
 /*
 Tree chunker is a concrete implementation of data chunking.
 This chunker works in a simple way: it builds a tree out of the document so that each node either represents a chunk of real data or a chunk representing a branching non-leaf node of the tree. In particular, each such non-leaf chunk is a concatenation of the hashes of its respective children. This scheme simultaneously guarantees data integrity as well as self-addressing. Abstract nodes are transparent since their represented size component is strictly greater than their maximum data size, since they encode a subtree.
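
The doc comment above encodes each non-leaf node as `size(subtree_i)` followed by the keys of its children. A minimal sketch of that layout, assuming an 8-byte little-endian size prefix and 32-byte child keys (both assumptions for illustration, not read from this diff):

package main

import (
	"encoding/binary"
	"fmt"
)

// encodeInternalChunk builds data_i := size(subtree_i) || key_j || ... || key_{j+n-1}.
func encodeInternalChunk(subtreeSize uint64, childKeys [][]byte) []byte {
	data := make([]byte, 8, 8+len(childKeys)*32)
	binary.LittleEndian.PutUint64(data, subtreeSize) // size(subtree_i)
	for _, key := range childKeys {
		data = append(data, key...) // key_j .. key_{j+n-1}
	}
	return data
}

func main() {
	k1 := make([]byte, 32)
	k2 := make([]byte, 32)
	fmt.Println(len(encodeInternalChunk(8192, [][]byte{k1, k2}))) // 8 + 2*32 = 72
}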
@@ -61,17 +60,17 @@ The hashing itself does use extra copies and allocation though, since it does ne
 
 var (
 	errAppendOppNotSuported = errors.New("Append operation not supported")
-	errOperationTimedOut = errors.New("operation timed out")
+	errOperationTimedOut    = errors.New("operation timed out")
 )
 
 type TreeChunker struct {
	branches int64
	hashFunc SwarmHasher
	// calculated
-	hashSize int64 // self.hashFunc.New().Size()
-	chunkSize int64 // hashSize* branches
-	workerCount int64 // the number of worker routines used
-	workerLock sync.RWMutex // lock for the worker count
+	hashSize    int64        // self.hashFunc.New().Size()
+	chunkSize   int64        // hashSize* branches
+	workerCount int64        // the number of worker routines used
+	workerLock  sync.RWMutex // lock for the worker count
 }
 
 func NewTreeChunker(params *ChunkerParams) (self *TreeChunker) {
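
The `workerCount`/`workerLock` pair in the struct above is a common Go pattern: a counter shared between goroutines, guarded by a `sync.RWMutex` so concurrent readers do not block each other. A self-contained sketch of that pattern (type and method names are hypothetical, not from this file):

package main

import (
	"fmt"
	"sync"
)

// workerPool mirrors the workerCount/workerLock fields above (illustrative only).
type workerPool struct {
	workerCount int64
	workerLock  sync.RWMutex
}

// getWorkerCount takes the read lock, so readers never block each other.
func (p *workerPool) getWorkerCount() int64 {
	p.workerLock.RLock()
	defer p.workerLock.RUnlock()
	return p.workerCount
}

// incWorkerCount takes the write lock for exclusive access to the counter.
func (p *workerPool) incWorkerCount() {
	p.workerLock.Lock()
	defer p.workerLock.Unlock()
	p.workerCount++
}

func main() {
	p := &workerPool{}
	p.incWorkerCount()
	fmt.Println(p.getWorkerCount()) // 1
}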
@@ -124,7 +123,6 @@ func (self *TreeChunker) Split(data io.Reader, size int64, chunkC chan *Chunk, s
 		panic("chunker must be initialised")
 	}
 
-
 	jobC := make(chan *hashJob, 2*ChunkProcessors)
 	wg := &sync.WaitGroup{}
 	errC := make(chan error)
@@ -164,7 +162,6 @@ func (self *TreeChunker) Split(data io.Reader, size int64, chunkC chan *Chunk, s
 		close(errC)
 	}()
 
-
 	defer close(quitC)
 	select {
 	case err := <-errC:
@@ -172,7 +169,7 @@ func (self *TreeChunker) Split(data io.Reader, size int64, chunkC chan *Chunk, s
 			return nil, err
 		}
 	case <-time.NewTimer(splitTimeout).C:
-		return nil,errOperationTimedOut
+		return nil, errOperationTimedOut
 	}
 
 	return key, nil
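
The hunk above is the standard channel-timeout `select`: return the first error received, or `errOperationTimedOut` once a timer fires. The same pattern in isolation (channel and timeout names are illustrative):

package main

import (
	"errors"
	"fmt"
	"time"
)

var errOperationTimedOut = errors.New("operation timed out")

// waitWithTimeout mirrors the select in Split above: it returns the first
// value from errC, or errOperationTimedOut once the timer fires.
func waitWithTimeout(errC <-chan error, timeout time.Duration) error {
	select {
	case err := <-errC:
		return err
	case <-time.NewTimer(timeout).C:
		return errOperationTimedOut
	}
}

func main() {
	errC := make(chan error)
	go func() { errC <- nil }() // worker finishes without error
	fmt.Println(waitWithTimeout(errC, time.Second)) // <nil>
}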
@@ -123,7 +123,7 @@ type PyramidChunker struct {
 	hashSize    int64
 	branches    int64
 	workerCount int64
-	workerLock sync.RWMutex
+	workerLock  sync.RWMutex
 }
 
 func NewPyramidChunker(params *ChunkerParams) (self *PyramidChunker) {
@@ -634,4 +634,4 @@ func (self *PyramidChunker) enqueueDataChunk(chunkData []byte, size uint64, pare
 
 	return pkey
 
-}
\ No newline at end of file
+}