swarm: remove unused/dead code (#18351)
committed by GitHub
parent 335760bf06
commit 9e9fc87e70
@@ -65,10 +65,6 @@ If all is well it is possible to implement this by simply composing readers so t
 The hashing itself does use extra copies and allocation though, since it does need it.
 */
 
-var (
-	errAppendOppNotSuported = errors.New("Append operation not supported")
-)
-
 type ChunkerParams struct {
 	chunkSize int64
 	hashSize  int64
@@ -99,7 +95,6 @@ type TreeChunker struct {
 	ctx context.Context
 
 	branches int64
 	hashFunc SwarmHasher
 	dataSize int64
 	data     io.Reader
 	// calculated
@@ -365,10 +360,6 @@ func (tc *TreeChunker) runWorker(ctx context.Context) {
 	}()
 }
 
-func (tc *TreeChunker) Append() (Address, func(), error) {
-	return nil, nil, errAppendOppNotSuported
-}
-
 // LazyChunkReader implements LazySectionReader
 type LazyChunkReader struct {
 	ctx context.Context
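Note: TreeChunker.Append was a stub that did nothing but return errAppendOppNotSuported, and that error variable (removed in the first hunk above) appears to have had no other users, so the pair goes away together. The misspelled identifier ("Opp", "Suported") is itself a hint that nothing ever exercised this path.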
@@ -411,7 +402,6 @@ func (r *LazyChunkReader) Size(ctx context.Context, quitC chan bool) (n int64, e
 
 	log.Debug("lazychunkreader.size", "addr", r.addr)
 	if r.chunkData == nil {
-
 		startTime := time.Now()
 		chunkData, err := r.getter.Get(cctx, Reference(r.addr))
 		if err != nil {
@@ -420,13 +410,8 @@ func (r *LazyChunkReader) Size(ctx context.Context, quitC chan bool) (n int64, e
 		}
 		metrics.GetOrRegisterResettingTimer("lcr.getter.get", nil).UpdateSince(startTime)
 		r.chunkData = chunkData
-		s := r.chunkData.Size()
-		log.Debug("lazychunkreader.size", "key", r.addr, "size", s)
-		if s < 0 {
-			return 0, errors.New("corrupt size")
-		}
-		return int64(s), nil
 	}
+
 	s := r.chunkData.Size()
 	log.Debug("lazychunkreader.size", "key", r.addr, "size", s)
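Note: the removed branch was doubly dead. ChunkData.Size() returns a uint64 (see the last hunk of this commit), so the "s < 0" guard could never fire, and the code after the closing brace computed and logged the same size anyway. A minimal, self-contained illustration of the unsigned-comparison trap (plain Go, not Swarm code; linters such as staticcheck flag this pattern):

	package main

	import "fmt"

	func main() {
		var s uint64 = 42
		// An unsigned value can never be negative, so this comparison is
		// constant-false and the guarded branch is unreachable.
		if s < 0 {
			fmt.Println("corrupt size") // never runs
		}
		fmt.Println(s < 0) // always prints false
	}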
@@ -64,16 +64,6 @@ func (db *LDBDatabase) Delete(key []byte) error {
 	return db.db.Delete(key, nil)
 }
 
-func (db *LDBDatabase) LastKnownTD() []byte {
-	data, _ := db.Get([]byte("LTD"))
-
-	if len(data) == 0 {
-		data = []byte{0x0}
-	}
-
-	return data
-}
-
 func (db *LDBDatabase) NewIterator() iterator.Iterator {
 	metrics.GetOrRegisterCounter("ldbdatabase.newiterator", nil).Inc(1)
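Note: LastKnownTD ("LTD" presumably short for last total difficulty) reads like a leftover from an eth-style chain database API; nothing in the swarm storage layer calls it, so the whole helper is dropped.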
@@ -23,23 +23,15 @@ import (
 const (
 	ErrInit = iota
 	ErrNotFound
 	ErrIO
 	ErrUnauthorized
 	ErrInvalidValue
 	ErrDataOverflow
 	ErrNothingToReturn
 	ErrCorruptData
 	ErrInvalidSignature
 	ErrNotSynced
 	ErrPeriodDepth
 	ErrCnt
 )
 
 var (
-	ErrChunkNotFound    = errors.New("chunk not found")
-	ErrFetching         = errors.New("chunk still fetching")
-	ErrChunkInvalid     = errors.New("invalid chunk")
-	ErrChunkForward     = errors.New("cannot forward")
-	ErrChunkUnavailable = errors.New("chunk unavailable")
-	ErrChunkTimeout     = errors.New("timeout")
+	ErrChunkNotFound = errors.New("chunk not found")
+	ErrChunkInvalid  = errors.New("invalid chunk")
 )
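Note: ErrChunkNotFound and ErrChunkInvalid appear on both sides of the diff even though their definitions did not change. That is gofmt at work: deleting ErrChunkUnavailable, the longest name in the block, tightens the "=" alignment column, so the surviving lines are rewritten too. The hunk counts (23 lines shrinking to 15, with those two lines re-added) also imply that four of the iota status codes above were dropped as unused. A small, compilable illustration of the alignment effect (not Swarm code):

	package main

	import (
		"errors"
		"fmt"
	)

	// Before the removal, gofmt aligned "=" to the longest name in the block:
	//
	//	ErrChunkNotFound    = errors.New("chunk not found")
	//	ErrChunkUnavailable = errors.New("chunk unavailable")
	//
	// Once the longer name is gone, gofmt tightens the column, so even an
	// untouched declaration shows up as removed-and-re-added in the diff.
	var ErrChunkNotFound = errors.New("chunk not found")

	func main() {
		fmt.Println(ErrChunkNotFound)
	}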
@@ -248,10 +248,6 @@ func U64ToBytes(val uint64) []byte {
 	return data
 }
 
-func (s *LDBStore) updateIndexAccess(index *dpaDBIndex) {
-	index.Access = s.accessCnt
-}
-
 func getIndexKey(hash Address) []byte {
 	hashSize := len(hash)
 	key := make([]byte, hashSize+1)
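Note: updateIndexAccess merely copied the store's global access counter into an index entry; with no remaining callers, the three-line helper is pure dead weight.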
@@ -777,18 +773,6 @@ func (s *LDBStore) BinIndex(po uint8) uint64 {
 	return s.bucketCnt[po]
 }
 
-func (s *LDBStore) Size() uint64 {
-	s.lock.RLock()
-	defer s.lock.RUnlock()
-	return s.entryCnt
-}
-
-func (s *LDBStore) CurrentStorageIndex() uint64 {
-	s.lock.RLock()
-	defer s.lock.RUnlock()
-	return s.dataIdx
-}
-
 // Put adds a chunk to the database, adding indices and incrementing global counters.
 // If it already exists, it merely increments the access count of the existing entry.
 // Is thread safe
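Note: Size and CurrentStorageIndex were lock-guarded getters over entryCnt and dataIdx with no remaining callers; dropping them shrinks the exported API surface rather than carrying accessors nobody uses.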
@@ -810,11 +794,11 @@ func (s *LDBStore) Put(ctx context.Context, chunk Chunk) error {
 	batch := s.batch
 
 	log.Trace("ldbstore.put: s.db.Get", "key", chunk.Address(), "ikey", fmt.Sprintf("%x", ikey))
-	idata, err := s.db.Get(ikey)
+	_, err := s.db.Get(ikey)
 	if err != nil {
 		s.doPut(chunk, &index, po)
 	}
-	idata = encodeIndex(&index)
+	idata := encodeIndex(&index)
 	s.batch.Put(ikey, idata)
 
 	// add the access-chunkindex index for garbage collection
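Note: in Put, the value returned by s.db.Get was bound to idata but never read: encodeIndex unconditionally overwrote it a few lines later. Only the error result mattered, so it now goes to the blank identifier and idata is declared at the point it is actually produced. A generic sketch of the dead-store pattern (hypothetical lookup function, not the Swarm API; staticcheck reports these):

	package main

	import "fmt"

	// lookup stands in for a store read returning a value and an error.
	func lookup() ([]byte, error) { return []byte("stale"), nil }

	func main() {
		// Dead store: the first value bound to the variable is never read
		// before being overwritten, so only the error is worth keeping.
		_, err := lookup()
		if err != nil {
			fmt.Println("miss: take the insert path")
		}
		idata := []byte("fresh") // declared where it is really produced
		fmt.Println(string(idata))
	}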
@@ -79,14 +79,6 @@ func testPoFunc(k Address) (ret uint8) {
 	return uint8(Proximity(basekey, k[:]))
 }
 
-func (db *testDbStore) close() {
-	db.Close()
-	err := os.RemoveAll(db.dir)
-	if err != nil {
-		panic(err)
-	}
-}
-
 func testDbStoreRandom(n int, chunksize int64, mock bool, t *testing.T) {
 	db, cleanup, err := newTestDbStore(mock, true)
 	defer cleanup()
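Note: newTestDbStore already hands each test a cleanup function (see the kept "defer cleanup()" just above), so the close method on testDbStore duplicated that teardown and had fallen out of use.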
@@ -453,7 +445,7 @@ func TestLDBStoreAddRemove(t *testing.T) {
 	log.Info("ldbstore", "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt)
 
 	for i := 0; i < n; i++ {
-		ret, err := ldb.Get(nil, chunks[i].Address())
+		ret, err := ldb.Get(context.TODO(), chunks[i].Address())
 
 		if i%2 == 0 {
 			// expect even chunks to be missing
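Note: the test previously passed a nil context to Get. context.TODO() is the standard placeholder when no real context has been plumbed through yet, and unlike nil it is always safe for the callee to inspect. A small self-contained sketch (the get function here is hypothetical):

	package main

	import (
		"context"
		"fmt"
	)

	// get stands in for a store read that honors cancellation.
	func get(ctx context.Context, key string) (string, error) {
		select {
		case <-ctx.Done(): // calling methods on a nil context would panic here
			return "", ctx.Err()
		default:
			return "value-for-" + key, nil
		}
	}

	func main() {
		v, err := get(context.TODO(), "chunk")
		fmt.Println(v, err)
	}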
@@ -103,13 +103,6 @@ type Exporter interface {
 	Export(w io.Writer) (n int, err error)
 }
 
-// ImportExporter is an interface for importing and exporting
-// mock store data to and from a tar archive.
-type ImportExporter interface {
-	Importer
-	Exporter
-}
-
 // ExportedChunk is the structure that is saved in tar archive for
 // each chunk as JSON-encoded bytes.
 type ExportedChunk struct {
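Note: Importer and Exporter stay; only their unused combination is deleted. Go convention is to compose interfaces at the point of use, so a future caller that needs both can simply declare interface{ Importer; Exporter } locally.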
@@ -71,11 +71,6 @@ const (
 	splitTimeout = time.Minute * 5
 )
 
-const (
-	DataChunk = 0
-	TreeChunk = 1
-)
-
 type PyramidSplitterParams struct {
 	SplitterParams
 	getter Getter
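Note: the DataChunk and TreeChunk constants apparently distinguished chunk kinds in an earlier pyramid-chunker design; nothing references them any longer.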
@@ -23,7 +23,6 @@ import (
 	"crypto/rand"
 	"encoding/binary"
 	"fmt"
-	"hash"
 	"io"
 
 	"github.com/ethereum/go-ethereum/common"
@@ -35,50 +34,10 @@ import (
 const MaxPO = 16
 const AddressLength = 32
 
-type Hasher func() hash.Hash
 type SwarmHasher func() SwarmHash
 
-// Peer is the recorded as Source on the chunk
-// should probably not be here? but network should wrap chunk object
-type Peer interface{}
-
 type Address []byte
 
-func (a Address) Size() uint {
-	return uint(len(a))
-}
-
-func (a Address) isEqual(y Address) bool {
-	return bytes.Equal(a, y)
-}
-
-func (a Address) bits(i, j uint) uint {
-	ii := i >> 3
-	jj := i & 7
-	if ii >= a.Size() {
-		return 0
-	}
-
-	if jj+j <= 8 {
-		return uint((a[ii] >> jj) & ((1 << j) - 1))
-	}
-
-	res := uint(a[ii] >> jj)
-	jj = 8 - jj
-	j -= jj
-	for j != 0 {
-		ii++
-		if j < 8 {
-			res += uint(a[ii]&((1<<j)-1)) << jj
-			return res
-		}
-		res += uint(a[ii]) << jj
-		jj += 8
-		j -= 8
-	}
-	return res
-}
-
 // Proximity(x, y) returns the proximity order of the MSB distance between x and y
 //
 // The distance metric MSB(x, y) of two equal length byte sequences x an y is the
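Note: the removed Address.bits helper read j bits of the address starting at absolute bit offset i, LSB-first within each byte and across byte boundaries; it had no callers left, and neither did isEqual, Size, the Peer placeholder, or the Hasher alias (whose removal is presumably what also drops the "hash" import in the previous hunk). A hypothetical re-statement of bits under illustrative names:

	package main

	import "fmt"

	// bitsAt restates the removed Address.bits helper: collect j bits of a,
	// starting at absolute bit offset i, reading LSB-first within each byte.
	func bitsAt(a []byte, i, j uint) uint {
		var res uint
		for k := uint(0); k < j; k++ {
			idx := (i + k) / 8
			if idx >= uint(len(a)) {
				break
			}
			res |= uint((a[idx]>>((i+k)%8))&1) << k
		}
		return res
	}

	func main() {
		a := []byte{0xb4} // 0b1011_0100
		fmt.Println(bitsAt(a, 2, 4)) // bits 2..5, LSB-first: 0b1101 = 13
	}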
@@ -112,10 +71,6 @@ func Proximity(one, other []byte) (ret int) {
 	return MaxPO
 }
 
-func IsZeroAddr(addr Address) bool {
-	return len(addr) == 0 || bytes.Equal(addr, ZeroAddr)
-}
-
 var ZeroAddr = Address(common.Hash{}.Bytes())
 
 func MakeHashFunc(hash string) SwarmHasher {
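Note: Proximity itself survives; as its comment says, it returns the proximity order of two byte sequences, i.e. the length of their common bit prefix, capped at MaxPO. A minimal sketch of that metric under illustrative names (Swarm's real implementation differs in detail):

	package main

	import (
		"fmt"
		"math/bits"
	)

	const maxPO = 16 // cap, mirroring MaxPO above

	// proximity counts the leading bits on which x and y agree.
	func proximity(x, y []byte) int {
		for i := 0; i < len(x) && i < len(y); i++ {
			if d := x[i] ^ y[i]; d != 0 {
				if po := i*8 + bits.LeadingZeros8(d); po < maxPO {
					return po
				}
				return maxPO
			}
		}
		return maxPO
	}

	func main() {
		fmt.Println(proximity([]byte{0x80}, []byte{0x00})) // 0: first bit differs
		fmt.Println(proximity([]byte{0x0f}, []byte{0x0e})) // 7: seven leading bits agree
	}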
@@ -304,10 +259,6 @@ func (c ChunkData) Size() uint64 {
 	return binary.LittleEndian.Uint64(c[:8])
 }
 
-func (c ChunkData) Data() []byte {
-	return c[8:]
-}
-
 type ChunkValidator interface {
 	Validate(chunk Chunk) bool
 }
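Note: the kept Size method documents the ChunkData layout: an 8-byte little-endian uint64 length header followed by the payload, which is exactly what the removed Data accessor exposed as c[8:]. A self-contained sketch of that framing (plain Go, illustrative only):

	package main

	import (
		"encoding/binary"
		"fmt"
	)

	func main() {
		// Build a chunk: 8-byte little-endian length header, then payload.
		payload := []byte("hello")
		chunk := make([]byte, 8+len(payload))
		binary.LittleEndian.PutUint64(chunk[:8], uint64(len(payload)))
		copy(chunk[8:], payload)

		fmt.Println(binary.LittleEndian.Uint64(chunk[:8])) // 5, as Size() reads it
		fmt.Println(string(chunk[8:]))                     // "hello", as Data() returned it
	}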