swarm: codebase split from go-ethereum (#1405)

Rafael Matias
2019-06-03 12:28:18 +02:00
committed by Anton Evangelatov
parent 7a22da98b9
commit b046760db1
1540 changed files with 4654 additions and 129393 deletions

311
shed/db.go Normal file

@@ -0,0 +1,311 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
// Package shed provides simple abstraction components to compose
// more complex operations on storage data organized in fields and indexes.
//
// The only type that holds logical information about swarm storage chunk
// data and metadata is Item. This part is not generalized, mostly for
// performance reasons.
package shed
import (
"fmt"
"strconv"
"strings"
"time"
"github.com/ethereum/go-ethereum/metrics"
"github.com/ethersphere/swarm/log"
"github.com/syndtr/goleveldb/leveldb"
"github.com/syndtr/goleveldb/leveldb/iterator"
"github.com/syndtr/goleveldb/leveldb/opt"
)
const (
openFileLimit = 128 // The limit for LevelDB OpenFilesCacheCapacity.
writePauseWarningThrottler = 1 * time.Minute
)
// DB provides abstractions over LevelDB in order to
// implement complex structures using fields and ordered indexes.
// It provides schema functionality to store information about
// field and index naming and types.
type DB struct {
ldb *leveldb.DB
quit chan struct{} // Quit channel to stop the metrics collection before closing the database
}
// NewDB constructs a new DB and validates the schema
// if it exists in the database at the given path.
// metricsPrefix is used for metrics collection for the given DB.
func NewDB(path string, metricsPrefix string) (db *DB, err error) {
ldb, err := leveldb.OpenFile(path, &opt.Options{
OpenFilesCacheCapacity: openFileLimit,
})
if err != nil {
return nil, err
}
db = &DB{
ldb: ldb,
}
if _, err = db.getSchema(); err != nil {
if err == leveldb.ErrNotFound {
// save schema with initialized default fields
if err = db.putSchema(schema{
Fields: make(map[string]fieldSpec),
Indexes: make(map[byte]indexSpec),
}); err != nil {
return nil, err
}
} else {
return nil, err
}
}
// Create a quit channel for the periodic metrics collector and run it
db.quit = make(chan struct{})
go db.meter(metricsPrefix, 10*time.Second)
return db, nil
}
// Put wraps LevelDB Put method to increment metrics counter.
func (db *DB) Put(key []byte, value []byte) (err error) {
err = db.ldb.Put(key, value, nil)
if err != nil {
metrics.GetOrRegisterCounter("DB.putFail", nil).Inc(1)
return err
}
metrics.GetOrRegisterCounter("DB.put", nil).Inc(1)
return nil
}
// Get wraps LevelDB Get method to increment metrics counter.
func (db *DB) Get(key []byte) (value []byte, err error) {
value, err = db.ldb.Get(key, nil)
if err != nil {
if err == leveldb.ErrNotFound {
metrics.GetOrRegisterCounter("DB.getNotFound", nil).Inc(1)
} else {
metrics.GetOrRegisterCounter("DB.getFail", nil).Inc(1)
}
return nil, err
}
metrics.GetOrRegisterCounter("DB.get", nil).Inc(1)
return value, nil
}
// Has wraps LevelDB Has method to increment metrics counter.
func (db *DB) Has(key []byte) (yes bool, err error) {
yes, err = db.ldb.Has(key, nil)
if err != nil {
metrics.GetOrRegisterCounter("DB.hasFail", nil).Inc(1)
return false, err
}
metrics.GetOrRegisterCounter("DB.has", nil).Inc(1)
return yes, nil
}
// Delete wraps LevelDB Delete method to increment metrics counter.
func (db *DB) Delete(key []byte) (err error) {
err = db.ldb.Delete(key, nil)
if err != nil {
metrics.GetOrRegisterCounter("DB.deleteFail", nil).Inc(1)
return err
}
metrics.GetOrRegisterCounter("DB.delete", nil).Inc(1)
return nil
}
// NewIterator wraps LevelDB NewIterator method to increment metrics counter.
func (db *DB) NewIterator() iterator.Iterator {
metrics.GetOrRegisterCounter("DB.newiterator", nil).Inc(1)
return db.ldb.NewIterator(nil, nil)
}
// WriteBatch wraps LevelDB Write method to increment metrics counter.
func (db *DB) WriteBatch(batch *leveldb.Batch) (err error) {
err = db.ldb.Write(batch, nil)
if err != nil {
metrics.GetOrRegisterCounter("DB.writebatchFail", nil).Inc(1)
return err
}
metrics.GetOrRegisterCounter("DB.writebatch", nil).Inc(1)
return nil
}
// Close closes LevelDB database.
func (db *DB) Close() (err error) {
close(db.quit)
return db.ldb.Close()
}
func (db *DB) meter(prefix string, refresh time.Duration) {
// Meter for measuring the total time spent in database compaction
compTimeMeter := metrics.NewRegisteredMeter(prefix+"compact/time", nil)
// Meter for measuring the data read during compaction
compReadMeter := metrics.NewRegisteredMeter(prefix+"compact/input", nil)
// Meter for measuring the data written during compaction
compWriteMeter := metrics.NewRegisteredMeter(prefix+"compact/output", nil)
// Meter for measuring the write delay duration due to database compaction
writeDelayMeter := metrics.NewRegisteredMeter(prefix+"compact/writedelay/duration", nil)
// Meter for measuring the write delay count due to database compaction
writeDelayNMeter := metrics.NewRegisteredMeter(prefix+"compact/writedelay/counter", nil)
// Meter for measuring the effective amount of data read
diskReadMeter := metrics.NewRegisteredMeter(prefix+"disk/read", nil)
// Meter for measuring the effective amount of data written
diskWriteMeter := metrics.NewRegisteredMeter(prefix+"disk/write", nil)
// Create the counters to store current and previous compaction values
compactions := make([][]float64, 2)
for i := 0; i < 2; i++ {
compactions[i] = make([]float64, 3)
}
// Create storage for iostats.
var iostats [2]float64
// Create storage and warning log tracer for write delay.
var (
delaystats [2]int64
lastWritePaused time.Time
)
// Iterate ad infinitum and collect the stats
for i := 1; true; i++ {
// Retrieve the database stats
stats, err := db.ldb.GetProperty("leveldb.stats")
if err != nil {
log.Error("Failed to read database stats", "err", err)
continue
}
// Find the compaction table, skip the header
lines := strings.Split(stats, "\n")
for len(lines) > 0 && strings.TrimSpace(lines[0]) != "Compactions" {
lines = lines[1:]
}
if len(lines) <= 3 {
log.Error("Compaction table not found")
continue
}
lines = lines[3:]
// Iterate over all the table rows, and accumulate the entries
for j := 0; j < len(compactions[i%2]); j++ {
compactions[i%2][j] = 0
}
for _, line := range lines {
parts := strings.Split(line, "|")
if len(parts) != 6 {
break
}
for idx, counter := range parts[3:] {
value, err := strconv.ParseFloat(strings.TrimSpace(counter), 64)
if err != nil {
log.Error("Compaction entry parsing failed", "err", err)
continue
}
compactions[i%2][idx] += value
}
}
// Update all the requested meters
if compTimeMeter != nil {
compTimeMeter.Mark(int64((compactions[i%2][0] - compactions[(i-1)%2][0]) * 1000 * 1000 * 1000))
}
if compReadMeter != nil {
compReadMeter.Mark(int64((compactions[i%2][1] - compactions[(i-1)%2][1]) * 1024 * 1024))
}
if compWriteMeter != nil {
compWriteMeter.Mark(int64((compactions[i%2][2] - compactions[(i-1)%2][2]) * 1024 * 1024))
}
// Retrieve the write delay statistic
writedelay, err := db.ldb.GetProperty("leveldb.writedelay")
if err != nil {
log.Error("Failed to read database write delay statistic", "err", err)
continue
}
var (
delayN int64
delayDuration string
duration time.Duration
paused bool
)
if n, err := fmt.Sscanf(writedelay, "DelayN:%d Delay:%s Paused:%t", &delayN, &delayDuration, &paused); n != 3 || err != nil {
log.Error("Write delay statistic not found")
continue
}
duration, err = time.ParseDuration(delayDuration)
if err != nil {
log.Error("Failed to parse delay duration", "err", err)
continue
}
if writeDelayNMeter != nil {
writeDelayNMeter.Mark(delayN - delaystats[0])
}
if writeDelayMeter != nil {
writeDelayMeter.Mark(duration.Nanoseconds() - delaystats[1])
}
// If a warning that the db is performing compaction has been displayed, any subsequent
// warnings will be withheld for one minute so as not to overwhelm the user.
if paused && delayN-delaystats[0] == 0 && duration.Nanoseconds()-delaystats[1] == 0 &&
time.Now().After(lastWritePaused.Add(writePauseWarningThrottler)) {
log.Warn("Database compacting, degraded performance")
lastWritePaused = time.Now()
}
delaystats[0], delaystats[1] = delayN, duration.Nanoseconds()
// Retrieve the database iostats.
ioStats, err := db.ldb.GetProperty("leveldb.iostats")
if err != nil {
log.Error("Failed to read database iostats", "err", err)
continue
}
var nRead, nWrite float64
parts := strings.Split(ioStats, " ")
if len(parts) < 2 {
log.Error("Bad syntax of ioStats", "ioStats", ioStats)
continue
}
if n, err := fmt.Sscanf(parts[0], "Read(MB):%f", &nRead); n != 1 || err != nil {
log.Error("Bad syntax of read entry", "entry", parts[0])
continue
}
if n, err := fmt.Sscanf(parts[1], "Write(MB):%f", &nWrite); n != 1 || err != nil {
log.Error("Bad syntax of write entry", "entry", parts[1])
continue
}
if diskReadMeter != nil {
diskReadMeter.Mark(int64((nRead - iostats[0]) * 1024 * 1024))
}
if diskWriteMeter != nil {
diskWriteMeter.Mark(int64((nWrite - iostats[1]) * 1024 * 1024))
}
iostats[0], iostats[1] = nRead, nWrite
// Sleep a bit, then repeat the stats collection
select {
case <-db.quit:
// Quit requesting, stop hammering the database
return
case <-time.After(refresh):
// Timeout, gather a new set of stats
}
}
}
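The DB wrapper above is what the field and index helpers in the rest of this package build on. A minimal usage sketch of the raw Put/Get layer, assuming the import path introduced by this split; the database path is illustrative:

package main

import (
    "fmt"
    "log"

    "github.com/ethersphere/swarm/shed"
)

func main() {
    // Open (or create) the LevelDB-backed database; the empty string
    // disables a metrics name prefix. The path is illustrative.
    db, err := shed.NewDB("/tmp/shed-db-sketch", "")
    if err != nil {
        log.Fatal(err)
    }
    defer db.Close()

    // Put and Get wrap the LevelDB calls and increment metrics counters.
    if err := db.Put([]byte("hello"), []byte("world")); err != nil {
        log.Fatal(err)
    }
    value, err := db.Get([]byte("hello"))
    if err != nil {
        log.Fatal(err)
    }
    fmt.Printf("%s\n", value)
}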

112
shed/db_test.go Normal file

@@ -0,0 +1,112 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package shed
import (
"io/ioutil"
"os"
"testing"
)
// TestNewDB constructs a new DB
// and validates if the schema is initialized properly.
func TestNewDB(t *testing.T) {
db, cleanupFunc := newTestDB(t)
defer cleanupFunc()
s, err := db.getSchema()
if err != nil {
t.Fatal(err)
}
if s.Fields == nil {
t.Error("schema fields are empty")
}
if len(s.Fields) != 0 {
t.Errorf("got schema fields length %v, want %v", len(s.Fields), 0)
}
if s.Indexes == nil {
t.Error("schema indexes are empty")
}
if len(s.Indexes) != 0 {
t.Errorf("got schema indexes length %v, want %v", len(s.Indexes), 0)
}
}
// TestDB_persistence creates one DB, saves a field and closes that DB.
// Then, it constructs another DB and tries to retrieve the saved value.
func TestDB_persistence(t *testing.T) {
dir, err := ioutil.TempDir("", "shed-test-persistence")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(dir)
db, err := NewDB(dir, "")
if err != nil {
t.Fatal(err)
}
stringField, err := db.NewStringField("preserve-me")
if err != nil {
t.Fatal(err)
}
want := "persistent value"
err = stringField.Put(want)
if err != nil {
t.Fatal(err)
}
err = db.Close()
if err != nil {
t.Fatal(err)
}
db2, err := NewDB(dir, "")
if err != nil {
t.Fatal(err)
}
stringField2, err := db2.NewStringField("preserve-me")
if err != nil {
t.Fatal(err)
}
got, err := stringField2.Get()
if err != nil {
t.Fatal(err)
}
if got != want {
t.Errorf("got string %q, want %q", got, want)
}
}
// newTestDB is a helper function that constructs a
// temporary database and returns a cleanup function that must
// be called to remove the data.
func newTestDB(t *testing.T) (db *DB, cleanupFunc func()) {
t.Helper()
dir, err := ioutil.TempDir("", "shed-test")
if err != nil {
t.Fatal(err)
}
db, err = NewDB(dir, "")
if err != nil {
os.RemoveAll(dir)
t.Fatal(err)
}
return db, func() {
db.Close()
os.RemoveAll(dir)
}
}

332
shed/example_store_test.go Normal file

@@ -0,0 +1,332 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package shed_test
import (
"bytes"
"context"
"encoding/binary"
"fmt"
"io/ioutil"
"log"
"os"
"time"
"github.com/ethersphere/swarm/shed"
"github.com/ethersphere/swarm/storage"
"github.com/syndtr/goleveldb/leveldb"
)
// Store holds fields and indexes (including their encoding functions)
// and defines operations on them by composing data from them.
// It implements storage.ChunkStore interface.
// It is just an example without any support for parallel operations
// or real world implementation.
type Store struct {
db *shed.DB
// fields and indexes
schemaName shed.StringField
sizeCounter shed.Uint64Field
accessCounter shed.Uint64Field
retrievalIndex shed.Index
accessIndex shed.Index
gcIndex shed.Index
}
// New returns a new Store. All fields and indexes are initialized
// and possible conflicts with the schema from an existing database are checked
// automatically.
func New(path string) (s *Store, err error) {
db, err := shed.NewDB(path, "")
if err != nil {
return nil, err
}
s = &Store{
db: db,
}
// Identify current storage schema by arbitrary name.
s.schemaName, err = db.NewStringField("schema-name")
if err != nil {
return nil, err
}
// Global ever incrementing index of chunk accesses.
s.accessCounter, err = db.NewUint64Field("access-counter")
if err != nil {
return nil, err
}
// Index storing actual chunk address, data and store timestamp.
s.retrievalIndex, err = db.NewIndex("Address->StoreTimestamp|Data", shed.IndexFuncs{
EncodeKey: func(fields shed.Item) (key []byte, err error) {
return fields.Address, nil
},
DecodeKey: func(key []byte) (e shed.Item, err error) {
e.Address = key
return e, nil
},
EncodeValue: func(fields shed.Item) (value []byte, err error) {
b := make([]byte, 8)
binary.BigEndian.PutUint64(b, uint64(fields.StoreTimestamp))
value = append(b, fields.Data...)
return value, nil
},
DecodeValue: func(keyItem shed.Item, value []byte) (e shed.Item, err error) {
e.StoreTimestamp = int64(binary.BigEndian.Uint64(value[:8]))
e.Data = value[8:]
return e, nil
},
})
if err != nil {
return nil, err
}
// Index storing access timestamp for a particular address.
// It is needed in order to update gc index keys for iteration order.
s.accessIndex, err = db.NewIndex("Address->AccessTimestamp", shed.IndexFuncs{
EncodeKey: func(fields shed.Item) (key []byte, err error) {
return fields.Address, nil
},
DecodeKey: func(key []byte) (e shed.Item, err error) {
e.Address = key
return e, nil
},
EncodeValue: func(fields shed.Item) (value []byte, err error) {
b := make([]byte, 8)
binary.BigEndian.PutUint64(b, uint64(fields.AccessTimestamp))
return b, nil
},
DecodeValue: func(keyItem shed.Item, value []byte) (e shed.Item, err error) {
e.AccessTimestamp = int64(binary.BigEndian.Uint64(value))
return e, nil
},
})
if err != nil {
return nil, err
}
// Index with keys ordered by access timestamp for garbage collection prioritization.
s.gcIndex, err = db.NewIndex("AccessTimestamp|StoredTimestamp|Address->nil", shed.IndexFuncs{
EncodeKey: func(fields shed.Item) (key []byte, err error) {
b := make([]byte, 16, 16+len(fields.Address))
binary.BigEndian.PutUint64(b[:8], uint64(fields.AccessTimestamp))
binary.BigEndian.PutUint64(b[8:16], uint64(fields.StoreTimestamp))
key = append(b, fields.Address...)
return key, nil
},
DecodeKey: func(key []byte) (e shed.Item, err error) {
e.AccessTimestamp = int64(binary.BigEndian.Uint64(key[:8]))
e.StoreTimestamp = int64(binary.BigEndian.Uint64(key[8:16]))
e.Address = key[16:]
return e, nil
},
EncodeValue: func(fields shed.Item) (value []byte, err error) {
return nil, nil
},
DecodeValue: func(keyItem shed.Item, value []byte) (e shed.Item, err error) {
return e, nil
},
})
if err != nil {
return nil, err
}
return s, nil
}
// Put stores the chunk and sets its store timestamp.
func (s *Store) Put(_ context.Context, ch storage.Chunk) (err error) {
return s.retrievalIndex.Put(shed.Item{
Address: ch.Address(),
Data: ch.Data(),
StoreTimestamp: time.Now().UTC().UnixNano(),
})
}
// Get retrieves a chunk with the provided address.
// It updates access and gc indexes by removing the previous
// items from them and adding new items as keys of index entries
// are changed.
func (s *Store) Get(_ context.Context, addr storage.Address) (c storage.Chunk, err error) {
batch := new(leveldb.Batch)
// Get the chunk data and storage timestamp.
item, err := s.retrievalIndex.Get(shed.Item{
Address: addr,
})
if err != nil {
if err == leveldb.ErrNotFound {
return nil, storage.ErrChunkNotFound
}
return nil, err
}
// Get the chunk access timestamp.
accessItem, err := s.accessIndex.Get(shed.Item{
Address: addr,
})
switch err {
case nil:
// Remove gc index entry if access timestamp is found.
err = s.gcIndex.DeleteInBatch(batch, shed.Item{
Address: item.Address,
StoreTimestamp: item.StoreTimestamp,
AccessTimestamp: accessItem.AccessTimestamp,
})
if err != nil {
return nil, err
}
case leveldb.ErrNotFound:
// Access timestamp is not found. Do not do anything.
// This is the first get request.
default:
return nil, err
}
// Specify new access timestamp
accessTimestamp := time.Now().UTC().UnixNano()
// Put new access timestamp in access index.
err = s.accessIndex.PutInBatch(batch, shed.Item{
Address: addr,
AccessTimestamp: accessTimestamp,
})
if err != nil {
return nil, err
}
// Put new access timestamp in gc index.
err = s.gcIndex.PutInBatch(batch, shed.Item{
Address: item.Address,
AccessTimestamp: accessTimestamp,
StoreTimestamp: item.StoreTimestamp,
})
if err != nil {
return nil, err
}
// Increment access counter.
// Currently this information is not used anywhere.
_, err = s.accessCounter.IncInBatch(batch)
if err != nil {
return nil, err
}
// Write the batch.
err = s.db.WriteBatch(batch)
if err != nil {
return nil, err
}
// Return the chunk.
return storage.NewChunk(item.Address, item.Data), nil
}
// CollectGarbage is an example of index iteration.
// It provides no reliable garbage collection functionality.
func (s *Store) CollectGarbage() (err error) {
const maxTrashSize = 100
maxRounds := 10 // arbitrary number, needs to be calculated
// Run a few gc rounds.
for roundCount := 0; roundCount < maxRounds; roundCount++ {
var garbageCount int
// New batch for a new gc round.
trash := new(leveldb.Batch)
// Iterate through all index items and break when needed.
err = s.gcIndex.Iterate(func(item shed.Item) (stop bool, err error) {
// Remove the chunk.
err = s.retrievalIndex.DeleteInBatch(trash, item)
if err != nil {
return false, err
}
// Remove the element in gc index.
err = s.gcIndex.DeleteInBatch(trash, item)
if err != nil {
return false, err
}
// Remove the relation in access index.
err = s.accessIndex.DeleteInBatch(trash, item)
if err != nil {
return false, err
}
garbageCount++
if garbageCount >= maxTrashSize {
return true, nil
}
return false, nil
}, nil)
if err != nil {
return err
}
if garbageCount == 0 {
return nil
}
err = s.db.WriteBatch(trash)
if err != nil {
return err
}
}
return nil
}
// GetSchema is an example of retrieving the most simple
// string from a database field.
func (s *Store) GetSchema() (name string, err error) {
name, err = s.schemaName.Get()
if err == leveldb.ErrNotFound {
return "", nil
}
return name, err
}
// PutSchema is an example of storing the most simple
// string in a database field.
func (s *Store) PutSchema(name string) (err error) {
return s.schemaName.Put(name)
}
// Close closes the underlying database.
func (s *Store) Close() error {
return s.db.Close()
}
// Example_store constructs a simple storage implementation using shed package.
func Example_store() {
dir, err := ioutil.TempDir("", "ephemeral")
if err != nil {
log.Fatal(err)
}
defer os.RemoveAll(dir)
s, err := New(dir)
if err != nil {
log.Fatal(err)
}
defer s.Close()
ch := storage.GenerateRandomChunk(1024)
err = s.Put(context.Background(), ch)
if err != nil {
log.Fatal(err)
}
got, err := s.Get(context.Background(), ch.Address())
if err != nil {
log.Fatal(err)
}
fmt.Println(bytes.Equal(got.Data(), ch.Data()))
// Output: true
}

66
shed/field_string.go Normal file

@@ -0,0 +1,66 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package shed
import (
"github.com/syndtr/goleveldb/leveldb"
)
// StringField is the most simple field implementation
// that stores an arbitrary string under a specific LevelDB key.
type StringField struct {
db *DB
key []byte
}
// NewStringField returns a new instance of StringField.
// It validates its name and type against the database schema.
func (db *DB) NewStringField(name string) (f StringField, err error) {
key, err := db.schemaFieldKey(name, "string")
if err != nil {
return f, err
}
return StringField{
db: db,
key: key,
}, nil
}
// Get returns a string value from the database.
// If the value is not found, an empty string is returned
// and no error.
func (f StringField) Get() (val string, err error) {
b, err := f.db.Get(f.key)
if err != nil {
if err == leveldb.ErrNotFound {
return "", nil
}
return "", err
}
return string(b), nil
}
// Put stores a string in the database.
func (f StringField) Put(val string) (err error) {
return f.db.Put(f.key, []byte(val))
}
// PutInBatch stores a string in a batch that can be
// saved later in the database.
func (f StringField) PutInBatch(batch *leveldb.Batch, val string) {
batch.Put(f.key, []byte(val))
}
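A short, hedged sketch of how a caller might use StringField on top of a DB opened with shed.NewDB; the field name and path are illustrative:

package main

import (
    "fmt"
    "log"

    "github.com/ethersphere/swarm/shed"
)

func main() {
    db, err := shed.NewDB("/tmp/shed-string-sketch", "") // illustrative path
    if err != nil {
        log.Fatal(err)
    }
    defer db.Close()

    // The field name and type are validated against the database schema,
    // so reusing the same name with a different field type returns an error.
    field, err := db.NewStringField("schema-name")
    if err != nil {
        log.Fatal(err)
    }
    if err := field.Put("example value"); err != nil {
        log.Fatal(err)
    }
    // Get returns an empty string and no error if nothing has been stored yet.
    val, err := field.Get()
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(val)
}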

110
shed/field_string_test.go Normal file

@@ -0,0 +1,110 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package shed
import (
"testing"
"github.com/syndtr/goleveldb/leveldb"
)
// TestStringField validates put and get operations
// of the StringField.
func TestStringField(t *testing.T) {
db, cleanupFunc := newTestDB(t)
defer cleanupFunc()
simpleString, err := db.NewStringField("simple-string")
if err != nil {
t.Fatal(err)
}
t.Run("get empty", func(t *testing.T) {
got, err := simpleString.Get()
if err != nil {
t.Fatal(err)
}
want := ""
if got != want {
t.Errorf("got string %q, want %q", got, want)
}
})
t.Run("put", func(t *testing.T) {
want := "simple string value"
err = simpleString.Put(want)
if err != nil {
t.Fatal(err)
}
got, err := simpleString.Get()
if err != nil {
t.Fatal(err)
}
if got != want {
t.Errorf("got string %q, want %q", got, want)
}
t.Run("overwrite", func(t *testing.T) {
want := "overwritten string value"
err = simpleString.Put(want)
if err != nil {
t.Fatal(err)
}
got, err := simpleString.Get()
if err != nil {
t.Fatal(err)
}
if got != want {
t.Errorf("got string %q, want %q", got, want)
}
})
})
t.Run("put in batch", func(t *testing.T) {
batch := new(leveldb.Batch)
want := "simple string batch value"
simpleString.PutInBatch(batch, want)
err = db.WriteBatch(batch)
if err != nil {
t.Fatal(err)
}
got, err := simpleString.Get()
if err != nil {
t.Fatal(err)
}
if got != want {
t.Errorf("got string %q, want %q", got, want)
}
t.Run("overwrite", func(t *testing.T) {
batch := new(leveldb.Batch)
want := "overwritten string batch value"
simpleString.PutInBatch(batch, want)
err = db.WriteBatch(batch)
if err != nil {
t.Fatal(err)
}
got, err := simpleString.Get()
if err != nil {
t.Fatal(err)
}
if got != want {
t.Errorf("got string %q, want %q", got, want)
}
})
})
}

71
shed/field_struct.go Normal file

@@ -0,0 +1,71 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package shed
import (
"github.com/ethereum/go-ethereum/rlp"
"github.com/syndtr/goleveldb/leveldb"
)
// StructField is a helper to store a complex structure by
// encoding it in RLP format.
type StructField struct {
db *DB
key []byte
}
// NewStructField returns a new StructField.
// It validates its name and type against the database schema.
func (db *DB) NewStructField(name string) (f StructField, err error) {
key, err := db.schemaFieldKey(name, "struct-rlp")
if err != nil {
return f, err
}
return StructField{
db: db,
key: key,
}, nil
}
// Get unmarshals data from the database to a provided val.
// If the data is not found leveldb.ErrNotFound is returned.
func (f StructField) Get(val interface{}) (err error) {
b, err := f.db.Get(f.key)
if err != nil {
return err
}
return rlp.DecodeBytes(b, val)
}
// Put marshals provided val and saves it to the database.
func (f StructField) Put(val interface{}) (err error) {
b, err := rlp.EncodeToBytes(val)
if err != nil {
return err
}
return f.db.Put(f.key, b)
}
// PutInBatch marshals provided val and puts it into the batch.
func (f StructField) PutInBatch(batch *leveldb.Batch, val interface{}) (err error) {
b, err := rlp.EncodeToBytes(val)
if err != nil {
return err
}
batch.Put(f.key, b)
return nil
}
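A hedged sketch of StructField with a hypothetical struct type; RLP encodes only exported fields, and unlike StringField a missing key yields leveldb.ErrNotFound:

package main

import (
    "fmt"
    "log"

    "github.com/ethersphere/swarm/shed"
)

// nodeInfo is a hypothetical example type, not part of this commit.
type nodeInfo struct {
    Name  string
    Peers uint64
}

func main() {
    db, err := shed.NewDB("/tmp/shed-struct-sketch", "") // illustrative path
    if err != nil {
        log.Fatal(err)
    }
    defer db.Close()

    field, err := db.NewStructField("node-info")
    if err != nil {
        log.Fatal(err)
    }
    // Put RLP-encodes the value and stores it under the field key.
    if err := field.Put(nodeInfo{Name: "node-1", Peers: 42}); err != nil {
        log.Fatal(err)
    }
    var got nodeInfo
    // Get decodes into the provided pointer, or returns leveldb.ErrNotFound.
    if err := field.Get(&got); err != nil {
        log.Fatal(err)
    }
    fmt.Printf("%s has %d peers\n", got.Name, got.Peers)
}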

127
shed/field_struct_test.go Normal file

@@ -0,0 +1,127 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package shed
import (
"testing"
"github.com/syndtr/goleveldb/leveldb"
)
// TestStructField validates put and get operations
// of the StructField.
func TestStructField(t *testing.T) {
db, cleanupFunc := newTestDB(t)
defer cleanupFunc()
complexField, err := db.NewStructField("complex-field")
if err != nil {
t.Fatal(err)
}
type complexStructure struct {
A string
}
t.Run("get empty", func(t *testing.T) {
var s complexStructure
err := complexField.Get(&s)
if err != leveldb.ErrNotFound {
t.Fatalf("got error %v, want %v", err, leveldb.ErrNotFound)
}
want := ""
if s.A != want {
t.Errorf("got string %q, want %q", s.A, want)
}
})
t.Run("put", func(t *testing.T) {
want := complexStructure{
A: "simple string value",
}
err = complexField.Put(want)
if err != nil {
t.Fatal(err)
}
var got complexStructure
err = complexField.Get(&got)
if err != nil {
t.Fatal(err)
}
if got.A != want.A {
t.Errorf("got string %q, want %q", got.A, want.A)
}
t.Run("overwrite", func(t *testing.T) {
want := complexStructure{
A: "overwritten string value",
}
err = complexField.Put(want)
if err != nil {
t.Fatal(err)
}
var got complexStructure
err = complexField.Get(&got)
if err != nil {
t.Fatal(err)
}
if got.A != want.A {
t.Errorf("got string %q, want %q", got.A, want.A)
}
})
})
t.Run("put in batch", func(t *testing.T) {
batch := new(leveldb.Batch)
want := complexStructure{
A: "simple string batch value",
}
complexField.PutInBatch(batch, want)
err = db.WriteBatch(batch)
if err != nil {
t.Fatal(err)
}
var got complexStructure
err := complexField.Get(&got)
if err != nil {
t.Fatal(err)
}
if got.A != want.A {
t.Errorf("got string %q, want %q", got, want)
}
t.Run("overwrite", func(t *testing.T) {
batch := new(leveldb.Batch)
want := complexStructure{
A: "overwritten string batch value",
}
complexField.PutInBatch(batch, want)
err = db.WriteBatch(batch)
if err != nil {
t.Fatal(err)
}
var got complexStructure
err := complexField.Get(&got)
if err != nil {
t.Fatal(err)
}
if got.A != want.A {
t.Errorf("got string %q, want %q", got, want)
}
})
})
}

146
shed/field_uint64.go Normal file

@@ -0,0 +1,146 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package shed
import (
"encoding/binary"
"github.com/syndtr/goleveldb/leveldb"
)
// Uint64Field provides a way to have a simple counter in the database.
// It transparently encodes uint64 type value to bytes.
type Uint64Field struct {
db *DB
key []byte
}
// NewUint64Field returns a new Uint64Field.
// It validates its name and type against the database schema.
func (db *DB) NewUint64Field(name string) (f Uint64Field, err error) {
key, err := db.schemaFieldKey(name, "uint64")
if err != nil {
return f, err
}
return Uint64Field{
db: db,
key: key,
}, nil
}
// Get retrieves a uint64 value from the database.
// If the value is not found in the database a 0 value
// is returned and no error.
func (f Uint64Field) Get() (val uint64, err error) {
b, err := f.db.Get(f.key)
if err != nil {
if err == leveldb.ErrNotFound {
return 0, nil
}
return 0, err
}
return binary.BigEndian.Uint64(b), nil
}
// Put encodes a uint64 value and stores it in the database.
func (f Uint64Field) Put(val uint64) (err error) {
return f.db.Put(f.key, encodeUint64(val))
}
// PutInBatch stores a uint64 value in a batch
// that can be saved later in the database.
func (f Uint64Field) PutInBatch(batch *leveldb.Batch, val uint64) {
batch.Put(f.key, encodeUint64(val))
}
// Inc increments a uint64 value in the database.
// This operation is not goroutine safe.
func (f Uint64Field) Inc() (val uint64, err error) {
val, err = f.Get()
if err != nil {
if err == leveldb.ErrNotFound {
val = 0
} else {
return 0, err
}
}
val++
return val, f.Put(val)
}
// IncInBatch increments a uint64 value in the batch
// by retrieving a value from the database, not the same batch.
// This operation is not goroutine safe.
func (f Uint64Field) IncInBatch(batch *leveldb.Batch) (val uint64, err error) {
val, err = f.Get()
if err != nil {
if err == leveldb.ErrNotFound {
val = 0
} else {
return 0, err
}
}
val++
f.PutInBatch(batch, val)
return val, nil
}
// Dec decrements a uint64 value in the database.
// This operation is not goroutine safe.
// The field is protected from underflowing below zero.
func (f Uint64Field) Dec() (val uint64, err error) {
val, err = f.Get()
if err != nil {
if err == leveldb.ErrNotFound {
val = 0
} else {
return 0, err
}
}
if val != 0 {
val--
}
return val, f.Put(val)
}
// DecInBatch decrements a uint64 value in the batch
// by retrieving a value from the database, not the same batch.
// This operation is not goroutine safe.
// The field is protected from underflowing below zero.
func (f Uint64Field) DecInBatch(batch *leveldb.Batch) (val uint64, err error) {
val, err = f.Get()
if err != nil {
if err == leveldb.ErrNotFound {
val = 0
} else {
return 0, err
}
}
if val != 0 {
val--
}
f.PutInBatch(batch, val)
return val, nil
}
// encodeUint64 transforms a uint64 to an 8 byte long
// slice in big endian encoding.
func encodeUint64(val uint64) (b []byte) {
b = make([]byte, 8)
binary.BigEndian.PutUint64(b, val)
return b
}
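A minimal sketch of using Uint64Field as a persistent counter, both directly and within a batch; as noted above, the increment helpers read the current value from the database, not from the pending batch. The field name and path are illustrative:

package main

import (
    "fmt"
    "log"

    "github.com/ethersphere/swarm/shed"
    "github.com/syndtr/goleveldb/leveldb"
)

func main() {
    db, err := shed.NewDB("/tmp/shed-counter-sketch", "") // illustrative path
    if err != nil {
        log.Fatal(err)
    }
    defer db.Close()

    counter, err := db.NewUint64Field("access-counter")
    if err != nil {
        log.Fatal(err)
    }
    // Inc reads, increments and writes immediately.
    if _, err := counter.Inc(); err != nil {
        log.Fatal(err)
    }
    // IncInBatch only queues the write; it becomes visible after WriteBatch.
    batch := new(leveldb.Batch)
    val, err := counter.IncInBatch(batch)
    if err != nil {
        log.Fatal(err)
    }
    if err := db.WriteBatch(batch); err != nil {
        log.Fatal(err)
    }
    fmt.Println(val) // 2
}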

300
shed/field_uint64_test.go Normal file

@@ -0,0 +1,300 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package shed
import (
"testing"
"github.com/syndtr/goleveldb/leveldb"
)
// TestUint64Field validates put and get operations
// of the Uint64Field.
func TestUint64Field(t *testing.T) {
db, cleanupFunc := newTestDB(t)
defer cleanupFunc()
counter, err := db.NewUint64Field("counter")
if err != nil {
t.Fatal(err)
}
t.Run("get empty", func(t *testing.T) {
got, err := counter.Get()
if err != nil {
t.Fatal(err)
}
var want uint64
if got != want {
t.Errorf("got uint64 %v, want %v", got, want)
}
})
t.Run("put", func(t *testing.T) {
var want uint64 = 42
err = counter.Put(want)
if err != nil {
t.Fatal(err)
}
got, err := counter.Get()
if err != nil {
t.Fatal(err)
}
if got != want {
t.Errorf("got uint64 %v, want %v", got, want)
}
t.Run("overwrite", func(t *testing.T) {
var want uint64 = 84
err = counter.Put(want)
if err != nil {
t.Fatal(err)
}
got, err := counter.Get()
if err != nil {
t.Fatal(err)
}
if got != want {
t.Errorf("got uint64 %v, want %v", got, want)
}
})
})
t.Run("put in batch", func(t *testing.T) {
batch := new(leveldb.Batch)
var want uint64 = 42
counter.PutInBatch(batch, want)
err = db.WriteBatch(batch)
if err != nil {
t.Fatal(err)
}
got, err := counter.Get()
if err != nil {
t.Fatal(err)
}
if got != want {
t.Errorf("got uint64 %v, want %v", got, want)
}
t.Run("overwrite", func(t *testing.T) {
batch := new(leveldb.Batch)
var want uint64 = 84
counter.PutInBatch(batch, want)
err = db.WriteBatch(batch)
if err != nil {
t.Fatal(err)
}
got, err := counter.Get()
if err != nil {
t.Fatal(err)
}
if got != want {
t.Errorf("got uint64 %v, want %v", got, want)
}
})
})
}
// TestUint64Field_Inc validates Inc operation
// of the Uint64Field.
func TestUint64Field_Inc(t *testing.T) {
db, cleanupFunc := newTestDB(t)
defer cleanupFunc()
counter, err := db.NewUint64Field("counter")
if err != nil {
t.Fatal(err)
}
var want uint64 = 1
got, err := counter.Inc()
if err != nil {
t.Fatal(err)
}
if got != want {
t.Errorf("got uint64 %v, want %v", got, want)
}
want = 2
got, err = counter.Inc()
if err != nil {
t.Fatal(err)
}
if got != want {
t.Errorf("got uint64 %v, want %v", got, want)
}
}
// TestUint64Field_IncInBatch validates IncInBatch operation
// of the Uint64Field.
func TestUint64Field_IncInBatch(t *testing.T) {
db, cleanupFunc := newTestDB(t)
defer cleanupFunc()
counter, err := db.NewUint64Field("counter")
if err != nil {
t.Fatal(err)
}
batch := new(leveldb.Batch)
var want uint64 = 1
got, err := counter.IncInBatch(batch)
if err != nil {
t.Fatal(err)
}
if got != want {
t.Errorf("got uint64 %v, want %v", got, want)
}
err = db.WriteBatch(batch)
if err != nil {
t.Fatal(err)
}
got, err = counter.Get()
if err != nil {
t.Fatal(err)
}
if got != want {
t.Errorf("got uint64 %v, want %v", got, want)
}
batch2 := new(leveldb.Batch)
want = 2
got, err = counter.IncInBatch(batch2)
if err != nil {
t.Fatal(err)
}
if got != want {
t.Errorf("got uint64 %v, want %v", got, want)
}
err = db.WriteBatch(batch2)
if err != nil {
t.Fatal(err)
}
got, err = counter.Get()
if err != nil {
t.Fatal(err)
}
if got != want {
t.Errorf("got uint64 %v, want %v", got, want)
}
}
// TestUint64Field_Dec validates Dec operation
// of the Uint64Field.
func TestUint64Field_Dec(t *testing.T) {
db, cleanupFunc := newTestDB(t)
defer cleanupFunc()
counter, err := db.NewUint64Field("counter")
if err != nil {
t.Fatal(err)
}
// test underflow protection
var want uint64
got, err := counter.Dec()
if err != nil {
t.Fatal(err)
}
if got != want {
t.Errorf("got uint64 %v, want %v", got, want)
}
want = 32
err = counter.Put(want)
if err != nil {
t.Fatal(err)
}
want = 31
got, err = counter.Dec()
if err != nil {
t.Fatal(err)
}
if got != want {
t.Errorf("got uint64 %v, want %v", got, want)
}
}
// TestUint64Field_DecInBatch validates DecInBatch operation
// of the Uint64Field.
func TestUint64Field_DecInBatch(t *testing.T) {
db, cleanupFunc := newTestDB(t)
defer cleanupFunc()
counter, err := db.NewUint64Field("counter")
if err != nil {
t.Fatal(err)
}
batch := new(leveldb.Batch)
var want uint64
got, err := counter.DecInBatch(batch)
if err != nil {
t.Fatal(err)
}
if got != want {
t.Errorf("got uint64 %v, want %v", got, want)
}
err = db.WriteBatch(batch)
if err != nil {
t.Fatal(err)
}
got, err = counter.Get()
if err != nil {
t.Fatal(err)
}
if got != want {
t.Errorf("got uint64 %v, want %v", got, want)
}
batch2 := new(leveldb.Batch)
want = 42
counter.PutInBatch(batch2, want)
err = db.WriteBatch(batch2)
if err != nil {
t.Fatal(err)
}
got, err = counter.Get()
if err != nil {
t.Fatal(err)
}
if got != want {
t.Errorf("got uint64 %v, want %v", got, want)
}
batch3 := new(leveldb.Batch)
want = 41
got, err = counter.DecInBatch(batch3)
if err != nil {
t.Fatal(err)
}
if got != want {
t.Errorf("got uint64 %v, want %v", got, want)
}
err = db.WriteBatch(batch3)
if err != nil {
t.Fatal(err)
}
got, err = counter.Get()
if err != nil {
t.Fatal(err)
}
if got != want {
t.Errorf("got uint64 %v, want %v", got, want)
}
}

390
shed/index.go Normal file

@@ -0,0 +1,390 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package shed
import (
"bytes"
"github.com/syndtr/goleveldb/leveldb"
"github.com/syndtr/goleveldb/leveldb/iterator"
)
// Item holds fields relevant to Swarm Chunk data and metadata.
// All information required for swarm storage and operations
// on that storage must be defined here.
// This structure is logically connected to swarm storage,
// the only part of this package that is not generalized,
// mostly for performance reasons.
//
// Item is a type that is used for retrieving, storing and encoding
// chunk data and metadata. It is passed as an argument to Index encoding
// functions, to the Get function and to the Put function.
// It is also returned with additional data from a Get function call
// and as the argument in the iterator function definition.
type Item struct {
Address []byte
Data []byte
AccessTimestamp int64
StoreTimestamp int64
BinID uint64
}
// Merge is a helper method to construct a new
// Item by filling up fields that have default values
// in one Item with values from another one.
func (i Item) Merge(i2 Item) (new Item) {
if i.Address == nil {
i.Address = i2.Address
}
if i.Data == nil {
i.Data = i2.Data
}
if i.AccessTimestamp == 0 {
i.AccessTimestamp = i2.AccessTimestamp
}
if i.StoreTimestamp == 0 {
i.StoreTimestamp = i2.StoreTimestamp
}
if i.BinID == 0 {
i.BinID = i2.BinID
}
return i
}
// Index represents a set of LevelDB key value pairs that have a common
// prefix. It holds functions for encoding and decoding keys and values
// to provide transparent actions on saved data which include:
// - getting a particular Item
// - saving a particular Item
// - iterating over sorted LevelDB keys
// It implements the IndexIteratorInterface.
type Index struct {
db *DB
prefix []byte
encodeKeyFunc func(fields Item) (key []byte, err error)
decodeKeyFunc func(key []byte) (e Item, err error)
encodeValueFunc func(fields Item) (value []byte, err error)
decodeValueFunc func(keyFields Item, value []byte) (e Item, err error)
}
// IndexFuncs structure defines functions for encoding and decoding
// LevelDB keys and values for a specific index.
type IndexFuncs struct {
EncodeKey func(fields Item) (key []byte, err error)
DecodeKey func(key []byte) (e Item, err error)
EncodeValue func(fields Item) (value []byte, err error)
DecodeValue func(keyFields Item, value []byte) (e Item, err error)
}
// NewIndex returns a new Index instance with defined name and
// encoding functions. The name must be unique and will be validated
// against the database schema for a key prefix byte.
func (db *DB) NewIndex(name string, funcs IndexFuncs) (f Index, err error) {
id, err := db.schemaIndexPrefix(name)
if err != nil {
return f, err
}
prefix := []byte{id}
return Index{
db: db,
prefix: prefix,
// This function adjusts Index LevelDB key
// by appending the provided index id byte.
// This is needed to avoid collisions between keys of different
// indexes as all index ids are unique.
encodeKeyFunc: func(e Item) (key []byte, err error) {
key, err = funcs.EncodeKey(e)
if err != nil {
return nil, err
}
return append(append(make([]byte, 0, len(key)+1), prefix...), key...), nil
},
// This function reverses the encodeKeyFunc constructed key
// to transparently work with index keys without their index ids.
// It assumes that index keys are prefixed with only one byte.
decodeKeyFunc: func(key []byte) (e Item, err error) {
return funcs.DecodeKey(key[1:])
},
encodeValueFunc: funcs.EncodeValue,
decodeValueFunc: funcs.DecodeValue,
}, nil
}
// Get accepts key fields represented as Item to retrieve a
// value from the index and return maximum available information
// from the index represented as another Item.
func (f Index) Get(keyFields Item) (out Item, err error) {
key, err := f.encodeKeyFunc(keyFields)
if err != nil {
return out, err
}
value, err := f.db.Get(key)
if err != nil {
return out, err
}
out, err = f.decodeValueFunc(keyFields, value)
if err != nil {
return out, err
}
return out.Merge(keyFields), nil
}
// Has accepts key fields represented as Item to check
// if this Item's encoded key is stored in
// the index.
func (f Index) Has(keyFields Item) (bool, error) {
key, err := f.encodeKeyFunc(keyFields)
if err != nil {
return false, err
}
return f.db.Has(key)
}
// Put accepts Item to encode information from it
// and save it to the database.
func (f Index) Put(i Item) (err error) {
key, err := f.encodeKeyFunc(i)
if err != nil {
return err
}
value, err := f.encodeValueFunc(i)
if err != nil {
return err
}
return f.db.Put(key, value)
}
// PutInBatch is the same as the Put method, but it
// saves the key/value pair to the batch instead of
// directly to the database.
func (f Index) PutInBatch(batch *leveldb.Batch, i Item) (err error) {
key, err := f.encodeKeyFunc(i)
if err != nil {
return err
}
value, err := f.encodeValueFunc(i)
if err != nil {
return err
}
batch.Put(key, value)
return nil
}
// Delete accepts Item to remove a key/value pair
// from the database based on its fields.
func (f Index) Delete(keyFields Item) (err error) {
key, err := f.encodeKeyFunc(keyFields)
if err != nil {
return err
}
return f.db.Delete(key)
}
// DeleteInBatch is the same as Delete, but the operation
// is performed on the batch instead of on the database.
func (f Index) DeleteInBatch(batch *leveldb.Batch, keyFields Item) (err error) {
key, err := f.encodeKeyFunc(keyFields)
if err != nil {
return err
}
batch.Delete(key)
return nil
}
// IndexIterFunc is a callback on every Item that is decoded
// by iterating on Index keys.
// By returning true for the stop variable, iteration will
// stop, and by returning an error, that error will be
// propagated to the calling iterator method on Index.
type IndexIterFunc func(item Item) (stop bool, err error)
// IterateOptions defines optional parameters for Iterate function.
type IterateOptions struct {
// StartFrom is the Item to start the iteration from.
StartFrom *Item
// If SkipStartFromItem is true, StartFrom item will not
// be iterated on.
SkipStartFromItem bool
// Iterate over items whose keys have a common prefix.
Prefix []byte
}
// Iterate iterates over keys of the Index.
// If IterateOptions is nil, the iteration is over all keys.
func (f Index) Iterate(fn IndexIterFunc, options *IterateOptions) (err error) {
if options == nil {
options = new(IterateOptions)
}
// construct a prefix with Index prefix and optional common key prefix
prefix := append(f.prefix, options.Prefix...)
// start from the prefix
startKey := prefix
if options.StartFrom != nil {
// start from the provided StartFrom Item key value
startKey, err = f.encodeKeyFunc(*options.StartFrom)
if err != nil {
return err
}
}
it := f.db.NewIterator()
defer it.Release()
// move the cursor to the start key
ok := it.Seek(startKey)
if !ok {
// stop iterator if seek has failed
return it.Error()
}
if options.SkipStartFromItem && bytes.Equal(startKey, it.Key()) {
// skip the start from Item if it is the first key
// and it is explicitly configured to skip it
ok = it.Next()
}
for ; ok; ok = it.Next() {
item, err := f.itemFromIterator(it, prefix)
if err != nil {
if err == leveldb.ErrNotFound {
break
}
return err
}
stop, err := fn(item)
if err != nil {
return err
}
if stop {
break
}
}
return it.Error()
}
// First returns the first item in the Index whose encoded key starts with a prefix.
// If the prefix is nil, the first element of the whole index is returned.
// If Index has no elements, a leveldb.ErrNotFound error is returned.
func (f Index) First(prefix []byte) (i Item, err error) {
it := f.db.NewIterator()
defer it.Release()
totalPrefix := append(f.prefix, prefix...)
it.Seek(totalPrefix)
return f.itemFromIterator(it, totalPrefix)
}
// itemFromIterator returns the Item from the current iterator position.
// If the complete encoded key does not start with totalPrefix,
// leveldb.ErrNotFound is returned. Value for totalPrefix must start with
// Index prefix.
func (f Index) itemFromIterator(it iterator.Iterator, totalPrefix []byte) (i Item, err error) {
key := it.Key()
if !bytes.HasPrefix(key, totalPrefix) {
return i, leveldb.ErrNotFound
}
// create a copy of the key byte slice so as not to share the leveldb underlying slice array
keyItem, err := f.decodeKeyFunc(append([]byte(nil), key...))
if err != nil {
return i, err
}
// create a copy of the value byte slice so as not to share the leveldb underlying slice array
valueItem, err := f.decodeValueFunc(keyItem, append([]byte(nil), it.Value()...))
if err != nil {
return i, err
}
return keyItem.Merge(valueItem), it.Error()
}
// Last returns the last item in the Index whose encoded key starts with a prefix.
// If the prefix is nil, the last element of the whole index is returned.
// If Index has no elements, a leveldb.ErrNotFound error is returned.
func (f Index) Last(prefix []byte) (i Item, err error) {
it := f.db.NewIterator()
defer it.Release()
// get the next prefix in line
// since leveldb iterator Seek seeks to the
// next key if the key that it seeks to is not found
// and by getting the previous key, the last one for the
// actual prefix is found
nextPrefix := incByteSlice(prefix)
l := len(prefix)
if l > 0 && nextPrefix != nil {
it.Seek(append(f.prefix, nextPrefix...))
it.Prev()
} else {
it.Last()
}
totalPrefix := append(f.prefix, prefix...)
return f.itemFromIterator(it, totalPrefix)
}
// incByteSlice returns a byte slice of the same size
// as the provided one, incremented by one in its
// total value. If all bytes in the provided slice are equal
// to 255, a nil slice is returned, indicating that the
// increment cannot happen within the same length.
func incByteSlice(b []byte) (next []byte) {
l := len(b)
next = make([]byte, l)
copy(next, b)
for i := l - 1; i >= 0; i-- {
if b[i] == 255 {
next[i] = 0
} else {
next[i] = b[i] + 1
return next
}
}
return nil
}
// Count returns the number of items in index.
func (f Index) Count() (count int, err error) {
it := f.db.NewIterator()
defer it.Release()
for ok := it.Seek(f.prefix); ok; ok = it.Next() {
key := it.Key()
if key[0] != f.prefix[0] {
break
}
count++
}
return count, it.Error()
}
// CountFrom returns the number of items in the index,
// counting from the key encoded from the provided Item.
func (f Index) CountFrom(start Item) (count int, err error) {
startKey, err := f.encodeKeyFunc(start)
if err != nil {
return 0, err
}
it := f.db.NewIterator()
defer it.Release()
for ok := it.Seek(startKey); ok; ok = it.Next() {
key := it.Key()
if key[0] != f.prefix[0] {
break
}
count++
}
return count, it.Error()
}
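A hedged sketch of defining and iterating a trivial Index where the chunk address is the key and the raw data is the value; the index name and path are illustrative:

package main

import (
    "fmt"
    "log"

    "github.com/ethersphere/swarm/shed"
)

func main() {
    db, err := shed.NewDB("/tmp/shed-index-sketch", "") // illustrative path
    if err != nil {
        log.Fatal(err)
    }
    defer db.Close()

    // The index name maps to a unique one-byte key prefix in the schema.
    idx, err := db.NewIndex("Address->Data", shed.IndexFuncs{
        EncodeKey:   func(f shed.Item) ([]byte, error) { return f.Address, nil },
        DecodeKey:   func(k []byte) (shed.Item, error) { return shed.Item{Address: k}, nil },
        EncodeValue: func(f shed.Item) ([]byte, error) { return f.Data, nil },
        DecodeValue: func(k shed.Item, v []byte) (shed.Item, error) { return shed.Item{Data: v}, nil },
    })
    if err != nil {
        log.Fatal(err)
    }
    if err := idx.Put(shed.Item{Address: []byte("addr-1"), Data: []byte("payload")}); err != nil {
        log.Fatal(err)
    }
    // Iterate visits keys in lexicographic order under the index prefix.
    err = idx.Iterate(func(item shed.Item) (stop bool, err error) {
        fmt.Printf("%s -> %s\n", item.Address, item.Data)
        return false, nil
    }, nil)
    if err != nil {
        log.Fatal(err)
    }
}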

962
shed/index_test.go Normal file

@@ -0,0 +1,962 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package shed
import (
"bytes"
"encoding/binary"
"fmt"
"sort"
"testing"
"time"
"github.com/syndtr/goleveldb/leveldb"
)
// Index functions for the index that is used in tests in this file.
var retrievalIndexFuncs = IndexFuncs{
EncodeKey: func(fields Item) (key []byte, err error) {
return fields.Address, nil
},
DecodeKey: func(key []byte) (e Item, err error) {
e.Address = key
return e, nil
},
EncodeValue: func(fields Item) (value []byte, err error) {
b := make([]byte, 8)
binary.BigEndian.PutUint64(b, uint64(fields.StoreTimestamp))
value = append(b, fields.Data...)
return value, nil
},
DecodeValue: func(keyItem Item, value []byte) (e Item, err error) {
e.StoreTimestamp = int64(binary.BigEndian.Uint64(value[:8]))
e.Data = value[8:]
return e, nil
},
}
// TestIndex validates put, get, has and delete functions of the Index implementation.
func TestIndex(t *testing.T) {
db, cleanupFunc := newTestDB(t)
defer cleanupFunc()
index, err := db.NewIndex("retrieval", retrievalIndexFuncs)
if err != nil {
t.Fatal(err)
}
t.Run("put", func(t *testing.T) {
want := Item{
Address: []byte("put-hash"),
Data: []byte("DATA"),
StoreTimestamp: time.Now().UTC().UnixNano(),
}
err := index.Put(want)
if err != nil {
t.Fatal(err)
}
got, err := index.Get(Item{
Address: want.Address,
})
if err != nil {
t.Fatal(err)
}
checkItem(t, got, want)
t.Run("overwrite", func(t *testing.T) {
want := Item{
Address: []byte("put-hash"),
Data: []byte("New DATA"),
StoreTimestamp: time.Now().UTC().UnixNano(),
}
err = index.Put(want)
if err != nil {
t.Fatal(err)
}
got, err := index.Get(Item{
Address: want.Address,
})
if err != nil {
t.Fatal(err)
}
checkItem(t, got, want)
})
})
t.Run("put in batch", func(t *testing.T) {
want := Item{
Address: []byte("put-in-batch-hash"),
Data: []byte("DATA"),
StoreTimestamp: time.Now().UTC().UnixNano(),
}
batch := new(leveldb.Batch)
index.PutInBatch(batch, want)
err := db.WriteBatch(batch)
if err != nil {
t.Fatal(err)
}
got, err := index.Get(Item{
Address: want.Address,
})
if err != nil {
t.Fatal(err)
}
checkItem(t, got, want)
t.Run("overwrite", func(t *testing.T) {
want := Item{
Address: []byte("put-in-batch-hash"),
Data: []byte("New DATA"),
StoreTimestamp: time.Now().UTC().UnixNano(),
}
batch := new(leveldb.Batch)
index.PutInBatch(batch, want)
err = db.WriteBatch(batch)
if err != nil {
t.Fatal(err)
}
got, err := index.Get(Item{
Address: want.Address,
})
if err != nil {
t.Fatal(err)
}
checkItem(t, got, want)
})
})
t.Run("put in batch twice", func(t *testing.T) {
// ensure that the last item of items with the same db keys
// is actually saved
batch := new(leveldb.Batch)
address := []byte("put-in-batch-twice-hash")
// put the first item
index.PutInBatch(batch, Item{
Address: address,
Data: []byte("DATA"),
StoreTimestamp: time.Now().UTC().UnixNano(),
})
want := Item{
Address: address,
Data: []byte("New DATA"),
StoreTimestamp: time.Now().UTC().UnixNano(),
}
// then put the item that will produce the same key
// but different value in the database
index.PutInBatch(batch, want)
err = db.WriteBatch(batch)
if err != nil {
t.Fatal(err)
}
got, err := index.Get(Item{
Address: address,
})
if err != nil {
t.Fatal(err)
}
checkItem(t, got, want)
})
t.Run("has", func(t *testing.T) {
want := Item{
Address: []byte("has-hash"),
Data: []byte("DATA"),
StoreTimestamp: time.Now().UTC().UnixNano(),
}
dontWant := Item{
Address: []byte("do-not-has-hash"),
Data: []byte("DATA"),
StoreTimestamp: time.Now().UTC().UnixNano(),
}
err := index.Put(want)
if err != nil {
t.Fatal(err)
}
has, err := index.Has(want)
if err != nil {
t.Fatal(err)
}
if !has {
t.Error("item is not found")
}
has, err = index.Has(dontWant)
if err != nil {
t.Fatal(err)
}
if has {
t.Error("unwanted item is found")
}
})
t.Run("delete", func(t *testing.T) {
want := Item{
Address: []byte("delete-hash"),
Data: []byte("DATA"),
StoreTimestamp: time.Now().UTC().UnixNano(),
}
err := index.Put(want)
if err != nil {
t.Fatal(err)
}
got, err := index.Get(Item{
Address: want.Address,
})
if err != nil {
t.Fatal(err)
}
checkItem(t, got, want)
err = index.Delete(Item{
Address: want.Address,
})
if err != nil {
t.Fatal(err)
}
wantErr := leveldb.ErrNotFound
got, err = index.Get(Item{
Address: want.Address,
})
if err != wantErr {
t.Fatalf("got error %v, want %v", err, wantErr)
}
})
t.Run("delete in batch", func(t *testing.T) {
want := Item{
Address: []byte("delete-in-batch-hash"),
Data: []byte("DATA"),
StoreTimestamp: time.Now().UTC().UnixNano(),
}
err := index.Put(want)
if err != nil {
t.Fatal(err)
}
got, err := index.Get(Item{
Address: want.Address,
})
if err != nil {
t.Fatal(err)
}
checkItem(t, got, want)
batch := new(leveldb.Batch)
index.DeleteInBatch(batch, Item{
Address: want.Address,
})
err = db.WriteBatch(batch)
if err != nil {
t.Fatal(err)
}
wantErr := leveldb.ErrNotFound
got, err = index.Get(Item{
Address: want.Address,
})
if err != wantErr {
t.Fatalf("got error %v, want %v", err, wantErr)
}
})
}
// TestIndex_Iterate validates the index Iterate
// function for correctness.
func TestIndex_Iterate(t *testing.T) {
db, cleanupFunc := newTestDB(t)
defer cleanupFunc()
index, err := db.NewIndex("retrieval", retrievalIndexFuncs)
if err != nil {
t.Fatal(err)
}
items := []Item{
{
Address: []byte("iterate-hash-01"),
Data: []byte("data80"),
},
{
Address: []byte("iterate-hash-03"),
Data: []byte("data22"),
},
{
Address: []byte("iterate-hash-05"),
Data: []byte("data41"),
},
{
Address: []byte("iterate-hash-02"),
Data: []byte("data84"),
},
{
Address: []byte("iterate-hash-06"),
Data: []byte("data1"),
},
}
batch := new(leveldb.Batch)
for _, i := range items {
index.PutInBatch(batch, i)
}
err = db.WriteBatch(batch)
if err != nil {
t.Fatal(err)
}
item04 := Item{
Address: []byte("iterate-hash-04"),
Data: []byte("data0"),
}
err = index.Put(item04)
if err != nil {
t.Fatal(err)
}
items = append(items, item04)
sort.SliceStable(items, func(i, j int) bool {
return bytes.Compare(items[i].Address, items[j].Address) < 0
})
t.Run("all", func(t *testing.T) {
var i int
err := index.Iterate(func(item Item) (stop bool, err error) {
if i > len(items)-1 {
return true, fmt.Errorf("got unexpected index item: %#v", item)
}
want := items[i]
checkItem(t, item, want)
i++
return false, nil
}, nil)
if err != nil {
t.Fatal(err)
}
})
t.Run("start from", func(t *testing.T) {
startIndex := 2
i := startIndex
err := index.Iterate(func(item Item) (stop bool, err error) {
if i > len(items)-1 {
return true, fmt.Errorf("got unexpected index item: %#v", item)
}
want := items[i]
checkItem(t, item, want)
i++
return false, nil
}, &IterateOptions{
StartFrom: &items[startIndex],
})
if err != nil {
t.Fatal(err)
}
})
t.Run("skip start from", func(t *testing.T) {
startIndex := 2
i := startIndex + 1
err := index.Iterate(func(item Item) (stop bool, err error) {
if i > len(items)-1 {
return true, fmt.Errorf("got unexpected index item: %#v", item)
}
want := items[i]
checkItem(t, item, want)
i++
return false, nil
}, &IterateOptions{
StartFrom: &items[startIndex],
SkipStartFromItem: true,
})
if err != nil {
t.Fatal(err)
}
})
t.Run("stop", func(t *testing.T) {
var i int
stopIndex := 3
var count int
err := index.Iterate(func(item Item) (stop bool, err error) {
if i > len(items)-1 {
return true, fmt.Errorf("got unexpected index item: %#v", item)
}
want := items[i]
checkItem(t, item, want)
count++
if i == stopIndex {
return true, nil
}
i++
return false, nil
}, nil)
if err != nil {
t.Fatal(err)
}
wantItemsCount := stopIndex + 1
if count != wantItemsCount {
t.Errorf("got %v items, expected %v", count, wantItemsCount)
}
})
t.Run("no overflow", func(t *testing.T) {
secondIndex, err := db.NewIndex("second-index", retrievalIndexFuncs)
if err != nil {
t.Fatal(err)
}
secondItem := Item{
Address: []byte("iterate-hash-10"),
Data: []byte("data-second"),
}
err = secondIndex.Put(secondItem)
if err != nil {
t.Fatal(err)
}
var i int
err = index.Iterate(func(item Item) (stop bool, err error) {
if i > len(items)-1 {
return true, fmt.Errorf("got unexpected index item: %#v", item)
}
want := items[i]
checkItem(t, item, want)
i++
return false, nil
}, nil)
if err != nil {
t.Fatal(err)
}
i = 0
err = secondIndex.Iterate(func(item Item) (stop bool, err error) {
if i > 1 {
return true, fmt.Errorf("got unexpected index item: %#v", item)
}
checkItem(t, item, secondItem)
i++
return false, nil
}, nil)
if err != nil {
t.Fatal(err)
}
})
}
// TestIndex_Iterate_withPrefix validates the index Iterate
// function for correctness when a Prefix is set in IterateOptions.
func TestIndex_Iterate_withPrefix(t *testing.T) {
db, cleanupFunc := newTestDB(t)
defer cleanupFunc()
index, err := db.NewIndex("retrieval", retrievalIndexFuncs)
if err != nil {
t.Fatal(err)
}
allItems := []Item{
{Address: []byte("want-hash-00"), Data: []byte("data80")},
{Address: []byte("skip-hash-01"), Data: []byte("data81")},
{Address: []byte("skip-hash-02"), Data: []byte("data82")},
{Address: []byte("skip-hash-03"), Data: []byte("data83")},
{Address: []byte("want-hash-04"), Data: []byte("data84")},
{Address: []byte("want-hash-05"), Data: []byte("data85")},
{Address: []byte("want-hash-06"), Data: []byte("data86")},
{Address: []byte("want-hash-07"), Data: []byte("data87")},
{Address: []byte("want-hash-08"), Data: []byte("data88")},
{Address: []byte("want-hash-09"), Data: []byte("data89")},
{Address: []byte("skip-hash-10"), Data: []byte("data90")},
}
batch := new(leveldb.Batch)
for _, i := range allItems {
index.PutInBatch(batch, i)
}
err = db.WriteBatch(batch)
if err != nil {
t.Fatal(err)
}
prefix := []byte("want")
items := make([]Item, 0)
for _, item := range allItems {
if bytes.HasPrefix(item.Address, prefix) {
items = append(items, item)
}
}
sort.SliceStable(items, func(i, j int) bool {
return bytes.Compare(items[i].Address, items[j].Address) < 0
})
t.Run("with prefix", func(t *testing.T) {
var i int
err := index.Iterate(func(item Item) (stop bool, err error) {
if i > len(items)-1 {
return true, fmt.Errorf("got unexpected index item: %#v", item)
}
want := items[i]
checkItem(t, item, want)
i++
return false, nil
}, &IterateOptions{
Prefix: prefix,
})
if err != nil {
t.Fatal(err)
}
if i != len(items) {
t.Errorf("got %v items, want %v", i, len(items))
}
})
t.Run("with prefix and start from", func(t *testing.T) {
startIndex := 2
var count int
i := startIndex
err := index.Iterate(func(item Item) (stop bool, err error) {
if i > len(items)-1 {
return true, fmt.Errorf("got unexpected index item: %#v", item)
}
want := items[i]
checkItem(t, item, want)
i++
count++
return false, nil
}, &IterateOptions{
StartFrom: &items[startIndex],
Prefix: prefix,
})
if err != nil {
t.Fatal(err)
}
wantCount := len(items) - startIndex
if count != wantCount {
t.Errorf("got %v items, want %v", count, wantCount)
}
})
t.Run("with prefix and skip start from", func(t *testing.T) {
startIndex := 2
var count int
i := startIndex + 1
err := index.Iterate(func(item Item) (stop bool, err error) {
if i > len(items)-1 {
return true, fmt.Errorf("got unexpected index item: %#v", item)
}
want := items[i]
checkItem(t, item, want)
i++
count++
return false, nil
}, &IterateOptions{
StartFrom: &items[startIndex],
SkipStartFromItem: true,
Prefix: prefix,
})
if err != nil {
t.Fatal(err)
}
wantCount := len(items) - startIndex - 1
if count != wantCount {
t.Errorf("got %v items, want %v", count, wantCount)
}
})
t.Run("stop", func(t *testing.T) {
var i int
stopIndex := 3
var count int
err := index.Iterate(func(item Item) (stop bool, err error) {
if i > len(items)-1 {
return true, fmt.Errorf("got unexpected index item: %#v", item)
}
want := items[i]
checkItem(t, item, want)
count++
if i == stopIndex {
return true, nil
}
i++
return false, nil
}, &IterateOptions{
Prefix: prefix,
})
if err != nil {
t.Fatal(err)
}
wantItemsCount := stopIndex + 1
if count != wantItemsCount {
t.Errorf("got %v items, expected %v", count, wantItemsCount)
}
})
t.Run("no overflow", func(t *testing.T) {
secondIndex, err := db.NewIndex("second-index", retrievalIndexFuncs)
if err != nil {
t.Fatal(err)
}
secondItem := Item{
Address: []byte("iterate-hash-10"),
Data: []byte("data-second"),
}
err = secondIndex.Put(secondItem)
if err != nil {
t.Fatal(err)
}
var i int
err = index.Iterate(func(item Item) (stop bool, err error) {
if i > len(items)-1 {
return true, fmt.Errorf("got unexpected index item: %#v", item)
}
want := items[i]
checkItem(t, item, want)
i++
return false, nil
}, &IterateOptions{
Prefix: prefix,
})
if err != nil {
t.Fatal(err)
}
if i != len(items) {
t.Errorf("got %v items, want %v", i, len(items))
}
})
}
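// The subtests above exercise Iterate with a Prefix option assertion by
// assertion; the function below is a hedged usage sketch, not part of the
// original file, of how a caller outside tests might drain an index for a
// given address prefix. It only uses the Iterate and IterateOptions API
// already shown in this file; the helper name is illustrative.
func collectItemsWithPrefix(index Index, prefix []byte) (items []Item, err error) {
err = index.Iterate(func(item Item) (stop bool, err error) {
// Keep every item; the prefix bound is enforced by IterateOptions.
items = append(items, item)
return false, nil
}, &IterateOptions{
Prefix: prefix,
})
return items, err
}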
// TestIndex_count tests if Index.Count and Index.CountFrom
// return the correct number of items.
func TestIndex_count(t *testing.T) {
db, cleanupFunc := newTestDB(t)
defer cleanupFunc()
index, err := db.NewIndex("retrieval", retrievalIndexFuncs)
if err != nil {
t.Fatal(err)
}
items := []Item{
{
Address: []byte("iterate-hash-01"),
Data: []byte("data80"),
},
{
Address: []byte("iterate-hash-02"),
Data: []byte("data84"),
},
{
Address: []byte("iterate-hash-03"),
Data: []byte("data22"),
},
{
Address: []byte("iterate-hash-04"),
Data: []byte("data41"),
},
{
Address: []byte("iterate-hash-05"),
Data: []byte("data1"),
},
}
batch := new(leveldb.Batch)
for _, i := range items {
index.PutInBatch(batch, i)
}
err = db.WriteBatch(batch)
if err != nil {
t.Fatal(err)
}
t.Run("Count", func(t *testing.T) {
got, err := index.Count()
if err != nil {
t.Fatal(err)
}
want := len(items)
if got != want {
t.Errorf("got %v items count, want %v", got, want)
}
})
t.Run("CountFrom", func(t *testing.T) {
got, err := index.CountFrom(Item{
Address: items[1].Address,
})
if err != nil {
t.Fatal(err)
}
want := len(items) - 1
if got != want {
t.Errorf("got %v items count, want %v", got, want)
}
})
// update the index with another item
t.Run("add item", func(t *testing.T) {
item06 := Item{
Address: []byte("iterate-hash-06"),
Data: []byte("data0"),
}
err = index.Put(item06)
if err != nil {
t.Fatal(err)
}
count := len(items) + 1
t.Run("Count", func(t *testing.T) {
got, err := index.Count()
if err != nil {
t.Fatal(err)
}
want := count
if got != want {
t.Errorf("got %v items count, want %v", got, want)
}
})
t.Run("CountFrom", func(t *testing.T) {
got, err := index.CountFrom(Item{
Address: items[1].Address,
})
if err != nil {
t.Fatal(err)
}
want := count - 1
if got != want {
t.Errorf("got %v items count, want %v", got, want)
}
})
})
// delete some items
t.Run("delete items", func(t *testing.T) {
deleteCount := 3
for _, item := range items[:deleteCount] {
err := index.Delete(item)
if err != nil {
t.Fatal(err)
}
}
count := len(items) + 1 - deleteCount
t.Run("Count", func(t *testing.T) {
got, err := index.Count()
if err != nil {
t.Fatal(err)
}
want := count
if got != want {
t.Errorf("got %v items count, want %v", got, want)
}
})
t.Run("CountFrom", func(t *testing.T) {
got, err := index.CountFrom(Item{
Address: items[deleteCount+1].Address,
})
if err != nil {
t.Fatal(err)
}
want := count - 1
if got != want {
t.Errorf("got %v items count, want %v", got, want)
}
})
})
}
// checkItem is a test helper function that checks whether two Index items are the same.
func checkItem(t *testing.T, got, want Item) {
t.Helper()
if !bytes.Equal(got.Address, want.Address) {
t.Errorf("got hash %q, expected %q", string(got.Address), string(want.Address))
}
if !bytes.Equal(got.Data, want.Data) {
t.Errorf("got data %q, expected %q", string(got.Data), string(want.Data))
}
if got.StoreTimestamp != want.StoreTimestamp {
t.Errorf("got store timestamp %v, expected %v", got.StoreTimestamp, want.StoreTimestamp)
}
if got.AccessTimestamp != want.AccessTimestamp {
t.Errorf("got access timestamp %v, expected %v", got.AccessTimestamp, want.AccessTimestamp)
}
}
// TestIndex_firstAndLast validates that the index First and Last methods
// return the expected results for the provided prefix.
func TestIndex_firstAndLast(t *testing.T) {
db, cleanupFunc := newTestDB(t)
defer cleanupFunc()
index, err := db.NewIndex("retrieval", retrievalIndexFuncs)
if err != nil {
t.Fatal(err)
}
addrs := [][]byte{
{0, 0, 0, 0, 0},
{0, 1},
{0, 1, 0, 0, 0},
{0, 1, 0, 0, 1},
{0, 1, 0, 0, 2},
{0, 2, 0, 0, 1},
{0, 4, 0, 0, 0},
{0, 10, 0, 0, 10},
{0, 10, 0, 0, 11},
{0, 10, 0, 0, 20},
{1, 32, 255, 0, 1},
{1, 32, 255, 0, 2},
{1, 32, 255, 0, 3},
{255, 255, 255, 255, 32},
{255, 255, 255, 255, 64},
{255, 255, 255, 255, 255},
}
// ensure that the addresses are sorted for
// validation of nil prefix
sort.Slice(addrs, func(i, j int) (less bool) {
return bytes.Compare(addrs[i], addrs[j]) == -1
})
batch := new(leveldb.Batch)
for _, addr := range addrs {
index.PutInBatch(batch, Item{
Address: addr,
})
}
err = db.WriteBatch(batch)
if err != nil {
t.Fatal(err)
}
for _, tc := range []struct {
prefix []byte
first []byte
last []byte
err error
}{
{
prefix: nil,
first: addrs[0],
last: addrs[len(addrs)-1],
},
{
prefix: []byte{0, 0},
first: []byte{0, 0, 0, 0, 0},
last: []byte{0, 0, 0, 0, 0},
},
{
prefix: []byte{0},
first: []byte{0, 0, 0, 0, 0},
last: []byte{0, 10, 0, 0, 20},
},
{
prefix: []byte{0, 1},
first: []byte{0, 1},
last: []byte{0, 1, 0, 0, 2},
},
{
prefix: []byte{0, 10},
first: []byte{0, 10, 0, 0, 10},
last: []byte{0, 10, 0, 0, 20},
},
{
prefix: []byte{1, 32, 255},
first: []byte{1, 32, 255, 0, 1},
last: []byte{1, 32, 255, 0, 3},
},
{
prefix: []byte{255},
first: []byte{255, 255, 255, 255, 32},
last: []byte{255, 255, 255, 255, 255},
},
{
prefix: []byte{255, 255, 255, 255, 255},
first: []byte{255, 255, 255, 255, 255},
last: []byte{255, 255, 255, 255, 255},
},
{
prefix: []byte{0, 3},
err: leveldb.ErrNotFound,
},
{
prefix: []byte{222},
err: leveldb.ErrNotFound,
},
} {
got, err := index.Last(tc.prefix)
if tc.err != err {
t.Errorf("got error %v for Last with prefix %v, want %v", err, tc.prefix, tc.err)
} else {
if !bytes.Equal(got.Address, tc.last) {
t.Errorf("got %v for Last with prefix %v, want %v", got.Address, tc.prefix, tc.last)
}
}
got, err = index.First(tc.prefix)
if tc.err != err {
t.Errorf("got error %v for First with prefix %v, want %v", err, tc.prefix, tc.err)
} else {
if !bytes.Equal(got.Address, tc.first) {
t.Errorf("got %v for First with prefix %v, want %v", got.Address, tc.prefix, tc.first)
}
}
}
}
// TestIncByteSlice validates the values returned by the incByteSlice function.
func TestIncByteSlice(t *testing.T) {
for _, tc := range []struct {
b []byte
want []byte
}{
{b: nil, want: nil},
{b: []byte{}, want: nil},
{b: []byte{0}, want: []byte{1}},
{b: []byte{42}, want: []byte{43}},
{b: []byte{255}, want: nil},
{b: []byte{0, 0}, want: []byte{0, 1}},
{b: []byte{1, 0}, want: []byte{1, 1}},
{b: []byte{1, 255}, want: []byte{2, 0}},
{b: []byte{255, 255}, want: nil},
{b: []byte{32, 0, 255}, want: []byte{32, 1, 0}},
} {
got := incByteSlice(tc.b)
if !bytes.Equal(got, tc.want) {
t.Errorf("got %v, want %v", got, tc.want)
}
}
}
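// A hedged note, not part of the original file: the cases above suggest that
// incByteSlice produces the smallest byte slice strictly greater than every
// key sharing the given prefix, so it can serve as an exclusive upper bound
// for a LevelDB range iterator, with nil (an all-0xff input) meaning "no
// upper bound". The sketch below only illustrates that assumed use; the
// helper name is illustrative.
func prefixRangeSketch(prefix []byte) (start, limit []byte) {
// start is inclusive, limit is exclusive; a nil limit iterates to the end.
return prefix, incByteSlice(prefix)
}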

134
shed/schema.go Normal file
View File

@@ -0,0 +1,134 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package shed
import (
"encoding/json"
"errors"
"fmt"
)
var (
// LevelDB key value for storing the schema.
keySchema = []byte{0}
// LevelDB key prefix for all fields.
// LevelDB keys will be constructed by appending name values to this prefix.
keyPrefixFields byte = 1
// LevelDB key prefix from which indexing keys start.
// Every index has its own key prefix and this value defines the first one.
keyPrefixIndexStart byte = 2 // Q: or maybe a higher number like 7, to have more space for potential specific prefixes
)
// schema is used to serialize known database structure information.
type schema struct {
Fields map[string]fieldSpec `json:"fields"` // keys are field names
Indexes map[byte]indexSpec `json:"indexes"` // keys are index prefix bytes
}
// fieldSpec holds information about a particular field.
// It does not need Name field as it is contained in the
// schema.Field map key.
type fieldSpec struct {
Type string `json:"type"`
}
// indexSpec holds information about a particular index.
// It does not contain an index type, as indexes do not have types.
type indexSpec struct {
Name string `json:"name"`
}
// schemaFieldKey retrieves the complete LevelDB key for
// a particular field from the schema definition.
func (db *DB) schemaFieldKey(name, fieldType string) (key []byte, err error) {
if name == "" {
return nil, errors.New("field name can not be blank")
}
if fieldType == "" {
return nil, errors.New("field type can not be blank")
}
s, err := db.getSchema()
if err != nil {
return nil, err
}
var found bool
for n, f := range s.Fields {
if n == name {
if f.Type != fieldType {
return nil, fmt.Errorf("field %q of type %q stored as %q in db", name, fieldType, f.Type)
}
found = true
break
}
}
if !found {
s.Fields[name] = fieldSpec{
Type: fieldType,
}
err := db.putSchema(s)
if err != nil {
return nil, err
}
}
return append([]byte{keyPrefixFields}, []byte(name)...), nil
}
// schemaIndexPrefix retrieves the complete LevelDB prefix for
// a particular index.
func (db *DB) schemaIndexPrefix(name string) (id byte, err error) {
if name == "" {
return 0, errors.New("index name can not be blank")
}
s, err := db.getSchema()
if err != nil {
return 0, err
}
nextID := keyPrefixIndexStart
for i, f := range s.Indexes {
if i >= nextID {
nextID = i + 1
}
if f.Name == name {
return i, nil
}
}
id = nextID
s.Indexes[id] = indexSpec{
Name: name,
}
return id, db.putSchema(s)
}
// getSchema retrieves the complete schema from
// the database.
func (db *DB) getSchema() (s schema, err error) {
b, err := db.Get(keySchema)
if err != nil {
return s, err
}
err = json.Unmarshal(b, &s)
return s, err
}
// putSchema stores the complete schema to
// the database.
func (db *DB) putSchema(s schema) (err error) {
b, err := json.Marshal(s)
if err != nil {
return err
}
return db.Put(keySchema, b)
}
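// A hedged usage sketch, not part of the original file: it ties the helpers
// above together, showing how a field key and an index prefix are derived on
// a fresh database. The field name "size", the type string "uint64", the
// index name "retrieval" and the helper name are illustrative assumptions.
func schemaKeysSketch(db *DB) error {
// A field key is the fields prefix byte followed by the field name,
// e.g. []byte{keyPrefixFields, 's', 'i', 'z', 'e'}.
fieldKey, err := db.schemaFieldKey("size", "uint64")
if err != nil {
return err
}
_ = fieldKey
// Each index owns one prefix byte, allocated sequentially starting at
// keyPrefixIndexStart, so the first index on a fresh database gets 2.
prefix, err := db.schemaIndexPrefix("retrieval")
if err != nil {
return err
}
_ = prefix
return nil
}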

126
shed/schema_test.go Normal file
View File

@@ -0,0 +1,126 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package shed
import (
"bytes"
"testing"
)
// TestDB_schemaFieldKey validates correctness of schemaFieldKey.
func TestDB_schemaFieldKey(t *testing.T) {
db, cleanupFunc := newTestDB(t)
defer cleanupFunc()
t.Run("empty name or type", func(t *testing.T) {
_, err := db.schemaFieldKey("", "")
if err == nil {
t.Errorf("error not returned, but expected")
}
_, err = db.schemaFieldKey("", "type")
if err == nil {
t.Errorf("error not returned, but expected")
}
_, err = db.schemaFieldKey("test", "")
if err == nil {
t.Errorf("error not returned, but expected")
}
})
t.Run("same field", func(t *testing.T) {
key1, err := db.schemaFieldKey("test", "undefined")
if err != nil {
t.Fatal(err)
}
key2, err := db.schemaFieldKey("test", "undefined")
if err != nil {
t.Fatal(err)
}
if !bytes.Equal(key1, key2) {
t.Errorf("schema keys for the same field name are not the same: %q, %q", string(key1), string(key2))
}
})
t.Run("different fields", func(t *testing.T) {
key1, err := db.schemaFieldKey("test1", "undefined")
if err != nil {
t.Fatal(err)
}
key2, err := db.schemaFieldKey("test2", "undefined")
if err != nil {
t.Fatal(err)
}
if bytes.Equal(key1, key2) {
t.Error("schema keys for the same field name are the same, but must not be")
}
})
t.Run("same field name different types", func(t *testing.T) {
_, err := db.schemaFieldKey("the-field", "one-type")
if err != nil {
t.Fatal(err)
}
_, err = db.schemaFieldKey("the-field", "another-type")
if err == nil {
t.Errorf("error not returned, but expected")
}
})
}
// TestDB_schemaIndexPrefix validates correctness of schemaIndexPrefix.
func TestDB_schemaIndexPrefix(t *testing.T) {
db, cleanupFunc := newTestDB(t)
defer cleanupFunc()
t.Run("same name", func(t *testing.T) {
id1, err := db.schemaIndexPrefix("test")
if err != nil {
t.Fatal(err)
}
id2, err := db.schemaIndexPrefix("test")
if err != nil {
t.Fatal(err)
}
if id1 != id2 {
t.Errorf("schema keys for the same field name are not the same: %v, %v", id1, id2)
}
})
t.Run("different names", func(t *testing.T) {
id1, err := db.schemaIndexPrefix("test1")
if err != nil {
t.Fatal(err)
}
id2, err := db.schemaIndexPrefix("test2")
if err != nil {
t.Fatal(err)
}
if id1 == id2 {
t.Error("schema ids for the same index name are the same, but must not be")
}
})
}

146
shed/vector_uint64.go Normal file
View File

@@ -0,0 +1,146 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package shed
import (
"encoding/binary"
"github.com/syndtr/goleveldb/leveldb"
)
// Uint64Vector provides a way to have multiple counters in the database.
// It transparently encodes uint64 values to bytes.
type Uint64Vector struct {
db *DB
key []byte
}
// NewUint64Vector returns a new Uint64Vector.
// It validates its name and type against the database schema.
func (db *DB) NewUint64Vector(name string) (f Uint64Vector, err error) {
key, err := db.schemaFieldKey(name, "vector-uint64")
if err != nil {
return f, err
}
return Uint64Vector{
db: db,
key: key,
}, nil
}
// Get retrieves a uint64 value at index i from the database.
// If the value is not found in the database, 0 is returned
// with no error.
func (f Uint64Vector) Get(i uint64) (val uint64, err error) {
b, err := f.db.Get(f.indexKey(i))
if err != nil {
if err == leveldb.ErrNotFound {
return 0, nil
}
return 0, err
}
return binary.BigEndian.Uint64(b), nil
}
// Put encodes a uint64 value at index i and stores it in the database.
func (f Uint64Vector) Put(i, val uint64) (err error) {
return f.db.Put(f.indexKey(i), encodeUint64(val))
}
// PutInBatch stores a uint64 value at index i in a batch
// that can be saved later in the database.
func (f Uint64Vector) PutInBatch(batch *leveldb.Batch, i, val uint64) {
batch.Put(f.indexKey(i), encodeUint64(val))
}
// Inc increments a uint64 value at index i in the database.
// This operation is not goroutine safe.
func (f Uint64Vector) Inc(i uint64) (val uint64, err error) {
val, err = f.Get(i)
if err != nil {
if err == leveldb.ErrNotFound {
val = 0
} else {
return 0, err
}
}
val++
return val, f.Put(i, val)
}
// IncInBatch increments a uint64 value at index i in the batch
// by retrieving a value from the database, not the same batch.
// This operation is not goroutine safe.
func (f Uint64Vector) IncInBatch(batch *leveldb.Batch, i uint64) (val uint64, err error) {
val, err = f.Get(i)
if err != nil {
if err == leveldb.ErrNotFound {
val = 0
} else {
return 0, err
}
}
val++
f.PutInBatch(batch, i, val)
return val, nil
}
// Dec decrements a uint64 value at index i in the database.
// This operation is not goroutine safe.
// The value is protected from underflow; decrementing a zero value leaves it at zero.
func (f Uint64Vector) Dec(i uint64) (val uint64, err error) {
val, err = f.Get(i)
if err != nil {
if err == leveldb.ErrNotFound {
val = 0
} else {
return 0, err
}
}
if val != 0 {
val--
}
return val, f.Put(i, val)
}
// DecInBatch decrements a uint64 value at index i in the batch
// by retrieving a value from the database, not the same batch.
// This operation is not goroutine safe.
// The value is protected from underflow; decrementing a zero value leaves it at zero.
func (f Uint64Vector) DecInBatch(batch *leveldb.Batch, i uint64) (val uint64, err error) {
val, err = f.Get(i)
if err != nil {
if err == leveldb.ErrNotFound {
val = 0
} else {
return 0, err
}
}
if val != 0 {
val--
}
f.PutInBatch(batch, i, val)
return val, nil
}
// indexKey concatenates field prefix and vector index
// returning a unique database key for a specific vector element.
func (f Uint64Vector) indexKey(i uint64) (key []byte) {
b := make([]byte, 8)
binary.BigEndian.PutUint64(b, i)
return append(f.key, b...)
}
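// A hedged usage sketch, not part of the original file: how a caller might
// drive the Uint64Vector API above. The vector name "bins", the index 5 and
// the helper name are illustrative assumptions.
func uint64VectorUsageSketch(db *DB) (uint64, error) {
bins, err := db.NewUint64Vector("bins")
if err != nil {
return 0, err
}
// Store an initial value at index 5, then increment it; Get on a missing
// index would have returned 0 with no error.
if err := bins.Put(5, 41); err != nil {
return 0, err
}
return bins.Inc(5) // returns 42 and persists it
}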

312
shed/vector_uint64_test.go Normal file
View File

@@ -0,0 +1,312 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package shed
import (
"testing"
"github.com/syndtr/goleveldb/leveldb"
)
// TestUint64Vector validates put and get operations
// of the Uint64Vector.
func TestUint64Vector(t *testing.T) {
db, cleanupFunc := newTestDB(t)
defer cleanupFunc()
bins, err := db.NewUint64Vector("bins")
if err != nil {
t.Fatal(err)
}
t.Run("get empty", func(t *testing.T) {
got, err := bins.Get(0)
if err != nil {
t.Fatal(err)
}
var want uint64
if got != want {
t.Errorf("got uint64 %v, want %v", got, want)
}
})
t.Run("put", func(t *testing.T) {
for _, index := range []uint64{0, 1, 2, 5, 100} {
var want uint64 = 42 + index
err = bins.Put(index, want)
if err != nil {
t.Fatal(err)
}
got, err := bins.Get(index)
if err != nil {
t.Fatal(err)
}
if got != want {
t.Errorf("got %v uint64 %v, want %v", index, got, want)
}
t.Run("overwrite", func(t *testing.T) {
var want uint64 = 84 + index
err = bins.Put(index, want)
if err != nil {
t.Fatal(err)
}
got, err := bins.Get(index)
if err != nil {
t.Fatal(err)
}
if got != want {
t.Errorf("got %v uint64 %v, want %v", index, got, want)
}
})
}
})
t.Run("put in batch", func(t *testing.T) {
for _, index := range []uint64{0, 1, 2, 3, 5, 10} {
batch := new(leveldb.Batch)
var want uint64 = 43 + index
bins.PutInBatch(batch, index, want)
err = db.WriteBatch(batch)
if err != nil {
t.Fatal(err)
}
got, err := bins.Get(index)
if err != nil {
t.Fatal(err)
}
if got != want {
t.Errorf("got %v uint64 %v, want %v", index, got, want)
}
t.Run("overwrite", func(t *testing.T) {
batch := new(leveldb.Batch)
var want uint64 = 85 + index
bins.PutInBatch(batch, index, want)
err = db.WriteBatch(batch)
if err != nil {
t.Fatal(err)
}
got, err := bins.Get(index)
if err != nil {
t.Fatal(err)
}
if got != want {
t.Errorf("got %v uint64 %v, want %v", index, got, want)
}
})
}
})
}
// TestUint64Vector_Inc validates Inc operation
// of the Uint64Vector.
func TestUint64Vector_Inc(t *testing.T) {
db, cleanupFunc := newTestDB(t)
defer cleanupFunc()
bins, err := db.NewUint64Vector("bins")
if err != nil {
t.Fatal(err)
}
for _, index := range []uint64{0, 1, 2, 3, 5, 10} {
var want uint64 = 1
got, err := bins.Inc(index)
if err != nil {
t.Fatal(err)
}
if got != want {
t.Errorf("got %v uint64 %v, want %v", index, got, want)
}
want = 2
got, err = bins.Inc(index)
if err != nil {
t.Fatal(err)
}
if got != want {
t.Errorf("got %v uint64 %v, want %v", index, got, want)
}
}
}
// TestUint64Vector_IncInBatch validates IncInBatch operation
// of the Uint64Vector.
func TestUint64Vector_IncInBatch(t *testing.T) {
db, cleanupFunc := newTestDB(t)
defer cleanupFunc()
bins, err := db.NewUint64Vector("bins")
if err != nil {
t.Fatal(err)
}
for _, index := range []uint64{0, 1, 2, 3, 5, 10} {
batch := new(leveldb.Batch)
var want uint64 = 1
got, err := bins.IncInBatch(batch, index)
if err != nil {
t.Fatal(err)
}
if got != want {
t.Errorf("got %v uint64 %v, want %v", index, got, want)
}
err = db.WriteBatch(batch)
if err != nil {
t.Fatal(err)
}
got, err = bins.Get(index)
if err != nil {
t.Fatal(err)
}
if got != want {
t.Errorf("got %v uint64 %v, want %v", index, got, want)
}
batch2 := new(leveldb.Batch)
want = 2
got, err = bins.IncInBatch(batch2, index)
if err != nil {
t.Fatal(err)
}
if got != want {
t.Errorf("got %v uint64 %v, want %v", index, got, want)
}
err = db.WriteBatch(batch2)
if err != nil {
t.Fatal(err)
}
got, err = bins.Get(index)
if err != nil {
t.Fatal(err)
}
if got != want {
t.Errorf("got %v uint64 %v, want %v", index, got, want)
}
}
}
// TestUint64Vector_Dec validates Dec operation
// of the Uint64Vector.
func TestUint64Vector_Dec(t *testing.T) {
db, cleanupFunc := newTestDB(t)
defer cleanupFunc()
bins, err := db.NewUint64Vector("bins")
if err != nil {
t.Fatal(err)
}
for _, index := range []uint64{0, 1, 2, 3, 5, 10} {
// test underflow protection on a missing (zero) value
var want uint64
got, err := bins.Dec(index)
if err != nil {
t.Fatal(err)
}
if got != want {
t.Errorf("got %v uint64 %v, want %v", index, got, want)
}
want = 32 + index
err = bins.Put(index, want)
if err != nil {
t.Fatal(err)
}
want = 31 + index
got, err = bins.Dec(index)
if err != nil {
t.Fatal(err)
}
if got != want {
t.Errorf("got %v uint64 %v, want %v", index, got, want)
}
}
}
// TestUint64Vector_DecInBatch validates DecInBatch operation
// of the Uint64Vector.
func TestUint64Vector_DecInBatch(t *testing.T) {
db, cleanupFunc := newTestDB(t)
defer cleanupFunc()
bins, err := db.NewUint64Vector("bins")
if err != nil {
t.Fatal(err)
}
for _, index := range []uint64{0, 1, 2, 3, 5, 10} {
batch := new(leveldb.Batch)
var want uint64
got, err := bins.DecInBatch(batch, index)
if err != nil {
t.Fatal(err)
}
if got != want {
t.Errorf("got %v uint64 %v, want %v", index, got, want)
}
err = db.WriteBatch(batch)
if err != nil {
t.Fatal(err)
}
got, err = bins.Get(index)
if err != nil {
t.Fatal(err)
}
if got != want {
t.Errorf("got %v uint64 %v, want %v", index, got, want)
}
batch2 := new(leveldb.Batch)
want = 42 + index
bins.PutInBatch(batch2, index, want)
err = db.WriteBatch(batch2)
if err != nil {
t.Fatal(err)
}
got, err = bins.Get(index)
if err != nil {
t.Fatal(err)
}
if got != want {
t.Errorf("got %v uint64 %v, want %v", index, got, want)
}
batch3 := new(leveldb.Batch)
want = 41 + index
got, err = bins.DecInBatch(batch3, index)
if err != nil {
t.Fatal(err)
}
if got != want {
t.Errorf("got %v uint64 %v, want %v", index, got, want)
}
err = db.WriteBatch(batch3)
if err != nil {
t.Fatal(err)
}
got, err = bins.Get(index)
if err != nil {
t.Fatal(err)
}
if got != want {
t.Errorf("got %v uint64 %v, want %v", index, got, want)
}
}
}
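// A hedged sketch, not part of the original tests: the *InBatch variants in
// this package exist so that updates to unrelated structures can be committed
// atomically. The index, vector and item values below are illustrative, and
// only APIs already shown in this package are used.
func atomicUpdateSketch(db *DB, index Index, bins Uint64Vector, item Item) error {
batch := new(leveldb.Batch)
// Collect both writes in one batch so they are persisted together.
index.PutInBatch(batch, item)
if _, err := bins.IncInBatch(batch, 0); err != nil {
return err
}
return db.WriteBatch(batch)
}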