swarm: codebase split from go-ethereum (#1405)

This commit is contained in:
Rafael Matias
2019-06-03 12:28:18 +02:00
committed by Anton Evangelatov
parent 7a22da98b9
commit b046760db1
1540 changed files with 4654 additions and 129393 deletions

476
storage/mock/db/db.go Normal file

@ -0,0 +1,476 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
// Package db implements a mock store that keeps all chunk data in a LevelDB database.
package db
import (
"archive/tar"
"bytes"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"sync"
"time"
"github.com/syndtr/goleveldb/leveldb"
"github.com/syndtr/goleveldb/leveldb/util"
"github.com/ethereum/go-ethereum/common"
"github.com/ethersphere/swarm/storage/mock"
)
// GlobalStore contains the LevelDB database that stores
// chunk data for all swarm nodes.
// Closing the GlobalStore with the Close method is required to
// release resources used by the database.
type GlobalStore struct {
db *leveldb.DB
// protects nodes and keys indexes
// in Put and Delete methods
nodesLocks sync.Map
keysLocks sync.Map
}
// NewGlobalStore creates a new instance of GlobalStore.
func NewGlobalStore(path string) (s *GlobalStore, err error) {
db, err := leveldb.OpenFile(path, nil)
if err != nil {
return nil, err
}
return &GlobalStore{
db: db,
}, nil
}
// Close releases the resources used by the underlying LevelDB.
func (s *GlobalStore) Close() error {
return s.db.Close()
}
// NewNodeStore returns a new instance of NodeStore that retrieves and stores
// chunk data only for a node with address addr.
func (s *GlobalStore) NewNodeStore(addr common.Address) *mock.NodeStore {
return mock.NewNodeStore(addr, s)
}
// Get returns chunk data if the chunk with key exists for node
// on address addr.
func (s *GlobalStore) Get(addr common.Address, key []byte) (data []byte, err error) {
has, err := s.db.Has(indexForHashesPerNode(addr, key), nil)
if err != nil {
return nil, mock.ErrNotFound
}
if !has {
return nil, mock.ErrNotFound
}
data, err = s.db.Get(indexDataKey(key), nil)
if err == leveldb.ErrNotFound {
err = mock.ErrNotFound
}
return
}
// Put saves the chunk data for node with address addr.
func (s *GlobalStore) Put(addr common.Address, key []byte, data []byte) error {
unlock, err := s.lock(addr, key)
if err != nil {
return err
}
defer unlock()
batch := new(leveldb.Batch)
batch.Put(indexForHashesPerNode(addr, key), nil)
batch.Put(indexForNodesWithHash(key, addr), nil)
batch.Put(indexForNodes(addr), nil)
batch.Put(indexForHashes(key), nil)
batch.Put(indexDataKey(key), data)
return s.db.Write(batch, nil)
}
// Delete removes the chunk reference to node with address addr.
func (s *GlobalStore) Delete(addr common.Address, key []byte) error {
unlock, err := s.lock(addr, key)
if err != nil {
return err
}
defer unlock()
batch := new(leveldb.Batch)
batch.Delete(indexForHashesPerNode(addr, key))
batch.Delete(indexForNodesWithHash(key, addr))
// check if this node contains any keys, and if not,
// remove it from the nodes index
x := indexForHashesPerNodePrefix(addr)
if k, _ := s.db.Get(x, nil); !bytes.HasPrefix(k, x) {
batch.Delete(indexForNodes(addr))
}
x = indexForNodesWithHashPrefix(key)
if k, _ := s.db.Get(x, nil); !bytes.HasPrefix(k, x) {
batch.Delete(indexForHashes(key))
}
return s.db.Write(batch, nil)
}
// HasKey returns whether a node with addr contains the key.
func (s *GlobalStore) HasKey(addr common.Address, key []byte) bool {
has, err := s.db.Has(indexForHashesPerNode(addr, key), nil)
if err != nil {
has = false
}
return has
}
// Keys returns a paginated list of keys on all nodes.
func (s *GlobalStore) Keys(startKey []byte, limit int) (keys mock.Keys, err error) {
return s.keys(nil, startKey, limit)
}
// Nodes returns a paginated list of all known nodes.
func (s *GlobalStore) Nodes(startAddr *common.Address, limit int) (nodes mock.Nodes, err error) {
return s.nodes(nil, startAddr, limit)
}
// NodeKeys returns a paginated list of keys on a node with provided address.
func (s *GlobalStore) NodeKeys(addr common.Address, startKey []byte, limit int) (keys mock.Keys, err error) {
return s.keys(&addr, startKey, limit)
}
// KeyNodes returns a paginated list of nodes that contain a particular key.
func (s *GlobalStore) KeyNodes(key []byte, startAddr *common.Address, limit int) (nodes mock.Nodes, err error) {
return s.nodes(key, startAddr, limit)
}
// keys returns a paginated list of keys. If addr is not nil, only keys on that
// node will be returned.
func (s *GlobalStore) keys(addr *common.Address, startKey []byte, limit int) (keys mock.Keys, err error) {
iter := s.db.NewIterator(nil, nil)
defer iter.Release()
if limit <= 0 {
limit = mock.DefaultLimit
}
prefix := []byte{indexForHashesPrefix}
if addr != nil {
prefix = indexForHashesPerNodePrefix(*addr)
}
if startKey != nil {
if addr != nil {
startKey = indexForHashesPerNode(*addr, startKey)
} else {
startKey = indexForHashes(startKey)
}
} else {
startKey = prefix
}
ok := iter.Seek(startKey)
if !ok {
return keys, iter.Error()
}
for ; ok; ok = iter.Next() {
k := iter.Key()
if !bytes.HasPrefix(k, prefix) {
break
}
key := append([]byte(nil), bytes.TrimPrefix(k, prefix)...)
if len(keys.Keys) >= limit {
keys.Next = key
break
}
keys.Keys = append(keys.Keys, key)
}
return keys, iter.Error()
}
// nodes returns a paginated list of node addresses. If key is not nil,
// only nodes that contain that key will be returned.
func (s *GlobalStore) nodes(key []byte, startAddr *common.Address, limit int) (nodes mock.Nodes, err error) {
iter := s.db.NewIterator(nil, nil)
defer iter.Release()
if limit <= 0 {
limit = mock.DefaultLimit
}
prefix := []byte{indexForNodesPrefix}
if key != nil {
prefix = indexForNodesWithHashPrefix(key)
}
startKey := prefix
if startAddr != nil {
if key != nil {
startKey = indexForNodesWithHash(key, *startAddr)
} else {
startKey = indexForNodes(*startAddr)
}
}
ok := iter.Seek(startKey)
if !ok {
return nodes, iter.Error()
}
for ; ok; ok = iter.Next() {
k := iter.Key()
if !bytes.HasPrefix(k, prefix) {
break
}
addr := common.BytesToAddress(append([]byte(nil), bytes.TrimPrefix(k, prefix)...))
if len(nodes.Addrs) >= limit {
nodes.Next = &addr
break
}
nodes.Addrs = append(nodes.Addrs, addr)
}
return nodes, iter.Error()
}
// Import reads tar archive from a reader that contains exported chunk data.
// It returns the number of chunks imported and an error.
func (s *GlobalStore) Import(r io.Reader) (n int, err error) {
tr := tar.NewReader(r)
for {
hdr, err := tr.Next()
if err != nil {
if err == io.EOF {
break
}
return n, err
}
data, err := ioutil.ReadAll(tr)
if err != nil {
return n, err
}
var c mock.ExportedChunk
if err = json.Unmarshal(data, &c); err != nil {
return n, err
}
key := common.Hex2Bytes(hdr.Name)
batch := new(leveldb.Batch)
for _, addr := range c.Addrs {
batch.Put(indexForHashesPerNode(addr, key), nil)
batch.Put(indexForNodesWithHash(key, addr), nil)
batch.Put(indexForNodes(addr), nil)
}
batch.Put(indexForHashes(key), nil)
batch.Put(indexDataKey(key), c.Data)
if err = s.db.Write(batch, nil); err != nil {
return n, err
}
n++
}
return n, err
}
// Export writes to a writer a tar archive with all chunk data from
// the store. It returns the number of chunks exported and an error.
func (s *GlobalStore) Export(w io.Writer) (n int, err error) {
tw := tar.NewWriter(w)
defer tw.Close()
buf := bytes.NewBuffer(make([]byte, 0, 1024))
encoder := json.NewEncoder(buf)
snap, err := s.db.GetSnapshot()
if err != nil {
return 0, err
}
iter := snap.NewIterator(util.BytesPrefix([]byte{indexForHashesByNodePrefix}), nil)
defer iter.Release()
var currentKey string
var addrs []common.Address
saveChunk := func() error {
hexKey := currentKey
data, err := snap.Get(indexDataKey(common.Hex2Bytes(hexKey)), nil)
if err != nil {
return fmt.Errorf("get data %s: %v", hexKey, err)
}
buf.Reset()
if err = encoder.Encode(mock.ExportedChunk{
Addrs: addrs,
Data: data,
}); err != nil {
return err
}
d := buf.Bytes()
hdr := &tar.Header{
Name: hexKey,
Mode: 0644,
Size: int64(len(d)),
}
if err := tw.WriteHeader(hdr); err != nil {
return err
}
if _, err := tw.Write(d); err != nil {
return err
}
n++
return nil
}
for iter.Next() {
k := bytes.TrimPrefix(iter.Key(), []byte{indexForHashesByNodePrefix})
i := bytes.Index(k, []byte{keyTermByte})
if i < 0 {
continue
}
hexKey := string(k[:i])
if currentKey == "" {
currentKey = hexKey
}
if hexKey != currentKey {
if err = saveChunk(); err != nil {
return n, err
}
addrs = addrs[:0]
}
currentKey = hexKey
addrs = append(addrs, common.BytesToAddress(k[i+1:]))
}
if len(addrs) > 0 {
if err = saveChunk(); err != nil {
return n, err
}
}
return n, iter.Error()
}
var (
// maximal time for lock to wait until it returns error
lockTimeout = 3 * time.Second
// duration between two lock checks.
lockCheckDelay = 30 * time.Microsecond
// error returned by lock method when lock timeout is reached
errLockTimeout = errors.New("lock timeout")
)
// lock protects parallel writes in Put and Delete methods for both
// the node with the provided address and the data with the provided key.
func (s *GlobalStore) lock(addr common.Address, key []byte) (unlock func(), err error) {
start := time.Now()
nodeLockKey := addr.Hex()
for {
_, loaded := s.nodesLocks.LoadOrStore(nodeLockKey, struct{}{})
if !loaded {
break
}
time.Sleep(lockCheckDelay)
if time.Since(start) > lockTimeout {
return nil, errLockTimeout
}
}
start = time.Now()
keyLockKey := common.Bytes2Hex(key)
for {
_, loaded := s.keysLocks.LoadOrStore(keyLockKey, struct{}{})
if !loaded {
break
}
time.Sleep(lockCheckDelay)
if time.Since(start) > lockTimeout {
return nil, errLockTimeout
}
}
return func() {
s.nodesLocks.Delete(nodeLockKey)
s.keysLocks.Delete(keyLockKey)
}, nil
}
const (
// prefixes for different indexes
indexDataPrefix = 0
indexForNodesWithHashesPrefix = 1
indexForHashesByNodePrefix = 2
indexForNodesPrefix = 3
indexForHashesPrefix = 4
// keyTermByte splits keys and node addresses
// in database keys
keyTermByte = 0xff
)
// indexForHashesPerNode constructs a database key to store keys used in
// NodeKeys method.
func indexForHashesPerNode(addr common.Address, key []byte) []byte {
return append(indexForHashesPerNodePrefix(addr), key...)
}
// indexForHashesPerNodePrefix returns a prefix containing a node address used in
// NodeKeys method. Node address is hex encoded to be able to use keyTermByte
// for splitting node address and key.
func indexForHashesPerNodePrefix(addr common.Address) []byte {
return append([]byte{indexForNodesWithHashesPrefix}, append([]byte(addr.Hex()), keyTermByte)...)
}
// indexForNodesWithHash constructs a database key to store keys used in
// KeyNodes method.
func indexForNodesWithHash(key []byte, addr common.Address) []byte {
return append(indexForNodesWithHashPrefix(key), addr[:]...)
}
// indexForNodesWithHashPrefix returns a prefix containing a key used in
// KeyNodes method. Key is hex encoded to be able to use keyTermByte
// for splitting key and node address.
func indexForNodesWithHashPrefix(key []byte) []byte {
return append([]byte{indexForHashesByNodePrefix}, append([]byte(common.Bytes2Hex(key)), keyTermByte)...)
}
// indexForNodes constructs a database key to store keys used in
// Nodes method.
func indexForNodes(addr common.Address) []byte {
return append([]byte{indexForNodesPrefix}, addr[:]...)
}
// indexForHashes constructs a database key to store keys used in
// Keys method.
func indexForHashes(key []byte) []byte {
return append([]byte{indexForHashesPrefix}, key...)
}
// indexDataKey constructs a database key for key/data storage.
func indexDataKey(key []byte) []byte {
return append([]byte{indexDataPrefix}, key...)
}
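
For orientation, a minimal usage sketch (not part of this commit) of how the LevelDB-backed GlobalStore and a per-node NodeStore fit together; the path and the node address below are placeholders.

// Hypothetical usage sketch, not part of this commit.
package main

import (
	"fmt"
	"log"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethersphere/swarm/storage/mock/db"
)

func main() {
	// Open (or create) the LevelDB-backed global store.
	store, err := db.NewGlobalStore("/tmp/swarm-mock-db")
	if err != nil {
		log.Fatal(err)
	}
	defer store.Close()

	// NodeStore scopes Get/Put/Delete to a single node address.
	addr := common.HexToAddress("0x0000000000000000000000000000000000000001")
	ns := store.NewNodeStore(addr)

	key := []byte("chunk-key")
	if err := ns.Put(key, []byte("chunk data")); err != nil {
		log.Fatal(err)
	}
	data, err := ns.Get(key)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("has key: %v, data: %s\n", store.HasKey(addr, key), data)
}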


@ -0,0 +1,75 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package db
import (
"io/ioutil"
"os"
"testing"
"github.com/ethersphere/swarm/storage/mock/test"
)
// TestDBStore runs a set of tests
// using the test.MockStore function.
func TestDBStore(t *testing.T) {
store, cleanup := newTestStore(t)
defer cleanup()
test.MockStore(t, store, 100)
}
// TestDBStoreListings runs the test.MockStoreListings tests.
func TestDBStoreListings(t *testing.T) {
store, cleanup := newTestStore(t)
defer cleanup()
test.MockStoreListings(t, store, 1000)
}
// TestImportExport runs import/export tests
// using the test.ImportExport function.
func TestImportExport(t *testing.T) {
store1, cleanup := newTestStore(t)
defer cleanup()
store2, cleanup := newTestStore(t)
defer cleanup()
test.ImportExport(t, store1, store2, 100)
}
// newTestStore creates a temporary GlobalStore
// that will be closed and data deleted when
// calling returned cleanup function.
func newTestStore(t *testing.T) (s *GlobalStore, cleanup func()) {
dir, err := ioutil.TempDir("", "swarm-mock-db-")
if err != nil {
t.Fatal(err)
}
s, err = NewGlobalStore(dir)
if err != nil {
os.RemoveAll(dir)
t.Fatal(err)
}
return s, func() {
s.Close()
os.RemoveAll(dir)
}
}


@ -0,0 +1,257 @@
// Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package explorer
import (
"bytes"
"encoding/json"
"io"
"net/http"
"net/url"
"strconv"
"strings"
"github.com/ethereum/go-ethereum/common"
"github.com/ethersphere/swarm/log"
"github.com/ethersphere/swarm/storage/mock"
"github.com/rs/cors"
)
const jsonContentType = "application/json; charset=utf-8"
// NewHandler constructs an http.Handler with a router
// that serves requests required by the chunk explorer.
//
// /api/has-key/{node}/{key}
// /api/keys?start={key}&node={node}&limit={int[0..1000]}
// /api/nodes?start={node}&key={key}&limit={int[0..1000]}
//
// Data from global store will be served and appropriate
// CORS headers will be sent if allowed origins are provided.
func NewHandler(store mock.GlobalStorer, corsOrigins []string) (handler http.Handler) {
mux := http.NewServeMux()
mux.Handle("/api/has-key/", newHasKeyHandler(store))
mux.Handle("/api/keys", newKeysHandler(store))
mux.Handle("/api/nodes", newNodesHandler(store))
mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
jsonStatusResponse(w, http.StatusNotFound)
})
handler = noCacheHandler(mux)
if corsOrigins != nil {
handler = cors.New(cors.Options{
AllowedOrigins: corsOrigins,
AllowedMethods: []string{"GET"},
MaxAge: 600,
}).Handler(handler)
}
return handler
}
// newHasKeyHandler returns a new handler that serves
// requests for the HasKey global store method.
// The response is a JSON-encoded StatusResponse with
// status code 200 if the chunk is found, or 404 if it is not.
func newHasKeyHandler(store mock.GlobalStorer) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
addr, key, ok := parseHasKeyPath(r.URL.Path)
if !ok {
jsonStatusResponse(w, http.StatusNotFound)
return
}
found := store.HasKey(addr, key)
if !found {
jsonStatusResponse(w, http.StatusNotFound)
return
}
jsonStatusResponse(w, http.StatusOK)
}
}
// KeysResponse is a JSON-encoded response for global store
// Keys and NodeKeys methods.
type KeysResponse struct {
Keys []string `json:"keys"`
Next string `json:"next,omitempty"`
}
// newKeysHandler returns a new handler that serves
// requests for the Keys and NodeKeys global store methods.
// The HTTP response body will be a JSON-encoded KeysResponse.
func newKeysHandler(store mock.GlobalStorer) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
q := r.URL.Query()
node := q.Get("node")
start, limit := listingPage(q)
var keys mock.Keys
if node == "" {
var err error
keys, err = store.Keys(common.Hex2Bytes(start), limit)
if err != nil {
log.Error("chunk explorer: keys handler: get keys", "start", start, "err", err)
jsonStatusResponse(w, http.StatusInternalServerError)
return
}
} else {
var err error
keys, err = store.NodeKeys(common.HexToAddress(node), common.Hex2Bytes(start), limit)
if err != nil {
log.Error("chunk explorer: keys handler: get node keys", "node", node, "start", start, "err", err)
jsonStatusResponse(w, http.StatusInternalServerError)
return
}
}
ks := make([]string, len(keys.Keys))
for i, k := range keys.Keys {
ks[i] = common.Bytes2Hex(k)
}
data, err := json.Marshal(KeysResponse{
Keys: ks,
Next: common.Bytes2Hex(keys.Next),
})
if err != nil {
log.Error("chunk explorer: keys handler: json marshal", "err", err)
jsonStatusResponse(w, http.StatusInternalServerError)
return
}
w.Header().Set("Content-Type", jsonContentType)
_, err = io.Copy(w, bytes.NewReader(data))
if err != nil {
log.Error("chunk explorer: keys handler: write response", "err", err)
}
}
}
// NodesResponse is a JSON-encoded response for global store
// Nodes and KeyNodes methods.
type NodesResponse struct {
Nodes []string `json:"nodes"`
Next string `json:"next,omitempty"`
}
// newNodesHandler returns a new handler that serves
// requests for the Nodes and KeyNodes global store methods.
// The HTTP response body will be a JSON-encoded NodesResponse.
func newNodesHandler(store mock.GlobalStorer) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
q := r.URL.Query()
key := q.Get("key")
var start *common.Address
queryStart, limit := listingPage(q)
if queryStart != "" {
s := common.HexToAddress(queryStart)
start = &s
}
var nodes mock.Nodes
if key == "" {
var err error
nodes, err = store.Nodes(start, limit)
if err != nil {
log.Error("chunk explorer: nodes handler: get nodes", "start", queryStart, "err", err)
jsonStatusResponse(w, http.StatusInternalServerError)
return
}
} else {
var err error
nodes, err = store.KeyNodes(common.Hex2Bytes(key), start, limit)
if err != nil {
log.Error("chunk explorer: nodes handler: get key nodes", "key", key, "start", queryStart, "err", err)
jsonStatusResponse(w, http.StatusInternalServerError)
return
}
}
ns := make([]string, len(nodes.Addrs))
for i, n := range nodes.Addrs {
ns[i] = n.Hex()
}
var next string
if nodes.Next != nil {
next = nodes.Next.Hex()
}
data, err := json.Marshal(NodesResponse{
Nodes: ns,
Next: next,
})
if err != nil {
log.Error("chunk explorer: nodes handler", "err", err)
jsonStatusResponse(w, http.StatusInternalServerError)
return
}
w.Header().Set("Content-Type", jsonContentType)
_, err = io.Copy(w, bytes.NewReader(data))
if err != nil {
log.Error("chunk explorer: nodes handler: write response", "err", err)
}
}
}
// parseHasKeyPath extracts address and key from HTTP request
// path for HasKey route: /api/has-key/{node}/{key}.
// If ok is false, the provided path is not matched.
func parseHasKeyPath(p string) (addr common.Address, key []byte, ok bool) {
p = strings.TrimPrefix(p, "/api/has-key/")
parts := strings.SplitN(p, "/", 2)
if len(parts) != 2 || parts[0] == "" || parts[1] == "" {
return addr, nil, false
}
addr = common.HexToAddress(parts[0])
key = common.Hex2Bytes(parts[1])
return addr, key, true
}
// listingPage returns start value and listing limit
// from url query values.
func listingPage(q url.Values) (start string, limit int) {
// if limit is not a valid integer (or blank string),
// ignore the error and use the returned 0 value
limit, _ = strconv.Atoi(q.Get("limit"))
return q.Get("start"), limit
}
// StatusResponse is a standardized JSON-encoded response
// that contains information about HTTP response code
// for easier status identification.
type StatusResponse struct {
Message string `json:"message"`
Code int `json:"code"`
}
// jsonStatusResponse writes to the response writer
// JSON-encoded StatusResponse based on the provided status code.
func jsonStatusResponse(w http.ResponseWriter, code int) {
w.Header().Set("Content-Type", jsonContentType)
w.WriteHeader(code)
err := json.NewEncoder(w).Encode(StatusResponse{
Message: http.StatusText(code),
Code: code,
})
if err != nil {
log.Error("chunk explorer: json status response", "err", err)
}
}
// noCacheHandler sets required HTTP headers to prevent
// response caching at the client side.
func noCacheHandler(h http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Cache-Control", "no-cache, no-store, must-revalidate")
w.Header().Set("Pragma", "no-cache")
w.Header().Set("Expires", "0")
h.ServeHTTP(w, r)
})
}
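
To illustrate how the routes above are consumed, a short sketch (not from this commit) that serves the handler over an in-memory global store via httptest and queries /api/keys; the address and key values are arbitrary placeholders.

// Hypothetical sketch, not part of this commit: serve the chunk explorer over
// an in-memory global store and query the /api/keys route for one node.
package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
	"net/http/httptest"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethersphere/swarm/storage/mock/explorer"
	"github.com/ethersphere/swarm/storage/mock/mem"
)

func main() {
	store := mem.NewGlobalStore()
	addr := common.HexToAddress("0x01")
	if err := store.Put(addr, common.Hex2Bytes("aabbcc"), []byte("data")); err != nil {
		panic(err)
	}

	srv := httptest.NewServer(explorer.NewHandler(store, nil))
	defer srv.Close()

	resp, err := http.Get(srv.URL + "/api/keys?node=" + addr.Hex() + "&limit=10")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	body, _ := ioutil.ReadAll(resp.Body)
	fmt.Println(resp.Status, string(body)) // expected: a JSON-encoded KeysResponse
}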


@ -0,0 +1,471 @@
// Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package explorer
import (
"encoding/binary"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"net/http/httptest"
"net/url"
"os"
"sort"
"strconv"
"strings"
"testing"
"github.com/ethereum/go-ethereum/common"
"github.com/ethersphere/swarm/storage/mock"
"github.com/ethersphere/swarm/storage/mock/db"
"github.com/ethersphere/swarm/storage/mock/mem"
)
// TestHandler_memGlobalStore runs a set of tests
// to validate the handler with the mem global store.
func TestHandler_memGlobalStore(t *testing.T) {
t.Parallel()
globalStore := mem.NewGlobalStore()
testHandler(t, globalStore)
}
// TestHandler_dbGlobalStore runs a set of tests
// to validate the handler with the database global store.
func TestHandler_dbGlobalStore(t *testing.T) {
t.Parallel()
dir, err := ioutil.TempDir("", "swarm-mock-explorer-db-")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(dir)
globalStore, err := db.NewGlobalStore(dir)
if err != nil {
t.Fatal(err)
}
defer globalStore.Close()
testHandler(t, globalStore)
}
// testHandler stores data distributed by node addresses
// and validates that this data is correctly retrievable
// using the http.Handler returned by the NewHandler function.
// This test covers all HTTP routes and various GET parameters
// on them to check paginated results.
func testHandler(t *testing.T, globalStore mock.GlobalStorer) {
const (
nodeCount = 350
keyCount = 250
keysOnNodeCount = 150
)
// keys for every node
nodeKeys := make(map[string][]string)
// a node address that is not present in global store
invalidAddr := "0x7b8b72938c254cf002c4e1e714d27e022be88d93"
// a key that is not present in global store
invalidKey := "f9824192fb515cfb"
for i := 1; i <= nodeCount; i++ {
b := make([]byte, 8)
binary.BigEndian.PutUint64(b, uint64(i))
addr := common.BytesToAddress(b).Hex()
nodeKeys[addr] = make([]string, 0)
}
for i := 1; i <= keyCount; i++ {
b := make([]byte, 8)
binary.BigEndian.PutUint64(b, uint64(i))
key := common.Bytes2Hex(b)
var c int
for addr := range nodeKeys {
nodeKeys[addr] = append(nodeKeys[addr], key)
c++
if c >= keysOnNodeCount {
break
}
}
}
// sort keys for every node as they are expected to be
// sorted in HTTP responses
for _, keys := range nodeKeys {
sort.Strings(keys)
}
// nodes for every key
keyNodes := make(map[string][]string)
// construct a reverse mapping of nodes for every key
for addr, keys := range nodeKeys {
for _, key := range keys {
keyNodes[key] = append(keyNodes[key], addr)
}
}
// sort node addresses with a case-insensitive sort,
// as hex letters in node addresses are in mixed case
for _, addrs := range keyNodes {
sortCaseInsensitive(addrs)
}
// find a key that is not stored at the address
var (
unmatchedAddr string
unmatchedKey string
)
for addr, keys := range nodeKeys {
for key := range keyNodes {
var found bool
for _, k := range keys {
if k == key {
found = true
break
}
}
if !found {
unmatchedAddr = addr
unmatchedKey = key
}
break
}
if unmatchedAddr != "" {
break
}
}
// check if unmatched key/address pair is found
if unmatchedAddr == "" || unmatchedKey == "" {
t.Fatalf("could not find a key that is not associated with a node")
}
// store the data
for addr, keys := range nodeKeys {
for _, key := range keys {
err := globalStore.Put(common.HexToAddress(addr), common.Hex2Bytes(key), []byte("data"))
if err != nil {
t.Fatal(err)
}
}
}
handler := NewHandler(globalStore, nil)
// this subtest confirms that uploaded keys are found and that invalid keys are not
t.Run("has key", func(t *testing.T) {
for addr, keys := range nodeKeys {
for _, key := range keys {
testStatusResponse(t, handler, "/api/has-key/"+addr+"/"+key, http.StatusOK)
testStatusResponse(t, handler, "/api/has-key/"+invalidAddr+"/"+key, http.StatusNotFound)
}
testStatusResponse(t, handler, "/api/has-key/"+addr+"/"+invalidKey, http.StatusNotFound)
}
testStatusResponse(t, handler, "/api/has-key/"+invalidAddr+"/"+invalidKey, http.StatusNotFound)
testStatusResponse(t, handler, "/api/has-key/"+unmatchedAddr+"/"+unmatchedKey, http.StatusNotFound)
})
// this subtest confirms that all keys are listed in the correct order with the expected pagination
t.Run("keys", func(t *testing.T) {
var allKeys []string
for key := range keyNodes {
allKeys = append(allKeys, key)
}
sort.Strings(allKeys)
t.Run("limit 0", testKeys(handler, allKeys, 0, ""))
t.Run("limit default", testKeys(handler, allKeys, mock.DefaultLimit, ""))
t.Run("limit 2x default", testKeys(handler, allKeys, 2*mock.DefaultLimit, ""))
t.Run("limit 0.5x default", testKeys(handler, allKeys, mock.DefaultLimit/2, ""))
t.Run("limit max", testKeys(handler, allKeys, mock.MaxLimit, ""))
t.Run("limit 2x max", testKeys(handler, allKeys, 2*mock.MaxLimit, ""))
t.Run("limit negative", testKeys(handler, allKeys, -10, ""))
})
// this subtest confirms that all keys are listed for every node in the correct order
// and that different pagination options are correct for one node
t.Run("node keys", func(t *testing.T) {
var limitCheckAddr string
for addr, keys := range nodeKeys {
testKeys(handler, keys, 0, addr)(t)
if limitCheckAddr == "" {
limitCheckAddr = addr
}
}
testKeys(handler, nil, 0, invalidAddr)(t)
limitCheckKeys := nodeKeys[limitCheckAddr]
t.Run("limit 0", testKeys(handler, limitCheckKeys, 0, limitCheckAddr))
t.Run("limit default", testKeys(handler, limitCheckKeys, mock.DefaultLimit, limitCheckAddr))
t.Run("limit 2x default", testKeys(handler, limitCheckKeys, 2*mock.DefaultLimit, limitCheckAddr))
t.Run("limit 0.5x default", testKeys(handler, limitCheckKeys, mock.DefaultLimit/2, limitCheckAddr))
t.Run("limit max", testKeys(handler, limitCheckKeys, mock.MaxLimit, limitCheckAddr))
t.Run("limit 2x max", testKeys(handler, limitCheckKeys, 2*mock.MaxLimit, limitCheckAddr))
t.Run("limit negative", testKeys(handler, limitCheckKeys, -10, limitCheckAddr))
})
// this subtest confirms that all nodes are listed in the correct order with the expected pagination
t.Run("nodes", func(t *testing.T) {
var allNodes []string
for addr := range nodeKeys {
allNodes = append(allNodes, addr)
}
sortCaseInsensitive(allNodes)
t.Run("limit 0", testNodes(handler, allNodes, 0, ""))
t.Run("limit default", testNodes(handler, allNodes, mock.DefaultLimit, ""))
t.Run("limit 2x default", testNodes(handler, allNodes, 2*mock.DefaultLimit, ""))
t.Run("limit 0.5x default", testNodes(handler, allNodes, mock.DefaultLimit/2, ""))
t.Run("limit max", testNodes(handler, allNodes, mock.MaxLimit, ""))
t.Run("limit 2x max", testNodes(handler, allNodes, 2*mock.MaxLimit, ""))
t.Run("limit negative", testNodes(handler, allNodes, -10, ""))
})
// this subtest confirms that all nodes that contain a particular key are listed in the correct order
// and that different node pagination options are correct for one key
t.Run("key nodes", func(t *testing.T) {
var limitCheckKey string
for key, addrs := range keyNodes {
testNodes(handler, addrs, 0, key)(t)
if limitCheckKey == "" {
limitCheckKey = key
}
}
testNodes(handler, nil, 0, invalidKey)(t)
limitCheckKeys := keyNodes[limitCheckKey]
t.Run("limit 0", testNodes(handler, limitCheckKeys, 0, limitCheckKey))
t.Run("limit default", testNodes(handler, limitCheckKeys, mock.DefaultLimit, limitCheckKey))
t.Run("limit 2x default", testNodes(handler, limitCheckKeys, 2*mock.DefaultLimit, limitCheckKey))
t.Run("limit 0.5x default", testNodes(handler, limitCheckKeys, mock.DefaultLimit/2, limitCheckKey))
t.Run("limit max", testNodes(handler, limitCheckKeys, mock.MaxLimit, limitCheckKey))
t.Run("limit 2x max", testNodes(handler, limitCheckKeys, 2*mock.MaxLimit, limitCheckKey))
t.Run("limit negative", testNodes(handler, limitCheckKeys, -10, limitCheckKey))
})
}
// testKeys returns a test function that validates wantKeys against a series of /api/keys
// HTTP responses with the provided limit and node options.
func testKeys(handler http.Handler, wantKeys []string, limit int, node string) func(t *testing.T) {
return func(t *testing.T) {
t.Helper()
wantLimit := limit
if wantLimit <= 0 {
wantLimit = mock.DefaultLimit
}
if wantLimit > mock.MaxLimit {
wantLimit = mock.MaxLimit
}
wantKeysLen := len(wantKeys)
var i int
var startKey string
for {
var wantNext string
start := i * wantLimit
end := (i + 1) * wantLimit
if end < wantKeysLen {
wantNext = wantKeys[end]
} else {
end = wantKeysLen
}
testKeysResponse(t, handler, node, startKey, limit, KeysResponse{
Keys: wantKeys[start:end],
Next: wantNext,
})
if wantNext == "" {
break
}
startKey = wantNext
i++
}
}
}
// testNodes returns a test function that validates wantAddrs against a series of /api/nodes
// HTTP responses with provided limit and key options.
func testNodes(handler http.Handler, wantAddrs []string, limit int, key string) func(t *testing.T) {
return func(t *testing.T) {
t.Helper()
wantLimit := limit
if wantLimit <= 0 {
wantLimit = mock.DefaultLimit
}
if wantLimit > mock.MaxLimit {
wantLimit = mock.MaxLimit
}
wantAddrsLen := len(wantAddrs)
var i int
var startKey string
for {
var wantNext string
start := i * wantLimit
end := (i + 1) * wantLimit
if end < wantAddrsLen {
wantNext = wantAddrs[end]
} else {
end = wantAddrsLen
}
testNodesResponse(t, handler, key, startKey, limit, NodesResponse{
Nodes: wantAddrs[start:end],
Next: wantNext,
})
if wantNext == "" {
break
}
startKey = wantNext
i++
}
}
}
// testStatusResponse validates that a response made on url matches
// the expected StatusResponse.
func testStatusResponse(t *testing.T, handler http.Handler, url string, code int) {
t.Helper()
resp := httpGet(t, handler, url)
if resp.StatusCode != code {
t.Errorf("got status code %v, want %v", resp.StatusCode, code)
}
if got := resp.Header.Get("Content-Type"); got != jsonContentType {
t.Errorf("got Content-Type header %q, want %q", got, jsonContentType)
}
var r StatusResponse
if err := json.NewDecoder(resp.Body).Decode(&r); err != nil {
t.Fatal(err)
}
if r.Code != code {
t.Errorf("got response code %v, want %v", r.Code, code)
}
if r.Message != http.StatusText(code) {
t.Errorf("got response message %q, want %q", r.Message, http.StatusText(code))
}
}
// testKeysResponse validates response returned from handler on /api/keys
// with node, start and limit options against KeysResponse.
func testKeysResponse(t *testing.T, handler http.Handler, node, start string, limit int, want KeysResponse) {
t.Helper()
u, err := url.Parse("/api/keys")
if err != nil {
t.Fatal(err)
}
q := u.Query()
if node != "" {
q.Set("node", node)
}
if start != "" {
q.Set("start", start)
}
if limit != 0 {
q.Set("limit", strconv.Itoa(limit))
}
u.RawQuery = q.Encode()
resp := httpGet(t, handler, u.String())
if resp.StatusCode != http.StatusOK {
t.Errorf("got status code %v, want %v", resp.StatusCode, http.StatusOK)
}
if got := resp.Header.Get("Content-Type"); got != jsonContentType {
t.Errorf("got Content-Type header %q, want %q", got, jsonContentType)
}
var r KeysResponse
if err := json.NewDecoder(resp.Body).Decode(&r); err != nil {
t.Fatal(err)
}
if fmt.Sprint(r.Keys) != fmt.Sprint(want.Keys) {
t.Errorf("got keys %v, want %v", r.Keys, want.Keys)
}
if r.Next != want.Next {
t.Errorf("got next %s, want %s", r.Next, want.Next)
}
}
// testNodesResponse validates response returned from handler on /api/nodes
// with key, start and limit options against NodesResponse.
func testNodesResponse(t *testing.T, handler http.Handler, key, start string, limit int, want NodesResponse) {
t.Helper()
u, err := url.Parse("/api/nodes")
if err != nil {
t.Fatal(err)
}
q := u.Query()
if key != "" {
q.Set("key", key)
}
if start != "" {
q.Set("start", start)
}
if limit != 0 {
q.Set("limit", strconv.Itoa(limit))
}
u.RawQuery = q.Encode()
resp := httpGet(t, handler, u.String())
if resp.StatusCode != http.StatusOK {
t.Errorf("got status code %v, want %v", resp.StatusCode, http.StatusOK)
}
if got := resp.Header.Get("Content-Type"); got != jsonContentType {
t.Errorf("got Content-Type header %q, want %q", got, jsonContentType)
}
var r NodesResponse
if err := json.NewDecoder(resp.Body).Decode(&r); err != nil {
t.Fatal(err)
}
if fmt.Sprint(r.Nodes) != fmt.Sprint(want.Nodes) {
t.Errorf("got nodes %v, want %v", r.Nodes, want.Nodes)
}
if r.Next != want.Next {
t.Errorf("got next %s, want %s", r.Next, want.Next)
}
}
// httpGet uses httptest recorder to provide a response on handler's url.
func httpGet(t *testing.T, handler http.Handler, url string) (r *http.Response) {
t.Helper()
req, err := http.NewRequest(http.MethodGet, url, nil)
if err != nil {
t.Fatal(err)
}
w := httptest.NewRecorder()
handler.ServeHTTP(w, req)
return w.Result()
}
// sortCaseInsensitive performs a case insensitive sort on a string slice.
func sortCaseInsensitive(s []string) {
sort.Slice(s, func(i, j int) bool {
return strings.ToLower(s[i]) < strings.ToLower(s[j])
})
}


@ -0,0 +1,163 @@
// Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package explorer
import (
"fmt"
"net/http"
"net/http/httptest"
"testing"
"github.com/ethersphere/swarm/storage/mock/mem"
)
// TestHandler_CORSOrigin validates that the correct Access-Control-Allow-Origin
// header is served with various allowed origin settings.
func TestHandler_CORSOrigin(t *testing.T) {
notAllowedOrigin := "http://not-allowed-origin.com/"
for _, tc := range []struct {
name string
origins []string
}{
{
name: "no origin",
origins: nil,
},
{
name: "single origin",
origins: []string{"http://localhost/"},
},
{
name: "multiple origins",
origins: []string{"http://localhost/", "http://ethereum.org/"},
},
} {
t.Run(tc.name, func(t *testing.T) {
handler := NewHandler(mem.NewGlobalStore(), tc.origins)
origins := tc.origins
if origins == nil {
// handle the "no origin" test case
origins = []string{""}
}
for _, origin := range origins {
t.Run(fmt.Sprintf("get %q", origin), newTestCORSOrigin(handler, origin, origin))
t.Run(fmt.Sprintf("preflight %q", origin), newTestCORSPreflight(handler, origin, origin))
}
t.Run(fmt.Sprintf("get %q", notAllowedOrigin), newTestCORSOrigin(handler, notAllowedOrigin, ""))
t.Run(fmt.Sprintf("preflight %q", notAllowedOrigin), newTestCORSPreflight(handler, notAllowedOrigin, ""))
})
}
t.Run("wildcard", func(t *testing.T) {
handler := NewHandler(mem.NewGlobalStore(), []string{"*"})
for _, origin := range []string{
"http://example.com/",
"http://ethereum.org",
"http://localhost",
} {
t.Run(fmt.Sprintf("get %q", origin), newTestCORSOrigin(handler, origin, origin))
t.Run(fmt.Sprintf("preflight %q", origin), newTestCORSPreflight(handler, origin, origin))
}
})
}
// newTestCORSOrigin returns a test function that validates that the wantOrigin CORS header is
// served by the handler for a GET request.
func newTestCORSOrigin(handler http.Handler, origin, wantOrigin string) func(t *testing.T) {
return func(t *testing.T) {
req, err := http.NewRequest(http.MethodGet, "/", nil)
if err != nil {
t.Fatal(err)
}
req.Header.Set("Origin", origin)
w := httptest.NewRecorder()
handler.ServeHTTP(w, req)
resp := w.Result()
header := resp.Header.Get("Access-Control-Allow-Origin")
if header != wantOrigin {
t.Errorf("got Access-Control-Allow-Origin header %q, want %q", header, wantOrigin)
}
}
}
// newTestCORSPreflight returns a test function that validates that the wantOrigin CORS header is
// served by the handler for an OPTIONS CORS preflight request.
func newTestCORSPreflight(handler http.Handler, origin, wantOrigin string) func(t *testing.T) {
return func(t *testing.T) {
req, err := http.NewRequest(http.MethodOptions, "/", nil)
if err != nil {
t.Fatal(err)
}
req.Header.Set("Origin", origin)
req.Header.Set("Access-Control-Request-Method", "GET")
w := httptest.NewRecorder()
handler.ServeHTTP(w, req)
resp := w.Result()
header := resp.Header.Get("Access-Control-Allow-Origin")
if header != wantOrigin {
t.Errorf("got Access-Control-Allow-Origin header %q, want %q", header, wantOrigin)
}
}
}
// TestHandler_noCacheHeaders validates that cache prevention headers are served.
func TestHandler_noCacheHeaders(t *testing.T) {
handler := NewHandler(mem.NewGlobalStore(), nil)
for _, tc := range []struct {
url string
}{
{
url: "/",
},
{
url: "/api/nodes",
},
{
url: "/api/keys",
},
} {
req, err := http.NewRequest(http.MethodGet, tc.url, nil)
if err != nil {
t.Fatal(err)
}
w := httptest.NewRecorder()
handler.ServeHTTP(w, req)
resp := w.Result()
for header, want := range map[string]string{
"Cache-Control": "no-cache, no-store, must-revalidate",
"Pragma": "no-cache",
"Expires": "0",
} {
got := resp.Header.Get(header)
if got != want {
t.Errorf("got %q header %q for url %q, want %q", header, got, tc.url, want)
}
}
}
}


@ -0,0 +1,176 @@
swagger: '2.0'
info:
title: Swarm Global Store API
version: 0.1.0
tags:
- name: Has Key
description: Checks if a Key is stored on a Node
- name: Keys
description: Lists Keys
- name: Nodes
description: Lists Node addresses
paths:
'/api/has-key/{node}/{key}':
get:
tags:
- Has Key
summary: Checks if a Key is stored on a Node
operationId: hasKey
produces:
- application/json
parameters:
- name: node
in: path
required: true
type: string
format: hex-encoded
description: Node address.
- name: key
in: path
required: true
type: string
format: hex-encoded
description: Key.
responses:
'200':
description: Key is stored on Node
schema:
$ref: '#/definitions/Status'
'404':
description: Key is not stored on Node
schema:
$ref: '#/definitions/Status'
'500':
description: Internal Server Error
schema:
$ref: '#/definitions/Status'
'/api/keys':
get:
tags:
- Keys
summary: Lists Keys
operationId: keys
produces:
- application/json
parameters:
- name: start
in: query
required: false
type: string
format: hex-encoded Key
description: A Key as the starting point for the returned list. It is usually a value from the returned "next" field in the Keys response.
- name: limit
in: query
required: false
type: integer
default: 100
minimum: 1
maximum: 1000
description: Limits the number of Keys returned in one response.
- name: node
in: query
required: false
type: string
format: hex-encoded Node address
description: If this parameter is provided, only Keys that are stored on this Node will be returned in the response. If not, all known Keys will be returned.
responses:
'200':
description: List of Keys
schema:
$ref: '#/definitions/Keys'
'500':
description: Internal Server Error
schema:
$ref: '#/definitions/Status'
'/api/nodes':
get:
tags:
- Nodes
summary: Lists Node addresses
operationId: nodes
produces:
- application/json
parameters:
- name: start
in: query
required: false
type: string
format: hex-encoded Node address
description: A Node address as the starting point for the returned list. It is usually a value from the returned "next" field in the Nodes response.
- name: limit
in: query
required: false
type: integer
default: 100
minimum: 1
maximum: 1000
description: Limits the number of Node addresses returned in one response.
- name: key
in: query
required: false
type: string
format: hex-encoded Key
description: If this parameter is provided, only addresses of Nodes that store this Key will be returned in the response. If not, all known Node addresses will be returned.
responses:
'200':
description: List of Node addresses
schema:
$ref: '#/definitions/Nodes'
'500':
description: Internal Server Error
schema:
$ref: '#/definitions/Status'
definitions:
Status:
type: object
properties:
message:
type: string
description: HTTP Status Code name.
code:
type: integer
description: HTTP Status Code.
Keys:
type: object
properties:
keys:
type: array
description: A list of Keys.
items:
type: string
format: hex-encoded Key
next:
type: string
format: hex-encoded Key
description: If present, the next Key in the listing. It can be passed as the "start" query parameter to continue the listing. If not present, the end of the listing has been reached.
Nodes:
type: object
properties:
nodes:
type: array
description: A list of Node addresses.
items:
type: string
format: hex-encoded Node address
next:
type: string
format: hex-encoded Node address
description: If present, the next Node address in the listing. It can be passed as the "start" query parameter to continue the listing. If not present, the end of the listing has been reached.

385
storage/mock/mem/mem.go Normal file

@ -0,0 +1,385 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
// Package mem implements a mock store that keeps all chunk data in memory.
// While it can be used for testing on smaller scales, the main purpose of this
// package is to provide the simplest reference implementation of a mock store.
package mem
import (
"archive/tar"
"bytes"
"encoding/json"
"io"
"io/ioutil"
"sort"
"sync"
"github.com/ethereum/go-ethereum/common"
"github.com/ethersphere/swarm/storage/mock"
)
// GlobalStore stores all chunk data and also keys and node addresses relations.
// It implements mock.GlobalStore interface.
type GlobalStore struct {
// holds a slice of keys per node
nodeKeys map[common.Address][][]byte
// holds which key is stored on which nodes
keyNodes map[string][]common.Address
// all node addresses
nodes []common.Address
// all keys
keys [][]byte
// all keys data
data map[string][]byte
mu sync.RWMutex
}
// NewGlobalStore creates a new instance of GlobalStore.
func NewGlobalStore() *GlobalStore {
return &GlobalStore{
nodeKeys: make(map[common.Address][][]byte),
keyNodes: make(map[string][]common.Address),
nodes: make([]common.Address, 0),
keys: make([][]byte, 0),
data: make(map[string][]byte),
}
}
// NewNodeStore returns a new instance of NodeStore that retrieves and stores
// chunk data only for a node with address addr.
func (s *GlobalStore) NewNodeStore(addr common.Address) *mock.NodeStore {
return mock.NewNodeStore(addr, s)
}
// Get returns chunk data if the chunk with key exists for node
// on address addr.
func (s *GlobalStore) Get(addr common.Address, key []byte) (data []byte, err error) {
s.mu.RLock()
defer s.mu.RUnlock()
if _, has := s.nodeKeyIndex(addr, key); !has {
return nil, mock.ErrNotFound
}
data, ok := s.data[string(key)]
if !ok {
return nil, mock.ErrNotFound
}
return data, nil
}
// Put saves the chunk data for node with address addr.
func (s *GlobalStore) Put(addr common.Address, key []byte, data []byte) error {
s.mu.Lock()
defer s.mu.Unlock()
if i, found := s.nodeKeyIndex(addr, key); !found {
s.nodeKeys[addr] = append(s.nodeKeys[addr], nil)
copy(s.nodeKeys[addr][i+1:], s.nodeKeys[addr][i:])
s.nodeKeys[addr][i] = key
}
if i, found := s.keyNodeIndex(key, addr); !found {
k := string(key)
s.keyNodes[k] = append(s.keyNodes[k], addr)
copy(s.keyNodes[k][i+1:], s.keyNodes[k][i:])
s.keyNodes[k][i] = addr
}
if i, found := s.nodeIndex(addr); !found {
s.nodes = append(s.nodes, addr)
copy(s.nodes[i+1:], s.nodes[i:])
s.nodes[i] = addr
}
if i, found := s.keyIndex(key); !found {
s.keys = append(s.keys, nil)
copy(s.keys[i+1:], s.keys[i:])
s.keys[i] = key
}
s.data[string(key)] = data
return nil
}
// Delete removes the chunk reference for the node with address addr.
func (s *GlobalStore) Delete(addr common.Address, key []byte) error {
s.mu.Lock()
defer s.mu.Unlock()
if i, has := s.nodeKeyIndex(addr, key); has {
s.nodeKeys[addr] = append(s.nodeKeys[addr][:i], s.nodeKeys[addr][i+1:]...)
}
k := string(key)
if i, on := s.keyNodeIndex(key, addr); on {
s.keyNodes[k] = append(s.keyNodes[k][:i], s.keyNodes[k][i+1:]...)
}
if len(s.nodeKeys[addr]) == 0 {
if i, found := s.nodeIndex(addr); found {
s.nodes = append(s.nodes[:i], s.nodes[i+1:]...)
}
}
if len(s.keyNodes[k]) == 0 {
if i, found := s.keyIndex(key); found {
s.keys = append(s.keys[:i], s.keys[i+1:]...)
}
}
return nil
}
// HasKey returns whether a node with addr contains the key.
func (s *GlobalStore) HasKey(addr common.Address, key []byte) (yes bool) {
s.mu.RLock()
defer s.mu.RUnlock()
_, yes = s.nodeKeyIndex(addr, key)
return yes
}
// keyIndex returns the index of a key in keys slice.
func (s *GlobalStore) keyIndex(key []byte) (index int, found bool) {
l := len(s.keys)
index = sort.Search(l, func(i int) bool {
return bytes.Compare(s.keys[i], key) >= 0
})
found = index < l && bytes.Equal(s.keys[index], key)
return index, found
}
// nodeIndex returns the index of a node address in nodes slice.
func (s *GlobalStore) nodeIndex(addr common.Address) (index int, found bool) {
l := len(s.nodes)
index = sort.Search(l, func(i int) bool {
return bytes.Compare(s.nodes[i][:], addr[:]) >= 0
})
found = index < l && bytes.Equal(s.nodes[index][:], addr[:])
return index, found
}
// nodeKeyIndex returns the index of a key in nodeKeys slice.
func (s *GlobalStore) nodeKeyIndex(addr common.Address, key []byte) (index int, found bool) {
l := len(s.nodeKeys[addr])
index = sort.Search(l, func(i int) bool {
return bytes.Compare(s.nodeKeys[addr][i], key) >= 0
})
found = index < l && bytes.Equal(s.nodeKeys[addr][index], key)
return index, found
}
// keyNodeIndex returns the index of a node address in keyNodes slice.
func (s *GlobalStore) keyNodeIndex(key []byte, addr common.Address) (index int, found bool) {
k := string(key)
l := len(s.keyNodes[k])
index = sort.Search(l, func(i int) bool {
return bytes.Compare(s.keyNodes[k][i][:], addr[:]) >= 0
})
found = index < l && s.keyNodes[k][index] == addr
return index, found
}
// Keys returns a paginated list of keys on all nodes.
func (s *GlobalStore) Keys(startKey []byte, limit int) (keys mock.Keys, err error) {
s.mu.RLock()
defer s.mu.RUnlock()
var i int
if startKey != nil {
i, _ = s.keyIndex(startKey)
}
total := len(s.keys)
max := maxIndex(i, limit, total)
keys.Keys = make([][]byte, 0, max-i)
for ; i < max; i++ {
keys.Keys = append(keys.Keys, append([]byte(nil), s.keys[i]...))
}
if total > max {
keys.Next = s.keys[max]
}
return keys, nil
}
// Nodes returns a paginated list of all known nodes.
func (s *GlobalStore) Nodes(startAddr *common.Address, limit int) (nodes mock.Nodes, err error) {
s.mu.RLock()
defer s.mu.RUnlock()
var i int
if startAddr != nil {
i, _ = s.nodeIndex(*startAddr)
}
total := len(s.nodes)
max := maxIndex(i, limit, total)
nodes.Addrs = make([]common.Address, 0, max-i)
for ; i < max; i++ {
nodes.Addrs = append(nodes.Addrs, s.nodes[i])
}
if total > max {
nodes.Next = &s.nodes[max]
}
return nodes, nil
}
// NodeKeys returns a paginated list of keys on a node with provided address.
func (s *GlobalStore) NodeKeys(addr common.Address, startKey []byte, limit int) (keys mock.Keys, err error) {
s.mu.RLock()
defer s.mu.RUnlock()
var i int
if startKey != nil {
i, _ = s.nodeKeyIndex(addr, startKey)
}
total := len(s.nodeKeys[addr])
max := maxIndex(i, limit, total)
keys.Keys = make([][]byte, 0, max-i)
for ; i < max; i++ {
keys.Keys = append(keys.Keys, append([]byte(nil), s.nodeKeys[addr][i]...))
}
if total > max {
keys.Next = s.nodeKeys[addr][max]
}
return keys, nil
}
// KeyNodes returns a paginated list of nodes that contain a particular key.
func (s *GlobalStore) KeyNodes(key []byte, startAddr *common.Address, limit int) (nodes mock.Nodes, err error) {
s.mu.RLock()
defer s.mu.RUnlock()
var i int
if startAddr != nil {
i, _ = s.keyNodeIndex(key, *startAddr)
}
total := len(s.keyNodes[string(key)])
max := maxIndex(i, limit, total)
nodes.Addrs = make([]common.Address, 0, max-i)
for ; i < max; i++ {
nodes.Addrs = append(nodes.Addrs, s.keyNodes[string(key)][i])
}
if total > max {
nodes.Next = &s.keyNodes[string(key)][max]
}
return nodes, nil
}
// maxIndex returns the end index for one page listing
// based on the start index, limit and total number of elements.
func maxIndex(start, limit, total int) (max int) {
if limit <= 0 {
limit = mock.DefaultLimit
}
if limit > mock.MaxLimit {
limit = mock.MaxLimit
}
max = total
if start+limit < max {
max = start + limit
}
return max
}
// Import reads tar archive from a reader that contains exported chunk data.
// It returns the number of chunks imported and an error.
func (s *GlobalStore) Import(r io.Reader) (n int, err error) {
s.mu.Lock()
defer s.mu.Unlock()
tr := tar.NewReader(r)
for {
hdr, err := tr.Next()
if err != nil {
if err == io.EOF {
break
}
return n, err
}
data, err := ioutil.ReadAll(tr)
if err != nil {
return n, err
}
var c mock.ExportedChunk
if err = json.Unmarshal(data, &c); err != nil {
return n, err
}
key := common.Hex2Bytes(hdr.Name)
s.keyNodes[string(key)] = c.Addrs
for _, addr := range c.Addrs {
if i, has := s.nodeKeyIndex(addr, key); !has {
s.nodeKeys[addr] = append(s.nodeKeys[addr], nil)
copy(s.nodeKeys[addr][i+1:], s.nodeKeys[addr][i:])
s.nodeKeys[addr][i] = key
}
if i, found := s.nodeIndex(addr); !found {
s.nodes = append(s.nodes, addr)
copy(s.nodes[i+1:], s.nodes[i:])
s.nodes[i] = addr
}
}
if i, found := s.keyIndex(key); !found {
s.keys = append(s.keys, nil)
copy(s.keys[i+1:], s.keys[i:])
s.keys[i] = key
}
s.data[string(key)] = c.Data
n++
}
return n, err
}
// Export writes to a writer a tar archive with all chunk data from
// the store. It returns the number of chunks exported and an error.
func (s *GlobalStore) Export(w io.Writer) (n int, err error) {
s.mu.RLock()
defer s.mu.RUnlock()
tw := tar.NewWriter(w)
defer tw.Close()
buf := bytes.NewBuffer(make([]byte, 0, 1024))
encoder := json.NewEncoder(buf)
for key, addrs := range s.keyNodes {
buf.Reset()
if err = encoder.Encode(mock.ExportedChunk{
Addrs: addrs,
Data: s.data[key],
}); err != nil {
return n, err
}
data := buf.Bytes()
hdr := &tar.Header{
Name: common.Bytes2Hex([]byte(key)),
Mode: 0644,
Size: int64(len(data)),
}
if err := tw.WriteHeader(hdr); err != nil {
return n, err
}
if _, err := tw.Write(data); err != nil {
return n, err
}
n++
}
return n, err
}
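
As a hedged illustration of the Import/Export contract (not part of this commit), the sketch below round-trips a chunk between two in-memory stores through the tar-based export format; the address and key values are placeholders.

// Hypothetical sketch, not part of this commit: export all chunk data from one
// in-memory store and import it into another.
package main

import (
	"bytes"
	"fmt"
	"log"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethersphere/swarm/storage/mock/mem"
)

func main() {
	src := mem.NewGlobalStore()
	dst := mem.NewGlobalStore()

	addr := common.HexToAddress("0x02")
	key := common.Hex2Bytes("0102")
	if err := src.Put(addr, key, []byte("payload")); err != nil {
		log.Fatal(err)
	}

	// Export writes a tar archive; Import reads the same format back.
	var buf bytes.Buffer
	exported, err := src.Export(&buf)
	if err != nil {
		log.Fatal(err)
	}
	imported, err := dst.Import(&buf)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("exported:", exported, "imported:", imported, "has key:", dst.HasKey(addr, key))
}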


@ -0,0 +1,42 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package mem
import (
"testing"
"github.com/ethersphere/swarm/storage/mock/test"
)
// TestGlobalStore runs tests for a GlobalStore
// using the test.MockStore function.
func TestGlobalStore(t *testing.T) {
test.MockStore(t, NewGlobalStore(), 100)
}
// TestGlobalStoreListings runs tests for a GlobalStore
// using the test.MockStoreListings function.
func TestGlobalStoreListings(t *testing.T) {
test.MockStoreListings(t, NewGlobalStore(), 1000)
}
// TestImportExport runs tests for importing and
// exporting data between two GlobalStores
// using the test.ImportExport function.
func TestImportExport(t *testing.T) {
test.ImportExport(t, NewGlobalStore(), NewGlobalStore(), 100)
}

142
storage/mock/mock.go Normal file

@ -0,0 +1,142 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
// Package mock defines types that are used by different implementations
// of mock storages.
//
// Implementations of mock storages are located in directories
// under this package:
//
// - db - LevelDB backend
// - mem - in memory map backend
// - rpc - RPC client that can connect to other backends
//
// Mock storages can implement Importer and Exporter interfaces
// for importing and exporting all chunk data that they contain.
// The exported file is a tar archive with all files named by
// hexadecimal representations of chunk keys and with content
// with JSON-encoded ExportedChunk structure. Exported format
// should be preserved across all mock store implementations.
package mock
import (
"errors"
"io"
"github.com/ethereum/go-ethereum/common"
)
const (
// DefaultLimit should be used as the default limit for
// Keys, Nodes, NodeKeys and KeyNodes GlobalStorer
// method implementations.
DefaultLimit = 100
// MaxLimit should be used as the maximal returned number
// of items for Keys, Nodes, NodeKeys and KeyNodes GlobalStorer
// method implementations, regardless of the provided limit.
MaxLimit = 1000
)
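// A GlobalStorer implementation is expected to clamp the limit argument of
// its listing methods roughly like this (a sketch, not part of the interface):
//
// if limit <= 0 {
// 	limit = DefaultLimit
// } else if limit > MaxLimit {
// 	limit = MaxLimit
// }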
// ErrNotFound indicates that the chunk is not found.
var ErrNotFound = errors.New("not found")
// NodeStore holds the node address and a reference to the GlobalStorer
// in order to access and store chunk data only for one node.
type NodeStore struct {
store GlobalStorer
addr common.Address
}
// NewNodeStore creates a new instance of NodeStore that keeps
// chunk data in the provided GlobalStorer under the provided node address.
func NewNodeStore(addr common.Address, store GlobalStorer) *NodeStore {
return &NodeStore{
store: store,
addr: addr,
}
}
// Get returns chunk data for a key for a node that has the address
// provided on NodeStore initialization.
func (n *NodeStore) Get(key []byte) (data []byte, err error) {
return n.store.Get(n.addr, key)
}
// Put saves chunk data for a key for a node that has the address
// provided on NodeStore initialization.
func (n *NodeStore) Put(key []byte, data []byte) error {
return n.store.Put(n.addr, key, data)
}
// Delete removes chunk data for a key for a node that has the address
// provided on NodeStore initialization.
func (n *NodeStore) Delete(key []byte) error {
return n.store.Delete(n.addr, key)
}
// Keys returns a paginated list of keys for a node that has the address
// provided on NodeStore initialization.
func (n *NodeStore) Keys(startKey []byte, limit int) (keys Keys, err error) {
return n.store.NodeKeys(n.addr, startKey, limit)
}
// GlobalStorer defines methods for a mock db store
// that stores chunk data for all swarm nodes.
// It is used in tests to construct mock NodeStores
// for swarm nodes and to track and validate chunks.
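// A minimal usage sketch (chunkKey and chunkData are placeholders, and the
// mem backend from this package tree is assumed):
//
// globalStore := mem.NewGlobalStore()
// nodeStore := globalStore.NewNodeStore(common.HexToAddress("0x1"))
// if err := nodeStore.Put(chunkKey, chunkData); err != nil {
// 	// handle error
// }
// data, err := nodeStore.Get(chunkKey)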
type GlobalStorer interface {
Get(addr common.Address, key []byte) (data []byte, err error)
Put(addr common.Address, key []byte, data []byte) error
Delete(addr common.Address, key []byte) error
HasKey(addr common.Address, key []byte) bool
Keys(startKey []byte, limit int) (keys Keys, err error)
Nodes(startAddr *common.Address, limit int) (nodes Nodes, err error)
NodeKeys(addr common.Address, startKey []byte, limit int) (keys Keys, err error)
KeyNodes(key []byte, startAddr *common.Address, limit int) (nodes Nodes, err error)
// NewNodeStore creates an instance of NodeStore
// to be used by a single swarm node with
// address addr.
NewNodeStore(addr common.Address) *NodeStore
}
// Keys is the result returned by the Keys and NodeKeys GlobalStorer methods.
type Keys struct {
Keys [][]byte
Next []byte
}
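// Iterating over all keys follows the Next cursor (a sketch, assuming a
// GlobalStorer value gs):
//
// var startKey []byte
// for {
// 	page, err := gs.Keys(startKey, DefaultLimit)
// 	if err != nil {
// 		// handle error
// 	}
// 	// process page.Keys
// 	if page.Next == nil {
// 		break
// 	}
// 	startKey = page.Next
// }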
// Nodes is the result returned by the Nodes and KeyNodes GlobalStorer methods.
type Nodes struct {
Addrs []common.Address
Next *common.Address
}
// Importer defines a method for importing mock store data
// from an exported tar archive.
type Importer interface {
Import(r io.Reader) (n int, err error)
}
// Exporter defines a method for exporting mock store data
// to a tar archive.
type Exporter interface {
Export(w io.Writer) (n int, err error)
}
// ExportedChunk is the structure that is saved in the tar archive for
// each chunk as JSON-encoded bytes.
type ExportedChunk struct {
Data []byte `json:"d"`
Addrs []common.Address `json:"a"`
}

114
storage/mock/rpc/rpc.go Normal file
View File

@ -0,0 +1,114 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
// Package rpc implements an RPC client that connects to a centralized mock store.
// The centralized mock store can be any other mock store implementation that is
// registered to an Ethereum RPC server under the mockStore name. The methods that
// mock.GlobalStorer defines are the same ones that are exposed over RPC. Example:
//
// server := rpc.NewServer()
// server.RegisterName("mockStore", mem.NewGlobalStore())
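// On the client side, a GlobalStore can then be created from any rpc.Client,
// for example an in-process one (a sketch mirroring the test setup in this package):
//
// client := rpc.DialInProc(server)
// store := NewGlobalStore(client)
// defer store.Close()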
package rpc
import (
"fmt"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/rpc"
"github.com/ethersphere/swarm/log"
"github.com/ethersphere/swarm/storage/mock"
)
// GlobalStore wraps an rpc.Client that connects to a centralized mock store.
// Closing a GlobalStore instance is required to release the RPC client resources.
type GlobalStore struct {
client *rpc.Client
}
// NewGlobalStore creates a new instance of GlobalStore.
func NewGlobalStore(client *rpc.Client) *GlobalStore {
return &GlobalStore{
client: client,
}
}
// Close closes RPC client.
func (s *GlobalStore) Close() error {
s.client.Close()
return nil
}
// NewNodeStore returns a new instance of NodeStore that retrieves and stores
// chunk data only for a node with address addr.
func (s *GlobalStore) NewNodeStore(addr common.Address) *mock.NodeStore {
return mock.NewNodeStore(addr, s)
}
// Get calls the Get method on the RPC server.
func (s *GlobalStore) Get(addr common.Address, key []byte) (data []byte, err error) {
err = s.client.Call(&data, "mockStore_get", addr, key)
if err != nil && err.Error() == "not found" {
// return the mock package error value instead of an rpc error
return data, mock.ErrNotFound
}
return data, err
}
// Put calls the Put method on the RPC server.
func (s *GlobalStore) Put(addr common.Address, key []byte, data []byte) error {
err := s.client.Call(nil, "mockStore_put", addr, key, data)
return err
}
// Delete calls the Delete method on the RPC server.
func (s *GlobalStore) Delete(addr common.Address, key []byte) error {
err := s.client.Call(nil, "mockStore_delete", addr, key)
return err
}
// HasKey calls the HasKey method on the RPC server.
func (s *GlobalStore) HasKey(addr common.Address, key []byte) bool {
var has bool
if err := s.client.Call(&has, "mockStore_hasKey", addr, key); err != nil {
log.Error(fmt.Sprintf("mock store HasKey: addr %s, key %064x: %v", addr, key, err))
return false
}
return has
}
// Keys returns a paginated list of keys on all nodes.
func (s *GlobalStore) Keys(startKey []byte, limit int) (keys mock.Keys, err error) {
err = s.client.Call(&keys, "mockStore_keys", startKey, limit)
return keys, err
}
// Nodes returns a paginated list of all known nodes.
func (s *GlobalStore) Nodes(startAddr *common.Address, limit int) (nodes mock.Nodes, err error) {
err = s.client.Call(&nodes, "mockStore_nodes", startAddr, limit)
return nodes, err
}
// NodeKeys returns a paginated list of keys on the node with the provided address.
func (s *GlobalStore) NodeKeys(addr common.Address, startKey []byte, limit int) (keys mock.Keys, err error) {
err = s.client.Call(&keys, "mockStore_nodeKeys", addr, startKey, limit)
return keys, err
}
// KeyNodes returns a paginated list of nodes that contain a particular key.
func (s *GlobalStore) KeyNodes(key []byte, startAddr *common.Address, limit int) (nodes mock.Nodes, err error) {
err = s.client.Call(&nodes, "mockStore_keyNodes", key, startAddr, limit)
return nodes, err
}

View File

@ -0,0 +1,64 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package rpc
import (
"testing"
"github.com/ethereum/go-ethereum/rpc"
"github.com/ethersphere/swarm/storage/mock/mem"
"github.com/ethersphere/swarm/storage/mock/test"
)
// TestRPCStore runs tests for a GlobalStore
// using the test.MockStore function.
func TestRPCStore(t *testing.T) {
store, cleanup := newTestStore(t)
defer cleanup()
test.MockStore(t, store, 30)
}
// TestRPCStoreListings runs tests for a GlobalStore
// using the test.MockStoreListings function.
func TestRPCStoreListings(t *testing.T) {
store, cleanup := newTestStore(t)
defer cleanup()
test.MockStoreListings(t, store, 1000)
}
// newTestStore creates a temporary GlobalStore
// that will be closed when the returned cleanup function
// is called.
func newTestStore(t *testing.T) (s *GlobalStore, cleanup func()) {
t.Helper()
serverStore := mem.NewGlobalStore()
server := rpc.NewServer()
if err := server.RegisterName("mockStore", serverStore); err != nil {
t.Fatal(err)
}
store := NewGlobalStore(rpc.DialInProc(server))
return store, func() {
if err := store.Close(); err != nil {
t.Error(err)
}
}
}

362
storage/mock/test/test.go Normal file
View File

@ -0,0 +1,362 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
// Package test provides functions that are used for testing
// GlobalStorer implementations.
package test
import (
"bytes"
"encoding/binary"
"fmt"
"io"
"strconv"
"testing"
"github.com/ethereum/go-ethereum/common"
"github.com/ethersphere/swarm/storage"
"github.com/ethersphere/swarm/storage/mock"
)
// MockStore creates NodeStore instances from the provided GlobalStorer,
// each one with a unique address, stores different chunks on them
// and checks whether or not they are retrievable on each node.
// The argument n defines the number of NodeStores that will be created.
func MockStore(t *testing.T, globalStore mock.GlobalStorer, n int) {
t.Run("GlobalStore", func(t *testing.T) {
addrs := make([]common.Address, n)
for i := 0; i < n; i++ {
addrs[i] = common.HexToAddress(strconv.FormatInt(int64(i)+1, 16))
}
for i, addr := range addrs {
chunkAddr := storage.Address(append(addr[:], []byte(strconv.FormatInt(int64(i)+1, 16))...))
data := []byte(strconv.FormatInt(int64(i)+1, 16))
data = append(data, make([]byte, 4096-len(data))...)
globalStore.Put(addr, chunkAddr, data)
for _, cAddr := range addrs {
cData, err := globalStore.Get(cAddr, chunkAddr)
if cAddr == addr {
if err != nil {
t.Fatalf("get data from store %s key %s: %v", cAddr.Hex(), chunkAddr.Hex(), err)
}
if !bytes.Equal(data, cData) {
t.Fatalf("data on store %s: expected %x, got %x", cAddr.Hex(), data, cData)
}
if !globalStore.HasKey(cAddr, chunkAddr) {
t.Fatalf("expected key %s on global store for node %s, but it was not found", chunkAddr.Hex(), cAddr.Hex())
}
} else {
if err != mock.ErrNotFound {
t.Fatalf("expected error from store %s: %v, got %v", cAddr.Hex(), mock.ErrNotFound, err)
}
if len(cData) > 0 {
t.Fatalf("data on store %s: expected nil, got %x", cAddr.Hex(), cData)
}
if globalStore.HasKey(cAddr, chunkAddr) {
t.Fatalf("not expected key %s on global store for node %s, but it was found", chunkAddr.Hex(), cAddr.Hex())
}
}
}
}
t.Run("delete", func(t *testing.T) {
chunkAddr := storage.Address([]byte("1234567890abcd"))
for _, addr := range addrs {
err := globalStore.Put(addr, chunkAddr, []byte("data"))
if err != nil {
t.Fatalf("put data to store %s key %s: %v", addr.Hex(), chunkAddr.Hex(), err)
}
}
firstNodeAddr := addrs[0]
if err := globalStore.Delete(firstNodeAddr, chunkAddr); err != nil {
t.Fatalf("delete from store %s key %s: %v", firstNodeAddr.Hex(), chunkAddr.Hex(), err)
}
for i, addr := range addrs {
_, err := globalStore.Get(addr, chunkAddr)
if i == 0 {
if err != mock.ErrNotFound {
t.Errorf("get data from store %s key %s: expected mock.ErrNotFound error, got %v", addr.Hex(), chunkAddr.Hex(), err)
}
} else {
if err != nil {
t.Errorf("get data from store %s key %s: %v", addr.Hex(), chunkAddr.Hex(), err)
}
}
}
})
})
t.Run("NodeStore", func(t *testing.T) {
nodes := make(map[common.Address]*mock.NodeStore)
for i := 0; i < n; i++ {
addr := common.HexToAddress(strconv.FormatInt(int64(i)+1, 16))
nodes[addr] = globalStore.NewNodeStore(addr)
}
i := 0
for addr, store := range nodes {
i++
chunkAddr := storage.Address(append(addr[:], []byte(fmt.Sprintf("%x", i))...))
data := []byte(strconv.FormatInt(int64(i)+1, 16))
data = append(data, make([]byte, 4096-len(data))...)
store.Put(chunkAddr, data)
for cAddr, cStore := range nodes {
cData, err := cStore.Get(chunkAddr)
if cAddr == addr {
if err != nil {
t.Fatalf("get data from store %s key %s: %v", cAddr.Hex(), chunkAddr.Hex(), err)
}
if !bytes.Equal(data, cData) {
t.Fatalf("data on store %s: expected %x, got %x", cAddr.Hex(), data, cData)
}
if !globalStore.HasKey(cAddr, chunkAddr) {
t.Fatalf("expected key %s on global store for node %s, but it was not found", chunkAddr.Hex(), cAddr.Hex())
}
} else {
if err != mock.ErrNotFound {
t.Fatalf("expected error from store %s: %v, got %v", cAddr.Hex(), mock.ErrNotFound, err)
}
if len(cData) > 0 {
t.Fatalf("data on store %s: expected nil, got %x", cAddr.Hex(), cData)
}
if globalStore.HasKey(cAddr, chunkAddr) {
t.Fatalf("not expected key %s on global store for node %s, but it was found", chunkAddr.Hex(), cAddr.Hex())
}
}
}
}
t.Run("delete", func(t *testing.T) {
chunkAddr := storage.Address([]byte("1234567890abcd"))
var chosenStore *mock.NodeStore
for addr, store := range nodes {
if chosenStore == nil {
chosenStore = store
}
err := store.Put(chunkAddr, []byte("data"))
if err != nil {
t.Fatalf("put data to store %s key %s: %v", addr.Hex(), chunkAddr.Hex(), err)
}
}
if err := chosenStore.Delete(chunkAddr); err != nil {
t.Fatalf("delete key %s: %v", chunkAddr.Hex(), err)
}
for addr, store := range nodes {
_, err := store.Get(chunkAddr)
if store == chosenStore {
if err != mock.ErrNotFound {
t.Errorf("get data from store %s key %s: expected mock.ErrNotFound error, got %v", addr.Hex(), chunkAddr.Hex(), err)
}
} else {
if err != nil {
t.Errorf("get data from store %s key %s: %v", addr.Hex(), chunkAddr.Hex(), err)
}
}
}
})
})
}
// MockStoreListings tests the global store methods Keys, Nodes, NodeKeys and KeyNodes.
// It uses the provided global store to put chunks for n node addresses
// and to validate that the methods return the right responses.
func MockStoreListings(t *testing.T, globalStore mock.GlobalStorer, n int) {
addrs := make([]common.Address, n)
for i := 0; i < n; i++ {
addrs[i] = common.HexToAddress(strconv.FormatInt(int64(i)+1, 16))
}
type chunk struct {
key []byte
data []byte
}
const chunksPerNode = 5
keys := make([][]byte, n*chunksPerNode)
for i := 0; i < n*chunksPerNode; i++ {
b := make([]byte, 8)
binary.BigEndian.PutUint64(b, uint64(i))
keys[i] = b
}
// keep track of keys on every node
nodeKeys := make(map[common.Address][][]byte)
// keep track of nodes that store particular key
keyNodes := make(map[string][]common.Address)
for i := 0; i < chunksPerNode; i++ {
// put chunks for every address
for j := 0; j < n; j++ {
addr := addrs[j]
key := keys[(i*n)+j]
err := globalStore.Put(addr, key, []byte("data"))
if err != nil {
t.Fatal(err)
}
nodeKeys[addr] = append(nodeKeys[addr], key)
keyNodes[string(key)] = append(keyNodes[string(key)], addr)
}
// test Keys method
var startKey []byte
var gotKeys [][]byte
for {
keys, err := globalStore.Keys(startKey, 0)
if err != nil {
t.Fatal(err)
}
gotKeys = append(gotKeys, keys.Keys...)
if keys.Next == nil {
break
}
startKey = keys.Next
}
wantKeys := keys[:(i+1)*n]
if fmt.Sprint(gotKeys) != fmt.Sprint(wantKeys) {
t.Fatalf("got #%v keys %v, want %v", i+1, gotKeys, wantKeys)
}
// test Nodes method
var startNode *common.Address
var gotNodes []common.Address
for {
nodes, err := globalStore.Nodes(startNode, 0)
if err != nil {
t.Fatal(err)
}
gotNodes = append(gotNodes, nodes.Addrs...)
if nodes.Next == nil {
break
}
startNode = nodes.Next
}
wantNodes := addrs
if fmt.Sprint(gotNodes) != fmt.Sprint(wantNodes) {
t.Fatalf("got #%v nodes %v, want %v", i+1, gotNodes, wantNodes)
}
// test NodeKeys method
for addr, wantKeys := range nodeKeys {
var startKey []byte
var gotKeys [][]byte
for {
keys, err := globalStore.NodeKeys(addr, startKey, 0)
if err != nil {
t.Fatal(err)
}
gotKeys = append(gotKeys, keys.Keys...)
if keys.Next == nil {
break
}
startKey = keys.Next
}
if fmt.Sprint(gotKeys) != fmt.Sprint(wantKeys) {
t.Fatalf("got #%v %s node keys %v, want %v", i+1, addr.Hex(), gotKeys, wantKeys)
}
}
// test KeyNodes method
for key, wantNodes := range keyNodes {
var startNode *common.Address
var gotNodes []common.Address
for {
nodes, err := globalStore.KeyNodes([]byte(key), startNode, 0)
if err != nil {
t.Fatal(err)
}
gotNodes = append(gotNodes, nodes.Addrs...)
if nodes.Next == nil {
break
}
startNode = nodes.Next
}
if fmt.Sprint(gotNodes) != fmt.Sprint(wantNodes) {
t.Fatalf("got #%v %x key nodes %v, want %v", i+1, []byte(key), gotNodes, wantNodes)
}
}
}
}
// ImportExport saves chunks to the outStore, exports them to a tar archive,
// imports the tar archive into the inStore and checks that all chunks are imported correctly.
func ImportExport(t *testing.T, outStore, inStore mock.GlobalStorer, n int) {
exporter, ok := outStore.(mock.Exporter)
if !ok {
t.Fatal("outStore does not implement mock.Exporter")
}
importer, ok := inStore.(mock.Importer)
if !ok {
t.Fatal("inStore does not implement mock.Importer")
}
addrs := make([]common.Address, n)
for i := 0; i < n; i++ {
addrs[i] = common.HexToAddress(strconv.FormatInt(int64(i)+1, 16))
}
for i, addr := range addrs {
chunkAddr := storage.Address(append(addr[:], []byte(strconv.FormatInt(int64(i)+1, 16))...))
data := []byte(strconv.FormatInt(int64(i)+1, 16))
data = append(data, make([]byte, 4096-len(data))...)
outStore.Put(addr, chunkAddr, data)
}
r, w := io.Pipe()
defer r.Close()
exportErrChan := make(chan error)
go func() {
defer w.Close()
_, err := exporter.Export(w)
exportErrChan <- err
}()
if _, err := importer.Import(r); err != nil {
t.Fatalf("import: %v", err)
}
if err := <-exportErrChan; err != nil {
t.Fatalf("export: %v", err)
}
for i, addr := range addrs {
chunkAddr := storage.Address(append(addr[:], []byte(strconv.FormatInt(int64(i)+1, 16))...))
data := []byte(strconv.FormatInt(int64(i)+1, 16))
data = append(data, make([]byte, 4096-len(data))...)
for _, cAddr := range addrs {
cData, err := inStore.Get(cAddr, chunkAddr)
if cAddr == addr {
if err != nil {
t.Fatalf("get data from store %s key %s: %v", cAddr.Hex(), chunkAddr.Hex(), err)
}
if !bytes.Equal(data, cData) {
t.Fatalf("data on store %s: expected %x, got %x", cAddr.Hex(), data, cData)
}
if !inStore.HasKey(cAddr, chunkAddr) {
t.Fatalf("expected key %s on global store for node %s, but it was not found", chunkAddr.Hex(), cAddr.Hex())
}
} else {
if err != mock.ErrNotFound {
t.Fatalf("expected error from store %s: %v, got %v", cAddr.Hex(), mock.ErrNotFound, err)
}
if len(cData) > 0 {
t.Fatalf("data on store %s: expected nil, got %x", cAddr.Hex(), cData)
}
if inStore.HasKey(cAddr, chunkAddr) {
t.Fatalf("not expected key %s on global store for node %s, but it was found", chunkAddr.Hex(), cAddr.Hex())
}
}
}
}
}