cmd/swarm, swarm: LocalStore storage integration

Committed by: Anton Evangelatov
Parent: c94d582aa7
Commit: 996755c4a8
cmd/swarm/config.go

@@ -252,15 +252,15 @@ func cmdLineOverride(currentConfig *bzzapi.Config, ctx *cli.Context) *bzzapi.Config {
 	}

 	if storePath := ctx.GlobalString(SwarmStorePath.Name); storePath != "" {
-		currentConfig.LocalStoreParams.ChunkDbPath = storePath
+		currentConfig.ChunkDbPath = storePath
 	}

 	if storeCapacity := ctx.GlobalUint64(SwarmStoreCapacity.Name); storeCapacity != 0 {
-		currentConfig.LocalStoreParams.DbCapacity = storeCapacity
+		currentConfig.DbCapacity = storeCapacity
 	}

 	if ctx.GlobalIsSet(SwarmStoreCacheCapacity.Name) {
-		currentConfig.LocalStoreParams.CacheCapacity = ctx.GlobalUint(SwarmStoreCacheCapacity.Name)
+		currentConfig.CacheCapacity = ctx.GlobalUint(SwarmStoreCacheCapacity.Name)
	}

 	if ctx.GlobalIsSet(SwarmBootnodeModeFlag.Name) {
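Note: the override block above now writes to fields hoisted from LocalStoreParams onto bzzapi.Config itself. A minimal sketch of the assumed new shape (the real struct lives in swarm/api/config.go; everything beyond the three fields shown is illustrative):

    // Sketch only: the LocalStoreParams sub-struct is gone and its tunables
    // sit directly on the top-level Swarm configuration.
    type Config struct {
        ChunkDbPath   string // chunk database path, set via SwarmStorePath
        DbCapacity    uint64 // max number of stored chunks, set via SwarmStoreCapacity
        CacheCapacity uint   // in-memory chunk cache size, set via SwarmStoreCacheCapacity
        // ... remaining fields unchanged ...
    }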
cmd/swarm/config_test.go

@@ -447,8 +447,8 @@ func TestConfigCmdLineOverridesFile(t *testing.T) {
 		t.Fatal("Expected Sync to be disabled, but is true")
 	}

-	if info.LocalStoreParams.DbCapacity != 9000000 {
-		t.Fatalf("Expected Capacity to be %d, got %d", 9000000, info.LocalStoreParams.DbCapacity)
+	if info.DbCapacity != 9000000 {
+		t.Fatalf("Expected Capacity to be %d, got %d", 9000000, info.DbCapacity)
 	}

 	if info.HiveParams.KeepAliveInterval != 6000000000 {
cmd/swarm/db.go (118 lines changed)
@@ -17,6 +17,10 @@
 package main

 import (
+	"archive/tar"
+	"bytes"
+	"encoding/binary"
+	"encoding/hex"
 	"fmt"
 	"io"
 	"os"
@@ -25,10 +29,22 @@ import (
 	"github.com/ethereum/go-ethereum/cmd/utils"
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/log"
-	"github.com/ethereum/go-ethereum/swarm/storage"
+	"github.com/ethereum/go-ethereum/rlp"
+	"github.com/ethereum/go-ethereum/swarm/chunk"
+	"github.com/ethereum/go-ethereum/swarm/storage/localstore"
+	"github.com/syndtr/goleveldb/leveldb"
+	"github.com/syndtr/goleveldb/leveldb/opt"
 	"gopkg.in/urfave/cli.v1"
 )

+var legacyKeyIndex = byte(0)
+var keyData = byte(6)
+
+type dpaDBIndex struct {
+	Idx    uint64
+	Access uint64
+}
+
 var dbCommand = cli.Command{
 	Name:               "db",
 	CustomHelpTemplate: helpTemplate,
@@ -67,6 +83,9 @@ The import may be quite large, consider piping the input through the Unix
 pv(1) tool to get a progress bar:

     pv chunks.tar | swarm db import ~/.ethereum/swarm/bzz-KEY/chunks -`,
+			Flags: []cli.Flag{
+				SwarmLegacyFlag,
+			},
 		},
 	},
 }
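Note: SwarmLegacyFlag is attached only to the import subcommand. A typical invocation with the new flag (paths and key illustrative) would be:

    swarm db import --legacy ~/.ethereum/swarm/bzz-KEY/chunks chunks.tar <basekey-hex>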
@@ -77,12 +96,6 @@ func dbExport(ctx *cli.Context) {
 		utils.Fatalf("invalid arguments, please specify both <chunkdb> (path to a local chunk database), <file> (path to write the tar archive to, - for stdout) and the base key")
 	}

-	store, err := openLDBStore(args[0], common.Hex2Bytes(args[2]))
-	if err != nil {
-		utils.Fatalf("error opening local chunk database: %s", err)
-	}
-	defer store.Close()
-
 	var out io.Writer
 	if args[1] == "-" {
 		out = os.Stdout
@@ -95,6 +108,23 @@ func dbExport(ctx *cli.Context) {
 		out = f
 	}

+	isLegacy := localstore.IsLegacyDatabase(args[0])
+	if isLegacy {
+		count, err := exportLegacy(args[0], common.Hex2Bytes(args[2]), out)
+		if err != nil {
+			utils.Fatalf("error exporting legacy local chunk database: %s", err)
+		}
+
+		log.Info(fmt.Sprintf("successfully exported %d chunks from legacy db", count))
+		return
+	}
+
+	store, err := openLDBStore(args[0], common.Hex2Bytes(args[2]))
+	if err != nil {
+		utils.Fatalf("error opening local chunk database: %s", err)
+	}
+	defer store.Close()
+
 	count, err := store.Export(out)
 	if err != nil {
 		utils.Fatalf("error exporting local chunk database: %s", err)
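Note: both the legacy and the localstore export paths emit the same archive layout — one tar entry per chunk, named by the hex-encoded chunk address and containing the raw chunk data (see the tar.Header built in exportLegacy further down). A minimal sketch of a consumer of such an archive, assuming it arrives on stdin:

    package main

    import (
        "archive/tar"
        "fmt"
        "io"
        "log"
        "os"
    )

    func main() {
        tr := tar.NewReader(os.Stdin)
        for {
            hdr, err := tr.Next()
            if err == io.EOF {
                break // end of archive
            }
            if err != nil {
                log.Fatal(err)
            }
            // entry name = hex chunk address, entry size = chunk data length
            fmt.Printf("chunk %s: %d bytes\n", hdr.Name, hdr.Size)
        }
    }

This pairs with the `-` stdout convention of the export command, e.g. `swarm db export <chunkdb> - <basekey> | go run list_chunks.go`.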
@@ -109,6 +139,8 @@ func dbImport(ctx *cli.Context) {
 		utils.Fatalf("invalid arguments, please specify both <chunkdb> (path to a local chunk database), <file> (path to read the tar archive from, - for stdin) and the base key")
 	}

+	legacy := ctx.IsSet(SwarmLegacyFlag.Name)
+
 	store, err := openLDBStore(args[0], common.Hex2Bytes(args[2]))
 	if err != nil {
 		utils.Fatalf("error opening local chunk database: %s", err)
@@ -127,7 +159,7 @@ func dbImport(ctx *cli.Context) {
 		in = f
 	}

-	count, err := store.Import(in)
+	count, err := store.Import(in, legacy)
 	if err != nil {
 		utils.Fatalf("error importing local chunk database: %s", err)
 	}
@@ -135,13 +167,73 @@ func dbImport(ctx *cli.Context) {
 	log.Info(fmt.Sprintf("successfully imported %d chunks", count))
 }

-func openLDBStore(path string, basekey []byte) (*storage.LDBStore, error) {
+func openLDBStore(path string, basekey []byte) (*localstore.DB, error) {
 	if _, err := os.Stat(filepath.Join(path, "CURRENT")); err != nil {
 		return nil, fmt.Errorf("invalid chunkdb path: %s", err)
 	}

-	storeparams := storage.NewDefaultStoreParams()
-	ldbparams := storage.NewLDBStoreParams(storeparams, path)
-	ldbparams.BaseKey = basekey
-	return storage.NewLDBStore(ldbparams)
+	return localstore.New(path, basekey, nil)
 }

+func decodeIndex(data []byte, index *dpaDBIndex) error {
+	dec := rlp.NewStream(bytes.NewReader(data), 0)
+	return dec.Decode(index)
+}
+
+func getDataKey(idx uint64, po uint8) []byte {
+	key := make([]byte, 10)
+	key[0] = keyData
+	key[1] = po
+	binary.BigEndian.PutUint64(key[2:], idx)
+
+	return key
+}
+
+func exportLegacy(path string, basekey []byte, out io.Writer) (int64, error) {
+	tw := tar.NewWriter(out)
+	defer tw.Close()
+	db, err := leveldb.OpenFile(path, &opt.Options{OpenFilesCacheCapacity: 128})
+	if err != nil {
+		return 0, err
+	}
+	defer db.Close()
+
+	it := db.NewIterator(nil, nil)
+	defer it.Release()
+	var count int64
+	for ok := it.Seek([]byte{legacyKeyIndex}); ok; ok = it.Next() {
+		key := it.Key()
+		if (key == nil) || (key[0] != legacyKeyIndex) {
+			break
+		}
+
+		var index dpaDBIndex
+
+		hash := key[1:]
+		decodeIndex(it.Value(), &index)
+
+		po := uint8(chunk.Proximity(basekey, hash))
+
+		datakey := getDataKey(index.Idx, po)
+		data, err := db.Get(datakey, nil)
+		if err != nil {
+			log.Crit(fmt.Sprintf("Chunk %x found but could not be accessed: %v, %x", key, err, datakey))
+			continue
+		}
+
+		hdr := &tar.Header{
+			Name: hex.EncodeToString(hash),
+			Mode: 0644,
+			Size: int64(len(data)),
+		}
+		if err := tw.WriteHeader(hdr); err != nil {
+			return count, err
+		}
+		if _, err := tw.Write(data); err != nil {
+			return count, err
+		}
+		count++
+	}
+
+	return count, nil
+}
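Note: the three helpers above replicate just enough of the legacy LDBStore schema to walk it without importing the old code: index entries are keyed by a 0x00 type byte followed by the chunk hash (hence the seek on legacyKeyIndex and hash := key[1:]), and the chunk payload lives under the 10-byte key assembled by getDataKey. A worked example of that key layout:

    // getDataKey(258, 3) produces:
    //   key[0]  = 0x06                      // keyData type byte
    //   key[1]  = 0x03                      // proximity order (po)
    //   key[2:] = 00 00 00 00 00 00 01 02   // idx 258 as a big-endian uint64
    key := getDataKey(258, 3) // len(key) == 10

The po byte is recomputed on export via chunk.Proximity(basekey, hash), which is why the base key must be supplied on the command line.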
cmd/swarm/export_test.go

@@ -17,19 +17,34 @@
 package main

 import (
+	"archive/tar"
 	"bytes"
+	"compress/gzip"
 	"crypto/md5"
+	"encoding/base64"
+	"encoding/hex"
 	"io"
+	"io/ioutil"
 	"net/http"
 	"os"
+	"path"
 	"runtime"
 	"strings"
 	"testing"

+	"github.com/ethereum/go-ethereum/cmd/swarm/testdata"
+	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/swarm"
 	"github.com/ethereum/go-ethereum/swarm/testutil"
 )

+const (
+	DATABASE_FIXTURE_BZZ_ACCOUNT = "0aa159029fa13ffa8fa1c6fff6ebceface99d6a4"
+	DATABASE_FIXTURE_PASSWORD    = "pass"
+	FIXTURE_DATADIR_PREFIX       = "swarm/bzz-0aa159029fa13ffa8fa1c6fff6ebceface99d6a4"
+	FixtureBaseKey               = "a9f22b3d77b4bdf5f3eefce995d6c8e7cecf2636f20956f08a0d1ed95adb52ad"
+)
+
 // TestCLISwarmExportImport perform the following test:
 // 1. runs swarm node
 // 2. uploads a random file
@@ -99,6 +114,112 @@ func TestCLISwarmExportImport(t *testing.T) {
 	mustEqualFiles(t, bytes.NewReader(content), res.Body)
 }

+// TestExportLegacyToNew checks that an old database gets imported correctly into the new localstore structure
+// The test sequence is as follows:
+// 1. unpack database fixture to tmp dir
+// 2. try to open with new swarm binary that should complain about old database
+// 3. export from old database
+// 4. remove the chunks folder
+// 5. import the dump
+// 6. file should be accessible
+func TestExportLegacyToNew(t *testing.T) {
+	/*
+		fixture bzz account 0aa159029fa13ffa8fa1c6fff6ebceface99d6a4
+	*/
+	const UPLOADED_FILE_MD5_HASH = "a001fdae53ba50cae584b8b02b06f821"
+	const UPLOADED_HASH = "67a86082ee0ea1bc7dd8d955bb1e14d04f61d55ae6a4b37b3d0296a3a95e454a"
+	tmpdir, err := ioutil.TempDir("", "swarm-test")
+	log.Trace("running legacy datastore migration test", "temp dir", tmpdir)
+	defer os.RemoveAll(tmpdir)
+	if err != nil {
+		t.Fatal(err)
+	}
+	inflateBase64Gzip(t, testdata.DATADIR_MIGRATION_FIXTURE, tmpdir)
+
+	tmpPassword := testutil.TempFileWithContent(t, DATABASE_FIXTURE_PASSWORD)
+	defer os.Remove(tmpPassword)
+
+	flags := []string{
+		"--datadir", tmpdir,
+		"--bzzaccount", DATABASE_FIXTURE_BZZ_ACCOUNT,
+		"--password", tmpPassword,
+	}
+
+	newSwarmOldDb := runSwarm(t, flags...)
+	_, matches := newSwarmOldDb.ExpectRegexp(".+")
+	newSwarmOldDb.ExpectExit()
+
+	if len(matches) == 0 {
+		t.Fatalf("stdout not matched")
+	}
+
+	if newSwarmOldDb.ExitStatus() == 0 {
+		t.Fatal("should error")
+	}
+	t.Log("exporting legacy database")
+	actualDataDir := path.Join(tmpdir, FIXTURE_DATADIR_PREFIX)
+	exportCmd := runSwarm(t, "--verbosity", "5", "db", "export", actualDataDir+"/chunks", tmpdir+"/export.tar", FixtureBaseKey)
+	exportCmd.ExpectExit()
+
+	stat, err := os.Stat(tmpdir + "/export.tar")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// make some silly size assumption
+	if stat.Size() < 90000 {
+		t.Fatal("export size too small")
+	}
+	t.Log("removing chunk datadir")
+	err = os.RemoveAll(path.Join(actualDataDir, "chunks"))
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// start second cluster
+	cluster2 := newTestCluster(t, 1)
+	var info2 swarm.Info
+	if err := cluster2.Nodes[0].Client.Call(&info2, "bzz_info"); err != nil {
+		t.Fatal(err)
+	}
+
+	// stop second cluster, so that we close LevelDB
+	cluster2.Stop()
+	defer cluster2.Cleanup()
+
+	// import the export.tar
+	importCmd := runSwarm(t, "db", "import", "--legacy", info2.Path+"/chunks", tmpdir+"/export.tar", strings.TrimPrefix(info2.BzzKey, "0x"))
+	importCmd.ExpectExit()
+
+	// spin second cluster back up
+	cluster2.StartExistingNodes(t, 1, strings.TrimPrefix(info2.BzzAccount, "0x"))
+	t.Log("trying to http get the file")
+	// try to fetch imported file
+	res, err := http.Get(cluster2.Nodes[0].URL + "/bzz:/" + UPLOADED_HASH)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if res.StatusCode != 200 {
+		t.Fatalf("expected HTTP status %d, got %s", 200, res.Status)
+	}
+	h := md5.New()
+	if _, err := io.Copy(h, res.Body); err != nil {
+		t.Fatal(err)
+	}
+
+	sum := h.Sum(nil)
+
+	b, err := hex.DecodeString(UPLOADED_FILE_MD5_HASH)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if !bytes.Equal(sum, b) {
+		t.Fatal("should be equal")
+	}
+}
+
 func mustEqualFiles(t *testing.T, up io.Reader, down io.Reader) {
 	h := md5.New()
 	upLen, err := io.Copy(h, up)
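Note: steps 3-5 of this test double as the manual migration recipe for node operators; with the node stopped it would look roughly like this (paths illustrative, base key as printed by bzz_info):

    swarm db export ~/.ethereum/swarm/bzz-KEY/chunks chunks.tar <basekey-hex>
    rm -rf ~/.ethereum/swarm/bzz-KEY/chunks
    swarm db import --legacy ~/.ethereum/swarm/bzz-KEY/chunks chunks.tar <basekey-hex>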
@@ -117,3 +238,46 @@ func mustEqualFiles(t *testing.T, up io.Reader, down io.Reader) {
 		t.Fatalf("downloaded imported file md5=%x (length %v) is not the same as the generated one mp5=%x (length %v)", downHash, downLen, upHash, upLen)
 	}
 }
+
+func inflateBase64Gzip(t *testing.T, base64File, directory string) {
+	t.Helper()
+
+	f := base64.NewDecoder(base64.StdEncoding, strings.NewReader(base64File))
+	gzf, err := gzip.NewReader(f)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	tarReader := tar.NewReader(gzf)
+
+	for {
+		header, err := tarReader.Next()
+		if err == io.EOF {
+			break
+		}
+
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		name := header.Name
+
+		switch header.Typeflag {
+		case tar.TypeDir:
+			err := os.Mkdir(path.Join(directory, name), os.ModePerm)
+			if err != nil {
+				t.Fatal(err)
+			}
+		case tar.TypeReg:
+			file, err := os.Create(path.Join(directory, name))
+			if err != nil {
+				t.Fatal(err)
+			}
+			if _, err := io.Copy(file, tarReader); err != nil {
+				t.Fatal(err)
+			}
+		default:
+			t.Fatal("shouldn't happen")
+		}
+	}
+}
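Note: inflateBase64Gzip reverses a base64-encoded, gzip-compressed tar stream, so a fixture such as testdata.DATADIR_MIGRATION_FIXTURE was presumably generated along the lines of:

    tar -C <datadir> -cz . | base64 -w0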
cmd/swarm/flags.go

@@ -182,4 +182,8 @@ var (
 		Usage:  "URL of the Global Store API provider (only for testing)",
 		EnvVar: SwarmGlobalstoreAPI,
 	}
+	SwarmLegacyFlag = cli.BoolFlag{
+		Name:  "legacy",
+		Usage: "Use this flag when importing a db export from a legacy local store database dump (for schemas older than 'sanctuary')",
+	}
 )
cmd/swarm/testdata/datastore_fixture.go (new file, vendored, 1390 lines)
File diff suppressed because it is too large