Compare commits
207 Commits
37685930d9
51df1c1f20
f524ec4326
eb794af833
67a7857124
c73b654fd1
9da128db70
4e5d1f1c39
d57e85ecc9
1990c9e621
319098cc1c
223d943481
8974e2e5e0
a4a2343cdc
fdfd6d3c39
b5537c5601
e916f9786d
909e968ebb
598f786aab
461291882e
1f0f6f0272
0a22ae5572
b0cfd9c786
6d8a1bfb08
4895665670
eaff89291c
e187711c65
d926bf2c7e
8db8d074e2
1a70338734
61a5976368
88c42ab4e7
4210dd1500
c971ab617d
f1986f86f2
28aca90716
9b1536b26a
3e57c33147
baa7eb901e
574378edb5
c95e4a80d1
897ea01d5f
ec192f18b4
aa34173f13
c343f75c26
52b1d09457
9402f96597
d0fd8d6fc2
cfde0b5f52
de06185fc3
3fb5f3ae11
e75d0a6e4c
947e0afeb3
1836366ac1
591cef17d4
e33a5de454
f04c0e341e
ea89f40f0d
1fc54d92ec
8c4a7fa8d3
423d4254f5
dea1ce052a
25982375a8
049f5b3572
0255951587
85cd64df0e
9608ccf106
3f06da7b5f
546d42179e
90829a04bf
f991995918
aab7ab04b0
43b940ec5a
b487bdf0ba
a3267ed929
9f7592c802
99483e85b9
1d666cf27e
eac16f9824
69c52bde3f
2977538ac0
7f0726f706
13af276418
ea06da0892
feb6620c34
90b22773e9
9e4f96a1a6
01a7e267dc
e8ea5aa0d5
5bee5d69d7
cbfb40b0aa
4cf2b4110e
0029a869f0
2ab24a2a8f
400332b99d
a5237a27ea
7a22e89080
e3a993d774
ed40767355
a20cc75b4a
b659718fd0
be2aec092d
17f80cc2e2
143c4341d8
3f33a7c8ce
c8dcb9584e
af28d12847
0ad32d3be7
68b0d30d4a
eae63c511c
ca34e8230e
342ec83d67
38c7eb0f26
d51faee240
426f62f1a8
7677ec1f34
d258eee211
84f8c0cc1f
998f6564b2
40a2c52397
a9c6ef6905
ccc0debb63
ff9b14617e
d6ed2f67a8
54294b45b1
d31802312a
55b579e02c
be22ee8dda
56de337e57
c934c06cc1
fbf57d53e2
6ce21a4744
9af364e42b
09d44247f7
0fe47e98c4
415969f534
d9cee2c172
ab6bdbd9b0
953b5ac015
f2fdb75dd9
f9c456e02d
579bd0f9fb
49719e21bc
a2e43d28d0
6286c255f1
f6bc65fc68
ff8a033f18
247b5f0369
49ec4f0cd1
2688dab48c
595b47e535
784aa83942
fcc18f4c80
53a18d2e27
7beccb29be
5dbd8b42a9
4e7dc34ff1
4747aad160
4ea493e7eb
c60f6f6214
ba975dc093
eab6e5a317
c4a4613d95
fedae95015
864e80a48f
a42be3b78d
6cf0ab38bd
5463ed9996
d7be5c6619
d2fe83dc5c
16f3c31773
5b3af4c3d1
60b433ab84
fd3da7c69d
cd9a1d5b37
2ad511ce09
541f299fbb
7c02933275
f2447bd4c3
ea1724de1a
577d375a0d
66432f3821
5d4d79ae26
6a01363d1d
58c4e033f4
5449139ca2
579ac6287b
a7720b5926
670bae4cd3
4a8d5d2b1e
d76c5ca532
c1ea527573
8dfa4f46a9
0afd767537
448d17b8f7
9922943b42
a1949d0788
9f6af6f812
ea171d5bd9
1da33028ce
7a7428a027
cfe8f5fd94
852aa143ac
b724d1aada
86be91b3e2
e7067be94f
9586f2acc7
.github/CODEOWNERS (vendored, 38 changes)
@@ -1,12 +1,32 @@
 # Lines starting with '#' are comments.
 # Each line is a file pattern followed by one or more owners.

-accounts/usbwallet @karalabe
-consensus @karalabe
-core/ @karalabe @holiman
-eth/ @karalabe
-les/ @zsfelfoldi
-light/ @zsfelfoldi
-mobile/ @karalabe
-p2p/ @fjl @zsfelfoldi
-whisper/ @gballet @gluk256
+accounts/usbwallet @karalabe
+consensus @karalabe
+core/ @karalabe @holiman
+eth/ @karalabe
+les/ @zsfelfoldi
+light/ @zsfelfoldi
+mobile/ @karalabe
+p2p/ @fjl @zsfelfoldi
+swarm/bmt @zelig
+swarm/dev @lmars
+swarm/fuse @jmozah @holisticode
+swarm/grafana_dashboards @nonsense
+swarm/metrics @nonsense @holisticode
+swarm/multihash @nolash
+swarm/network/bitvector @zelig @janos @gbalint
+swarm/network/priorityqueue @zelig @janos @gbalint
+swarm/network/simulations @zelig
+swarm/network/stream @janos @zelig @gbalint @holisticode @justelad
+swarm/network/stream/intervals @janos
+swarm/network/stream/testing @zelig
+swarm/pot @zelig
+swarm/pss @nolash @zelig @nonsense
+swarm/services @zelig
+swarm/state @justelad
+swarm/storage/encryption @gbalint @zelig @nagydani
+swarm/storage/mock @janos
+swarm/storage/mru @nolash
+swarm/testutil @lmars
+whisper/ @gballet @gluk256
.travis.yml (13 changes)
@@ -31,7 +31,6 @@ matrix:
       script:
         - unset -f cd # workaround for https://github.com/travis-ci/travis-ci/issues/8703
         - brew update
-        - brew install caskroom/cask/brew-cask
         - brew cask install osxfuse
         - go run build/ci.go install
         - go run build/ci.go test -coverage $TEST_PACKAGES
@@ -127,7 +126,7 @@ matrix:

     # This builder does the Android Maven and Azure uploads
     - os: linux
-      dist: precise # Needed for the android tools
+      dist: trusty
       addons:
         apt:
           packages:
@@ -147,16 +146,16 @@ matrix:
       git:
         submodules: false # avoid cloning ethereum/tests
       before_install:
-        - curl https://storage.googleapis.com/golang/go1.10.1.linux-amd64.tar.gz | tar -xz
+        - curl https://storage.googleapis.com/golang/go1.10.3.linux-amd64.tar.gz | tar -xz
         - export PATH=`pwd`/go/bin:$PATH
        - export GOROOT=`pwd`/go
        - export GOPATH=$HOME/go
       script:
         # Build the Android archive and upload it to Maven Central and Azure
-        - curl https://dl.google.com/android/repository/android-ndk-r15c-linux-x86_64.zip -o android-ndk-r15c.zip
-        - unzip -q android-ndk-r15c.zip && rm android-ndk-r15c.zip
-        - mv android-ndk-r15c $HOME
-        - export ANDROID_NDK=$HOME/android-ndk-r15c
+        - curl https://dl.google.com/android/repository/android-ndk-r17b-linux-x86_64.zip -o android-ndk-r17b.zip
+        - unzip -q android-ndk-r17b.zip && rm android-ndk-r17b.zip
+        - mv android-ndk-r17b $HOME
+        - export ANDROID_NDK=$HOME/android-ndk-r17b

         - mkdir -p $GOPATH/src/github.com/ethereum
         - ln -s `pwd` $GOPATH/src/github.com/ethereum
accounts/abi/argument.go
@@ -111,9 +111,14 @@ func (arguments Arguments) unpackTuple(v interface{}, marshalledValues []interfa
 	if err := requireUnpackKind(value, typ, kind, arguments); err != nil {
 		return err
 	}
-	// If the output interface is a struct, make sure names don't collide
+
+	// If the interface is a struct, get of abi->struct_field mapping
+
+	var abi2struct map[string]string
 	if kind == reflect.Struct {
-		if err := requireUniqueStructFieldNames(arguments); err != nil {
+		var err error
+		abi2struct, err = mapAbiToStructFields(arguments, value)
+		if err != nil {
 			return err
 		}
 	}
@@ -123,9 +128,10 @@ func (arguments Arguments) unpackTuple(v interface{}, marshalledValues []interfa

 		switch kind {
 		case reflect.Struct:
-			err := unpackStruct(value, reflectValue, arg)
-			if err != nil {
-				return err
+			if structField, ok := abi2struct[arg.Name]; ok {
+				if err := set(value.FieldByName(structField), reflectValue, arg); err != nil {
+					return err
+				}
 			}
 		case reflect.Slice, reflect.Array:
 			if value.Len() < i {
@@ -151,17 +157,22 @@ func (arguments Arguments) unpackAtomic(v interface{}, marshalledValues []interf
 	if len(marshalledValues) != 1 {
 		return fmt.Errorf("abi: wrong length, expected single value, got %d", len(marshalledValues))
 	}
+
 	elem := reflect.ValueOf(v).Elem()
 	kind := elem.Kind()
 	reflectValue := reflect.ValueOf(marshalledValues[0])
+
+	var abi2struct map[string]string
 	if kind == reflect.Struct {
-		//make sure names don't collide
-		if err := requireUniqueStructFieldNames(arguments); err != nil {
+		var err error
+		if abi2struct, err = mapAbiToStructFields(arguments, elem); err != nil {
 			return err
 		}
-		return unpackStruct(elem, reflectValue, arguments[0])
+		arg := arguments.NonIndexed()[0]
+		if structField, ok := abi2struct[arg.Name]; ok {
+			return set(elem.FieldByName(structField), reflectValue, arg)
+		}
+		return nil
 	}

 	return set(elem, reflectValue, arguments.NonIndexed()[0])
@@ -277,18 +288,3 @@ func capitalise(input string) string {
 	}
 	return strings.ToUpper(input[:1]) + input[1:]
 }
-
-//unpackStruct extracts each argument into its corresponding struct field
-func unpackStruct(value, reflectValue reflect.Value, arg Argument) error {
-	name := capitalise(arg.Name)
-	typ := value.Type()
-	for j := 0; j < typ.NumField(); j++ {
-		// TODO read tags: `abi:"fieldName"`
-		if typ.Field(j).Name == name {
-			if err := set(value.Field(j), reflectValue, arg); err != nil {
-				return err
-			}
-		}
-	}
-	return nil
-}
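The new mapping is exercised end to end in the event tests further down. As a quick orientation, here is a minimal sketch of what the tag support enables for callers, assuming the `abi.JSON` / `ABI.Unpack` entry points of this era; the ABI string, type, and values below are illustrative, not part of the diff:

```go
package main

import (
	"fmt"
	"math/big"
	"strings"

	"github.com/ethereum/go-ethereum/accounts/abi"
	"github.com/ethereum/go-ethereum/common"
)

// A one-output event; the tag pairs Val with the abi output named "value".
const def = `[{"anonymous":false,"inputs":[{"indexed":false,"name":"value","type":"uint256"}],"name":"Transfer","type":"event"}]`

type transfer struct {
	Val *big.Int `abi:"value"`
}

func main() {
	parsed, err := abi.JSON(strings.NewReader(def))
	if err != nil {
		panic(err)
	}
	// 32-byte big-endian encoding of 1000000, as in the test fixtures below.
	data := common.Hex2Bytes("00000000000000000000000000000000000000000000000000000000000f4240")
	var out transfer
	if err := parsed.Unpack(&out, "Transfer", data); err != nil {
		panic(err)
	}
	fmt.Println(out.Val) // 1000000
}
```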
accounts/abi/bind/backends/simulated.go
@@ -31,6 +31,7 @@ import (
 	"github.com/ethereum/go-ethereum/consensus/ethash"
 	"github.com/ethereum/go-ethereum/core"
 	"github.com/ethereum/go-ethereum/core/bloombits"
+	"github.com/ethereum/go-ethereum/core/rawdb"
 	"github.com/ethereum/go-ethereum/core/state"
 	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/core/vm"
@@ -65,7 +66,7 @@ type SimulatedBackend struct {
 // NewSimulatedBackend creates a new binding backend using a simulated blockchain
 // for testing purposes.
 func NewSimulatedBackend(alloc core.GenesisAlloc) *SimulatedBackend {
-	database, _ := ethdb.NewMemDatabase()
+	database := ethdb.NewMemDatabase()
 	genesis := core.Genesis{Config: params.AllEthashProtocolChanges, Alloc: alloc}
 	genesis.MustCommit(database)
 	blockchain, _ := core.NewBlockChain(database, nil, genesis.Config, ethash.NewFaker(), vm.Config{})
@@ -159,7 +160,7 @@ func (b *SimulatedBackend) StorageAt(ctx context.Context, contract common.Addres

 // TransactionReceipt returns the receipt of a transaction.
 func (b *SimulatedBackend) TransactionReceipt(ctx context.Context, txHash common.Hash) (*types.Receipt, error) {
-	receipt, _, _, _ := core.GetReceipt(b.database, txHash)
+	receipt, _, _, _ := rawdb.ReadReceipt(b.database, txHash)
 	return receipt, nil
 }

@@ -430,11 +431,19 @@ func (fb *filterBackend) HeaderByNumber(ctx context.Context, block rpc.BlockNumb
 }

 func (fb *filterBackend) GetReceipts(ctx context.Context, hash common.Hash) (types.Receipts, error) {
-	return core.GetBlockReceipts(fb.db, hash, core.GetBlockNumber(fb.db, hash)), nil
+	number := rawdb.ReadHeaderNumber(fb.db, hash)
+	if number == nil {
+		return nil, nil
+	}
+	return rawdb.ReadReceipts(fb.db, hash, *number), nil
 }

 func (fb *filterBackend) GetLogs(ctx context.Context, hash common.Hash) ([][]*types.Log, error) {
-	receipts := core.GetBlockReceipts(fb.db, hash, core.GetBlockNumber(fb.db, hash))
+	number := rawdb.ReadHeaderNumber(fb.db, hash)
+	if number == nil {
+		return nil, nil
+	}
+	receipts := rawdb.ReadReceipts(fb.db, hash, *number)
 	if receipts == nil {
 		return nil, nil
 	}
@@ -445,7 +454,7 @@ func (fb *filterBackend) GetLogs(ctx context.Context, hash common.Hash) ([][]*ty
 	return logs, nil
 }

-func (fb *filterBackend) SubscribeTxPreEvent(ch chan<- core.TxPreEvent) event.Subscription {
+func (fb *filterBackend) SubscribeNewTxsEvent(ch chan<- core.NewTxsEvent) event.Subscription {
 	return event.NewSubscription(func(quit <-chan struct{}) error {
 		<-quit
 		return nil
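Both filterBackend methods now share the same hash-to-number lookup idiom. A minimal sketch of that pattern in isolation, using only the rawdb read functions that appear in the diff (the helper name is ours):

```go
package example

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/ethdb"
)

// receiptsByHash mirrors GetReceipts/GetLogs above: resolve the block number
// for the hash first, then read the receipts keyed by (hash, number).
func receiptsByHash(db ethdb.Database, hash common.Hash) (types.Receipts, error) {
	number := rawdb.ReadHeaderNumber(db, hash) // nil when the hash is unknown
	if number == nil {
		return nil, nil // unknown block: report "no receipts" rather than an error
	}
	return rawdb.ReadReceipts(db, hash, *number), nil
}
```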
accounts/abi/event.go
@@ -33,15 +33,15 @@ type Event struct {
 	Inputs Arguments
 }

-func (event Event) String() string {
-	inputs := make([]string, len(event.Inputs))
-	for i, input := range event.Inputs {
+func (e Event) String() string {
+	inputs := make([]string, len(e.Inputs))
+	for i, input := range e.Inputs {
 		inputs[i] = fmt.Sprintf("%v %v", input.Name, input.Type)
 		if input.Indexed {
 			inputs[i] = fmt.Sprintf("%v indexed %v", input.Name, input.Type)
 		}
 	}
-	return fmt.Sprintf("event %v(%v)", event.Name, strings.Join(inputs, ", "))
+	return fmt.Sprintf("e %v(%v)", e.Name, strings.Join(inputs, ", "))
 }

 // Id returns the canonical representation of the event's signature used by the
accounts/abi/event_test.go
@@ -58,12 +58,28 @@ var jsonEventPledge = []byte(`{
 	"type": "event"
 }`)

+var jsonEventMixedCase = []byte(`{
+	"anonymous": false,
+	"inputs": [{
+	"indexed": false, "name": "value", "type": "uint256"
+	}, {
+	"indexed": false, "name": "_value", "type": "uint256"
+	}, {
+	"indexed": false, "name": "Value", "type": "uint256"
+	}],
+	"name": "MixedCase",
+	"type": "event"
+}`)
+
 // 1000000
 var transferData1 = "00000000000000000000000000000000000000000000000000000000000f4240"

 // "0x00Ce0d46d924CC8437c806721496599FC3FFA268", 2218516807680, "usd"
 var pledgeData1 = "00000000000000000000000000ce0d46d924cc8437c806721496599fc3ffa2680000000000000000000000000000000000000000000000000000020489e800007573640000000000000000000000000000000000000000000000000000000000"

+// 1000000,2218516807680,1000001
+var mixedCaseData1 = "00000000000000000000000000000000000000000000000000000000000f42400000000000000000000000000000000000000000000000000000020489e8000000000000000000000000000000000000000000000000000000000000000f4241"
+
 func TestEventId(t *testing.T) {
 	var table = []struct {
 		definition string
@@ -121,6 +137,27 @@ func TestEventTupleUnpack(t *testing.T) {
 		Value *big.Int
 	}

+	type EventTransferWithTag struct {
+		// this is valid because `value` is not exportable,
+		// so value is only unmarshalled into `Value1`.
+		value  *big.Int
+		Value1 *big.Int `abi:"value"`
+	}
+
+	type BadEventTransferWithSameFieldAndTag struct {
+		Value  *big.Int
+		Value1 *big.Int `abi:"value"`
+	}
+
+	type BadEventTransferWithDuplicatedTag struct {
+		Value1 *big.Int `abi:"value"`
+		Value2 *big.Int `abi:"value"`
+	}
+
+	type BadEventTransferWithEmptyTag struct {
+		Value *big.Int `abi:""`
+	}
+
 	type EventPledge struct {
 		Who      common.Address
 		Wad      *big.Int
@@ -133,9 +170,16 @@ func TestEventTupleUnpack(t *testing.T) {
 		Currency [3]byte
 	}

+	type EventMixedCase struct {
+		Value1 *big.Int `abi:"value"`
+		Value2 *big.Int `abi:"_value"`
+		Value3 *big.Int `abi:"Value"`
+	}
+
 	bigint := new(big.Int)
 	bigintExpected := big.NewInt(1000000)
+	bigintExpected2 := big.NewInt(2218516807680)
+	bigintExpected3 := big.NewInt(1000001)
 	addr := common.HexToAddress("0x00Ce0d46d924CC8437c806721496599FC3FFA268")
 	var testCases = []struct {
 		data string
@@ -158,6 +202,34 @@ func TestEventTupleUnpack(t *testing.T) {
 		jsonEventTransfer,
 		"",
 		"Can unpack ERC20 Transfer event into slice",
+	}, {
+		transferData1,
+		&EventTransferWithTag{},
+		&EventTransferWithTag{Value1: bigintExpected},
+		jsonEventTransfer,
+		"",
+		"Can unpack ERC20 Transfer event into structure with abi: tag",
+	}, {
+		transferData1,
+		&BadEventTransferWithDuplicatedTag{},
+		&BadEventTransferWithDuplicatedTag{},
+		jsonEventTransfer,
+		"struct: abi tag in 'Value2' already mapped",
+		"Can not unpack ERC20 Transfer event with duplicated abi tag",
+	}, {
+		transferData1,
+		&BadEventTransferWithSameFieldAndTag{},
+		&BadEventTransferWithSameFieldAndTag{},
+		jsonEventTransfer,
+		"abi: multiple variables maps to the same abi field 'value'",
+		"Can not unpack ERC20 Transfer event with a field and a tag mapping to the same abi variable",
+	}, {
+		transferData1,
+		&BadEventTransferWithEmptyTag{},
+		&BadEventTransferWithEmptyTag{},
+		jsonEventTransfer,
+		"struct: abi tag in 'Value' is empty",
+		"Can not unpack ERC20 Transfer event with an empty tag",
 	}, {
 		pledgeData1,
 		&EventPledge{},
@@ -216,6 +288,13 @@ func TestEventTupleUnpack(t *testing.T) {
 		jsonEventPledge,
 		"abi: cannot unmarshal tuple into map[string]interface {}",
 		"Can not unpack Pledge event into map",
+	}, {
+		mixedCaseData1,
+		&EventMixedCase{},
+		&EventMixedCase{Value1: bigintExpected, Value2: bigintExpected2, Value3: bigintExpected3},
+		jsonEventMixedCase,
+		"",
+		"Can unpack abi variables with mixed case",
 	}}

 	for _, tc := range testCases {
@@ -227,7 +306,7 @@ func TestEventTupleUnpack(t *testing.T) {
 			assert.Nil(err, "Should be able to unpack event data.")
 			assert.Equal(tc.expected, tc.dest, tc.name)
 		} else {
-			assert.EqualError(err, tc.error)
+			assert.EqualError(err, tc.error, tc.name)
 		}
 	})
 }
accounts/abi/numbers.go
@@ -31,29 +31,14 @@ var (
 	uint16T  = reflect.TypeOf(uint16(0))
 	uint32T  = reflect.TypeOf(uint32(0))
 	uint64T  = reflect.TypeOf(uint64(0))
-	intT     = reflect.TypeOf(int(0))
-	int8T    = reflect.TypeOf(int8(0))
-	int16T   = reflect.TypeOf(int16(0))
-	int32T   = reflect.TypeOf(int32(0))
-	int64T   = reflect.TypeOf(int64(0))
 	addressT = reflect.TypeOf(common.Address{})
-	intTS    = reflect.TypeOf([]int(nil))
-	int8TS   = reflect.TypeOf([]int8(nil))
-	int16TS  = reflect.TypeOf([]int16(nil))
-	int32TS  = reflect.TypeOf([]int32(nil))
-	int64TS  = reflect.TypeOf([]int64(nil))
 )

 // U256 converts a big Int into a 256bit EVM number.
 func U256(n *big.Int) []byte {
 	return math.PaddedBigBytes(math.U256(n), 32)
 }
-
-// checks whether the given reflect value is signed. This also works for slices with a number type
-func isSigned(v reflect.Value) bool {
-	switch v.Type() {
-	case intTS, int8TS, int16TS, int32TS, int64TS, intT, int8T, int16T, int32T, int64T:
-		return true
-	}
-	return false
-}
accounts/abi/numbers_test.go
@@ -19,7 +19,6 @@ package abi

 import (
 	"bytes"
 	"math/big"
-	"reflect"
 	"testing"
 )

@@ -32,13 +31,3 @@ func TestNumberTypes(t *testing.T) {
 		t.Errorf("expected %x got %x", ubytes, unsigned)
 	}
 }
-
-func TestSigned(t *testing.T) {
-	if isSigned(reflect.ValueOf(uint(10))) {
-		t.Error("signed")
-	}
-
-	if !isSigned(reflect.ValueOf(int(10))) {
-		t.Error("not signed")
-	}
-}
accounts/abi/reflect.go
@@ -19,6 +19,7 @@ package abi

 import (
 	"fmt"
 	"reflect"
+	"strings"
 )

 // indirect recursively dereferences the value until it either gets the value
@@ -111,18 +112,101 @@ func requireUnpackKind(v reflect.Value, t reflect.Type, k reflect.Kind,
 	return nil
 }

-// requireUniqueStructFieldNames makes sure field names don't collide
-func requireUniqueStructFieldNames(args Arguments) error {
-	exists := make(map[string]bool)
-	for _, arg := range args {
-		field := capitalise(arg.Name)
-		if field == "" {
-			return fmt.Errorf("abi: purely underscored output cannot unpack to struct")
-		}
-		if exists[field] {
-			return fmt.Errorf("abi: multiple outputs mapping to the same struct field '%s'", field)
-		}
-		exists[field] = true
-	}
-	return nil
-}
+// mapAbiToStringField maps abi to struct fields.
+// first round: for each Exportable field that contains a `abi:""` tag
+//   and this field name exists in the arguments, pair them together.
+// second round: for each argument field that has not been already linked,
+//   find what variable is expected to be mapped into, if it exists and has not been
+//   used, pair them.
+func mapAbiToStructFields(args Arguments, value reflect.Value) (map[string]string, error) {
+
+	typ := value.Type()
+
+	abi2struct := make(map[string]string)
+	struct2abi := make(map[string]string)
+
+	// first round ~~~
+	for i := 0; i < typ.NumField(); i++ {
+		structFieldName := typ.Field(i).Name
+
+		// skip private struct fields.
+		if structFieldName[:1] != strings.ToUpper(structFieldName[:1]) {
+			continue
+		}
+
+		// skip fields that have no abi:"" tag.
+		var ok bool
+		var tagName string
+		if tagName, ok = typ.Field(i).Tag.Lookup("abi"); !ok {
+			continue
+		}
+
+		// check if tag is empty.
+		if tagName == "" {
+			return nil, fmt.Errorf("struct: abi tag in '%s' is empty", structFieldName)
+		}
+
+		// check which argument field matches with the abi tag.
+		found := false
+		for _, abiField := range args.NonIndexed() {
+			if abiField.Name == tagName {
+				if abi2struct[abiField.Name] != "" {
+					return nil, fmt.Errorf("struct: abi tag in '%s' already mapped", structFieldName)
+				}
+				// pair them
+				abi2struct[abiField.Name] = structFieldName
+				struct2abi[structFieldName] = abiField.Name
+				found = true
+			}
+		}
+
+		// check if this tag has been mapped.
+		if !found {
+			return nil, fmt.Errorf("struct: abi tag '%s' defined but not found in abi", tagName)
+		}
+
+	}
+
+	// second round ~~~
+	for _, arg := range args {
+
+		abiFieldName := arg.Name
+		structFieldName := capitalise(abiFieldName)
+
+		if structFieldName == "" {
+			return nil, fmt.Errorf("abi: purely underscored output cannot unpack to struct")
+		}
+
+		// this abi has already been paired, skip it... unless there exists another, yet unassigned
+		// struct field with the same field name. If so, raise an error:
+		//    abi: [ { "name": "value" } ]
+		//    struct { Value *big.Int , Value1 *big.Int `abi:"value"`}
+		if abi2struct[abiFieldName] != "" {
+			if abi2struct[abiFieldName] != structFieldName &&
+				struct2abi[structFieldName] == "" &&
+				value.FieldByName(structFieldName).IsValid() {
+				return nil, fmt.Errorf("abi: multiple variables maps to the same abi field '%s'", abiFieldName)
+			}
+			continue
+		}
+
+		// return an error if this struct field has already been paired.
+		if struct2abi[structFieldName] != "" {
+			return nil, fmt.Errorf("abi: multiple outputs mapping to the same struct field '%s'", structFieldName)
+		}
+
+		if value.FieldByName(structFieldName).IsValid() {
+			// pair them
+			abi2struct[abiFieldName] = structFieldName
+			struct2abi[structFieldName] = abiFieldName
+		} else {
+			// not paired, but annotate as used, to detect cases like
+			//    abi : [ { "name": "value" }, { "name": "_value" } ]
+			//    struct { Value *big.Int }
+			struct2abi[structFieldName] = abiFieldName
+		}

+	}
+
+	return abi2struct, nil
+}
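The pairing rules are easiest to see on concrete struct shapes. The sketch below restates, with our own hypothetical type names, which layouts the two rounds accept or reject; the error strings are the ones asserted by the event tests in this diff (assuming a single non-indexed ABI output named "value", as in jsonEventTransfer):

```go
package abiexample

import "math/big"

type ViaName struct{ Value *big.Int } // paired in round two: capitalise("value") == "Value"

type ViaTag struct {
	Val *big.Int `abi:"value"` // paired in round one: the explicit tag wins
}

type DupTag struct {
	Value1 *big.Int `abi:"value"`
	Value2 *big.Int `abi:"value"` // rejected: "struct: abi tag in 'Value2' already mapped"
}

type FieldAndTag struct {
	Value  *big.Int
	Value1 *big.Int `abi:"value"` // rejected: "abi: multiple variables maps to the same abi field 'value'"
}

type EmptyTag struct {
	Value *big.Int `abi:""` // rejected: "struct: abi tag in 'Value' is empty"
}
```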
accounts/keystore/keystore.go
@@ -50,7 +50,7 @@ var (
 var KeyStoreType = reflect.TypeOf(&KeyStore{})

 // KeyStoreScheme is the protocol scheme prefixing account and wallet URLs.
-var KeyStoreScheme = "keystore"
+const KeyStoreScheme = "keystore"

 // Maximum time between wallet refreshes (if filesystem notifications don't work).
 const walletRefreshCycle = 3 * time.Second
accounts/keystore/keystore_passphrase.go
@@ -108,9 +108,8 @@ func (ks keyStorePassphrase) StoreKey(filename string, key *Key, auth string) er
 func (ks keyStorePassphrase) JoinPath(filename string) string {
 	if filepath.IsAbs(filename) {
 		return filename
-	} else {
-		return filepath.Join(ks.keysDirPath, filename)
 	}
+	return filepath.Join(ks.keysDirPath, filename)
 }

 // EncryptKey encrypts a key using the specified scrypt parameters into a json
accounts/keystore/keystore_plain.go
@@ -56,7 +56,6 @@ func (ks keyStorePlain) StoreKey(filename string, key *Key, auth string) error {
 func (ks keyStorePlain) JoinPath(filename string) string {
 	if filepath.IsAbs(filename) {
 		return filename
-	} else {
-		return filepath.Join(ks.keysDirPath, filename)
 	}
+	return filepath.Join(ks.keysDirPath, filename)
 }
accounts/usbwallet/internal/trezor/trezor.go
@@ -36,7 +36,7 @@ func Type(msg proto.Message) uint16 {
 }

 // Name returns the friendly message type name of a specific protocol buffer
-// type numbers.
+// type number.
 func Name(kind uint16) string {
 	name := MessageType_name[int32(kind)]
 	if len(name) < 12 {
accounts/usbwallet/ledger.go
@@ -302,7 +302,7 @@ func (w *ledgerDriver) ledgerSign(derivationPath []uint32, tx *types.Transaction
 	for i, component := range derivationPath {
 		binary.BigEndian.PutUint32(path[1+4*i:], component)
 	}
-	// Create the transaction RLP based on whether legacy or EIP155 signing was requeste
+	// Create the transaction RLP based on whether legacy or EIP155 signing was requested
 	var (
 		txrlp []byte
 		err   error
appveyor.yml
@@ -23,8 +23,8 @@ environment:
 install:
   - git submodule update --init
   - rmdir C:\go /s /q
-  - appveyor DownloadFile https://storage.googleapis.com/golang/go1.10.1.windows-%GETH_ARCH%.zip
-  - 7z x go1.10.1.windows-%GETH_ARCH%.zip -y -oC:\ > NUL
+  - appveyor DownloadFile https://storage.googleapis.com/golang/go1.10.3.windows-%GETH_ARCH%.zip
+  - 7z x go1.10.3.windows-%GETH_ARCH%.zip -y -oC:\ > NUL
   - go version
   - gcc --version
bmt/bmt.go (560 changes, file deleted)
@@ -1,560 +0,0 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// Package bmt provides a binary merkle tree implementation
package bmt

import (
	"fmt"
	"hash"
	"io"
	"strings"
	"sync"
	"sync/atomic"
)

/*
Binary Merkle Tree Hash is a hash function over arbitrary datachunks of limited size
It is defined as the root hash of the binary merkle tree built over fixed size segments
of the underlying chunk using any base hash function (e.g keccak 256 SHA3)

It is used as the chunk hash function in swarm which in turn is the basis for the
128 branching swarm hash http://swarm-guide.readthedocs.io/en/latest/architecture.html#swarm-hash

The BMT is optimal for providing compact inclusion proofs, i.e. prove that a
segment is a substring of a chunk starting at a particular offset
The size of the underlying segments is fixed at 32 bytes (called the resolution
of the BMT hash), the EVM word size to optimize for on-chain BMT verification
as well as the hash size optimal for inclusion proofs in the merkle tree of the swarm hash.

Two implementations are provided:

* RefHasher is optimized for code simplicity and meant as a reference implementation
* Hasher is optimized for speed taking advantage of concurrency with minimalistic
  control structure to coordinate the concurrent routines
  It implements the ChunkHash interface as well as the go standard hash.Hash interface

*/

const (
	// DefaultSegmentCount is the maximum number of segments of the underlying chunk
	DefaultSegmentCount = 128 // Should be equal to storage.DefaultBranches
	// DefaultPoolSize is the maximum number of bmt trees used by the hashers, i.e,
	// the maximum number of concurrent BMT hashing operations performed by the same hasher
	DefaultPoolSize = 8
)

// BaseHasher is a hash.Hash constructor function used for the base hash of the BMT.
type BaseHasher func() hash.Hash

// Hasher a reusable hasher for fixed maximum size chunks representing a BMT
// implements the hash.Hash interface
// reuse pool of Tree-s for amortised memory allocation and resource control
// supports order-agnostic concurrent segment writes
// as well as sequential read and write
// can not be called concurrently on more than one chunk
// can be further appended after Sum
// Reset gives back the Tree to the pool and guaranteed to leave
// the tree and itself in a state reusable for hashing a new chunk
type Hasher struct {
	pool        *TreePool   // BMT resource pool
	bmt         *Tree       // prebuilt BMT resource for flowcontrol and proofs
	blocksize   int         // segment size (size of hash) also for hash.Hash
	count       int         // segment count
	size        int         // for hash.Hash same as hashsize
	cur         int         // cursor position for rightmost currently open chunk
	segment     []byte      // the rightmost open segment (not complete)
	depth       int         // index of last level
	result      chan []byte // result channel
	hash        []byte      // to record the result
	max         int32       // max segments for SegmentWriter interface
	blockLength []byte      // The block length that needes to be added in Sum
}

// New creates a reusable Hasher
// implements the hash.Hash interface
// pulls a new Tree from a resource pool for hashing each chunk
func New(p *TreePool) *Hasher {
	return &Hasher{
		pool:      p,
		depth:     depth(p.SegmentCount),
		size:      p.SegmentSize,
		blocksize: p.SegmentSize,
		count:     p.SegmentCount,
		result:    make(chan []byte),
	}
}

// Node is a reuseable segment hasher representing a node in a BMT
// it allows for continued writes after a Sum
// and is left in completely reusable state after Reset
type Node struct {
	level, index int   // position of node for information/logging only
	initial      bool  // first and last node
	root         bool  // whether the node is root to a smaller BMT
	isLeft       bool  // whether it is left side of the parent double segment
	unbalanced   bool  // indicates if a node has only the left segment
	parent       *Node // BMT connections
	state        int32 // atomic increment impl concurrent boolean toggle
	left, right  []byte
}

// NewNode constructor for segment hasher nodes in the BMT
func NewNode(level, index int, parent *Node) *Node {
	return &Node{
		parent:  parent,
		level:   level,
		index:   index,
		initial: index == 0,
		isLeft:  index%2 == 0,
	}
}

// TreePool provides a pool of Trees used as resources by Hasher
// a Tree popped from the pool is guaranteed to have clean state
// for hashing a new chunk
// Hasher Reset releases the Tree to the pool
type TreePool struct {
	lock         sync.Mutex
	c            chan *Tree
	hasher       BaseHasher
	SegmentSize  int
	SegmentCount int
	Capacity     int
	count        int
}

// NewTreePool creates a Tree pool with hasher, segment size, segment count and capacity
// on GetTree it reuses free Trees or creates a new one if size is not reached
func NewTreePool(hasher BaseHasher, segmentCount, capacity int) *TreePool {
	return &TreePool{
		c:            make(chan *Tree, capacity),
		hasher:       hasher,
		SegmentSize:  hasher().Size(),
		SegmentCount: segmentCount,
		Capacity:     capacity,
	}
}

// Drain drains the pool until it has no more than n resources
func (self *TreePool) Drain(n int) {
	self.lock.Lock()
	defer self.lock.Unlock()
	for len(self.c) > n {
		<-self.c
		self.count--
	}
}

// Reserve is blocking until it returns an available Tree
// it reuses free Trees or creates a new one if size is not reached
func (self *TreePool) Reserve() *Tree {
	self.lock.Lock()
	defer self.lock.Unlock()
	var t *Tree
	if self.count == self.Capacity {
		return <-self.c
	}
	select {
	case t = <-self.c:
	default:
		t = NewTree(self.hasher, self.SegmentSize, self.SegmentCount)
		self.count++
	}
	return t
}

// Release gives back a Tree to the pool.
// This Tree is guaranteed to be in reusable state
// does not need locking
func (self *TreePool) Release(t *Tree) {
	self.c <- t // can never fail but...
}

// Tree is a reusable control structure representing a BMT
// organised in a binary tree
// Hasher uses a TreePool to pick one for each chunk hash
// the Tree is 'locked' while not in the pool
type Tree struct {
	leaves []*Node
}

// Draw draws the BMT (badly)
func (self *Tree) Draw(hash []byte, d int) string {
	var left, right []string
	var anc []*Node
	for i, n := range self.leaves {
		left = append(left, fmt.Sprintf("%v", hashstr(n.left)))
		if i%2 == 0 {
			anc = append(anc, n.parent)
		}
		right = append(right, fmt.Sprintf("%v", hashstr(n.right)))
	}
	anc = self.leaves
	var hashes [][]string
	for l := 0; len(anc) > 0; l++ {
		var nodes []*Node
		hash := []string{""}
		for i, n := range anc {
			hash = append(hash, fmt.Sprintf("%v|%v", hashstr(n.left), hashstr(n.right)))
			if i%2 == 0 && n.parent != nil {
				nodes = append(nodes, n.parent)
			}
		}
		hash = append(hash, "")
		hashes = append(hashes, hash)
		anc = nodes
	}
	hashes = append(hashes, []string{"", fmt.Sprintf("%v", hashstr(hash)), ""})
	total := 60
	del := " "
	var rows []string
	for i := len(hashes) - 1; i >= 0; i-- {
		var textlen int
		hash := hashes[i]
		for _, s := range hash {
			textlen += len(s)
		}
		if total < textlen {
			total = textlen + len(hash)
		}
		delsize := (total - textlen) / (len(hash) - 1)
		if delsize > len(del) {
			delsize = len(del)
		}
		row := fmt.Sprintf("%v: %v", len(hashes)-i-1, strings.Join(hash, del[:delsize]))
		rows = append(rows, row)
	}
	rows = append(rows, strings.Join(left, " "))
	rows = append(rows, strings.Join(right, " "))
	return strings.Join(rows, "\n") + "\n"
}

// NewTree initialises the Tree by building up the nodes of a BMT
// segment size is stipulated to be the size of the hash
// segmentCount needs to be positive integer and does not need to be
// a power of two and can even be an odd number
// segmentSize * segmentCount determines the maximum chunk size
// hashed using the tree
func NewTree(hasher BaseHasher, segmentSize, segmentCount int) *Tree {
	n := NewNode(0, 0, nil)
	n.root = true
	prevlevel := []*Node{n}
	// iterate over levels and creates 2^level nodes
	level := 1
	count := 2
	for d := 1; d <= depth(segmentCount); d++ {
		nodes := make([]*Node, count)
		for i := 0; i < len(nodes); i++ {
			parent := prevlevel[i/2]
			t := NewNode(level, i, parent)
			nodes[i] = t
		}
		prevlevel = nodes
		level++
		count *= 2
	}
	// the datanode level is the nodes on the last level where
	return &Tree{
		leaves: prevlevel,
	}
}

// methods needed by hash.Hash

// Size returns the size
func (self *Hasher) Size() int {
	return self.size
}

// BlockSize returns the block size
func (self *Hasher) BlockSize() int {
	return self.blocksize
}

// Sum returns the hash of the buffer
// hash.Hash interface Sum method appends the byte slice to the underlying
// data before it calculates and returns the hash of the chunk
func (self *Hasher) Sum(b []byte) (r []byte) {
	t := self.bmt
	i := self.cur
	n := t.leaves[i]
	j := i
	// must run strictly before all nodes calculate
	// datanodes are guaranteed to have a parent
	if len(self.segment) > self.size && i > 0 && n.parent != nil {
		n = n.parent
	} else {
		i *= 2
	}
	d := self.finalise(n, i)
	self.writeSegment(j, self.segment, d)
	c := <-self.result
	self.releaseTree()

	// sha3(length + BMT(pure_chunk))
	if self.blockLength == nil {
		return c
	}
	res := self.pool.hasher()
	res.Reset()
	res.Write(self.blockLength)
	res.Write(c)
	return res.Sum(nil)
}

// Hasher implements the SwarmHash interface

// Hash waits for the hasher result and returns it
// caller must call this on a BMT Hasher being written to
func (self *Hasher) Hash() []byte {
	return <-self.result
}

// Hasher implements the io.Writer interface

// Write fills the buffer to hash
// with every full segment complete launches a hasher go routine
// that shoots up the BMT
func (self *Hasher) Write(b []byte) (int, error) {
	l := len(b)
	if l <= 0 {
		return 0, nil
	}
	s := self.segment
	i := self.cur
	count := (self.count + 1) / 2
	need := self.count*self.size - self.cur*2*self.size
	size := self.size
	if need > size {
		size *= 2
	}
	if l < need {
		need = l
	}
	// calculate missing bit to complete current open segment
	rest := size - len(s)
	if need < rest {
		rest = need
	}
	s = append(s, b[:rest]...)
	need -= rest
	// read full segments and the last possibly partial segment
	for need > 0 && i < count-1 {
		// push all finished chunks we read
		self.writeSegment(i, s, self.depth)
		need -= size
		if need < 0 {
			size += need
		}
		s = b[rest : rest+size]
		rest += size
		i++
	}
	self.segment = s
	self.cur = i
	// otherwise, we can assume len(s) == 0, so all buffer is read and chunk is not yet full
	return l, nil
}

// Hasher implements the io.ReaderFrom interface

// ReadFrom reads from io.Reader and appends to the data to hash using Write
// it reads so that chunk to hash is maximum length or reader reaches EOF
// caller must Reset the hasher prior to call
func (self *Hasher) ReadFrom(r io.Reader) (m int64, err error) {
	bufsize := self.size*self.count - self.size*self.cur - len(self.segment)
	buf := make([]byte, bufsize)
	var read int
	for {
		var n int
		n, err = r.Read(buf)
		read += n
		if err == io.EOF || read == len(buf) {
			hash := self.Sum(buf[:n])
			if read == len(buf) {
				err = NewEOC(hash)
			}
			break
		}
		if err != nil {
			break
		}
		n, err = self.Write(buf[:n])
		if err != nil {
			break
		}
	}
	return int64(read), err
}

// Reset needs to be called before writing to the hasher
func (self *Hasher) Reset() {
	self.getTree()
	self.blockLength = nil
}

// Hasher implements the SwarmHash interface

// ResetWithLength needs to be called before writing to the hasher
// the argument is supposed to be the byte slice binary representation of
// the length of the data subsumed under the hash
func (self *Hasher) ResetWithLength(l []byte) {
	self.Reset()
	self.blockLength = l
}

// Release gives back the Tree to the pool whereby it unlocks
// it resets tree, segment and index
func (self *Hasher) releaseTree() {
	if self.bmt != nil {
		n := self.bmt.leaves[self.cur]
		for ; n != nil; n = n.parent {
			n.unbalanced = false
			if n.parent != nil {
				n.root = false
			}
		}
		self.pool.Release(self.bmt)
		self.bmt = nil
	}
	self.cur = 0
	self.segment = nil
}

func (self *Hasher) writeSegment(i int, s []byte, d int) {
	h := self.pool.hasher()
	n := self.bmt.leaves[i]

	if len(s) > self.size && n.parent != nil {
		go func() {
			h.Reset()
			h.Write(s)
			s = h.Sum(nil)

			if n.root {
				self.result <- s
				return
			}
			self.run(n.parent, h, d, n.index, s)
		}()
		return
	}
	go self.run(n, h, d, i*2, s)
}

func (self *Hasher) run(n *Node, h hash.Hash, d int, i int, s []byte) {
	isLeft := i%2 == 0
	for {
		if isLeft {
			n.left = s
		} else {
			n.right = s
		}
		if !n.unbalanced && n.toggle() {
			return
		}
		if !n.unbalanced || !isLeft || i == 0 && d == 0 {
			h.Reset()
			h.Write(n.left)
			h.Write(n.right)
			s = h.Sum(nil)
		} else {
			s = append(n.left, n.right...)
		}

		self.hash = s
		if n.root {
			self.result <- s
			return
		}

		isLeft = n.isLeft
		n = n.parent
		i++
	}
}

// getTree obtains a BMT resource by reserving one from the pool
func (self *Hasher) getTree() *Tree {
	if self.bmt != nil {
		return self.bmt
	}
	t := self.pool.Reserve()
	self.bmt = t
	return t
}

// atomic bool toggle implementing a concurrent reusable 2-state object
// atomic addint with %2 implements atomic bool toggle
// it returns true if the toggler just put it in the active/waiting state
func (self *Node) toggle() bool {
	return atomic.AddInt32(&self.state, 1)%2 == 1
}

func hashstr(b []byte) string {
	end := len(b)
	if end > 4 {
		end = 4
	}
	return fmt.Sprintf("%x", b[:end])
}

func depth(n int) (d int) {
	for l := (n - 1) / 2; l > 0; l /= 2 {
		d++
	}
	return d
}

// finalise is following the zigzags on the tree belonging
// to the final datasegment
func (self *Hasher) finalise(n *Node, i int) (d int) {
	isLeft := i%2 == 0
	for {
		// when the final segment's path is going via left segments
		// the incoming data is pushed to the parent upon pulling the left
		// we do not need toggle the state since this condition is
		// detectable
		n.unbalanced = isLeft
		n.right = nil
		if n.initial {
			n.root = true
			return d
		}
		isLeft = n.isLeft
		n = n.parent
		d++
	}
}

// EOC (end of chunk) implements the error interface
type EOC struct {
	Hash []byte // read the hash of the chunk off the error
}

// Error returns the error string
func (self *EOC) Error() string {
	return fmt.Sprintf("hasher limit reached, chunk hash: %x", self.Hash)
}

// NewEOC creates new end of chunk error with the hash
func NewEOC(hash []byte) *EOC {
	return &EOC{hash}
}
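The deleted Hasher was driven through the standard hash.Hash interface. A minimal usage sketch, mirroring the Reset/Write/Sum sequence that testHasherCorrectness in bmt_test.go below performs (pool sizes and data are illustrative; import paths are those of this tree):

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/bmt"
	"github.com/ethereum/go-ethereum/crypto/sha3"
)

func main() {
	// One pool per base hash; Tree resources are recycled across Sum calls.
	pool := bmt.NewTreePool(sha3.NewKeccak256, 128, bmt.DefaultPoolSize)
	defer pool.Drain(0)

	h := bmt.New(pool)
	data := make([]byte, 4096)

	h.Reset() // must be called before writing, per the package docs
	h.Write(data)
	fmt.Printf("%x\n", h.Sum(nil))
}
```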
bmt/bmt_r.go (85 changes, file deleted)
@@ -1,85 +0,0 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// simple nonconcurrent reference implementation for hashsize segment based
// Binary Merkle tree hash on arbitrary but fixed maximum chunksize
//
// This implementation does not take advantage of any paralellisms and uses
// far more memory than necessary, but it is easy to see that it is correct.
// It can be used for generating test cases for optimized implementations.
// see testBMTHasherCorrectness function in bmt_test.go
package bmt

import (
	"hash"
)

// RefHasher is the non-optimized easy to read reference implementation of BMT
type RefHasher struct {
	span    int
	section int
	cap     int
	h       hash.Hash
}

// NewRefHasher returns a new RefHasher
func NewRefHasher(hasher BaseHasher, count int) *RefHasher {
	h := hasher()
	hashsize := h.Size()
	maxsize := hashsize * count
	c := 2
	for ; c < count; c *= 2 {
	}
	if c > 2 {
		c /= 2
	}
	return &RefHasher{
		section: 2 * hashsize,
		span:    c * hashsize,
		cap:     maxsize,
		h:       h,
	}
}

// Hash returns the BMT hash of the byte slice
// implements the SwarmHash interface
func (rh *RefHasher) Hash(d []byte) []byte {
	if len(d) > rh.cap {
		d = d[:rh.cap]
	}

	return rh.hash(d, rh.span)
}

func (rh *RefHasher) hash(d []byte, s int) []byte {
	l := len(d)
	left := d
	var right []byte
	if l > rh.section {
		for ; s >= l; s /= 2 {
		}
		left = rh.hash(d[:s], s)
		right = d[s:]
		if l-s > rh.section/2 {
			right = rh.hash(right, s)
		}
	}
	defer rh.h.Reset()
	rh.h.Write(left)
	rh.h.Write(right)
	h := rh.h.Sum(nil)
	return h
}
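Condensed from RefHasher above, the sketch below makes the split rule explicit as a single recursion; the shapes it reproduces (for instance sha3(sha3(data[:64]), data[64:]) for lengths in [65,96]) are exactly the ones tabulated in bmt_test.go next. The function names are ours:

```go
package main

import (
	"fmt"
	"hash"

	"github.com/ethereum/go-ethereum/crypto/sha3"
)

// bmtHash halves the span s until it fits the data, recurses on the left
// half (and the right half when it exceeds one segment), then hashes
// left||right with the base hash.
func bmtHash(h hash.Hash, data []byte, s int) []byte {
	section := 2 * h.Size() // one double-segment: 64 bytes for keccak256
	left, right := data, []byte(nil)
	if len(data) > section {
		for ; s >= len(data); s /= 2 {
		}
		left = bmtHash(h, data[:s], s)
		right = data[s:]
		if len(data)-s > section/2 {
			right = bmtHash(h, right, s)
		}
	}
	h.Reset()
	h.Write(left)
	h.Write(right)
	return h.Sum(nil)
}

func main() {
	h := sha3.NewKeccak256()
	// Initial span as in NewRefHasher for a 128-segment chunk: the largest
	// power of two strictly below the segment count, in bytes.
	c := 2
	for ; c < 128; c *= 2 {
	}
	c /= 2
	data := make([]byte, 96)
	// For 96 bytes this yields sha3(sha3(data[:64]), data[64:]).
	fmt.Printf("%x\n", bmtHash(h, data, c*h.Size()))
}
```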
481
bmt/bmt_test.go
481
bmt/bmt_test.go
@@ -1,481 +0,0 @@
|
||||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package bmt
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
crand "crypto/rand"
|
||||
"fmt"
|
||||
"hash"
|
||||
"io"
|
||||
"math/rand"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/crypto/sha3"
|
||||
)
|
||||
|
||||
const (
|
||||
maxproccnt = 8
|
||||
)
|
||||
|
||||
// TestRefHasher tests that the RefHasher computes the expected BMT hash for
|
||||
// all data lengths between 0 and 256 bytes
|
||||
func TestRefHasher(t *testing.T) {
|
||||
hashFunc := sha3.NewKeccak256
|
||||
|
||||
sha3 := func(data ...[]byte) []byte {
|
||||
h := hashFunc()
|
||||
for _, v := range data {
|
||||
h.Write(v)
|
||||
}
|
||||
return h.Sum(nil)
|
||||
}
|
||||
|
||||
// the test struct is used to specify the expected BMT hash for data
|
||||
// lengths between "from" and "to"
|
||||
type test struct {
|
||||
from int64
|
||||
to int64
|
||||
expected func([]byte) []byte
|
||||
}
|
||||
|
||||
var tests []*test
|
||||
|
||||
// all lengths in [0,64] should be:
|
||||
//
|
||||
// sha3(data)
|
||||
//
|
||||
tests = append(tests, &test{
|
||||
from: 0,
|
||||
to: 64,
|
||||
expected: func(data []byte) []byte {
|
||||
return sha3(data)
|
||||
},
|
||||
})
|
||||
|
||||
// all lengths in [65,96] should be:
|
||||
//
|
||||
// sha3(
|
||||
// sha3(data[:64])
|
||||
// data[64:]
|
||||
// )
|
||||
//
|
||||
tests = append(tests, &test{
|
||||
from: 65,
|
||||
to: 96,
|
||||
expected: func(data []byte) []byte {
|
||||
return sha3(sha3(data[:64]), data[64:])
|
||||
},
|
||||
})
|
||||
|
||||
// all lengths in [97,128] should be:
|
||||
//
|
||||
// sha3(
|
||||
// sha3(data[:64])
|
||||
// sha3(data[64:])
|
||||
// )
|
||||
//
|
||||
tests = append(tests, &test{
|
||||
from: 97,
|
||||
to: 128,
|
||||
expected: func(data []byte) []byte {
|
||||
return sha3(sha3(data[:64]), sha3(data[64:]))
|
||||
},
|
||||
})
|
||||
|
||||
// all lengths in [129,160] should be:
|
||||
//
|
||||
// sha3(
|
||||
// sha3(
|
||||
// sha3(data[:64])
|
||||
// sha3(data[64:128])
|
||||
// )
|
||||
// data[128:]
|
||||
// )
|
||||
//
|
||||
tests = append(tests, &test{
|
||||
from: 129,
|
||||
to: 160,
|
||||
expected: func(data []byte) []byte {
|
||||
return sha3(sha3(sha3(data[:64]), sha3(data[64:128])), data[128:])
|
||||
},
|
||||
})
|
||||
|
||||
// all lengths in [161,192] should be:
|
||||
//
|
||||
// sha3(
|
||||
// sha3(
|
||||
// sha3(data[:64])
|
||||
// sha3(data[64:128])
|
||||
// )
|
||||
// sha3(data[128:])
|
||||
// )
|
||||
//
|
||||
tests = append(tests, &test{
|
||||
from: 161,
|
||||
to: 192,
|
||||
expected: func(data []byte) []byte {
|
||||
return sha3(sha3(sha3(data[:64]), sha3(data[64:128])), sha3(data[128:]))
|
||||
},
|
||||
})
|
||||
|
||||
// all lengths in [193,224] should be:
|
||||
//
|
||||
// sha3(
|
||||
// sha3(
|
||||
// sha3(data[:64])
|
||||
// sha3(data[64:128])
|
||||
// )
|
||||
// sha3(
|
||||
// sha3(data[128:192])
|
||||
// data[192:]
|
||||
// )
|
||||
// )
|
||||
//
|
||||
tests = append(tests, &test{
|
||||
from: 193,
|
||||
to: 224,
|
||||
expected: func(data []byte) []byte {
|
||||
return sha3(sha3(sha3(data[:64]), sha3(data[64:128])), sha3(sha3(data[128:192]), data[192:]))
|
||||
},
|
||||
})
|
||||
|
||||
// all lengths in [225,256] should be:
|
||||
//
|
||||
// sha3(
|
||||
// sha3(
|
||||
// sha3(data[:64])
|
||||
// sha3(data[64:128])
|
||||
// )
|
||||
// sha3(
|
||||
// sha3(data[128:192])
|
||||
// sha3(data[192:])
|
||||
// )
|
||||
// )
|
||||
//
|
||||
tests = append(tests, &test{
|
||||
from: 225,
|
||||
to: 256,
|
||||
expected: func(data []byte) []byte {
|
||||
return sha3(sha3(sha3(data[:64]), sha3(data[64:128])), sha3(sha3(data[128:192]), sha3(data[192:])))
|
||||
},
|
||||
})
|
||||
|
||||
// run the tests
|
||||
for _, x := range tests {
|
||||
for length := x.from; length <= x.to; length++ {
|
||||
t.Run(fmt.Sprintf("%d_bytes", length), func(t *testing.T) {
|
||||
data := make([]byte, length)
|
||||
				if _, err := io.ReadFull(crand.Reader, data); err != nil && err != io.EOF {
					t.Fatal(err)
				}
				expected := x.expected(data)
				actual := NewRefHasher(hashFunc, 128).Hash(data)
				if !bytes.Equal(actual, expected) {
					t.Fatalf("expected %x, got %x", expected, actual)
				}
			})
		}
	}
}

func testDataReader(l int) (r io.Reader) {
	return io.LimitReader(crand.Reader, int64(l))
}

func TestHasherCorrectness(t *testing.T) {
	err := testHasher(testBaseHasher)
	if err != nil {
		t.Fatal(err)
	}
}

func testHasher(f func(BaseHasher, []byte, int, int) error) error {
	tdata := testDataReader(4128)
	data := make([]byte, 4128)
	tdata.Read(data)
	hasher := sha3.NewKeccak256
	size := hasher().Size()
	counts := []int{1, 2, 3, 4, 5, 8, 16, 32, 64, 128}

	var err error
	for _, count := range counts {
		max := count * size
		incr := 1
		for n := 0; n <= max+incr; n += incr {
			err = f(hasher, data, n, count)
			if err != nil {
				return err
			}
		}
	}
	return nil
}

func TestHasherReuseWithoutRelease(t *testing.T) {
	testHasherReuse(1, t)
}

func TestHasherReuseWithRelease(t *testing.T) {
	testHasherReuse(maxproccnt, t)
}

func testHasherReuse(i int, t *testing.T) {
	hasher := sha3.NewKeccak256
	pool := NewTreePool(hasher, 128, i)
	defer pool.Drain(0)
	bmt := New(pool)

	for i := 0; i < 500; i++ {
		n := rand.Intn(4096)
		tdata := testDataReader(n)
		data := make([]byte, n)
		tdata.Read(data)

		err := testHasherCorrectness(bmt, hasher, data, n, 128)
		if err != nil {
			t.Fatal(err)
		}
	}
}

func TestHasherConcurrency(t *testing.T) {
	hasher := sha3.NewKeccak256
	pool := NewTreePool(hasher, 128, maxproccnt)
	defer pool.Drain(0)
	wg := sync.WaitGroup{}
	cycles := 100
	wg.Add(maxproccnt * cycles)
	errc := make(chan error)

	for p := 0; p < maxproccnt; p++ {
		for i := 0; i < cycles; i++ {
			go func() {
				bmt := New(pool)
				n := rand.Intn(4096)
				tdata := testDataReader(n)
				data := make([]byte, n)
				tdata.Read(data)
				err := testHasherCorrectness(bmt, hasher, data, n, 128)
				wg.Done()
				if err != nil {
					errc <- err
				}
			}()
		}
	}
	go func() {
		wg.Wait()
		close(errc)
	}()
	var err error
	select {
	case <-time.NewTimer(5 * time.Second).C:
		err = fmt.Errorf("timed out")
	case err = <-errc:
	}
	if err != nil {
		t.Fatal(err)
	}
}

func testBaseHasher(hasher BaseHasher, d []byte, n, count int) error {
	pool := NewTreePool(hasher, count, 1)
	defer pool.Drain(0)
	bmt := New(pool)
	return testHasherCorrectness(bmt, hasher, d, n, count)
}

func testHasherCorrectness(bmt hash.Hash, hasher BaseHasher, d []byte, n, count int) (err error) {
	data := d[:n]
	rbmt := NewRefHasher(hasher, count)
	exp := rbmt.Hash(data)
	timeout := time.NewTimer(time.Second)
	c := make(chan error)

	go func() {
		bmt.Reset()
		bmt.Write(data)
		got := bmt.Sum(nil)
		if !bytes.Equal(got, exp) {
			c <- fmt.Errorf("wrong hash: expected %x, got %x", exp, got)
		}
		close(c)
	}()
	select {
	case <-timeout.C:
		err = fmt.Errorf("BMT hash calculation timed out")
	case err = <-c:
	}
	return err
}

func BenchmarkSHA3_4k(t *testing.B)   { benchmarkSHA3(4096, t) }
func BenchmarkSHA3_2k(t *testing.B)   { benchmarkSHA3(4096/2, t) }
func BenchmarkSHA3_1k(t *testing.B)   { benchmarkSHA3(4096/4, t) }
func BenchmarkSHA3_512b(t *testing.B) { benchmarkSHA3(4096/8, t) }
func BenchmarkSHA3_256b(t *testing.B) { benchmarkSHA3(4096/16, t) }
func BenchmarkSHA3_128b(t *testing.B) { benchmarkSHA3(4096/32, t) }

func BenchmarkBMTBaseline_4k(t *testing.B)   { benchmarkBMTBaseline(4096, t) }
func BenchmarkBMTBaseline_2k(t *testing.B)   { benchmarkBMTBaseline(4096/2, t) }
func BenchmarkBMTBaseline_1k(t *testing.B)   { benchmarkBMTBaseline(4096/4, t) }
func BenchmarkBMTBaseline_512b(t *testing.B) { benchmarkBMTBaseline(4096/8, t) }
func BenchmarkBMTBaseline_256b(t *testing.B) { benchmarkBMTBaseline(4096/16, t) }
func BenchmarkBMTBaseline_128b(t *testing.B) { benchmarkBMTBaseline(4096/32, t) }

func BenchmarkRefHasher_4k(t *testing.B)   { benchmarkRefHasher(4096, t) }
func BenchmarkRefHasher_2k(t *testing.B)   { benchmarkRefHasher(4096/2, t) }
func BenchmarkRefHasher_1k(t *testing.B)   { benchmarkRefHasher(4096/4, t) }
func BenchmarkRefHasher_512b(t *testing.B) { benchmarkRefHasher(4096/8, t) }
func BenchmarkRefHasher_256b(t *testing.B) { benchmarkRefHasher(4096/16, t) }
func BenchmarkRefHasher_128b(t *testing.B) { benchmarkRefHasher(4096/32, t) }

func BenchmarkHasher_4k(t *testing.B)   { benchmarkHasher(4096, t) }
func BenchmarkHasher_2k(t *testing.B)   { benchmarkHasher(4096/2, t) }
func BenchmarkHasher_1k(t *testing.B)   { benchmarkHasher(4096/4, t) }
func BenchmarkHasher_512b(t *testing.B) { benchmarkHasher(4096/8, t) }
func BenchmarkHasher_256b(t *testing.B) { benchmarkHasher(4096/16, t) }
func BenchmarkHasher_128b(t *testing.B) { benchmarkHasher(4096/32, t) }

func BenchmarkHasherNoReuse_4k(t *testing.B)   { benchmarkHasherReuse(1, 4096, t) }
func BenchmarkHasherNoReuse_2k(t *testing.B)   { benchmarkHasherReuse(1, 4096/2, t) }
func BenchmarkHasherNoReuse_1k(t *testing.B)   { benchmarkHasherReuse(1, 4096/4, t) }
func BenchmarkHasherNoReuse_512b(t *testing.B) { benchmarkHasherReuse(1, 4096/8, t) }
func BenchmarkHasherNoReuse_256b(t *testing.B) { benchmarkHasherReuse(1, 4096/16, t) }
func BenchmarkHasherNoReuse_128b(t *testing.B) { benchmarkHasherReuse(1, 4096/32, t) }

func BenchmarkHasherReuse_4k(t *testing.B)   { benchmarkHasherReuse(16, 4096, t) }
func BenchmarkHasherReuse_2k(t *testing.B)   { benchmarkHasherReuse(16, 4096/2, t) }
func BenchmarkHasherReuse_1k(t *testing.B)   { benchmarkHasherReuse(16, 4096/4, t) }
func BenchmarkHasherReuse_512b(t *testing.B) { benchmarkHasherReuse(16, 4096/8, t) }
func BenchmarkHasherReuse_256b(t *testing.B) { benchmarkHasherReuse(16, 4096/16, t) }
func BenchmarkHasherReuse_128b(t *testing.B) { benchmarkHasherReuse(16, 4096/32, t) }

// benchmarkBMTBaseline benchmarks the minimum hashing time for a balanced
// (for simplicity) BMT by doing count/segmentsize parallel hashings of
// 2*segmentsize bytes on maxproccnt goroutines, each reusing the base hasher.
// The premise is that this is the minimum computation needed for a BMT,
// so it serves as a theoretical optimum for concurrent implementations.
func benchmarkBMTBaseline(n int, t *testing.B) {
	tdata := testDataReader(64)
	data := make([]byte, 64)
	tdata.Read(data)
	hasher := sha3.NewKeccak256

	t.ReportAllocs()
	t.ResetTimer()
	for i := 0; i < t.N; i++ {
		count := int32((n-1)/hasher().Size() + 1)
		wg := sync.WaitGroup{}
		wg.Add(maxproccnt)
		var i int32
		for j := 0; j < maxproccnt; j++ {
			go func() {
				defer wg.Done()
				h := hasher()
				for atomic.AddInt32(&i, 1) < count {
					h.Reset()
					h.Write(data)
					h.Sum(nil)
				}
			}()
		}
		wg.Wait()
	}
}

func benchmarkHasher(n int, t *testing.B) {
	tdata := testDataReader(n)
	data := make([]byte, n)
	tdata.Read(data)

	size := 1
	hasher := sha3.NewKeccak256
	segmentCount := 128
	pool := NewTreePool(hasher, segmentCount, size)
	bmt := New(pool)

	t.ReportAllocs()
	t.ResetTimer()
	for i := 0; i < t.N; i++ {
		bmt.Reset()
		bmt.Write(data)
		bmt.Sum(nil)
	}
}

func benchmarkHasherReuse(poolsize, n int, t *testing.B) {
	tdata := testDataReader(n)
	data := make([]byte, n)
	tdata.Read(data)

	hasher := sha3.NewKeccak256
	segmentCount := 128
	pool := NewTreePool(hasher, segmentCount, poolsize)
	cycles := 200

	t.ReportAllocs()
	t.ResetTimer()
	for i := 0; i < t.N; i++ {
		wg := sync.WaitGroup{}
		wg.Add(cycles)
		for j := 0; j < cycles; j++ {
			bmt := New(pool)
			go func() {
				defer wg.Done()
				bmt.Reset()
				bmt.Write(data)
				bmt.Sum(nil)
			}()
		}
		wg.Wait()
	}
}

func benchmarkSHA3(n int, t *testing.B) {
	data := make([]byte, n)
	tdata := testDataReader(n)
	tdata.Read(data)
	hasher := sha3.NewKeccak256
	h := hasher()

	t.ReportAllocs()
	t.ResetTimer()
	for i := 0; i < t.N; i++ {
		h.Reset()
		h.Write(data)
		h.Sum(nil)
	}
}

func benchmarkRefHasher(n int, t *testing.B) {
	data := make([]byte, n)
	tdata := testDataReader(n)
	tdata.Read(data)
	hasher := sha3.NewKeccak256
	rbmt := NewRefHasher(hasher, 128)

	t.ReportAllocs()
	t.ResetTimer()
	for i := 0; i < t.N; i++ {
		rbmt.Hash(data)
	}
}
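
These benchmarks run with the standard Go tooling; a minimal sketch (the `./bmt` package path is an assumption, adjust to the actual location):

```bash
# run only the 4k-input benchmarks above, skipping the correctness tests
go test ./bmt -run NONE -bench 'BenchmarkHasher_4k|BenchmarkSHA3_4k' -benchmem
```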
26
build/ci.go
@@ -330,6 +330,7 @@ func doLint(cmdline []string) {
	configs := []string{
		"--vendor",
		"--tests",
		"--deadline=2m",
		"--disable-all",
		"--enable=goimports",
		"--enable=varcheck",
@@ -731,7 +732,7 @@ func doAndroidArchive(cmdline []string) {
	// Build the Android archive and Maven resources
	build.MustRun(goTool("get", "golang.org/x/mobile/cmd/gomobile", "golang.org/x/mobile/cmd/gobind"))
	build.MustRun(gomobileTool("init", "--ndk", os.Getenv("ANDROID_NDK")))
	build.MustRun(gomobileTool("bind", "--target", "android", "--javapkg", "org.ethereum", "-v", "github.com/ethereum/go-ethereum/mobile"))
	build.MustRun(gomobileTool("bind", "-ldflags", "-s -w", "--target", "android", "--javapkg", "org.ethereum", "-v", "github.com/ethereum/go-ethereum/mobile"))

	if *local {
		// If we're building locally, copy bundle to build dir and skip Maven
@@ -755,14 +756,18 @@ func doAndroidArchive(cmdline []string) {
	os.Rename(archive, meta.Package+".aar")
	if *signer != "" && *deploy != "" {
		// Import the signing key into the local GPG instance
		if b64key := os.Getenv(*signer); b64key != "" {
			key, err := base64.StdEncoding.DecodeString(b64key)
			if err != nil {
				log.Fatalf("invalid base64 %s", *signer)
			}
			gpg := exec.Command("gpg", "--import")
			gpg.Stdin = bytes.NewReader(key)
			build.MustRun(gpg)
		b64key := os.Getenv(*signer)
		key, err := base64.StdEncoding.DecodeString(b64key)
		if err != nil {
			log.Fatalf("invalid base64 %s", *signer)
		}
		gpg := exec.Command("gpg", "--import")
		gpg.Stdin = bytes.NewReader(key)
		build.MustRun(gpg)

		keyID, err := build.PGPKeyID(string(key))
		if err != nil {
			log.Fatal(err)
		}
		// Upload the artifacts to Sonatype and/or Maven Central
		repo := *deploy + "/service/local/staging/deploy/maven2"
@@ -771,6 +776,7 @@ func doAndroidArchive(cmdline []string) {
		}
		build.MustRunCommand("mvn", "gpg:sign-and-deploy-file", "-e", "-X",
			"-settings=build/mvn.settings", "-Durl="+repo, "-DrepositoryId=ossrh",
			"-Dgpg.keyname="+keyID,
			"-DpomFile="+meta.Package+".pom", "-Dfile="+meta.Package+".aar")
	}
}
@@ -852,7 +858,7 @@ func doXCodeFramework(cmdline []string) {
	// Build the iOS XCode framework
	build.MustRun(goTool("get", "golang.org/x/mobile/cmd/gomobile", "golang.org/x/mobile/cmd/gobind"))
	build.MustRun(gomobileTool("init"))
	bind := gomobileTool("bind", "--target", "ios", "--tags", "ios", "-v", "github.com/ethereum/go-ethereum/mobile")
	bind := gomobileTool("bind", "-ldflags", "-s -w", "--target", "ios", "--tags", "ios", "-v", "github.com/ethereum/go-ethereum/mobile")

	if *local {
		// If we're building locally, use the build folder and stop afterwards
@@ -1,18 +1,18 @@
#!/usr/bin/env bash
#!/bin/sh

find_files() {
  find . -not \( \
  find . ! \( \
    \( \
      -wholename '.github' \
      -o -wholename './build/_workspace' \
      -o -wholename './build/bin' \
      -o -wholename './crypto/bn256' \
      -o -wholename '*/vendor/*' \
      -path '.github' \
      -o -path './build/_workspace' \
      -o -path './build/bin' \
      -o -path './crypto/bn256' \
      -o -path '*/vendor/*' \
    \) -prune \
  \) -name '*.go'
}

GOFMT="gofmt -s -w";
GOIMPORTS="goimports -w";
find_files | xargs $GOFMT;
find_files | xargs $GOIMPORTS;
GOFMT="gofmt -s -w"
GOIMPORTS="goimports -w"
find_files | xargs $GOFMT
find_files | xargs $GOIMPORTS
@@ -29,7 +29,7 @@ import (
)

var (
	abiFlag = flag.String("abi", "", "Path to the Ethereum contract ABI json to bind")
	abiFlag = flag.String("abi", "", "Path to the Ethereum contract ABI json to bind, - for STDIN")
	binFlag = flag.String("bin", "", "Path to the Ethereum contract bytecode (generate deploy method)")
	typFlag = flag.String("type", "", "Struct name for the binding (default = package name)")

@@ -75,16 +75,27 @@ func main() {
		bins  []string
		types []string
	)
	if *solFlag != "" {
	if *solFlag != "" || *abiFlag == "-" {
		// Generate the list of types to exclude from binding
		exclude := make(map[string]bool)
		for _, kind := range strings.Split(*excFlag, ",") {
			exclude[strings.ToLower(kind)] = true
		}
		contracts, err := compiler.CompileSolidity(*solcFlag, *solFlag)
		if err != nil {
			fmt.Printf("Failed to build Solidity contract: %v\n", err)
			os.Exit(-1)

		var contracts map[string]*compiler.Contract
		var err error
		if *solFlag != "" {
			contracts, err = compiler.CompileSolidity(*solcFlag, *solFlag)
			if err != nil {
				fmt.Printf("Failed to build Solidity contract: %v\n", err)
				os.Exit(-1)
			}
		} else {
			contracts, err = contractsFromStdin()
			if err != nil {
				fmt.Printf("Failed to read input ABIs from STDIN: %v\n", err)
				os.Exit(-1)
			}
		}
		// Gather all non-excluded contract for binding
		for name, contract := range contracts {
@@ -138,3 +149,12 @@ func main() {
		os.Exit(-1)
	}
}

func contractsFromStdin() (map[string]*compiler.Contract, error) {
	bytes, err := ioutil.ReadAll(os.Stdin)
	if err != nil {
		return nil, err
	}

	return compiler.ParseCombinedJSON(bytes, "", "", "", "")
}
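
The new STDIN mode pairs with `solc`'s combined-JSON output; a minimal sketch (file and package names are placeholders, and the exact field list is an assumption):

```bash
# pipe solc's combined JSON straight into abigen instead of passing --sol
solc --combined-json bin,abi,userdoc,devdoc,metadata Token.sol \
    | abigen --abi - --pkg token --out token.go
```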
@@ -12,6 +12,11 @@ synchronised with the chain or a particular Ethereum node that has no built-in (
Clef can run as a daemon on the same machine, or off a usb-stick like [usb armory](https://inversepath.com/usbarmory),
or a separate VM in a [QubesOS](https://www.qubes-os.org/) type OS setup.

Check out

* the [tutorial](tutorial.md) for some concrete examples on how the signer works.
* the [setup docs](docs/setup.md) for some information on how to configure it to work on QubesOS or USBArmory.


## Command line flags
Clef accepts the following command line options:
@@ -49,7 +54,6 @@ Example:
signer -keystore /my/keystore -chainid 4
```

Check out the [tutorial](tutorial.md) for some concrete examples on how the signer works.

## Security model

@@ -862,3 +866,12 @@ A UI should conform to the following rules.
  along with the UI.


### UI Implementations

There are a couple of implementations of a UI. We'll try to keep this list up to date.

| Name | Repo | UI type| No external resources| Blocky support| Verifies permissions | Hash information | No secondary storage | Statically linked| Can modify parameters|
| ---- | ---- | -------| ---- | ---- | ---- |---- | ---- | ---- | ---- |
| QtSigner| https://github.com/holiman/qtsigner/| Python3/QT-based| :+1:| :+1:| :+1:| :+1:| :+1:| :x: | :+1: (partially)|
| GtkSigner| https://github.com/holiman/gtksigner| Python3/GTK-based| :+1:| :x:| :x:| :+1:| :+1:| :x: | :x: |
| Frame | https://github.com/floating/frame/commits/go-signer| Electron-based| :x:| :x:| :x:| :x:| ?| :x: | :x: |
BIN
cmd/clef/docs/qubes/clef_qubes_http.png
Normal file
Binary file not shown.
After Width: | Height: | Size: 14 KiB |
BIN
cmd/clef/docs/qubes/clef_qubes_qrexec.png
Normal file
Binary file not shown.
After Width: | Height: | Size: 20 KiB |
BIN
cmd/clef/docs/qubes/qrexec-example.png
Normal file
Binary file not shown.
After Width: | Height: | Size: 19 KiB |
23
cmd/clef/docs/qubes/qubes-client.py
Normal file
@@ -0,0 +1,23 @@
"""
This implements a dispatcher which listens to localhost:8550, and proxies
requests via qrexec to the service qubes.EthSign on a target domain
"""

import http.server
import socketserver,subprocess

PORT=8550
TARGET_DOMAIN= 'debian-work'

class Dispatcher(http.server.BaseHTTPRequestHandler):
    def do_POST(self):
        post_data = self.rfile.read(int(self.headers['Content-Length']))
        p = subprocess.Popen(['/usr/bin/qrexec-client-vm',TARGET_DOMAIN,'qubes.Clefsign'],stdin=subprocess.PIPE, stdout=subprocess.PIPE)
        output = p.communicate(post_data)[0]
        self.wfile.write(output)


with socketserver.TCPServer(("",PORT), Dispatcher) as httpd:
    print("Serving at port", PORT)
    httpd.serve_forever()
16
cmd/clef/docs/qubes/qubes.Clefsign
Normal file
@@ -0,0 +1,16 @@
#!/bin/bash

SIGNER_BIN="/home/user/tools/clef/clef"
SIGNER_CMD="/home/user/tools/gtksigner/gtkui.py -s $SIGNER_BIN"

# Start clef if not already started
if [ ! -S /home/user/.clef/clef.ipc ]; then
	$SIGNER_CMD &
	sleep 1
fi

# Should be started by now
if [ -S /home/user/.clef/clef.ipc ]; then
	# Post incoming request to HTTP channel
	curl -H "Content-Type: application/json" -X POST -d @- http://localhost:8550 2>/dev/null
fi
BIN
cmd/clef/docs/qubes/qubes_newaccount-1.png
Normal file
Binary file not shown.
After Width: | Height: | Size: 25 KiB |
BIN
cmd/clef/docs/qubes/qubes_newaccount-2.png
Normal file
Binary file not shown.
After Width: | Height: | Size: 42 KiB |
198
cmd/clef/docs/setup.md
Normal file
@@ -0,0 +1,198 @@
# Setting up Clef

This document describes how Clef can be used in a more secure manner than executing it from your everyday laptop,
in order to ensure that the keys remain safe in the event that your computer should get compromised.

## Qubes OS


### Background

The Qubes operating system is based around virtual machines (qubes), where a set of virtual machines are configured, typically for
different purposes such as:

- personal
  - Your personal email, browsing etc
- work
  - Work email etc
- vault
  - a VM without network access, where gpg-keys and/or keepass credentials are stored.

A couple of dedicated virtual machines handle externalities:

- sys-net provides networking to all other (network-enabled) machines
- sys-firewall handles firewall rules
- sys-usb handles USB devices, and can map usb-devices to certain qubes.

The goal of this document is to describe how we can set up Clef to provide secure transaction
signing from a `vault` vm, to another networked qube which runs Dapps.

### Setup

There are two ways that this can be achieved: integrated via Qubes or integrated via networking.


#### 1. Qubes Integrated

Qubes provides a facility for inter-qube communication via `qrexec`. A qube can request to make a cross-qube RPC request
to another qube. The OS then asks the user if the call is permitted.

![Example](qubes/qrexec-example.png)

A policy-file can be created to allow such interaction. On the `target` domain, a service is invoked which can read the
`stdin` from the `client` qube.
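
As a concrete sketch of such a policy (the qube names are placeholders, and the policy path follows the classic Qubes convention):

```bash
# dom0: let the client qube invoke qubes.Clefsign on the vault qube,
# prompting the user for confirmation on every call
echo "work vault ask" | sudo tee /etc/qubes-rpc/policy/qubes.Clefsign
```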

This is how [Split GPG](https://www.qubes-os.org/doc/split-gpg/) is implemented. We can set up Clef the same way:

##### Server

![Server](qubes/clef_qubes_qrexec.png)

On the `target` qube, we need to define the RPC service.

[qubes.Clefsign](qubes/qubes.Clefsign):

```bash
#!/bin/bash

SIGNER_BIN="/home/user/tools/clef/clef"
SIGNER_CMD="/home/user/tools/gtksigner/gtkui.py -s $SIGNER_BIN"

# Start clef if not already started
if [ ! -S /home/user/.clef/clef.ipc ]; then
	$SIGNER_CMD &
	sleep 1
fi

# Should be started by now
if [ -S /home/user/.clef/clef.ipc ]; then
	# Post incoming request to HTTP channel
	curl -H "Content-Type: application/json" -X POST -d @- http://localhost:8550 2>/dev/null
fi

```
This RPC service is not complete (see notes about HTTP headers below), but works as a proof-of-concept.
It will forward the data received on `stdin` (forwarded by the OS) to Clef's HTTP channel.

It would have been possible to send data directly to the `/home/user/.clef/.clef.ipc`
socket via e.g `nc -U /home/user/.clef/clef.ipc`, but the reason for sending the request
data over `HTTP` instead of `IPC` is that we want the ability to forward `HTTP` headers.
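
For illustration, a header-preserving forward might look like this (a sketch; the file name and `Origin` value are placeholders):

```bash
# forward the JSON-RPC body together with the headers clef cares about
cat request.json | curl -s -H "Content-Type: application/json" \
    -H "Origin: http://dapp.example" -X POST -d @- http://localhost:8550
```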

To enable the service:

``` bash
sudo cp qubes.Clefsign /etc/qubes-rpc/
sudo chmod +x /etc/qubes-rpc/qubes.Clefsign
```

This setup uses [gtksigner](https://github.com/holiman/gtksigner), which is a very minimal GTK-based UI that works well
with minimal requirements.

##### Client


On the `client` qube, we need to create a listener which will receive the request from the Dapp, and proxy it.


[qubes-client.py](qubes/qubes-client.py):

```python

"""
This implements a dispatcher which listens to localhost:8550, and proxies
requests via qrexec to the service qubes.EthSign on a target domain
"""

import http.server
import socketserver,subprocess

PORT=8550
TARGET_DOMAIN= 'debian-work'

class Dispatcher(http.server.BaseHTTPRequestHandler):
    def do_POST(self):
        post_data = self.rfile.read(int(self.headers['Content-Length']))
        p = subprocess.Popen(['/usr/bin/qrexec-client-vm',TARGET_DOMAIN,'qubes.Clefsign'],stdin=subprocess.PIPE, stdout=subprocess.PIPE)
        output = p.communicate(post_data)[0]
        self.wfile.write(output)


with socketserver.TCPServer(("",PORT), Dispatcher) as httpd:
    print("Serving at port", PORT)
    httpd.serve_forever()


```

#### Testing

To test the flow, if we have set up `debian-work` as the `target`, we can do

```bash
$ cat newaccnt.json
{ "id": 0, "jsonrpc": "2.0","method": "account_new","params": []}

$ cat newaccnt.json | qrexec-client-vm debian-work qubes.Clefsign
```

This should pop up first a dialog to allow the IPC call:

![one](qubes/qubes_newaccount-1.png)

Followed by a GTK-dialog to approve the operation

![two](qubes/qubes_newaccount-2.png)

To test the full flow, we use the client wrapper. Start it on the `client` qube:
```
[user@work qubes]$ python3 qubes-client.py
```

Make the request over http (`client` qube):
```
[user@work clef]$ cat newaccnt.json | curl -X POST -d @- http://localhost:8550
```
And it should show the same popups again.

##### Pros and cons

The benefits of this setup are:

- This is the qubes-os intended model for inter-qube communication,
- and thus benefits from qubes-os dialogs and policies for user approval

However, it comes with a couple of drawbacks:

- The `qubes-gpg-client` must forward the http request via RPC to the `target` qube. When doing so, the proxy
  will either drop important headers, or replace them.
  - The `Host` header is most likely `localhost`
  - The `Origin` header must be forwarded
  - Information about the remote ip must be added as an `X-Forwarded-For` header. However, Clef cannot always trust an `XFF` header,
    since malicious clients may lie about `XFF` in order to fool the http server into believing it comes from another address.
- Even with a policy in place to allow rpc-calls between `caller` and `target`, there will be several popups:
  - One qubes-specific where the user specifies the `target` vm
  - One clef-specific to approve the transaction


#### 2. Network integrated

The second way to set up Clef on a qubes system is to allow networking, and have Clef listen to a port which is accessible
from other qubes.

![http](qubes/clef_qubes_http.png)



## USBArmory

The [USB armory](https://inversepath.com/usbarmory) is an open source hardware design with an 800 MHz ARM processor. It is a pocket-size
computer. When inserted into a laptop, it identifies itself as a USB network interface, basically adding another network
to your computer. Over this new network interface, you can SSH into the device.

Running Clef off a USB armory means that you can use the armory as a very versatile offline computer, which only
ever connects to a local network between your computer and the device itself.

Needless to say, while this model should be fairly secure against remote attacks, an attacker with physical access
to the USB Armory would trivially be able to extract the contents of the device filesystem.

@@ -47,11 +47,11 @@ import (
	"gopkg.in/urfave/cli.v1"
)

// ExternalApiVersion -- see extapi_changelog.md
const ExternalApiVersion = "2.0.0"
// ExternalAPIVersion -- see extapi_changelog.md
const ExternalAPIVersion = "2.0.0"

// InternalApiVersion -- see intapi_changelog.md
const InternalApiVersion = "2.0.0"
// InternalAPIVersion -- see intapi_changelog.md
const InternalAPIVersion = "2.0.0"

const legalWarning = `
WARNING!
@@ -398,10 +398,10 @@ func signer(c *cli.Context) error {
	}
	// register signer API with server
	var (
		extapiUrl = "n/a"
		ipcApiUrl = "n/a"
		extapiURL = "n/a"
		ipcapiURL = "n/a"
	)
	rpcApi := []rpc.API{
	rpcAPI := []rpc.API{
		{
			Namespace: "account",
			Public:    true,
@@ -415,12 +415,12 @@ func signer(c *cli.Context) error {

	// start http server
	httpEndpoint := fmt.Sprintf("%s:%d", c.String(utils.RPCListenAddrFlag.Name), c.Int(rpcPortFlag.Name))
	listener, _, err := rpc.StartHTTPEndpoint(httpEndpoint, rpcApi, []string{"account"}, cors, vhosts)
	listener, _, err := rpc.StartHTTPEndpoint(httpEndpoint, rpcAPI, []string{"account"}, cors, vhosts)
	if err != nil {
		utils.Fatalf("Could not start RPC api: %v", err)
	}
	extapiUrl = fmt.Sprintf("http://%s", httpEndpoint)
	log.Info("HTTP endpoint opened", "url", extapiUrl)
	extapiURL = fmt.Sprintf("http://%s", httpEndpoint)
	log.Info("HTTP endpoint opened", "url", extapiURL)

	defer func() {
		listener.Close()
@@ -430,19 +430,19 @@ func signer(c *cli.Context) error {
	}
	if !c.Bool(utils.IPCDisabledFlag.Name) {
		if c.IsSet(utils.IPCPathFlag.Name) {
			ipcApiUrl = c.String(utils.IPCPathFlag.Name)
			ipcapiURL = c.String(utils.IPCPathFlag.Name)
		} else {
			ipcApiUrl = filepath.Join(configDir, "clef.ipc")
			ipcapiURL = filepath.Join(configDir, "clef.ipc")
		}

		listener, _, err := rpc.StartIPCEndpoint(ipcApiUrl, rpcApi)
		listener, _, err := rpc.StartIPCEndpoint(ipcapiURL, rpcAPI)
		if err != nil {
			utils.Fatalf("Could not start IPC api: %v", err)
		}
		log.Info("IPC endpoint opened", "url", ipcApiUrl)
		log.Info("IPC endpoint opened", "url", ipcapiURL)
		defer func() {
			listener.Close()
			log.Info("IPC endpoint closed", "url", ipcApiUrl)
			log.Info("IPC endpoint closed", "url", ipcapiURL)
		}()

	}
@@ -453,10 +453,10 @@ func signer(c *cli.Context) error {
	}
	ui.OnSignerStartup(core.StartupInfo{
		Info: map[string]interface{}{
			"extapi_version": ExternalApiVersion,
			"intapi_version": InternalApiVersion,
			"extapi_http":    extapiUrl,
			"extapi_ipc":     ipcApiUrl,
			"extapi_version": ExternalAPIVersion,
			"intapi_version": InternalAPIVersion,
			"extapi_http":    extapiURL,
			"extapi_ipc":     ipcapiURL,
		},
	})

72
cmd/ethkey/changepassphrase.go
Normal file
@@ -0,0 +1,72 @@
package main

import (
	"fmt"
	"io/ioutil"
	"strings"

	"github.com/ethereum/go-ethereum/accounts/keystore"
	"github.com/ethereum/go-ethereum/cmd/utils"
	"gopkg.in/urfave/cli.v1"
)

var newPassphraseFlag = cli.StringFlag{
	Name:  "newpasswordfile",
	Usage: "the file that contains the new passphrase for the keyfile",
}

var commandChangePassphrase = cli.Command{
	Name:      "changepassphrase",
	Usage:     "change the passphrase on a keyfile",
	ArgsUsage: "<keyfile>",
	Description: `
Change the passphrase of a keyfile.`,
	Flags: []cli.Flag{
		passphraseFlag,
		newPassphraseFlag,
	},
	Action: func(ctx *cli.Context) error {
		keyfilepath := ctx.Args().First()

		// Read key from file.
		keyjson, err := ioutil.ReadFile(keyfilepath)
		if err != nil {
			utils.Fatalf("Failed to read the keyfile at '%s': %v", keyfilepath, err)
		}

		// Decrypt key with passphrase.
		passphrase := getPassphrase(ctx)
		key, err := keystore.DecryptKey(keyjson, passphrase)
		if err != nil {
			utils.Fatalf("Error decrypting key: %v", err)
		}

		// Get a new passphrase.
		fmt.Println("Please provide a new passphrase")
		var newPhrase string
		if passFile := ctx.String(newPassphraseFlag.Name); passFile != "" {
			content, err := ioutil.ReadFile(passFile)
			if err != nil {
				utils.Fatalf("Failed to read new passphrase file '%s': %v", passFile, err)
			}
			newPhrase = strings.TrimRight(string(content), "\r\n")
		} else {
			newPhrase = promptPassphrase(true)
		}

		// Encrypt the key with the new passphrase.
		newJson, err := keystore.EncryptKey(key, newPhrase, keystore.StandardScryptN, keystore.StandardScryptP)
		if err != nil {
			utils.Fatalf("Error encrypting with new passphrase: %v", err)
		}

		// Then write the new keyfile in place of the old one.
		if err := ioutil.WriteFile(keyfilepath, newJson, 600); err != nil {
			utils.Fatalf("Error writing new keyfile to disk: %v", err)
		}

		// Don't print anything. Just return successfully,
		// producing a positive exit code.
		return nil
	},
}
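
A hypothetical invocation of the new command (file names are placeholders; `--passwordfile` is the shared flag handled by `getPassphrase`):

```bash
# rotate a keyfile's passphrase non-interactively
ethkey changepassphrase --passwordfile old-pass.txt \
    --newpasswordfile new-pass.txt keyfile.json
```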
@@ -90,7 +90,7 @@ If you want to encrypt an existing private key, it can be specified by setting
	}

	// Encrypt key with passphrase.
	passphrase := getPassPhrase(ctx, true)
	passphrase := promptPassphrase(true)
	keyjson, err := keystore.EncryptKey(key, passphrase, keystore.StandardScryptN, keystore.StandardScryptP)
	if err != nil {
		utils.Fatalf("Error encrypting key: %v", err)

@@ -60,7 +60,7 @@ make sure to use this feature with great caution!`,
	}

	// Decrypt key with passphrase.
	passphrase := getPassPhrase(ctx, false)
	passphrase := getPassphrase(ctx)
	key, err := keystore.DecryptKey(keyjson, passphrase)
	if err != nil {
		utils.Fatalf("Error decrypting key: %v", err)

@@ -38,6 +38,7 @@ func init() {
	app.Commands = []cli.Command{
		commandGenerate,
		commandInspect,
		commandChangePassphrase,
		commandSignMessage,
		commandVerifyMessage,
	}

@@ -62,7 +62,7 @@ To sign a message contained in a file, use the --msgfile flag.
	}

	// Decrypt key with passphrase.
	passphrase := getPassPhrase(ctx, false)
	passphrase := getPassphrase(ctx)
	key, err := keystore.DecryptKey(keyjson, passphrase)
	if err != nil {
		utils.Fatalf("Error decrypting key: %v", err)

@@ -28,11 +28,32 @@ import (
	"gopkg.in/urfave/cli.v1"
)

// getPassPhrase obtains a passphrase given by the user. It first checks the
// --passphrase command line flag and ultimately prompts the user for a
// promptPassphrase prompts the user for a passphrase. Set confirmation to true
// to require the user to confirm the passphrase.
func promptPassphrase(confirmation bool) string {
	passphrase, err := console.Stdin.PromptPassword("Passphrase: ")
	if err != nil {
		utils.Fatalf("Failed to read passphrase: %v", err)
	}

	if confirmation {
		confirm, err := console.Stdin.PromptPassword("Repeat passphrase: ")
		if err != nil {
			utils.Fatalf("Failed to read passphrase confirmation: %v", err)
		}
		if passphrase != confirm {
			utils.Fatalf("Passphrases do not match")
		}
	}

	return passphrase
}

// getPassphrase obtains a passphrase given by the user. It first checks the
// --passfile command line flag and ultimately prompts the user for a
// passphrase.
func getPassPhrase(ctx *cli.Context, confirmation bool) string {
	// Look for the --passphrase flag.
func getPassphrase(ctx *cli.Context) string {
	// Look for the --passwordfile flag.
	passphraseFile := ctx.String(passphraseFlag.Name)
	if passphraseFile != "" {
		content, err := ioutil.ReadFile(passphraseFile)
@@ -44,20 +65,7 @@ func getPassPhrase(ctx *cli.Context, confirmation bool) string {
	}

	// Otherwise prompt the user for the passphrase.
	passphrase, err := console.Stdin.PromptPassword("Passphrase: ")
	if err != nil {
		utils.Fatalf("Failed to read passphrase: %v", err)
	}
	if confirmation {
		confirm, err := console.Stdin.PromptPassword("Repeat passphrase: ")
		if err != nil {
			utils.Fatalf("Failed to read passphrase confirmation: %v", err)
		}
		if passphrase != confirm {
			utils.Fatalf("Passphrases do not match")
		}
	}
	return passphrase
	return promptPassphrase(false)
}

// signHash is a helper function that calculates a hash for the given message
@@ -32,6 +32,8 @@ type JSONLogger struct {
	cfg *vm.LogConfig
}

// NewJSONLogger creates a new EVM tracer that prints execution steps as JSON objects
// into the provided stream.
func NewJSONLogger(cfg *vm.LogConfig, writer io.Writer) *JSONLogger {
	return &JSONLogger{json.NewEncoder(writer), cfg}
}

@@ -21,12 +21,12 @@ import (
	"encoding/json"
	"fmt"
	"io/ioutil"
	"math/big"
	"os"
	goruntime "runtime"
	"runtime/pprof"
	"time"

	goruntime "runtime"

	"github.com/ethereum/go-ethereum/cmd/evm/internal/compiler"
	"github.com/ethereum/go-ethereum/cmd/utils"
	"github.com/ethereum/go-ethereum/common"
@@ -86,6 +86,7 @@ func runCmd(ctx *cli.Context) error {
		chainConfig *params.ChainConfig
		sender      = common.BytesToAddress([]byte("sender"))
		receiver    = common.BytesToAddress([]byte("receiver"))
		blockNumber uint64
	)
	if ctx.GlobalBool(MachineFlag.Name) {
		tracer = NewJSONLogger(logconfig, os.Stdout)
@@ -97,13 +98,13 @@ func runCmd(ctx *cli.Context) error {
	}
	if ctx.GlobalString(GenesisFlag.Name) != "" {
		gen := readGenesis(ctx.GlobalString(GenesisFlag.Name))
		db, _ := ethdb.NewMemDatabase()
		db := ethdb.NewMemDatabase()
		genesis := gen.ToBlock(db)
		statedb, _ = state.New(genesis.Root(), state.NewDatabase(db))
		chainConfig = gen.Config
		blockNumber = gen.Number
	} else {
		db, _ := ethdb.NewMemDatabase()
		statedb, _ = state.New(common.Hash{}, state.NewDatabase(db))
		statedb, _ = state.New(common.Hash{}, state.NewDatabase(ethdb.NewMemDatabase()))
	}
	if ctx.GlobalString(SenderFlag.Name) != "" {
		sender = common.HexToAddress(ctx.GlobalString(SenderFlag.Name))
@@ -156,11 +157,12 @@ func runCmd(ctx *cli.Context) error {

	initialGas := ctx.GlobalUint64(GasFlag.Name)
	runtimeConfig := runtime.Config{
		Origin:   sender,
		State:    statedb,
		GasLimit: initialGas,
		GasPrice: utils.GlobalBig(ctx, PriceFlag.Name),
		Value:    utils.GlobalBig(ctx, ValueFlag.Name),
		Origin:      sender,
		State:       statedb,
		GasLimit:    initialGas,
		GasPrice:    utils.GlobalBig(ctx, PriceFlag.Name),
		Value:       utils.GlobalBig(ctx, ValueFlag.Name),
		BlockNumber: new(big.Int).SetUint64(blockNumber),
		EVMConfig: vm.Config{
			Tracer: tracer,
			Debug:  ctx.GlobalBool(DebugFlag.Name) || ctx.GlobalBool(MachineFlag.Name),

@@ -38,6 +38,8 @@ var stateTestCommand = cli.Command{
	ArgsUsage: "<file>",
}

// StatetestResult contains the execution status after running a state test, any
// error that might have occurred and a dump of the final state if requested.
type StatetestResult struct {
	Name string `json:"name"`
	Pass bool   `json:"pass"`
@@ -77,9 +77,6 @@ var (
	accJSONFlag = flag.String("account.json", "", "Key json file to fund user requests with")
	accPassFlag = flag.String("account.pass", "", "Decryption password to access faucet funds")

	githubUser  = flag.String("github.user", "", "GitHub user to authenticate with for Gist access")
	githubToken = flag.String("github.token", "", "GitHub personal token to access Gists with")

	captchaToken  = flag.String("captcha.token", "", "Recaptcha site key to authenticate client side")
	captchaSecret = flag.String("captcha.secret", "", "Recaptcha secret key to authenticate server side")

@@ -474,7 +471,7 @@ func (f *faucet) apiHandler(conn *websocket.Conn) {
		amount = new(big.Int).Div(amount, new(big.Int).Exp(big.NewInt(2), big.NewInt(int64(msg.Tier)), nil))

		tx := types.NewTransaction(f.nonce+uint64(len(f.reqs)), address, amount, 21000, f.price, nil)
		signed, err := f.keystore.SignTx(f.account, tx, f.config.ChainId)
		signed, err := f.keystore.SignTx(f.account, tx, f.config.ChainID)
		if err != nil {
			f.lock.Unlock()
			if err = sendError(conn, err); err != nil {
@@ -638,59 +635,6 @@ func sendSuccess(conn *websocket.Conn, msg string) error {
	return send(conn, map[string]string{"success": msg}, time.Second)
}

// authGitHub tries to authenticate a faucet request using GitHub gists, returning
// the username, avatar URL and Ethereum address to fund on success.
func authGitHub(url string) (string, string, common.Address, error) {
	// Retrieve the gist from the GitHub Gist APIs
	parts := strings.Split(url, "/")
	req, _ := http.NewRequest("GET", "https://api.github.com/gists/"+parts[len(parts)-1], nil)
	if *githubUser != "" {
		req.SetBasicAuth(*githubUser, *githubToken)
	}
	res, err := http.DefaultClient.Do(req)
	if err != nil {
		return "", "", common.Address{}, err
	}
	var gist struct {
		Owner struct {
			Login string `json:"login"`
		} `json:"owner"`
		Files map[string]struct {
			Content string `json:"content"`
		} `json:"files"`
	}
	err = json.NewDecoder(res.Body).Decode(&gist)
	res.Body.Close()
	if err != nil {
		return "", "", common.Address{}, err
	}
	if gist.Owner.Login == "" {
		return "", "", common.Address{}, errors.New("Anonymous Gists not allowed")
	}
	// Iterate over all the files and look for Ethereum addresses
	var address common.Address
	for _, file := range gist.Files {
		content := strings.TrimSpace(file.Content)
		if len(content) == 2+common.AddressLength*2 {
			address = common.HexToAddress(content)
		}
	}
	if address == (common.Address{}) {
		return "", "", common.Address{}, errors.New("No Ethereum address found to fund")
	}
	// Validate the user's existence since the API is unhelpful here
	if res, err = http.Head("https://github.com/" + gist.Owner.Login); err != nil {
		return "", "", common.Address{}, err
	}
	res.Body.Close()

	if res.StatusCode != 200 {
		return "", "", common.Address{}, errors.New("Invalid user... boom!")
	}
	// Everything passed validation, return the gathered infos
	return gist.Owner.Login + "@github", fmt.Sprintf("https://github.com/%s.png?size=64", gist.Owner.Login), address, nil
}

// authTwitter tries to authenticate a faucet request using Twitter posts, returning
// the username, avatar URL and Ethereum address to fund on success.
func authTwitter(url string) (string, string, common.Address, error) {
@@ -340,7 +340,7 @@ func importWallet(ctx *cli.Context) error {
	if len(keyfile) == 0 {
		utils.Fatalf("keyfile must be given as argument")
	}
	keyJson, err := ioutil.ReadFile(keyfile)
	keyJSON, err := ioutil.ReadFile(keyfile)
	if err != nil {
		utils.Fatalf("Could not read wallet file: %v", err)
	}
@@ -349,7 +349,7 @@ func importWallet(ctx *cli.Context) error {
	passphrase := getPassPhrase("", false, 0, utils.MakePasswordList(ctx))

	ks := stack.AccountManager().Backends(keystore.KeyStoreType)[0].(*keystore.KeyStore)
	acct, err := ks.ImportPreSaleKey(keyJson, passphrase)
	acct, err := ks.ImportPreSaleKey(keyJSON, passphrase)
	if err != nil {
		utils.Fatalf("%v", err)
	}

@@ -41,7 +41,7 @@ var bugCommand = cli.Command{
	Category: "MISCELLANEOUS COMMANDS",
}

const issueUrl = "https://github.com/ethereum/go-ethereum/issues/new"
const issueURL = "https://github.com/ethereum/go-ethereum/issues/new"

// reportBug reports a bug by opening a new URL to the go-ethereum GH issue
// tracker and setting default values as the issue body.
@@ -58,8 +58,8 @@ func reportBug(ctx *cli.Context) error {
	fmt.Fprintln(&buff, header)

	// open a new GH issue
	if !browser.Open(issueUrl + "?body=" + url.QueryEscape(buff.String())) {
		fmt.Printf("Please file a new issue at %s using this template:\n\n%s", issueUrl, buff.String())
	if !browser.Open(issueURL + "?body=" + url.QueryEscape(buff.String())) {
		fmt.Printf("Please file a new issue at %s using this template:\n\n%s", issueURL, buff.String())
	}
	return nil
}

@@ -24,7 +24,7 @@ import (
	"testing"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/params"
)
@@ -131,8 +131,8 @@ func testDAOForkBlockNewChain(t *testing.T, test int, genesis string, expectBloc
	if genesis != "" {
		genesisHash = daoGenesisHash
	}
	config, err := core.GetChainConfig(db, genesisHash)
	if err != nil {
	config := rawdb.ReadChainConfig(db, genesisHash)
	if config == nil {
		t.Errorf("test %d: failed to retrieve chain config: %v", test, err)
		return // we want to return here, the other checks can't make it past this point (nil panic).
	}

@@ -77,7 +77,7 @@ var customGenesisTests = []struct {
			"homesteadBlock" : 314,
			"daoForkBlock"   : 141,
			"daoForkSupport" : true
		},
	}
}`,
		query:  "eth.getBlock(0).nonce",
		result: "0x0000000000000042",
@@ -19,12 +19,16 @@ package main

import (
	"fmt"
	"math"
	"os"
	"runtime"
	godebug "runtime/debug"
	"sort"
	"strconv"
	"strings"
	"time"

	"github.com/elastic/gosigar"
	"github.com/ethereum/go-ethereum/accounts"
	"github.com/ethereum/go-ethereum/accounts/keystore"
	"github.com/ethereum/go-ethereum/cmd/utils"
@@ -140,6 +144,15 @@ var (
		utils.WhisperMaxMessageSizeFlag,
		utils.WhisperMinPOWFlag,
	}

	metricsFlags = []cli.Flag{
		utils.MetricsEnableInfluxDBFlag,
		utils.MetricsInfluxDBEndpointFlag,
		utils.MetricsInfluxDBDatabaseFlag,
		utils.MetricsInfluxDBUsernameFlag,
		utils.MetricsInfluxDBPasswordFlag,
		utils.MetricsInfluxDBHostTagFlag,
	}
)

func init() {
@@ -182,12 +195,32 @@ func init() {
	app.Flags = append(app.Flags, consoleFlags...)
	app.Flags = append(app.Flags, debug.Flags...)
	app.Flags = append(app.Flags, whisperFlags...)
	app.Flags = append(app.Flags, metricsFlags...)

	app.Before = func(ctx *cli.Context) error {
		runtime.GOMAXPROCS(runtime.NumCPU())
		if err := debug.Setup(ctx); err != nil {
			return err
		}
		// Cap the cache allowance and tune the garbage collector
		var mem gosigar.Mem
		if err := mem.Get(); err == nil {
			allowance := int(mem.Total / 1024 / 1024 / 3)
			if cache := ctx.GlobalInt(utils.CacheFlag.Name); cache > allowance {
				log.Warn("Sanitizing cache to Go's GC limits", "provided", cache, "updated", allowance)
				ctx.GlobalSet(utils.CacheFlag.Name, strconv.Itoa(allowance))
			}
		}
		// Ensure Go's GC ignores the database cache for trigger percentage
		cache := ctx.GlobalInt(utils.CacheFlag.Name)
		gogc := math.Max(20, math.Min(100, 100/(float64(cache)/1024)))

		log.Debug("Sanitizing Go's GC trigger", "percent", int(gogc))
		godebug.SetGCPercent(int(gogc))

		// Start metrics export if enabled
		utils.SetupMetrics(ctx)

		// Start system runtime metrics collection
		go metrics.CollectProcessMetrics(3 * time.Second)
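
(Worked example of the GC trigger formula above, for intuition: a 1024 MB cache leaves the GC percent at 100, a 2048 MB cache halves it to 50, a 4096 MB cache gives 25, and anything beyond roughly 5 GB hits the 20 floor.)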

@@ -223,6 +256,8 @@ func geth(ctx *cli.Context) error {
// it unlocks any requested accounts, and starts the RPC/IPC interfaces and the
// miner.
func startNode(ctx *cli.Context, stack *node.Node) {
	debug.Memsize.Add("node", stack)

	// Start up the node itself
	utils.StartNode(stack)

@@ -206,11 +206,22 @@ var AppHelpFlagGroups = []flagGroup{
	{
		Name: "LOGGING AND DEBUGGING",
		Flags: append([]cli.Flag{
			utils.MetricsEnabledFlag,
			utils.FakePoWFlag,
			utils.NoCompactionFlag,
		}, debug.Flags...),
	},
	{
		Name: "METRICS AND STATS",
		Flags: []cli.Flag{
			utils.MetricsEnabledFlag,
			utils.MetricsEnableInfluxDBFlag,
			utils.MetricsInfluxDBEndpointFlag,
			utils.MetricsInfluxDBDatabaseFlag,
			utils.MetricsInfluxDBUsernameFlag,
			utils.MetricsInfluxDBPasswordFlag,
			utils.MetricsInfluxDBHostTagFlag,
		},
	},
	{
		Name:  "WHISPER (EXPERIMENTAL)",
		Flags: whisperFlags,
@@ -180,7 +180,10 @@ func main() {
		},
	},
	}
	app.Run(os.Args)
	if err := app.Run(os.Args); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}

func showNetwork(ctx *cli.Context) error {
@@ -275,9 +278,8 @@ func createNode(ctx *cli.Context) error {
	if len(ctx.Args()) != 0 {
		return cli.ShowCommandHelp(ctx, ctx.Command.Name)
	}
	config := &adapters.NodeConfig{
		Name: ctx.String("name"),
	}
	config := adapters.RandomNodeConfig()
	config.Name = ctx.String("name")
	if key := ctx.String("key"); key != "" {
		privKey, err := crypto.HexToECDSA(key)
		if err != nil {

@@ -103,8 +103,8 @@ func newCppEthereumGenesisSpec(network string, genesis *core.Genesis) (*cppEther
	spec.Params.ByzantiumForkBlock = (hexutil.Uint64)(genesis.Config.ByzantiumBlock.Uint64())
	spec.Params.ConstantinopleForkBlock = (hexutil.Uint64)(math.MaxUint64)

	spec.Params.NetworkID = (hexutil.Uint64)(genesis.Config.ChainId.Uint64())
	spec.Params.ChainID = (hexutil.Uint64)(genesis.Config.ChainId.Uint64())
	spec.Params.NetworkID = (hexutil.Uint64)(genesis.Config.ChainID.Uint64())
	spec.Params.ChainID = (hexutil.Uint64)(genesis.Config.ChainID.Uint64())

	spec.Params.MaximumExtraDataSize = (hexutil.Uint64)(params.MaximumExtraDataSize)
	spec.Params.MinGasLimit = (hexutil.Uint64)(params.MinGasLimit)
@@ -284,7 +284,7 @@ func newParityChainSpec(network string, genesis *core.Genesis, bootnodes []strin
	spec.Params.MaximumExtraDataSize = (hexutil.Uint64)(params.MaximumExtraDataSize)
	spec.Params.MinGasLimit = (hexutil.Uint64)(params.MinGasLimit)
	spec.Params.GasLimitBoundDivisor = (hexutil.Uint64)(params.GasLimitBoundDivisor)
	spec.Params.NetworkID = (hexutil.Uint64)(genesis.Config.ChainId.Uint64())
	spec.Params.NetworkID = (hexutil.Uint64)(genesis.Config.ChainID.Uint64())
	spec.Params.MaxCodeSize = params.MaxCodeSize
	spec.Params.EIP155Transition = genesis.Config.EIP155Block.Uint64()
	spec.Params.EIP98Transition = math.MaxUint64

@@ -609,7 +609,7 @@ func deployDashboard(client *sshClient, network string, conf *config, config *da
	}
	template.Must(template.New("").Parse(dashboardContent)).Execute(indexfile, map[string]interface{}{
		"Network":      network,
		"NetworkID":    conf.Genesis.Config.ChainId,
		"NetworkID":    conf.Genesis.Config.ChainID,
		"NetworkTitle": strings.Title(network),
		"EthstatsPage": config.ethstats,
		"ExplorerPage": config.explorer,

@@ -49,7 +49,7 @@ func (w *wizard) deployFaucet() {
	existed := err == nil

	infos.node.genesis, _ = json.MarshalIndent(w.conf.Genesis, "", " ")
	infos.node.network = w.conf.Genesis.Config.ChainId.Int64()
	infos.node.network = w.conf.Genesis.Config.ChainID.Int64()

	// Figure out which port to listen on
	fmt.Println()
@@ -121,7 +121,7 @@ func (w *wizard) makeGenesis() {
	// Query the user for some custom extras
	fmt.Println()
	fmt.Println("Specify your chain/network ID if you want an explicit one (default = random)")
	genesis.Config.ChainId = new(big.Int).SetUint64(uint64(w.readDefaultInt(rand.Intn(65536))))
	genesis.Config.ChainID = new(big.Int).SetUint64(uint64(w.readDefaultInt(rand.Intn(65536))))

	// All done, store the genesis and flush to disk
	log.Info("Configured new genesis block")

@@ -276,13 +276,3 @@ func (stats serverStats) render() {
	}
	table.Render()
}

// protips contains a collection of network infos to report pro-tips
// based on.
type protips struct {
	genesis   string
	network   int64
	bootFull  []string
	bootLight []string
	ethstats  string
}

@@ -56,7 +56,7 @@ func (w *wizard) deployNode(boot bool) {
	existed := err == nil

	infos.genesis, _ = json.MarshalIndent(w.conf.Genesis, "", " ")
	infos.network = w.conf.Genesis.Config.ChainId.Int64()
	infos.network = w.conf.Genesis.Config.ChainID.Int64()

	// Figure out where the user wants to store the persistent data
	fmt.Println()
@@ -107,7 +107,7 @@ func (w *wizard) deployNode(boot bool) {
		// Ethash based miners only need an etherbase to mine against
		fmt.Println()
		if infos.etherbase == "" {
			fmt.Printf("What address should the miner user?\n")
			fmt.Printf("What address should the miner use?\n")
			for {
				if address := w.readAddress(); address != nil {
					infos.etherbase = address.Hex()
@@ -115,7 +115,7 @@ func (w *wizard) deployNode(boot bool) {
			}
		}
	} else {
		fmt.Printf("What address should the miner user? (default = %s)\n", infos.etherbase)
		fmt.Printf("What address should the miner use? (default = %s)\n", infos.etherbase)
		infos.etherbase = w.readDefaultAddress(common.HexToAddress(infos.etherbase)).Hex()
	}
} else if w.conf.Genesis.Config.Clique != nil {

@@ -52,7 +52,7 @@ func (w *wizard) deployWallet() {
	existed := err == nil

	infos.genesis, _ = json.MarshalIndent(w.conf.Genesis, "", " ")
	infos.network = w.conf.Genesis.Config.ChainId.Int64()
	infos.network = w.conf.Genesis.Config.ChainID.Int64()

	// Figure out which port to listen on
	fmt.Println()
@@ -24,6 +24,7 @@ import (
	"reflect"
	"strconv"
	"strings"
	"time"
	"unicode"

	cli "gopkg.in/urfave/cli.v1"
@@ -37,6 +38,8 @@ import (
	bzzapi "github.com/ethereum/go-ethereum/swarm/api"
)

const SWARM_VERSION = "0.3"

var (
	//flag definition for the dumpconfig command
	DumpConfigCommand = cli.Command{
@@ -58,19 +61,25 @@ var (

//constants for environment variables
const (
	SWARM_ENV_CHEQUEBOOK_ADDR = "SWARM_CHEQUEBOOK_ADDR"
	SWARM_ENV_ACCOUNT = "SWARM_ACCOUNT"
	SWARM_ENV_LISTEN_ADDR = "SWARM_LISTEN_ADDR"
	SWARM_ENV_PORT = "SWARM_PORT"
	SWARM_ENV_NETWORK_ID = "SWARM_NETWORK_ID"
	SWARM_ENV_SWAP_ENABLE = "SWARM_SWAP_ENABLE"
	SWARM_ENV_SWAP_API = "SWARM_SWAP_API"
	SWARM_ENV_SYNC_ENABLE = "SWARM_SYNC_ENABLE"
	SWARM_ENV_ENS_API = "SWARM_ENS_API"
	SWARM_ENV_ENS_ADDR = "SWARM_ENS_ADDR"
	SWARM_ENV_CORS = "SWARM_CORS"
	SWARM_ENV_BOOTNODES = "SWARM_BOOTNODES"
	GETH_ENV_DATADIR = "GETH_DATADIR"
	SWARM_ENV_CHEQUEBOOK_ADDR      = "SWARM_CHEQUEBOOK_ADDR"
	SWARM_ENV_ACCOUNT              = "SWARM_ACCOUNT"
	SWARM_ENV_LISTEN_ADDR          = "SWARM_LISTEN_ADDR"
	SWARM_ENV_PORT                 = "SWARM_PORT"
	SWARM_ENV_NETWORK_ID           = "SWARM_NETWORK_ID"
	SWARM_ENV_SWAP_ENABLE          = "SWARM_SWAP_ENABLE"
	SWARM_ENV_SWAP_API             = "SWARM_SWAP_API"
	SWARM_ENV_SYNC_DISABLE         = "SWARM_SYNC_DISABLE"
	SWARM_ENV_SYNC_UPDATE_DELAY    = "SWARM_ENV_SYNC_UPDATE_DELAY"
	SWARM_ENV_DELIVERY_SKIP_CHECK  = "SWARM_DELIVERY_SKIP_CHECK"
	SWARM_ENV_ENS_API              = "SWARM_ENS_API"
	SWARM_ENV_ENS_ADDR             = "SWARM_ENS_ADDR"
	SWARM_ENV_CORS                 = "SWARM_CORS"
	SWARM_ENV_BOOTNODES            = "SWARM_BOOTNODES"
	SWARM_ENV_PSS_ENABLE           = "SWARM_PSS_ENABLE"
	SWARM_ENV_STORE_PATH           = "SWARM_STORE_PATH"
	SWARM_ENV_STORE_CAPACITY       = "SWARM_STORE_CAPACITY"
	SWARM_ENV_STORE_CACHE_CAPACITY = "SWARM_STORE_CACHE_CAPACITY"
	GETH_ENV_DATADIR               = "GETH_DATADIR"
)
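
For illustration, a few of these can be overridden via the environment like so (a sketch; values and the account are placeholders):

```bash
# configure swarm through its environment variables instead of flags
export SWARM_NETWORK_ID=42
export SWARM_SYNC_DISABLE=true
export SWARM_STORE_CAPACITY=5000000
swarm --bzzaccount 0xYourAccount
```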
|
||||
|
||||
// These settings ensure that TOML keys use the same names as Go struct fields.
|
||||
@@ -92,10 +101,8 @@ var tomlSettings = toml.Config{
|
||||
|
||||
//before booting the swarm node, build the configuration
|
||||
func buildConfig(ctx *cli.Context) (config *bzzapi.Config, err error) {
|
||||
//check for deprecated flags
|
||||
checkDeprecated(ctx)
|
||||
//start by creating a default config
|
||||
config = bzzapi.NewDefaultConfig()
|
||||
config = bzzapi.NewConfig()
|
||||
//first load settings from config file (if provided)
|
||||
config, err = configFileOverride(config, ctx)
|
||||
if err != nil {
|
||||
@@ -168,7 +175,7 @@ func cmdLineOverride(currentConfig *bzzapi.Config, ctx *cli.Context) *bzzapi.Con
|
||||
|
||||
if networkid := ctx.GlobalString(SwarmNetworkIdFlag.Name); networkid != "" {
|
||||
if id, _ := strconv.Atoi(networkid); id != 0 {
|
||||
currentConfig.NetworkId = uint64(id)
|
||||
currentConfig.NetworkID = uint64(id)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -191,12 +198,20 @@ func cmdLineOverride(currentConfig *bzzapi.Config, ctx *cli.Context) *bzzapi.Con
|
||||
currentConfig.SwapEnabled = true
|
||||
}
|
||||
|
||||
if ctx.GlobalIsSet(SwarmSyncEnabledFlag.Name) {
|
||||
currentConfig.SyncEnabled = true
|
||||
if ctx.GlobalIsSet(SwarmSyncDisabledFlag.Name) {
|
||||
currentConfig.SyncEnabled = false
|
||||
}
|
||||
|
||||
currentConfig.SwapApi = ctx.GlobalString(SwarmSwapAPIFlag.Name)
|
||||
if currentConfig.SwapEnabled && currentConfig.SwapApi == "" {
|
||||
if d := ctx.GlobalDuration(SwarmSyncUpdateDelay.Name); d > 0 {
|
||||
currentConfig.SyncUpdateDelay = d
|
||||
}
|
||||
|
||||
if ctx.GlobalIsSet(SwarmDeliverySkipCheckFlag.Name) {
|
||||
currentConfig.DeliverySkipCheck = true
|
||||
}
|
||||
|
||||
currentConfig.SwapAPI = ctx.GlobalString(SwarmSwapAPIFlag.Name)
|
||||
if currentConfig.SwapEnabled && currentConfig.SwapAPI == "" {
|
||||
utils.Fatalf(SWARM_ERR_SWAP_SET_NO_API)
|
||||
}
|
||||
|
||||
@@ -209,10 +224,6 @@ func cmdLineOverride(currentConfig *bzzapi.Config, ctx *cli.Context) *bzzapi.Con
|
||||
currentConfig.EnsAPIs = ensAPIs
|
||||
}
|
||||
|
||||
if ensaddr := ctx.GlobalString(DeprecatedEnsAddrFlag.Name); ensaddr != "" {
|
||||
currentConfig.EnsRoot = common.HexToAddress(ensaddr)
|
||||
}
|
||||
|
||||
if cors := ctx.GlobalString(CorsStringFlag.Name); cors != "" {
|
||||
currentConfig.Cors = cors
|
||||
}
|
||||
@@ -221,6 +232,18 @@ func cmdLineOverride(currentConfig *bzzapi.Config, ctx *cli.Context) *bzzapi.Con
 		currentConfig.BootNodes = ctx.GlobalString(utils.BootnodesFlag.Name)
 	}

+	if storePath := ctx.GlobalString(SwarmStorePath.Name); storePath != "" {
+		currentConfig.LocalStoreParams.ChunkDbPath = storePath
+	}
+
+	if storeCapacity := ctx.GlobalUint64(SwarmStoreCapacity.Name); storeCapacity != 0 {
+		currentConfig.LocalStoreParams.DbCapacity = storeCapacity
+	}
+
+	if storeCacheCapacity := ctx.GlobalUint(SwarmStoreCacheCapacity.Name); storeCacheCapacity != 0 {
+		currentConfig.LocalStoreParams.CacheCapacity = storeCacheCapacity
+	}
+
 	return currentConfig

 }
@@ -239,7 +262,7 @@ func envVarsOverride(currentConfig *bzzapi.Config) (config *bzzapi.Config) {

 	if networkid := os.Getenv(SWARM_ENV_NETWORK_ID); networkid != "" {
 		if id, _ := strconv.Atoi(networkid); id != 0 {
-			currentConfig.NetworkId = uint64(id)
+			currentConfig.NetworkID = uint64(id)
 		}
 	}

@@ -262,17 +285,29 @@ func envVarsOverride(currentConfig *bzzapi.Config) (config *bzzapi.Config) {
 		}
 	}

-	if syncenable := os.Getenv(SWARM_ENV_SYNC_ENABLE); syncenable != "" {
-		if sync, err := strconv.ParseBool(syncenable); err != nil {
-			currentConfig.SyncEnabled = sync
+	if syncdisable := os.Getenv(SWARM_ENV_SYNC_DISABLE); syncdisable != "" {
+		if sync, err := strconv.ParseBool(syncdisable); err != nil {
+			currentConfig.SyncEnabled = !sync
 		}
 	}

+	if v := os.Getenv(SWARM_ENV_DELIVERY_SKIP_CHECK); v != "" {
+		if skipCheck, err := strconv.ParseBool(v); err != nil {
+			currentConfig.DeliverySkipCheck = skipCheck
+		}
+	}
+
+	if v := os.Getenv(SWARM_ENV_SYNC_UPDATE_DELAY); v != "" {
+		if d, err := time.ParseDuration(v); err != nil {
+			currentConfig.SyncUpdateDelay = d
+		}
+	}
+
 	if swapapi := os.Getenv(SWARM_ENV_SWAP_API); swapapi != "" {
-		currentConfig.SwapApi = swapapi
+		currentConfig.SwapAPI = swapapi
 	}

-	if currentConfig.SwapEnabled && currentConfig.SwapApi == "" {
+	if currentConfig.SwapEnabled && currentConfig.SwapAPI == "" {
 		utils.Fatalf(SWARM_ERR_SWAP_SET_NO_API)
 	}

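One thing worth flagging in the hunk above: the env-var branches assign only when strconv.ParseBool returns a non-nil error, which reads inverted — the conventional guard assigns on success. A hedged sketch of the usual pattern (the helper name is illustrative, not from the source):

```go
package main

import (
	"fmt"
	"os"
	"strconv"
)

// boolFromEnv is a hypothetical helper: it returns def unless the named
// variable is set and parses cleanly as a bool.
func boolFromEnv(name string, def bool) bool {
	v := os.Getenv(name)
	if v == "" {
		return def
	}
	b, err := strconv.ParseBool(v)
	if err != nil { // parse failure: keep the default
		return def
	}
	return b
}

func main() {
	os.Setenv("SWARM_DELIVERY_SKIP_CHECK", "true")
	fmt.Println(boolFromEnv("SWARM_DELIVERY_SKIP_CHECK", false)) // true
}
```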
@@ -312,18 +347,6 @@ func dumpConfig(ctx *cli.Context) error {
 	return nil
 }

-//deprecated flags checked here
-func checkDeprecated(ctx *cli.Context) {
-	// exit if the deprecated --ethapi flag is set
-	if ctx.GlobalString(DeprecatedEthAPIFlag.Name) != "" {
-		utils.Fatalf("--ethapi is no longer a valid command line flag, please use --ens-api and/or --swap-api.")
-	}
-	// warn if the deprecated --ens-addr flag is set
-	if ctx.GlobalString(DeprecatedEnsAddrFlag.Name) != "" {
-		log.Warn("--ens-addr is no longer a valid command line flag, please use --ens-api to specify contract address.")
-	}
-}
-
 //validate configuration parameters
 func validateConfig(cfg *bzzapi.Config) (err error) {
 	for _, ensAPI := range cfg.EnsAPIs {
@@ -34,7 +34,7 @@ import (

 func TestDumpConfig(t *testing.T) {
 	swarm := runSwarm(t, "dumpconfig")
-	defaultConf := api.NewDefaultConfig()
+	defaultConf := api.NewConfig()
 	out, err := tomlSettings.Marshal(&defaultConf)
 	if err != nil {
 		t.Fatal(err)
@@ -43,7 +43,7 @@ func TestDumpConfig(t *testing.T) {
 	swarm.ExpectExit()
 }

-func TestFailsSwapEnabledNoSwapApi(t *testing.T) {
+func TestConfigFailsSwapEnabledNoSwapApi(t *testing.T) {
 	flags := []string{
 		fmt.Sprintf("--%s", SwarmNetworkIdFlag.Name), "42",
 		fmt.Sprintf("--%s", SwarmPortFlag.Name), "54545",
@@ -55,7 +55,7 @@ func TestFailsSwapEnabledNoSwapApi(t *testing.T) {
 	swarm.ExpectExit()
 }

-func TestFailsNoBzzAccount(t *testing.T) {
+func TestConfigFailsNoBzzAccount(t *testing.T) {
 	flags := []string{
 		fmt.Sprintf("--%s", SwarmNetworkIdFlag.Name), "42",
 		fmt.Sprintf("--%s", SwarmPortFlag.Name), "54545",
@@ -66,7 +66,7 @@ func TestFailsNoBzzAccount(t *testing.T) {
 	swarm.ExpectExit()
 }

-func TestCmdLineOverrides(t *testing.T) {
+func TestConfigCmdLineOverrides(t *testing.T) {
 	dir, err := ioutil.TempDir("", "bzztest")
 	if err != nil {
 		t.Fatal(err)
@@ -85,9 +85,10 @@ func TestCmdLineOverrides(t *testing.T) {
 	flags := []string{
 		fmt.Sprintf("--%s", SwarmNetworkIdFlag.Name), "42",
 		fmt.Sprintf("--%s", SwarmPortFlag.Name), httpPort,
-		fmt.Sprintf("--%s", SwarmSyncEnabledFlag.Name),
+		fmt.Sprintf("--%s", SwarmSyncDisabledFlag.Name),
 		fmt.Sprintf("--%s", CorsStringFlag.Name), "*",
 		fmt.Sprintf("--%s", SwarmAccountFlag.Name), account.Address.String(),
+		fmt.Sprintf("--%s", SwarmDeliverySkipCheckFlag.Name),
 		fmt.Sprintf("--%s", EnsAPIFlag.Name), "",
 		"--datadir", dir,
 		"--ipcpath", conf.IPCPath,
@@ -120,12 +121,16 @@ func TestCmdLineOverrides(t *testing.T) {
 		t.Fatalf("Expected port to be %s, got %s", httpPort, info.Port)
 	}

-	if info.NetworkId != 42 {
-		t.Fatalf("Expected network ID to be %d, got %d", 42, info.NetworkId)
+	if info.NetworkID != 42 {
+		t.Fatalf("Expected network ID to be %d, got %d", 42, info.NetworkID)
 	}

-	if !info.SyncEnabled {
-		t.Fatal("Expected Sync to be enabled, but is false")
+	if info.SyncEnabled {
+		t.Fatal("Expected Sync to be disabled, but is true")
 	}

+	if !info.DeliverySkipCheck {
+		t.Fatal("Expected DeliverySkipCheck to be enabled, but it is not")
+	}
+
 	if info.Cors != "*" {
@@ -135,7 +140,7 @@ func TestCmdLineOverrides(t *testing.T) {
 	node.Shutdown()
 }

-func TestFileOverrides(t *testing.T) {
+func TestConfigFileOverrides(t *testing.T) {

 	// assign ports
 	httpPort, err := assignTCPPort()
@@ -145,16 +150,16 @@ func TestFileOverrides(t *testing.T) {

 	//create a config file
 	//first, create a default conf
-	defaultConf := api.NewDefaultConfig()
+	defaultConf := api.NewConfig()
 	//change some values in order to test if they have been loaded
-	defaultConf.SyncEnabled = true
-	defaultConf.NetworkId = 54
+	defaultConf.SyncEnabled = false
+	defaultConf.DeliverySkipCheck = true
+	defaultConf.NetworkID = 54
 	defaultConf.Port = httpPort
-	defaultConf.StoreParams.DbCapacity = 9000000
-	defaultConf.ChunkerParams.Branches = 64
-	defaultConf.HiveParams.CallInterval = 6000000000
+	defaultConf.DbCapacity = 9000000
+	defaultConf.HiveParams.KeepAliveInterval = 6000000000
 	defaultConf.Swap.Params.Strategy.AutoCashInterval = 600 * time.Second
-	defaultConf.SyncParams.KeyBufferSize = 512
+	//defaultConf.SyncParams.KeyBufferSize = 512
 	//create a TOML string
 	out, err := tomlSettings.Marshal(&defaultConf)
 	if err != nil {
@@ -215,38 +220,38 @@ func TestFileOverrides(t *testing.T) {
 		t.Fatalf("Expected port to be %s, got %s", httpPort, info.Port)
 	}

-	if info.NetworkId != 54 {
-		t.Fatalf("Expected network ID to be %d, got %d", 54, info.NetworkId)
+	if info.NetworkID != 54 {
+		t.Fatalf("Expected network ID to be %d, got %d", 54, info.NetworkID)
 	}

-	if !info.SyncEnabled {
-		t.Fatal("Expected Sync to be enabled, but is false")
+	if info.SyncEnabled {
+		t.Fatal("Expected Sync to be disabled, but is true")
 	}

-	if info.StoreParams.DbCapacity != 9000000 {
-		t.Fatalf("Expected network ID to be %d, got %d", 54, info.NetworkId)
+	if !info.DeliverySkipCheck {
+		t.Fatal("Expected DeliverySkipCheck to be enabled, but it is not")
 	}

-	if info.ChunkerParams.Branches != 64 {
-		t.Fatalf("Expected chunker params branches to be %d, got %d", 64, info.ChunkerParams.Branches)
+	if info.DbCapacity != 9000000 {
+		t.Fatalf("Expected network ID to be %d, got %d", 54, info.NetworkID)
 	}

-	if info.HiveParams.CallInterval != 6000000000 {
-		t.Fatalf("Expected HiveParams CallInterval to be %d, got %d", uint64(6000000000), uint64(info.HiveParams.CallInterval))
+	if info.HiveParams.KeepAliveInterval != 6000000000 {
+		t.Fatalf("Expected HiveParams KeepAliveInterval to be %d, got %d", uint64(6000000000), uint64(info.HiveParams.KeepAliveInterval))
 	}

 	if info.Swap.Params.Strategy.AutoCashInterval != 600*time.Second {
 		t.Fatalf("Expected SwapParams AutoCashInterval to be %ds, got %d", 600, info.Swap.Params.Strategy.AutoCashInterval)
 	}

-	if info.SyncParams.KeyBufferSize != 512 {
-		t.Fatalf("Expected info.SyncParams.KeyBufferSize to be %d, got %d", 512, info.SyncParams.KeyBufferSize)
-	}
+	// if info.SyncParams.KeyBufferSize != 512 {
+	// 	t.Fatalf("Expected info.SyncParams.KeyBufferSize to be %d, got %d", 512, info.SyncParams.KeyBufferSize)
+	// }

 	node.Shutdown()
 }

-func TestEnvVars(t *testing.T) {
+func TestConfigEnvVars(t *testing.T) {
 	// assign ports
 	httpPort, err := assignTCPPort()
 	if err != nil {
@@ -257,7 +262,8 @@ func TestEnvVars(t *testing.T) {
 	envVars = append(envVars, fmt.Sprintf("%s=%s", SwarmPortFlag.EnvVar, httpPort))
 	envVars = append(envVars, fmt.Sprintf("%s=%s", SwarmNetworkIdFlag.EnvVar, "999"))
 	envVars = append(envVars, fmt.Sprintf("%s=%s", CorsStringFlag.EnvVar, "*"))
-	envVars = append(envVars, fmt.Sprintf("%s=%s", SwarmSyncEnabledFlag.EnvVar, "true"))
+	envVars = append(envVars, fmt.Sprintf("%s=%s", SwarmSyncDisabledFlag.EnvVar, "true"))
+	envVars = append(envVars, fmt.Sprintf("%s=%s", SwarmDeliverySkipCheckFlag.EnvVar, "true"))

 	dir, err := ioutil.TempDir("", "bzztest")
 	if err != nil {
@@ -326,23 +332,27 @@ func TestEnvVars(t *testing.T) {
 		t.Fatalf("Expected port to be %s, got %s", httpPort, info.Port)
 	}

-	if info.NetworkId != 999 {
-		t.Fatalf("Expected network ID to be %d, got %d", 999, info.NetworkId)
+	if info.NetworkID != 999 {
+		t.Fatalf("Expected network ID to be %d, got %d", 999, info.NetworkID)
 	}

 	if info.Cors != "*" {
 		t.Fatalf("Expected Cors flag to be set to %s, got %s", "*", info.Cors)
 	}

-	if !info.SyncEnabled {
-		t.Fatal("Expected Sync to be enabled, but is false")
+	if info.SyncEnabled {
+		t.Fatal("Expected Sync to be disabled, but is true")
 	}

+	if !info.DeliverySkipCheck {
+		t.Fatal("Expected DeliverySkipCheck to be enabled, but it is not")
+	}
+
 	node.Shutdown()
 	cmd.Process.Kill()
 }

-func TestCmdLineOverridesFile(t *testing.T) {
+func TestConfigCmdLineOverridesFile(t *testing.T) {

 	// assign ports
 	httpPort, err := assignTCPPort()
@@ -352,26 +362,27 @@ func TestCmdLineOverridesFile(t *testing.T) {

 	//create a config file
 	//first, create a default conf
-	defaultConf := api.NewDefaultConfig()
+	defaultConf := api.NewConfig()
 	//change some values in order to test if they have been loaded
-	defaultConf.SyncEnabled = false
-	defaultConf.NetworkId = 54
+	defaultConf.SyncEnabled = true
+	defaultConf.NetworkID = 54
 	defaultConf.Port = "8588"
-	defaultConf.StoreParams.DbCapacity = 9000000
-	defaultConf.ChunkerParams.Branches = 64
-	defaultConf.HiveParams.CallInterval = 6000000000
+	defaultConf.DbCapacity = 9000000
+	defaultConf.HiveParams.KeepAliveInterval = 6000000000
 	defaultConf.Swap.Params.Strategy.AutoCashInterval = 600 * time.Second
-	defaultConf.SyncParams.KeyBufferSize = 512
+	//defaultConf.SyncParams.KeyBufferSize = 512
 	//create a TOML file
 	out, err := tomlSettings.Marshal(&defaultConf)
 	if err != nil {
 		t.Fatalf("Error creating TOML file in TestFileOverride: %v", err)
 	}
-	//write file
-	f, err := ioutil.TempFile("", "testconfig.toml")
+	fname := "testconfig.toml"
+	f, err := ioutil.TempFile("", fname)
 	if err != nil {
 		t.Fatalf("Error writing TOML file in TestFileOverride: %v", err)
 	}
+	defer os.Remove(fname)
+	//write file
 	_, err = f.WriteString(string(out))
 	if err != nil {
@@ -392,7 +403,7 @@ func TestCmdLineOverridesFile(t *testing.T) {
 	flags := []string{
 		fmt.Sprintf("--%s", SwarmNetworkIdFlag.Name), "77",
 		fmt.Sprintf("--%s", SwarmPortFlag.Name), httpPort,
-		fmt.Sprintf("--%s", SwarmSyncEnabledFlag.Name),
+		fmt.Sprintf("--%s", SwarmSyncDisabledFlag.Name),
 		fmt.Sprintf("--%s", SwarmTomlConfigPathFlag.Name), f.Name(),
 		fmt.Sprintf("--%s", SwarmAccountFlag.Name), account.Address.String(),
 		"--ens-api", "",
@@ -427,33 +438,29 @@ func TestCmdLineOverridesFile(t *testing.T) {
 		t.Fatalf("Expected port to be %s, got %s", httpPort, info.Port)
 	}

-	if info.NetworkId != expectNetworkId {
-		t.Fatalf("Expected network ID to be %d, got %d", expectNetworkId, info.NetworkId)
+	if info.NetworkID != expectNetworkId {
+		t.Fatalf("Expected network ID to be %d, got %d", expectNetworkId, info.NetworkID)
 	}

-	if !info.SyncEnabled {
-		t.Fatal("Expected Sync to be enabled, but is false")
+	if info.SyncEnabled {
+		t.Fatal("Expected Sync to be disabled, but is true")
 	}

-	if info.StoreParams.DbCapacity != 9000000 {
-		t.Fatalf("Expected network ID to be %d, got %d", 54, info.NetworkId)
+	if info.LocalStoreParams.DbCapacity != 9000000 {
+		t.Fatalf("Expected Capacity to be %d, got %d", 9000000, info.LocalStoreParams.DbCapacity)
 	}

-	if info.ChunkerParams.Branches != 64 {
-		t.Fatalf("Expected chunker params branches to be %d, got %d", 64, info.ChunkerParams.Branches)
-	}
-
-	if info.HiveParams.CallInterval != 6000000000 {
-		t.Fatalf("Expected HiveParams CallInterval to be %d, got %d", uint64(6000000000), uint64(info.HiveParams.CallInterval))
+	if info.HiveParams.KeepAliveInterval != 6000000000 {
+		t.Fatalf("Expected HiveParams KeepAliveInterval to be %d, got %d", uint64(6000000000), uint64(info.HiveParams.KeepAliveInterval))
 	}

 	if info.Swap.Params.Strategy.AutoCashInterval != 600*time.Second {
 		t.Fatalf("Expected SwapParams AutoCashInterval to be %ds, got %d", 600, info.Swap.Params.Strategy.AutoCashInterval)
 	}

-	if info.SyncParams.KeyBufferSize != 512 {
-		t.Fatalf("Expected info.SyncParams.KeyBufferSize to be %d, got %d", 512, info.SyncParams.KeyBufferSize)
-	}
+	// if info.SyncParams.KeyBufferSize != 512 {
+	// 	t.Fatalf("Expected info.SyncParams.KeyBufferSize to be %d, got %d", 512, info.SyncParams.KeyBufferSize)
+	// }

 	node.Shutdown()
 }
@@ -23,6 +23,7 @@ import (
 	"path/filepath"

 	"github.com/ethereum/go-ethereum/cmd/utils"
+	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/swarm/storage"
 	"gopkg.in/urfave/cli.v1"
@@ -30,11 +31,11 @@ import (

 func dbExport(ctx *cli.Context) {
 	args := ctx.Args()
-	if len(args) != 2 {
-		utils.Fatalf("invalid arguments, please specify both <chunkdb> (path to a local chunk database) and <file> (path to write the tar archive to, - for stdout)")
+	if len(args) != 3 {
+		utils.Fatalf("invalid arguments, please specify both <chunkdb> (path to a local chunk database), <file> (path to write the tar archive to, - for stdout) and the base key")
 	}

-	store, err := openDbStore(args[0])
+	store, err := openLDBStore(args[0], common.Hex2Bytes(args[2]))
 	if err != nil {
 		utils.Fatalf("error opening local chunk database: %s", err)
 	}
@@ -62,11 +63,11 @@ func dbExport(ctx *cli.Context) {

 func dbImport(ctx *cli.Context) {
 	args := ctx.Args()
-	if len(args) != 2 {
-		utils.Fatalf("invalid arguments, please specify both <chunkdb> (path to a local chunk database) and <file> (path to read the tar archive from, - for stdin)")
+	if len(args) != 3 {
+		utils.Fatalf("invalid arguments, please specify both <chunkdb> (path to a local chunk database), <file> (path to read the tar archive from, - for stdin) and the base key")
 	}

-	store, err := openDbStore(args[0])
+	store, err := openLDBStore(args[0], common.Hex2Bytes(args[2]))
 	if err != nil {
 		utils.Fatalf("error opening local chunk database: %s", err)
 	}
@@ -94,11 +95,11 @@ func dbImport(ctx *cli.Context) {

 func dbClean(ctx *cli.Context) {
 	args := ctx.Args()
-	if len(args) != 1 {
-		utils.Fatalf("invalid arguments, please specify <chunkdb> (path to a local chunk database)")
+	if len(args) != 2 {
+		utils.Fatalf("invalid arguments, please specify <chunkdb> (path to a local chunk database) and the base key")
 	}

-	store, err := openDbStore(args[0])
+	store, err := openLDBStore(args[0], common.Hex2Bytes(args[1]))
 	if err != nil {
 		utils.Fatalf("error opening local chunk database: %s", err)
 	}
@@ -107,10 +108,13 @@ func dbClean(ctx *cli.Context) {
 	store.Cleanup()
 }

-func openDbStore(path string) (*storage.DbStore, error) {
+func openLDBStore(path string, basekey []byte) (*storage.LDBStore, error) {
 	if _, err := os.Stat(filepath.Join(path, "CURRENT")); err != nil {
 		return nil, fmt.Errorf("invalid chunkdb path: %s", err)
 	}
-	hash := storage.MakeHashFunc("SHA3")
-	return storage.NewDbStore(path, hash, 10000000, 0)
+
+	storeparams := storage.NewDefaultStoreParams()
+	ldbparams := storage.NewLDBStoreParams(storeparams, path)
+	ldbparams.BaseKey = basekey
+	return storage.NewLDBStore(ldbparams)
 }
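The db subcommands now take the node's base key (its overlay address) as a trailing argument, because LDBStore orders chunks by proximity to that key; openLDBStore just threads it into the store parameters. A sketch of the same construction with made-up path and key values:

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/swarm/storage"
)

func main() {
	// Illustrative values: in the tests the path and base key come from
	// the node's bzz_info (Path and BzzKey with the 0x prefix stripped).
	path := "/data/swarm/bzz-deadbeef/chunks"
	basekey := common.Hex2Bytes("deadbeef")

	// Mirrors openLDBStore above: default store params plus an explicit
	// base key for proximity ordering.
	storeparams := storage.NewDefaultStoreParams()
	ldbparams := storage.NewLDBStoreParams(storeparams, path)
	ldbparams.BaseKey = basekey

	store, err := storage.NewLDBStore(ldbparams)
	if err != nil {
		fmt.Println("open failed:", err)
		return
	}
	_ = store
}
```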
cmd/swarm/download.go (new file, 85 lines)
@@ -0,0 +1,85 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"

	"github.com/ethereum/go-ethereum/cmd/utils"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/swarm/api"
	swarm "github.com/ethereum/go-ethereum/swarm/api/client"
	"gopkg.in/urfave/cli.v1"
)

func download(ctx *cli.Context) {
	log.Debug("downloading content using swarm down")
	args := ctx.Args()
	dest := "."

	switch len(args) {
	case 0:
		utils.Fatalf("Usage: swarm down [options] <bzz locator> [<destination path>]")
	case 1:
		log.Trace(fmt.Sprintf("swarm down: no destination path - assuming working dir"))
	default:
		log.Trace(fmt.Sprintf("destination path arg: %s", args[1]))
		if absDest, err := filepath.Abs(args[1]); err == nil {
			dest = absDest
		} else {
			utils.Fatalf("could not get download path: %v", err)
		}
	}

	var (
		bzzapi      = strings.TrimRight(ctx.GlobalString(SwarmApiFlag.Name), "/")
		isRecursive = ctx.Bool(SwarmRecursiveFlag.Name)
		client      = swarm.NewClient(bzzapi)
	)

	if fi, err := os.Stat(dest); err == nil {
		if isRecursive && !fi.Mode().IsDir() {
			utils.Fatalf("destination path is not a directory!")
		}
	} else {
		if !os.IsNotExist(err) {
			utils.Fatalf("could not stat path: %v", err)
		}
	}

	uri, err := api.Parse(args[0])
	if err != nil {
		utils.Fatalf("could not parse uri argument: %v", err)
	}

	// assume behaviour according to --recursive switch
	if isRecursive {
		if err := client.DownloadDirectory(uri.Addr, uri.Path, dest); err != nil {
			utils.Fatalf("encountered an error while downloading directory: %v", err)
		}
	} else {
		// we are downloading a file
		log.Debug(fmt.Sprintf("downloading file/path from a manifest. hash: %s, path:%s", uri.Addr, uri.Path))

		err := client.DownloadFile(uri.Addr, uri.Path, dest)
		if err != nil {
			utils.Fatalf("could not download %s from given address: %s. error: %v", uri.Path, uri.Addr, err)
		}
	}
}
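The new `swarm down` command is a thin wrapper over the HTTP client API used above, so the same calls work directly from Go. A sketch assuming a local gateway on the default port; the hash is a placeholder:

```go
package main

import (
	"fmt"

	swarm "github.com/ethereum/go-ethereum/swarm/api/client"
)

func main() {
	client := swarm.NewClient("http://127.0.0.1:8500")

	// Placeholder manifest hash; a real one comes from `swarm up`.
	hash := "aabbcc..."

	// Single file, mirroring the non-recursive branch of download().
	if err := client.DownloadFile(hash, "", "./out"); err != nil {
		fmt.Println("download failed:", err)
	}

	// Whole manifest, mirroring the --recursive branch.
	if err := client.DownloadDirectory(hash, "", "./outdir"); err != nil {
		fmt.Println("recursive download failed:", err)
	}
}
```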
cmd/swarm/export_test.go (new file, 139 lines)
@@ -0,0 +1,139 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.

package main

import (
	"bytes"
	"crypto/md5"
	"crypto/rand"
	"io"
	"io/ioutil"
	"net/http"
	"os"
	"strings"
	"testing"

	"github.com/ethereum/go-ethereum/swarm"
)

// TestCLISwarmExportImport performs the following test:
// 1. runs swarm node
// 2. uploads a random file
// 3. runs an export of the local datastore
// 4. runs a second swarm node
// 5. imports the exported datastore
// 6. fetches the uploaded random file from the second node
func TestCLISwarmExportImport(t *testing.T) {
	cluster := newTestCluster(t, 1)

	// generate random 10mb file
	f, cleanup := generateRandomFile(t, 10000000)
	defer cleanup()

	// upload the file with 'swarm up' and expect a hash
	up := runSwarm(t, "--bzzapi", cluster.Nodes[0].URL, "up", f.Name())
	_, matches := up.ExpectRegexp(`[a-f\d]{64}`)
	up.ExpectExit()
	hash := matches[0]

	var info swarm.Info
	if err := cluster.Nodes[0].Client.Call(&info, "bzz_info"); err != nil {
		t.Fatal(err)
	}

	cluster.Stop()
	defer cluster.Cleanup()

	// generate an export.tar
	exportCmd := runSwarm(t, "db", "export", info.Path+"/chunks", info.Path+"/export.tar", strings.TrimPrefix(info.BzzKey, "0x"))
	exportCmd.ExpectExit()

	// start second cluster
	cluster2 := newTestCluster(t, 1)

	var info2 swarm.Info
	if err := cluster2.Nodes[0].Client.Call(&info2, "bzz_info"); err != nil {
		t.Fatal(err)
	}

	// stop second cluster, so that we close LevelDB
	cluster2.Stop()
	defer cluster2.Cleanup()

	// import the export.tar
	importCmd := runSwarm(t, "db", "import", info2.Path+"/chunks", info.Path+"/export.tar", strings.TrimPrefix(info2.BzzKey, "0x"))
	importCmd.ExpectExit()

	// spin second cluster back up
	cluster2.StartExistingNodes(t, 1, strings.TrimPrefix(info2.BzzAccount, "0x"))

	// try to fetch imported file
	res, err := http.Get(cluster2.Nodes[0].URL + "/bzz:/" + hash)
	if err != nil {
		t.Fatal(err)
	}

	if res.StatusCode != 200 {
		t.Fatalf("expected HTTP status %d, got %s", 200, res.Status)
	}

	// compare downloaded file with the generated random file
	mustEqualFiles(t, f, res.Body)
}

func mustEqualFiles(t *testing.T, up io.Reader, down io.Reader) {
	h := md5.New()
	upLen, err := io.Copy(h, up)
	if err != nil {
		t.Fatal(err)
	}
	upHash := h.Sum(nil)
	h.Reset()
	downLen, err := io.Copy(h, down)
	if err != nil {
		t.Fatal(err)
	}
	downHash := h.Sum(nil)

	if !bytes.Equal(upHash, downHash) || upLen != downLen {
		t.Fatalf("downloaded imported file md5=%x (length %v) is not the same as the generated one md5=%x (length %v)", downHash, downLen, upHash, upLen)
	}
}

func generateRandomFile(t *testing.T, size int) (f *os.File, teardown func()) {
	// create a tmp file
	tmp, err := ioutil.TempFile("", "swarm-test")
	if err != nil {
		t.Fatal(err)
	}

	// callback for tmp file cleanup
	teardown = func() {
		tmp.Close()
		os.Remove(tmp.Name())
	}

	// write 10mb random data to file
	buf := make([]byte, 10000000)
	_, err = rand.Read(buf)
	if err != nil {
		t.Fatal(err)
	}
	ioutil.WriteFile(tmp.Name(), buf, 0755)

	return tmp, teardown
}
cmd/swarm/fs.go (new file, 127 lines)
@@ -0,0 +1,127 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.

package main

import (
	"context"
	"fmt"
	"path/filepath"
	"strings"
	"time"

	"github.com/ethereum/go-ethereum/cmd/utils"
	"github.com/ethereum/go-ethereum/node"
	"github.com/ethereum/go-ethereum/rpc"
	"github.com/ethereum/go-ethereum/swarm/fuse"
	"gopkg.in/urfave/cli.v1"
)

func mount(cliContext *cli.Context) {
	args := cliContext.Args()
	if len(args) < 2 {
		utils.Fatalf("Usage: swarm fs mount --ipcpath <path to bzzd.ipc> <manifestHash> <file name>")
	}

	client, err := dialRPC(cliContext)
	if err != nil {
		utils.Fatalf("had an error dialing to RPC endpoint: %v", err)
	}
	defer client.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	mf := &fuse.MountInfo{}
	mountPoint, err := filepath.Abs(filepath.Clean(args[1]))
	if err != nil {
		utils.Fatalf("error expanding path for mount point: %v", err)
	}
	err = client.CallContext(ctx, mf, "swarmfs_mount", args[0], mountPoint)
	if err != nil {
		utils.Fatalf("had an error calling the RPC endpoint while mounting: %v", err)
	}
}

func unmount(cliContext *cli.Context) {
	args := cliContext.Args()

	if len(args) < 1 {
		utils.Fatalf("Usage: swarm fs unmount --ipcpath <path to bzzd.ipc> <mount path>")
	}
	client, err := dialRPC(cliContext)
	if err != nil {
		utils.Fatalf("had an error dialing to RPC endpoint: %v", err)
	}
	defer client.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	mf := fuse.MountInfo{}
	err = client.CallContext(ctx, &mf, "swarmfs_unmount", args[0])
	if err != nil {
		utils.Fatalf("encountered an error calling the RPC endpoint while unmounting: %v", err)
	}
	fmt.Printf("%s\n", mf.LatestManifest) //print the latest manifest hash for user reference
}

func listMounts(cliContext *cli.Context) {
	client, err := dialRPC(cliContext)
	if err != nil {
		utils.Fatalf("had an error dialing to RPC endpoint: %v", err)
	}
	defer client.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	mf := []fuse.MountInfo{}
	err = client.CallContext(ctx, &mf, "swarmfs_listmounts")
	if err != nil {
		utils.Fatalf("encountered an error calling the RPC endpoint while listing mounts: %v", err)
	}
	if len(mf) == 0 {
		fmt.Print("Could not find any swarmfs mounts. Please make sure you've specified the correct RPC endpoint\n")
	} else {
		fmt.Printf("Found %d swarmfs mount(s):\n", len(mf))
		for i, mountInfo := range mf {
			fmt.Printf("%d:\n", i)
			fmt.Printf("\tMount point: %s\n", mountInfo.MountPoint)
			fmt.Printf("\tLatest Manifest: %s\n", mountInfo.LatestManifest)
			fmt.Printf("\tStart Manifest: %s\n", mountInfo.StartManifest)
		}
	}
}

func dialRPC(ctx *cli.Context) (*rpc.Client, error) {
	var endpoint string

	if ctx.IsSet(utils.IPCPathFlag.Name) {
		endpoint = ctx.String(utils.IPCPathFlag.Name)
	} else {
		utils.Fatalf("swarm ipc endpoint not specified")
	}

	if endpoint == "" {
		endpoint = node.DefaultIPCEndpoint(clientIdentifier)
	} else if strings.HasPrefix(endpoint, "rpc:") || strings.HasPrefix(endpoint, "ipc:") {
		// Backwards compatibility with geth < 1.5 which required
		// these prefixes.
		endpoint = endpoint[4:]
	}
	return rpc.Dial(endpoint)
}
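All three fs subcommands reduce to JSON-RPC calls (swarmfs_mount, swarmfs_unmount, swarmfs_listmounts) over the bzzd.ipc endpoint resolved by dialRPC. A sketch of the list call made directly, with an illustrative IPC path:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/ethereum/go-ethereum/rpc"
	"github.com/ethereum/go-ethereum/swarm/fuse"
)

func main() {
	// Path is illustrative; `swarm fs` receives it via --ipcpath.
	client, err := rpc.Dial("/home/user/.ethereum/bzzd.ipc")
	if err != nil {
		fmt.Println("dial failed:", err)
		return
	}
	defer client.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	// Same call the `swarm fs list` subcommand makes.
	mounts := []fuse.MountInfo{}
	if err := client.CallContext(ctx, &mounts, "swarmfs_listmounts"); err != nil {
		fmt.Println("listmounts failed:", err)
		return
	}
	for _, m := range mounts {
		fmt.Println(m.MountPoint, m.LatestManifest)
	}
}
```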
cmd/swarm/fs_test.go (new file, 234 lines)
@@ -0,0 +1,234 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.

package main

import (
	"bytes"
	"io"
	"io/ioutil"
	"os"
	"path/filepath"
	"strings"
	"testing"
	"time"

	"github.com/ethereum/go-ethereum/log"
	colorable "github.com/mattn/go-colorable"
)

func init() {
	log.PrintOrigins(true)
	log.Root().SetHandler(log.LvlFilterHandler(log.Lvl(*loglevel), log.StreamHandler(colorable.NewColorableStderr(), log.TerminalFormat(true))))
}

type testFile struct {
	filePath string
	content  string
}

// TestCLISwarmFs is a high-level test of swarmfs
func TestCLISwarmFs(t *testing.T) {
	cluster := newTestCluster(t, 3)
	defer cluster.Shutdown()

	// create a tmp dir
	mountPoint, err := ioutil.TempDir("", "swarm-test")
	log.Debug("swarmfs cli test", "1st mount", mountPoint)
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(mountPoint)

	handlingNode := cluster.Nodes[0]
	mhash := doUploadEmptyDir(t, handlingNode)
	log.Debug("swarmfs cli test: mounting first run", "ipc path", filepath.Join(handlingNode.Dir, handlingNode.IpcPath))

	mount := runSwarm(t, []string{
		"fs",
		"mount",
		"--ipcpath", filepath.Join(handlingNode.Dir, handlingNode.IpcPath),
		mhash,
		mountPoint,
	}...)
	mount.ExpectExit()

	filesToAssert := []*testFile{}

	dirPath, err := createDirInDir(mountPoint, "testSubDir")
	if err != nil {
		t.Fatal(err)
	}
	dirPath2, err := createDirInDir(dirPath, "AnotherTestSubDir")
	if err != nil {
		t.Fatal(err)
	}

	dummyContent := "somerandomtestcontentthatshouldbeasserted"
	dirs := []string{
		mountPoint,
		dirPath,
		dirPath2,
	}
	files := []string{"f1.tmp", "f2.tmp"}
	for _, d := range dirs {
		for _, entry := range files {
			tFile, err := createTestFileInPath(d, entry, dummyContent)
			if err != nil {
				t.Fatal(err)
			}
			filesToAssert = append(filesToAssert, tFile)
		}
	}
	if len(filesToAssert) != len(dirs)*len(files) {
		t.Fatalf("should have %d files to assert now, got %d", len(dirs)*len(files), len(filesToAssert))
	}
	hashRegexp := `[a-f\d]{64}`
	log.Debug("swarmfs cli test: unmounting first run...", "ipc path", filepath.Join(handlingNode.Dir, handlingNode.IpcPath))

	unmount := runSwarm(t, []string{
		"fs",
		"unmount",
		"--ipcpath", filepath.Join(handlingNode.Dir, handlingNode.IpcPath),
		mountPoint,
	}...)
	_, matches := unmount.ExpectRegexp(hashRegexp)
	unmount.ExpectExit()

	hash := matches[0]
	if hash == mhash {
		t.Fatal("this should not be equal")
	}
	log.Debug("swarmfs cli test: asserting no files in mount point")

	//check that there's nothing in the mount folder
	filesInDir, err := ioutil.ReadDir(mountPoint)
	if err != nil {
		t.Fatalf("had an error reading the directory: %v", err)
	}

	if len(filesInDir) != 0 {
		t.Fatal("there shouldn't be anything here")
	}

	secondMountPoint, err := ioutil.TempDir("", "swarm-test")
	log.Debug("swarmfs cli test", "2nd mount point at", secondMountPoint)
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(secondMountPoint)

	log.Debug("swarmfs cli test: remounting at second mount point", "ipc path", filepath.Join(handlingNode.Dir, handlingNode.IpcPath))

	//remount, check files
	newMount := runSwarm(t, []string{
		"fs",
		"mount",
		"--ipcpath", filepath.Join(handlingNode.Dir, handlingNode.IpcPath),
		hash, // the latest hash
		secondMountPoint,
	}...)

	newMount.ExpectExit()
	time.Sleep(1 * time.Second)

	filesInDir, err = ioutil.ReadDir(secondMountPoint)
	if err != nil {
		t.Fatal(err)
	}

	if len(filesInDir) == 0 {
		t.Fatal("there should be something here")
	}

	log.Debug("swarmfs cli test: traversing file tree to see it matches previous mount")

	for _, file := range filesToAssert {
		file.filePath = strings.Replace(file.filePath, mountPoint, secondMountPoint, -1)
		fileBytes, err := ioutil.ReadFile(file.filePath)

		if err != nil {
			t.Fatal(err)
		}
		if !bytes.Equal(fileBytes, bytes.NewBufferString(file.content).Bytes()) {
			t.Fatal("this should be equal")
		}
	}

	log.Debug("swarmfs cli test: unmounting second run", "ipc path", filepath.Join(handlingNode.Dir, handlingNode.IpcPath))

	unmountSec := runSwarm(t, []string{
		"fs",
		"unmount",
		"--ipcpath", filepath.Join(handlingNode.Dir, handlingNode.IpcPath),
		secondMountPoint,
	}...)

	_, matches = unmountSec.ExpectRegexp(hashRegexp)
	unmountSec.ExpectExit()

	if matches[0] != hash {
		t.Fatal("these should be equal - no changes made")
	}
}

func doUploadEmptyDir(t *testing.T, node *testNode) string {
	// create a tmp dir
	tmpDir, err := ioutil.TempDir("", "swarm-test")
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(tmpDir)

	hashRegexp := `[a-f\d]{64}`

	flags := []string{
		"--bzzapi", node.URL,
		"--recursive",
		"up",
		tmpDir}

	log.Info("swarmfs cli test: uploading dir with 'swarm up'")
	up := runSwarm(t, flags...)
	_, matches := up.ExpectRegexp(hashRegexp)
	up.ExpectExit()
	hash := matches[0]
	log.Info("swarmfs cli test: dir uploaded", "hash", hash)
	return hash
}

func createDirInDir(createInDir string, dirToCreate string) (string, error) {
	fullpath := filepath.Join(createInDir, dirToCreate)
	err := os.MkdirAll(fullpath, 0777)
	if err != nil {
		return "", err
	}
	return fullpath, nil
}

func createTestFileInPath(dir, filename, content string) (*testFile, error) {
	tFile := &testFile{}
	filePath := filepath.Join(dir, filename)
	if file, err := os.Create(filePath); err == nil {
		tFile.content = content
		tFile.filePath = filePath

		_, err = io.WriteString(file, content)
		if err != nil {
			return nil, err
		}
		file.Close()
	}

	return tFile, nil
}
@@ -38,11 +38,11 @@ func hash(ctx *cli.Context) {
 	defer f.Close()

 	stat, _ := f.Stat()
-	chunker := storage.NewTreeChunker(storage.NewChunkerParams())
-	key, err := chunker.Split(f, stat.Size(), nil, nil, nil)
+	fileStore := storage.NewFileStore(storage.NewMapChunkStore(), storage.NewFileStoreParams())
+	addr, _, err := fileStore.Store(f, stat.Size(), false)
 	if err != nil {
 		utils.Fatalf("%v\n", err)
 	} else {
-		fmt.Printf("%v\n", key)
+		fmt.Printf("%v\n", addr)
 	}
 }

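The hash command now routes through the FileStore abstraction: Store chunks the reader into the supplied ChunkStore and returns the root address, with the boolean selecting encrypted upload. A sketch that hashes an in-memory buffer using the same calls as the hunk above:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/ethereum/go-ethereum/swarm/storage"
)

func main() {
	data := []byte("hello swarm")

	// In-memory chunk store, as used by the rewritten hash command.
	fileStore := storage.NewFileStore(storage.NewMapChunkStore(), storage.NewFileStoreParams())

	// false = unencrypted, matching `swarm hash`.
	addr, _, err := fileStore.Store(bytes.NewReader(data), int64(len(data)), false)
	if err != nil {
		fmt.Println("store failed:", err)
		return
	}
	fmt.Printf("swarm hash: %v\n", addr)
}
```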
@@ -34,7 +34,6 @@ import (
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/console"
 	"github.com/ethereum/go-ethereum/crypto"
-	"github.com/ethereum/go-ethereum/ethclient"
 	"github.com/ethereum/go-ethereum/internal/debug"
 	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/node"
@@ -49,6 +48,22 @@ import (
 )

 const clientIdentifier = "swarm"
+const helpTemplate = `NAME:
+{{.HelpName}} - {{.Usage}}
+
+USAGE:
+{{if .UsageText}}{{.UsageText}}{{else}}{{.HelpName}}{{if .VisibleFlags}} [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}}{{end}}{{if .Category}}
+
+CATEGORY:
+{{.Category}}{{end}}{{if .Description}}
+
+DESCRIPTION:
+{{.Description}}{{end}}{{if .VisibleFlags}}
+
+OPTIONS:
+	{{range .VisibleFlags}}{{.}}
+	{{end}}{{end}}
+`

 var (
 	gitCommit string // Git SHA1 commit hash of the release (set via linker flags)
@@ -87,10 +102,6 @@ var (
 		Usage:  "Network identifier (integer, default 3=swarm testnet)",
 		EnvVar: SWARM_ENV_NETWORK_ID,
 	}
-	SwarmConfigPathFlag = cli.StringFlag{
-		Name:  "bzzconfig",
-		Usage: "DEPRECATED: please use --config path/to/TOML-file",
-	}
 	SwarmSwapEnabledFlag = cli.BoolFlag{
 		Name:  "swap",
 		Usage: "Swarm SWAP enabled (default false)",
@@ -101,10 +112,20 @@ var (
 		Usage:  "URL of the Ethereum API provider to use to settle SWAP payments",
 		EnvVar: SWARM_ENV_SWAP_API,
 	}
-	SwarmSyncEnabledFlag = cli.BoolTFlag{
-		Name:   "sync",
-		Usage:  "Swarm Syncing enabled (default true)",
-		EnvVar: SWARM_ENV_SYNC_ENABLE,
+	SwarmSyncDisabledFlag = cli.BoolTFlag{
+		Name:   "nosync",
+		Usage:  "Disable swarm syncing",
+		EnvVar: SWARM_ENV_SYNC_DISABLE,
 	}
+	SwarmSyncUpdateDelay = cli.DurationFlag{
+		Name:   "sync-update-delay",
+		Usage:  "Duration for sync subscriptions update after no new peers are added (default 15s)",
+		EnvVar: SWARM_ENV_SYNC_UPDATE_DELAY,
+	}
+	SwarmDeliverySkipCheckFlag = cli.BoolFlag{
+		Name:   "delivery-skip-check",
+		Usage:  "Skip chunk delivery check (default false)",
+		EnvVar: SWARM_ENV_DELIVERY_SKIP_CHECK,
+	}
 	EnsAPIFlag = cli.StringSliceFlag{
 		Name: "ens-api",
@@ -116,7 +137,7 @@ var (
 		Usage: "Swarm HTTP endpoint",
 		Value: "http://127.0.0.1:8500",
 	}
-	SwarmRecursiveUploadFlag = cli.BoolFlag{
+	SwarmRecursiveFlag = cli.BoolFlag{
 		Name:  "recursive",
 		Usage: "Upload directories recursively",
 	}
@@ -136,20 +157,29 @@ var (
 		Name:  "mime",
 		Usage: "force mime type",
 	}
+	SwarmEncryptedFlag = cli.BoolFlag{
+		Name:  "encrypt",
+		Usage: "use encrypted upload",
+	}
 	CorsStringFlag = cli.StringFlag{
 		Name:   "corsdomain",
 		Usage:  "Domain on which to send Access-Control-Allow-Origin header (multiple domains can be supplied separated by a ',')",
 		EnvVar: SWARM_ENV_CORS,
 	}

-	// the following flags are deprecated and should be removed in the future
-	DeprecatedEthAPIFlag = cli.StringFlag{
-		Name:  "ethapi",
-		Usage: "DEPRECATED: please use --ens-api and --swap-api",
+	SwarmStorePath = cli.StringFlag{
+		Name:   "store.path",
+		Usage:  "Path to leveldb chunk DB (default <$GETH_ENV_DIR>/swarm/bzz-<$BZZ_KEY>/chunks)",
+		EnvVar: SWARM_ENV_STORE_PATH,
 	}
-	DeprecatedEnsAddrFlag = cli.StringFlag{
-		Name:  "ens-addr",
-		Usage: "DEPRECATED: ENS contract address, please use --ens-api with contract address according to its format",
+	SwarmStoreCapacity = cli.Uint64Flag{
+		Name:   "store.size",
+		Usage:  "Number of chunks (5M is roughly 20-25GB) (default 5000000)",
+		EnvVar: SWARM_ENV_STORE_CAPACITY,
 	}
+	SwarmStoreCacheCapacity = cli.UintFlag{
+		Name:   "store.cache.size",
+		Usage:  "Number of recent chunks cached in memory (default 5000)",
+		EnvVar: SWARM_ENV_STORE_CACHE_CAPACITY,
+	}
 )

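Note how the sync switch is inverted: --sync gives way to --nosync, declared as a BoolTFlag so the underlying value defaults to true, and the override functions only react when the flag is explicitly set. A toy gopkg.in/urfave/cli.v1 program showing that pattern (the flag-handling details here are my reading of the library, not from the source):

```go
package main

import (
	"fmt"
	"os"

	"gopkg.in/urfave/cli.v1"
)

func main() {
	app := cli.NewApp()
	app.Flags = []cli.Flag{
		// BoolTFlag: true unless the user passes --nosync.
		cli.BoolTFlag{Name: "nosync", Usage: "Disable syncing"},
	}
	app.Action = func(ctx *cli.Context) error {
		syncEnabled := true
		// Mirror cmdLineOverride: only flip when the flag was given.
		if ctx.GlobalIsSet("nosync") {
			syncEnabled = false
		}
		fmt.Println("sync enabled:", syncEnabled)
		return nil
	}
	app.Run(os.Args)
}
```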
@@ -180,91 +210,130 @@ func init() {
 	app.Copyright = "Copyright 2013-2016 The go-ethereum Authors"
 	app.Commands = []cli.Command{
 		{
-			Action:    version,
-			Name:      "version",
-			Usage:     "Print version numbers",
-			ArgsUsage: " ",
-			Description: `
-The output of this command is supposed to be machine-readable.
-`,
+			Action:             version,
+			CustomHelpTemplate: helpTemplate,
+			Name:               "version",
+			Usage:              "Print version numbers",
+			Description:        "The output of this command is supposed to be machine-readable",
 		},
 		{
-			Action:    upload,
-			Name:      "up",
-			Usage:     "upload a file or directory to swarm using the HTTP API",
-			ArgsUsage: " <file>",
-			Description: `
-"upload a file or directory to swarm using the HTTP API and prints the root hash",
-`,
+			Action:             upload,
+			CustomHelpTemplate: helpTemplate,
+			Name:               "up",
+			Usage:              "uploads a file or directory to swarm using the HTTP API",
+			ArgsUsage:          "<file>",
+			Flags:              []cli.Flag{SwarmEncryptedFlag},
+			Description:        "uploads a file or directory to swarm using the HTTP API and prints the root hash",
 		},
 		{
-			Action:    list,
-			Name:      "ls",
-			Usage:     "list files and directories contained in a manifest",
-			ArgsUsage: " <manifest> [<prefix>]",
-			Description: `
-Lists files and directories contained in a manifest.
-`,
+			Action:             list,
+			CustomHelpTemplate: helpTemplate,
+			Name:               "ls",
+			Usage:              "list files and directories contained in a manifest",
+			ArgsUsage:          "<manifest> [<prefix>]",
+			Description:        "Lists files and directories contained in a manifest",
 		},
 		{
-			Action:    hash,
-			Name:      "hash",
-			Usage:     "print the swarm hash of a file or directory",
-			ArgsUsage: " <file>",
-			Description: `
-Prints the swarm hash of file or directory.
-`,
+			Action:             hash,
+			CustomHelpTemplate: helpTemplate,
+			Name:               "hash",
+			Usage:              "print the swarm hash of a file or directory",
+			ArgsUsage:          "<file>",
+			Description:        "Prints the swarm hash of file or directory",
 		},
 		{
-			Name:      "manifest",
-			Usage:     "update a MANIFEST",
-			ArgsUsage: "manifest COMMAND",
+			Action:    download,
+			Name:      "down",
+			Flags:     []cli.Flag{SwarmRecursiveFlag},
+			Usage:     "downloads a swarm manifest or a file inside a manifest",
+			ArgsUsage: " <uri> [<dir>]",
 			Description: `
-Updates a MANIFEST by adding/removing/updating the hash of a path.
+Downloads a swarm bzz uri to the given dir. When no dir is provided, working directory is assumed. --recursive flag is expected when downloading a manifest with multiple entries.
 `,
 		},
+
+		{
+			Name:               "manifest",
+			CustomHelpTemplate: helpTemplate,
+			Usage:              "perform operations on swarm manifests",
+			ArgsUsage:          "COMMAND",
+			Description:        "Updates a MANIFEST by adding/removing/updating the hash of a path.\nCOMMAND could be: add, update, remove",
 			Subcommands: []cli.Command{
 				{
-					Action:    add,
-					Name:      "add",
-					Usage:     "add a new path to the manifest",
-					ArgsUsage: "<MANIFEST> <path> <hash> [<content-type>]",
-					Description: `
-Adds a new path to the manifest
-`,
+					Action:             add,
+					CustomHelpTemplate: helpTemplate,
+					Name:               "add",
+					Usage:              "add a new path to the manifest",
+					ArgsUsage:          "<MANIFEST> <path> <hash> [<content-type>]",
+					Description:        "Adds a new path to the manifest",
 				},
 				{
-					Action:    update,
-					Name:      "update",
-					Usage:     "update the hash for an already existing path in the manifest",
-					ArgsUsage: "<MANIFEST> <path> <newhash> [<newcontent-type>]",
-					Description: `
-Update the hash for an already existing path in the manifest
-`,
+					Action:             update,
+					CustomHelpTemplate: helpTemplate,
+					Name:               "update",
+					Usage:              "update the hash for an already existing path in the manifest",
+					ArgsUsage:          "<MANIFEST> <path> <newhash> [<newcontent-type>]",
+					Description:        "Update the hash for an already existing path in the manifest",
 				},
 				{
-					Action:    remove,
-					Name:      "remove",
-					Usage:     "removes a path from the manifest",
-					ArgsUsage: "<MANIFEST> <path>",
-					Description: `
-Removes a path from the manifest
-`,
+					Action:             remove,
+					CustomHelpTemplate: helpTemplate,
+					Name:               "remove",
+					Usage:              "removes a path from the manifest",
+					ArgsUsage:          "<MANIFEST> <path>",
+					Description:        "Removes a path from the manifest",
 				},
 			},
 		},
 		{
-			Name:      "db",
-			Usage:     "manage the local chunk database",
-			ArgsUsage: "db COMMAND",
-			Description: `
-Manage the local chunk database.
-`,
+			Name:               "fs",
+			CustomHelpTemplate: helpTemplate,
+			Usage:              "perform FUSE operations",
+			ArgsUsage:          "fs COMMAND",
+			Description:        "Performs FUSE operations by mounting/unmounting/listing mount points. This assumes you already have a Swarm node running locally. For all operation you must reference the correct path to bzzd.ipc in order to communicate with the node",
 			Subcommands: []cli.Command{
 				{
-					Action:    dbExport,
-					Name:      "export",
-					Usage:     "export a local chunk database as a tar archive (use - to send to stdout)",
-					ArgsUsage: "<chunkdb> <file>",
+					Action:             mount,
+					CustomHelpTemplate: helpTemplate,
+					Name:               "mount",
+					Flags:              []cli.Flag{utils.IPCPathFlag},
+					Usage:              "mount a swarm hash to a mount point",
+					ArgsUsage:          "swarm fs mount --ipcpath <path to bzzd.ipc> <manifest hash> <mount point>",
+					Description:        "Mounts a Swarm manifest hash to a given mount point. This assumes you already have a Swarm node running locally. You must reference the correct path to your bzzd.ipc file",
+				},
+				{
+					Action:             unmount,
+					CustomHelpTemplate: helpTemplate,
+					Name:               "unmount",
+					Flags:              []cli.Flag{utils.IPCPathFlag},
+					Usage:              "unmount a swarmfs mount",
+					ArgsUsage:          "swarm fs unmount --ipcpath <path to bzzd.ipc> <mount point>",
+					Description:        "Unmounts a swarmfs mount residing at <mount point>. This assumes you already have a Swarm node running locally. You must reference the correct path to your bzzd.ipc file",
+				},
+				{
+					Action:             listMounts,
+					CustomHelpTemplate: helpTemplate,
+					Name:               "list",
+					Flags:              []cli.Flag{utils.IPCPathFlag},
+					Usage:              "list swarmfs mounts",
+					ArgsUsage:          "swarm fs list --ipcpath <path to bzzd.ipc>",
+					Description:        "Lists all mounted swarmfs volumes. This assumes you already have a Swarm node running locally. You must reference the correct path to your bzzd.ipc file",
+				},
+			},
+		},
+		{
+			Name:               "db",
+			CustomHelpTemplate: helpTemplate,
+			Usage:              "manage the local chunk database",
+			ArgsUsage:          "db COMMAND",
+			Description:        "Manage the local chunk database",
+			Subcommands: []cli.Command{
+				{
+					Action:             dbExport,
+					CustomHelpTemplate: helpTemplate,
+					Name:               "export",
+					Usage:              "export a local chunk database as a tar archive (use - to send to stdout)",
+					ArgsUsage:          "<chunkdb> <file>",
 					Description: `
Export a local chunk database as a tar archive (use - to send to stdout).

@@ -277,10 +346,11 @@ pv(1) tool to get a progress bar:
 `,
 				},
 				{
-					Action:    dbImport,
-					Name:      "import",
-					Usage:     "import chunks from a tar archive into a local chunk database (use - to read from stdin)",
-					ArgsUsage: "<chunkdb> <file>",
+					Action:             dbImport,
+					CustomHelpTemplate: helpTemplate,
+					Name:               "import",
+					Usage:              "import chunks from a tar archive into a local chunk database (use - to read from stdin)",
+					ArgsUsage:          "<chunkdb> <file>",
 					Description: `
Import chunks from a tar archive into a local chunk database (use - to read from stdin).

@@ -293,27 +363,16 @@ pv(1) tool to get a progress bar:
 `,
 				},
 				{
-					Action:    dbClean,
-					Name:      "clean",
-					Usage:     "remove corrupt entries from a local chunk database",
-					ArgsUsage: "<chunkdb>",
-					Description: `
-Remove corrupt entries from a local chunk database.
-`,
+					Action:             dbClean,
+					CustomHelpTemplate: helpTemplate,
+					Name:               "clean",
+					Usage:              "remove corrupt entries from a local chunk database",
+					ArgsUsage:          "<chunkdb>",
+					Description:        "Remove corrupt entries from a local chunk database",
 				},
 			},
 		},
-		{
-			Action: func(ctx *cli.Context) {
-				utils.Fatalf("ERROR: 'swarm cleandb' has been removed, please use 'swarm db clean'.")
-			},
-			Name:      "cleandb",
-			Usage:     "DEPRECATED: use 'swarm db clean'",
-			ArgsUsage: " ",
-			Description: `
-DEPRECATED: use 'swarm db clean'.
-`,
-		},

 		// See config.go
 		DumpConfigCommand,
 	}
@@ -339,10 +398,11 @@ DEPRECATED: use 'swarm db clean'.
 		CorsStringFlag,
 		EnsAPIFlag,
 		SwarmTomlConfigPathFlag,
-		SwarmConfigPathFlag,
 		SwarmSwapEnabledFlag,
 		SwarmSwapAPIFlag,
-		SwarmSyncEnabledFlag,
+		SwarmSyncDisabledFlag,
+		SwarmSyncUpdateDelay,
+		SwarmDeliverySkipCheckFlag,
 		SwarmListenAddrFlag,
 		SwarmPortFlag,
 		SwarmAccountFlag,
@@ -350,15 +410,24 @@ DEPRECATED: use 'swarm db clean'.
 		ChequebookAddrFlag,
 		// upload flags
 		SwarmApiFlag,
-		SwarmRecursiveUploadFlag,
+		SwarmRecursiveFlag,
 		SwarmWantManifestFlag,
 		SwarmUploadDefaultPath,
 		SwarmUpFromStdinFlag,
 		SwarmUploadMimeType,
-		//deprecated flags
-		DeprecatedEthAPIFlag,
-		DeprecatedEnsAddrFlag,
+		// storage flags
+		SwarmStorePath,
+		SwarmStoreCapacity,
+		SwarmStoreCacheCapacity,
 	}
+	rpcFlags := []cli.Flag{
+		utils.WSEnabledFlag,
+		utils.WSListenAddrFlag,
+		utils.WSPortFlag,
+		utils.WSApiFlag,
+		utils.WSAllowedOriginsFlag,
+	}
+	app.Flags = append(app.Flags, rpcFlags...)
 	app.Flags = append(app.Flags, debug.Flags...)
 	app.Flags = append(app.Flags, swarmmetrics.Flags...)
 	app.Before = func(ctx *cli.Context) error {
@@ -383,16 +452,12 @@ func main() {
 	}
 }

 func version(ctx *cli.Context) error {
 	fmt.Println(strings.Title(clientIdentifier))
-	fmt.Println("Version:", params.Version)
+	fmt.Println("Version:", SWARM_VERSION)
 	if gitCommit != "" {
 		fmt.Println("Git Commit:", gitCommit)
 	}
-	fmt.Println("Network Id:", ctx.GlobalInt(utils.NetworkIdFlag.Name))
 	fmt.Println("Go Version:", runtime.Version())
 	fmt.Println("OS:", runtime.GOOS)
-	fmt.Printf("GOPATH=%s\n", os.Getenv("GOPATH"))
-	fmt.Printf("GOROOT=%s\n", runtime.GOROOT())
 	return nil
 }

@@ -405,6 +470,10 @@ func bzzd(ctx *cli.Context) error {
 	}

 	cfg := defaultNodeConfig
+
+	//pss operates on ws
+	cfg.WSModules = append(cfg.WSModules, "pss")
+
 	//geth only supports --datadir via command line
 	//in order to be consistent within swarm, if we pass --datadir via environment variable
 	//or via config file, we get the same directory for geth and swarm
@@ -421,7 +490,7 @@ func bzzd(ctx *cli.Context) error {
 	//due to overriding behavior
 	initSwarmNode(bzzconfig, stack, ctx)
 	//register BZZ as node.Service in the ethereum node
-	registerBzzService(bzzconfig, ctx, stack)
+	registerBzzService(bzzconfig, stack)
 	//start the node
 	utils.StartNode(stack)

@@ -439,7 +508,7 @@ func bzzd(ctx *cli.Context) error {
 		bootnodes := strings.Split(bzzconfig.BootNodes, ",")
 		injectBootnodes(stack.Server(), bootnodes)
 	} else {
-		if bzzconfig.NetworkId == 3 {
+		if bzzconfig.NetworkID == 3 {
 			injectBootnodes(stack.Server(), testbetBootNodes)
 		}
 	}
@@ -448,21 +517,11 @@ func bzzd(ctx *cli.Context) error {
 	return nil
 }

-func registerBzzService(bzzconfig *bzzapi.Config, ctx *cli.Context, stack *node.Node) {
+func registerBzzService(bzzconfig *bzzapi.Config, stack *node.Node) {
 	//define the swarm service boot function
-	boot := func(ctx *node.ServiceContext) (node.Service, error) {
-		var swapClient *ethclient.Client
-		var err error
-		if bzzconfig.SwapApi != "" {
-			log.Info("connecting to SWAP API", "url", bzzconfig.SwapApi)
-			swapClient, err = ethclient.Dial(bzzconfig.SwapApi)
-			if err != nil {
-				return nil, fmt.Errorf("error connecting to SWAP API %s: %s", bzzconfig.SwapApi, err)
-			}
-		}
-
-		return swarm.NewSwarm(ctx, swapClient, bzzconfig)
+	boot := func(_ *node.ServiceContext) (node.Service, error) {
+		// In production, mockStore must be always nil.
+		return swarm.NewSwarm(bzzconfig, nil)
 	}
 	//register within the ethereum node
 	if err := stack.Register(boot); err != nil {
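registerBzzService drops the cli.Context and ethclient plumbing: the constructor now closes over the config alone and passes a nil mock store. A reduced sketch of the node.Service registration pattern with a stub service standing in for swarm.Swarm:

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/node"
	"github.com/ethereum/go-ethereum/p2p"
	"github.com/ethereum/go-ethereum/rpc"
)

// stubService is a minimal node.Service stand-in for swarm.Swarm.
type stubService struct{}

func (s *stubService) Protocols() []p2p.Protocol { return nil }
func (s *stubService) APIs() []rpc.API           { return nil }
func (s *stubService) Start(*p2p.Server) error   { return nil }
func (s *stubService) Stop() error               { return nil }

func main() {
	stack, err := node.New(&node.Config{})
	if err != nil {
		fmt.Println(err)
		return
	}
	// Same shape as registerBzzService: the constructor closes over
	// whatever config it needs instead of digging it out of a cli.Context.
	boot := func(_ *node.ServiceContext) (node.Service, error) {
		return &stubService{}, nil
	}
	if err := stack.Register(boot); err != nil {
		fmt.Println("register failed:", err)
	}
}
```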
@@ -131,13 +131,13 @@ func addEntryToManifest(ctx *cli.Context, mhash, path, hash, ctype string) strin
 		longestPathEntry = api.ManifestEntry{}
 	)

-	mroot, err := client.DownloadManifest(mhash)
+	mroot, isEncrypted, err := client.DownloadManifest(mhash)
 	if err != nil {
 		utils.Fatalf("Manifest download failed: %v", err)
 	}

 	//TODO: check if the "hash" to add is valid and present in swarm
-	_, err = client.DownloadManifest(hash)
+	_, _, err = client.DownloadManifest(hash)
 	if err != nil {
 		utils.Fatalf("Hash to add is not present: %v", err)
 	}
@@ -180,7 +180,7 @@ func addEntryToManifest(ctx *cli.Context, mhash, path, hash, ctype string) strin
 		mroot.Entries = append(mroot.Entries, newEntry)
 	}

-	newManifestHash, err := client.UploadManifest(mroot)
+	newManifestHash, err := client.UploadManifest(mroot, isEncrypted)
 	if err != nil {
 		utils.Fatalf("Manifest upload failed: %v", err)
 	}
@@ -197,7 +197,7 @@ func updateEntryInManifest(ctx *cli.Context, mhash, path, hash, ctype string) st
|
||||
longestPathEntry = api.ManifestEntry{}
|
||||
)
|
||||
|
||||
mroot, err := client.DownloadManifest(mhash)
|
||||
mroot, isEncrypted, err := client.DownloadManifest(mhash)
|
||||
if err != nil {
|
||||
utils.Fatalf("Manifest download failed: %v", err)
|
||||
}
|
||||
@@ -257,7 +257,7 @@ func updateEntryInManifest(ctx *cli.Context, mhash, path, hash, ctype string) st
|
||||
mroot = newMRoot
|
||||
}
|
||||
|
||||
newManifestHash, err := client.UploadManifest(mroot)
|
||||
newManifestHash, err := client.UploadManifest(mroot, isEncrypted)
|
||||
if err != nil {
|
||||
utils.Fatalf("Manifest upload failed: %v", err)
|
||||
}
|
||||
@@ -273,7 +273,7 @@ func removeEntryFromManifest(ctx *cli.Context, mhash, path string) string {
|
||||
longestPathEntry = api.ManifestEntry{}
|
||||
)
|
||||
|
||||
mroot, err := client.DownloadManifest(mhash)
|
||||
mroot, isEncrypted, err := client.DownloadManifest(mhash)
|
||||
if err != nil {
|
||||
utils.Fatalf("Manifest download failed: %v", err)
|
||||
}
|
||||
@@ -323,7 +323,7 @@ func removeEntryFromManifest(ctx *cli.Context, mhash, path string) string {
|
||||
mroot = newMRoot
|
||||
}
|
||||
|
||||
newManifestHash, err := client.UploadManifest(mroot)
|
||||
newManifestHash, err := client.UploadManifest(mroot, isEncrypted)
|
||||
if err != nil {
|
||||
utils.Fatalf("Manifest upload failed: %v", err)
|
||||
}
|
||||
|
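For downstream callers the pattern is worth spelling out: DownloadManifest now also reports whether the manifest is encrypted, and UploadManifest must be handed that flag back so a modified manifest is re-uploaded with the same encryption setting. A minimal sketch of the new round-trip (names as in the hunks above):

	mroot, isEncrypted, err := client.DownloadManifest(mhash)
	if err != nil {
		utils.Fatalf("Manifest download failed: %v", err)
	}
	// ...mutate mroot.Entries...
	newManifestHash, err := client.UploadManifest(mroot, isEncrypted)
	if err != nil {
		utils.Fatalf("Manifest upload failed: %v", err)
	}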
@@ -81,6 +81,7 @@ type testCluster struct {
//
// When starting more than one node, they are connected together using the
// admin SetPeer RPC method.

func newTestCluster(t *testing.T, size int) *testCluster {
	cluster := &testCluster{}
	defer func() {
@@ -96,18 +97,7 @@ func newTestCluster(t *testing.T, size int) *testCluster {
	cluster.TmpDir = tmpdir

-	// start the nodes
-	cluster.Nodes = make([]*testNode, 0, size)
-	for i := 0; i < size; i++ {
-		dir := filepath.Join(cluster.TmpDir, fmt.Sprintf("swarm%02d", i))
-		if err := os.Mkdir(dir, 0700); err != nil {
-			t.Fatal(err)
-		}
-
-		node := newTestNode(t, dir)
-		node.Name = fmt.Sprintf("swarm%02d", i)
-
-		cluster.Nodes = append(cluster.Nodes, node)
-	}
+	cluster.StartNewNodes(t, size)

	if size == 1 {
		return cluster
@@ -145,14 +135,51 @@ func (c *testCluster) Shutdown() {
	os.RemoveAll(c.TmpDir)
}

+func (c *testCluster) Stop() {
+	for _, node := range c.Nodes {
+		node.Shutdown()
+	}
+}
+
+func (c *testCluster) StartNewNodes(t *testing.T, size int) {
+	c.Nodes = make([]*testNode, 0, size)
+	for i := 0; i < size; i++ {
+		dir := filepath.Join(c.TmpDir, fmt.Sprintf("swarm%02d", i))
+		if err := os.Mkdir(dir, 0700); err != nil {
+			t.Fatal(err)
+		}
+
+		node := newTestNode(t, dir)
+		node.Name = fmt.Sprintf("swarm%02d", i)
+
+		c.Nodes = append(c.Nodes, node)
+	}
+}
+
+func (c *testCluster) StartExistingNodes(t *testing.T, size int, bzzaccount string) {
+	c.Nodes = make([]*testNode, 0, size)
+	for i := 0; i < size; i++ {
+		dir := filepath.Join(c.TmpDir, fmt.Sprintf("swarm%02d", i))
+		node := existingTestNode(t, dir, bzzaccount)
+		node.Name = fmt.Sprintf("swarm%02d", i)
+
+		c.Nodes = append(c.Nodes, node)
+	}
+}
+
+func (c *testCluster) Cleanup() {
+	os.RemoveAll(c.TmpDir)
+}
+
type testNode struct {
-	Name   string
-	Addr   string
-	URL    string
-	Enode  string
-	Dir    string
-	Client *rpc.Client
-	Cmd    *cmdtest.TestCmd
+	Name    string
+	Addr    string
+	URL     string
+	Enode   string
+	Dir     string
+	IpcPath string
+	Client  *rpc.Client
+	Cmd     *cmdtest.TestCmd
}

const testPassphrase = "swarm-test-passphrase"
@@ -181,6 +208,72 @@ func getTestAccount(t *testing.T, dir string) (conf *node.Config, account accounts.Account) {
	return conf, account
}

+func existingTestNode(t *testing.T, dir string, bzzaccount string) *testNode {
+	conf, _ := getTestAccount(t, dir)
+	node := &testNode{Dir: dir}
+
+	// use a unique IPCPath when running tests on Windows
+	if runtime.GOOS == "windows" {
+		conf.IPCPath = fmt.Sprintf("bzzd-%s.ipc", bzzaccount)
+	}
+
+	// assign ports
+	httpPort, err := assignTCPPort()
+	if err != nil {
+		t.Fatal(err)
+	}
+	p2pPort, err := assignTCPPort()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// start the node
+	node.Cmd = runSwarm(t,
+		"--port", p2pPort,
+		"--nodiscover",
+		"--datadir", dir,
+		"--ipcpath", conf.IPCPath,
+		"--ens-api", "",
+		"--bzzaccount", bzzaccount,
+		"--bzznetworkid", "321",
+		"--bzzport", httpPort,
+		"--verbosity", "6",
+	)
+	node.Cmd.InputLine(testPassphrase)
+	defer func() {
+		if t.Failed() {
+			node.Shutdown()
+		}
+	}()
+
+	// wait for the node to start
+	for start := time.Now(); time.Since(start) < 10*time.Second; time.Sleep(50 * time.Millisecond) {
+		node.Client, err = rpc.Dial(conf.IPCEndpoint())
+		if err == nil {
+			break
+		}
+	}
+	if node.Client == nil {
+		t.Fatal(err)
+	}
+
+	// load info
+	var info swarm.Info
+	if err := node.Client.Call(&info, "bzz_info"); err != nil {
+		t.Fatal(err)
+	}
+	node.Addr = net.JoinHostPort("127.0.0.1", info.Port)
+	node.URL = "http://" + node.Addr
+
+	var nodeInfo p2p.NodeInfo
+	if err := node.Client.Call(&nodeInfo, "admin_nodeInfo"); err != nil {
+		t.Fatal(err)
+	}
+	node.Enode = fmt.Sprintf("enode://%s@127.0.0.1:%s", nodeInfo.ID, p2pPort)
+
+	return node
+}
+
func newTestNode(t *testing.T, dir string) *testNode {

	conf, account := getTestAccount(t, dir)
@@ -239,6 +332,7 @@ func newTestNode(t *testing.T, dir string) *testNode {
		t.Fatal(err)
	}
	node.Enode = fmt.Sprintf("enode://%s@127.0.0.1:%s", nodeInfo.ID, p2pPort)
+	node.IpcPath = conf.IPCPath

	return node
}
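The lifecycle split above lets a test shut a cluster down and bring the same nodes back up from their existing data directories. A hypothetical fragment of that usage (the bzzAccount value is an assumption; a real test would capture the account created on first start):

	cluster := newTestCluster(t, 2)
	defer cluster.Cleanup()

	// ...exercise the freshly started nodes...

	cluster.Stop()
	// Restart from the existing datadirs, unlocking the same account
	// (bzzAccount is a placeholder for the captured account address).
	cluster.StartExistingNodes(t, 2, bzzAccount)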
cmd/swarm/swarm-smoke/main.go (new file, 101 lines)
@@ -0,0 +1,101 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.

package main

import (
	"os"
	"sort"

	"github.com/ethereum/go-ethereum/log"
	colorable "github.com/mattn/go-colorable"

	cli "gopkg.in/urfave/cli.v1"
)

var (
	endpoints        []string
	includeLocalhost bool
	cluster          string
	scheme           string
	filesize         int
	from             int
	to               int
)

func main() {
	log.PrintOrigins(true)
	log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(colorable.NewColorableStderr(), log.TerminalFormat(true))))

	app := cli.NewApp()
	app.Name = "smoke-test"
	app.Usage = ""

	app.Flags = []cli.Flag{
		cli.StringFlag{
			Name:        "cluster-endpoint",
			Value:       "testing",
			Usage:       "cluster to point to (open, or testing)",
			Destination: &cluster,
		},
		cli.IntFlag{
			Name:        "cluster-from",
			Value:       8501,
			Usage:       "swarm node (from)",
			Destination: &from,
		},
		cli.IntFlag{
			Name:        "cluster-to",
			Value:       8512,
			Usage:       "swarm node (to)",
			Destination: &to,
		},
		cli.StringFlag{
			Name:        "cluster-scheme",
			Value:       "http",
			Usage:       "http or https",
			Destination: &scheme,
		},
		cli.BoolFlag{
			Name:        "include-localhost",
			Usage:       "whether to include localhost:8500 as an endpoint",
			Destination: &includeLocalhost,
		},
		cli.IntFlag{
			Name:        "filesize",
			Value:       1,
			Usage:       "file size for generated random file in MB",
			Destination: &filesize,
		},
	}

	app.Commands = []cli.Command{
		{
			Name:    "upload_and_sync",
			Aliases: []string{"c"},
			Usage:   "upload and sync",
			Action:  cliUploadAndSync,
		},
	}

	sort.Sort(cli.FlagsByName(app.Flags))
	sort.Sort(cli.CommandsByName(app.Commands))

	err := app.Run(os.Args)
	if err != nil {
		log.Error(err.Error())
	}
}
cmd/swarm/swarm-smoke/upload_and_sync.go (new file, 184 lines)
@@ -0,0 +1,184 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.

package main

import (
	"bytes"
	"crypto/md5"
	"crypto/rand"
	"fmt"
	"io"
	"io/ioutil"
	"net/http"
	"os"
	"os/exec"
	"strings"
	"sync"
	"time"

	"github.com/ethereum/go-ethereum/log"
	"github.com/pborman/uuid"

	cli "gopkg.in/urfave/cli.v1"
)

func generateEndpoints(scheme string, cluster string, from int, to int) {
	for port := from; port <= to; port++ {
		endpoints = append(endpoints, fmt.Sprintf("%s://%v.%s.swarm-gateways.net", scheme, port, cluster))
	}

	if includeLocalhost {
		endpoints = append(endpoints, "http://localhost:8500")
	}
}

func cliUploadAndSync(c *cli.Context) error {
	defer func(now time.Time) { log.Info("total time", "time", time.Since(now), "size", filesize) }(time.Now())

	generateEndpoints(scheme, cluster, from, to)

	log.Info("uploading to " + endpoints[0] + " and syncing")

	f, cleanup := generateRandomFile(filesize * 1000000)
	defer cleanup()

	hash, err := upload(f, endpoints[0])
	if err != nil {
		log.Error(err.Error())
		return err
	}

	fhash, err := digest(f)
	if err != nil {
		log.Error(err.Error())
		return err
	}

	log.Info("uploaded successfully", "hash", hash, "digest", fmt.Sprintf("%x", fhash))

	if filesize < 10 {
		time.Sleep(15 * time.Second)
	} else {
		time.Sleep(2 * time.Duration(filesize) * time.Second)
	}

	wg := sync.WaitGroup{}
	for _, endpoint := range endpoints {
		endpoint := endpoint
		ruid := uuid.New()[:8]
		wg.Add(1)
		go func(endpoint string, ruid string) {
			for {
				err := fetch(hash, endpoint, fhash, ruid)
				if err != nil {
					continue
				}

				wg.Done()
				return
			}
		}(endpoint, ruid)
	}
	wg.Wait()
	log.Info("all endpoints synced random file successfully")

	return nil
}

// fetch gets the requested `hash` from the `endpoint` and compares it against the `original` file digest
func fetch(hash string, endpoint string, original []byte, ruid string) error {
	log.Trace("sleeping", "ruid", ruid)
	time.Sleep(1 * time.Second)

	log.Trace("http get request", "ruid", ruid, "api", endpoint, "hash", hash)
	res, err := http.Get(endpoint + "/bzz:/" + hash + "/")
	if err != nil {
		log.Warn(err.Error(), "ruid", ruid)
		return err
	}
	log.Trace("http get response", "ruid", ruid, "api", endpoint, "hash", hash, "code", res.StatusCode, "len", res.ContentLength)

	if res.StatusCode != 200 {
		err := fmt.Errorf("expected status code %d, got %v", 200, res.StatusCode)
		log.Warn(err.Error(), "ruid", ruid)
		return err
	}

	defer res.Body.Close()

	rdigest, err := digest(res.Body)
	if err != nil {
		log.Warn(err.Error(), "ruid", ruid)
		return err
	}

	if !bytes.Equal(rdigest, original) {
		err := fmt.Errorf("downloaded imported file md5=%x is not the same as the generated one=%x", rdigest, original)
		log.Warn(err.Error(), "ruid", ruid)
		return err
	}

	log.Trace("downloaded file matches random file", "ruid", ruid, "len", res.ContentLength)

	return nil
}

// upload uploads the file `f` to `endpoint` via the `swarm up` command
func upload(f *os.File, endpoint string) (string, error) {
	var out bytes.Buffer
	cmd := exec.Command("swarm", "--bzzapi", endpoint, "up", f.Name())
	cmd.Stdout = &out
	err := cmd.Run()
	if err != nil {
		return "", err
	}
	hash := strings.TrimRight(out.String(), "\r\n")
	return hash, nil
}

func digest(r io.Reader) ([]byte, error) {
	h := md5.New()
	_, err := io.Copy(h, r)
	if err != nil {
		return nil, err
	}
	return h.Sum(nil), nil
}

// generateRandomFile creates a temporary file of the requested byte size
func generateRandomFile(size int) (f *os.File, teardown func()) {
	// create a tmp file
	tmp, err := ioutil.TempFile("", "swarm-test")
	if err != nil {
		panic(err)
	}

	// callback for tmp file cleanup
	teardown = func() {
		tmp.Close()
		os.Remove(tmp.Name())
	}

	buf := make([]byte, size)
	_, err = rand.Read(buf)
	if err != nil {
		panic(err)
	}
	ioutil.WriteFile(tmp.Name(), buf, 0755)

	return tmp, teardown
}
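For illustration, the endpoint fan-out generateEndpoints produces for an assumed flag set:

	// --cluster-scheme=http --cluster-endpoint=testing --cluster-from=8501 --cluster-to=8503
	generateEndpoints("http", "testing", 8501, 8503)
	// endpoints now holds:
	//   http://8501.testing.swarm-gateways.net
	//   http://8502.testing.swarm-gateways.net
	//   http://8503.testing.swarm-gateways.net
	// plus http://localhost:8500 when --include-localhost is set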
@@ -40,12 +40,13 @@ func upload(ctx *cli.Context) {
	args := ctx.Args()
	var (
		bzzapi       = strings.TrimRight(ctx.GlobalString(SwarmApiFlag.Name), "/")
-		recursive    = ctx.GlobalBool(SwarmRecursiveUploadFlag.Name)
+		recursive    = ctx.GlobalBool(SwarmRecursiveFlag.Name)
		wantManifest = ctx.GlobalBoolT(SwarmWantManifestFlag.Name)
		defaultPath  = ctx.GlobalString(SwarmUploadDefaultPath.Name)
		fromStdin    = ctx.GlobalBool(SwarmUpFromStdinFlag.Name)
		mimeType     = ctx.GlobalString(SwarmUploadMimeType.Name)
		client       = swarm.NewClient(bzzapi)
+		toEncrypt    = ctx.Bool(SwarmEncryptedFlag.Name)
		file         string
	)

@@ -76,7 +77,7 @@ func upload(ctx *cli.Context) {
			utils.Fatalf("Error opening file: %s", err)
		}
		defer f.Close()
-		hash, err := client.UploadRaw(f, f.Size)
+		hash, err := client.UploadRaw(f, f.Size, toEncrypt)
		if err != nil {
			utils.Fatalf("Upload failed: %s", err)
		}
@@ -97,7 +98,7 @@ func upload(ctx *cli.Context) {
			if !recursive {
				return "", errors.New("Argument is a directory and recursive upload is disabled")
			}
-			return client.UploadDirectory(file, defaultPath, "")
+			return client.UploadDirectory(file, defaultPath, "", toEncrypt)
		}
	} else {
		doUpload = func() (string, error) {
@@ -110,7 +111,7 @@ func upload(ctx *cli.Context) {
				mimeType = detectMimeType(file)
			}
			f.ContentType = mimeType
-			return client.Upload(f, "")
+			return client.Upload(f, "", toEncrypt)
		}
	}
	hash, err := doUpload()
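Seen from the caller's side, all three upload entry points now thread the encryption flag through. A fragment summarizing the new call shapes (receivers and other arguments as in the hunks above):

	toEncrypt := ctx.Bool(SwarmEncryptedFlag.Name)

	hash, err := client.UploadRaw(f, f.Size, toEncrypt)                  // raw file
	hash, err = client.UploadDirectory(file, defaultPath, "", toEncrypt) // recursive directory
	hash, err = client.Upload(f, "", toEncrypt)                          // single file with manifest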
@@ -17,60 +17,259 @@
package main

import (
+	"bytes"
+	"flag"
	"fmt"
	"io"
	"io/ioutil"
	"net/http"
	"os"
+	"path"
+	"path/filepath"
+	"strings"
	"testing"
+	"time"

+	"github.com/ethereum/go-ethereum/log"
+	swarm "github.com/ethereum/go-ethereum/swarm/api/client"
+	colorable "github.com/mattn/go-colorable"
)

+var loglevel = flag.Int("loglevel", 3, "verbosity of logs")
+
+func init() {
+	log.PrintOrigins(true)
+	log.Root().SetHandler(log.LvlFilterHandler(log.Lvl(*loglevel), log.StreamHandler(colorable.NewColorableStderr(), log.TerminalFormat(true))))
+}
+
// TestCLISwarmUp tests that running 'swarm up' makes the resulting file
// available from all nodes via the HTTP API
func TestCLISwarmUp(t *testing.T) {
-	// start 3 node cluster
-	t.Log("starting 3 node cluster")
+	testCLISwarmUp(false, t)
+}
+
+func TestCLISwarmUpRecursive(t *testing.T) {
+	testCLISwarmUpRecursive(false, t)
+}
+
+// TestCLISwarmUpEncrypted tests that running 'swarm encrypted-up' makes the resulting file
+// available from all nodes via the HTTP API
+func TestCLISwarmUpEncrypted(t *testing.T) {
+	testCLISwarmUp(true, t)
+}
+
+func TestCLISwarmUpEncryptedRecursive(t *testing.T) {
+	testCLISwarmUpRecursive(true, t)
+}
+
+func testCLISwarmUp(toEncrypt bool, t *testing.T) {
+	log.Info("starting 3 node cluster")
	cluster := newTestCluster(t, 3)
	defer cluster.Shutdown()

	// create a tmp file
	tmp, err := ioutil.TempFile("", "swarm-test")
-	assertNil(t, err)
-	defer tmp.Close()
-	defer os.Remove(tmp.Name())
-	_, err = io.WriteString(tmp, "data")
-	assertNil(t, err)
-
-	// upload the file with 'swarm up' and expect a hash
-	t.Log("uploading file with 'swarm up'")
-	up := runSwarm(t, "--bzzapi", cluster.Nodes[0].URL, "up", tmp.Name())
-	_, matches := up.ExpectRegexp(`[a-f\d]{64}`)
-	up.ExpectExit()
-	hash := matches[0]
-	t.Logf("file uploaded with hash %s", hash)
-
-	// get the file from the HTTP API of each node
-	for _, node := range cluster.Nodes {
-		t.Logf("getting file from %s", node.Name)
-		res, err := http.Get(node.URL + "/bzz:/" + hash)
-		assertNil(t, err)
-		assertHTTPResponse(t, res, http.StatusOK, "data")
-	}
-}
-
-func assertNil(t *testing.T, err error) {
-	if err != nil {
-		t.Fatal(err)
-	}
-}
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer tmp.Close()
+	defer os.Remove(tmp.Name())
+
+	// write data to file
+	data := "notsorandomdata"
+	_, err = io.WriteString(tmp, data)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	hashRegexp := `[a-f\d]{64}`
+	flags := []string{
+		"--bzzapi", cluster.Nodes[0].URL,
+		"up",
+		tmp.Name()}
+	if toEncrypt {
+		hashRegexp = `[a-f\d]{128}`
+		flags = []string{
+			"--bzzapi", cluster.Nodes[0].URL,
+			"up",
+			"--encrypt",
+			tmp.Name()}
+	}
+	// upload the file with 'swarm up' and expect a hash
+	log.Info(fmt.Sprintf("uploading file with 'swarm up'"))
+	up := runSwarm(t, flags...)
+	_, matches := up.ExpectRegexp(hashRegexp)
+	up.ExpectExit()
+	hash := matches[0]
+	log.Info("file uploaded", "hash", hash)
+
+	// get the file from the HTTP API of each node
+	for _, node := range cluster.Nodes {
+		log.Info("getting file from node", "node", node.Name)
+
+		res, err := http.Get(node.URL + "/bzz:/" + hash)
+		if err != nil {
+			t.Fatal(err)
+		}
+		defer res.Body.Close()
+
+		reply, err := ioutil.ReadAll(res.Body)
+		if err != nil {
+			t.Fatal(err)
+		}
+		if res.StatusCode != 200 {
+			t.Fatalf("expected HTTP status 200, got %s", res.Status)
+		}
+		if string(reply) != data {
+			t.Fatalf("expected HTTP body %q, got %q", data, reply)
+		}
+		log.Debug("verifying uploaded file using `swarm down`")
+		//try to get the content with `swarm down`
+		tmpDownload, err := ioutil.TempDir("", "swarm-test")
+		tmpDownload = path.Join(tmpDownload, "tmpfile.tmp")
+		if err != nil {
+			t.Fatal(err)
+		}
+		defer os.RemoveAll(tmpDownload)
+
+		bzzLocator := "bzz:/" + hash
+		flags = []string{
+			"--bzzapi", cluster.Nodes[0].URL,
+			"down",
+			bzzLocator,
+			tmpDownload,
+		}
+
+		down := runSwarm(t, flags...)
+		down.ExpectExit()
+
+		fi, err := os.Stat(tmpDownload)
+		if err != nil {
+			t.Fatalf("could not stat path: %v", err)
+		}
+
+		switch mode := fi.Mode(); {
+		case mode.IsRegular():
+			downloadedBytes, err := ioutil.ReadFile(tmpDownload)
+			if err != nil {
+				t.Fatalf("had an error reading the downloaded file: %v", err)
+			}
+			if !bytes.Equal(downloadedBytes, bytes.NewBufferString(data).Bytes()) {
+				t.Fatalf("retrieved data and posted data not equal!")
+			}
+
+		default:
+			t.Fatalf("expected to download regular file, got %s", fi.Mode())
+		}
+	}
+
+	timeout := time.Duration(2 * time.Second)
+	httpClient := http.Client{
+		Timeout: timeout,
+	}
+
+	// try to squeeze a timeout by getting a non-existent hash from each node
+	for _, node := range cluster.Nodes {
+		_, err := httpClient.Get(node.URL + "/bzz:/1023e8bae0f70be7d7b5f74343088ba408a218254391490c85ae16278e230340")
+		// we're speeding up the timeout here since netstore has a 60 seconds timeout on a request
+		if err != nil && !strings.Contains(err.Error(), "Client.Timeout exceeded while awaiting headers") {
+			t.Fatal(err)
+		}
+		// this is disabled since it takes 60s due to netstore timeout
+		// if res.StatusCode != 404 {
+		// 	t.Fatalf("expected HTTP status 404, got %s", res.Status)
+		// }
+	}
+}

-func assertHTTPResponse(t *testing.T, res *http.Response, expectedStatus int, expectedBody string) {
-	defer res.Body.Close()
-	if res.StatusCode != expectedStatus {
-		t.Fatalf("expected HTTP status %d, got %s", expectedStatus, res.Status)
-	}
-	data, err := ioutil.ReadAll(res.Body)
-	assertNil(t, err)
-	if string(data) != expectedBody {
-		t.Fatalf("expected HTTP body %q, got %q", expectedBody, data)
-	}
-}
+func testCLISwarmUpRecursive(toEncrypt bool, t *testing.T) {
+	fmt.Println("starting 3 node cluster")
+	cluster := newTestCluster(t, 3)
+	defer cluster.Shutdown()
+
+	tmpUploadDir, err := ioutil.TempDir("", "swarm-test")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(tmpUploadDir)
+	// create tmp files
+	data := "notsorandomdata"
+	for _, path := range []string{"tmp1", "tmp2"} {
+		if err := ioutil.WriteFile(filepath.Join(tmpUploadDir, path), bytes.NewBufferString(data).Bytes(), 0644); err != nil {
+			t.Fatal(err)
+		}
+	}
+
+	hashRegexp := `[a-f\d]{64}`
+	flags := []string{
+		"--bzzapi", cluster.Nodes[0].URL,
+		"--recursive",
+		"up",
+		tmpUploadDir}
+	if toEncrypt {
+		hashRegexp = `[a-f\d]{128}`
+		flags = []string{
+			"--bzzapi", cluster.Nodes[0].URL,
+			"--recursive",
+			"up",
+			"--encrypt",
+			tmpUploadDir}
+	}
+	// upload the file with 'swarm up' and expect a hash
+	log.Info(fmt.Sprintf("uploading file with 'swarm up'"))
+	up := runSwarm(t, flags...)
+	_, matches := up.ExpectRegexp(hashRegexp)
+	up.ExpectExit()
+	hash := matches[0]
+	log.Info("dir uploaded", "hash", hash)
+
+	// get the file from the HTTP API of each node
+	for _, node := range cluster.Nodes {
+		log.Info("getting file from node", "node", node.Name)
+		//try to get the content with `swarm down`
+		tmpDownload, err := ioutil.TempDir("", "swarm-test")
+		if err != nil {
+			t.Fatal(err)
+		}
+		defer os.RemoveAll(tmpDownload)
+		bzzLocator := "bzz:/" + hash
+		flagss := []string{}
+		flagss = []string{
+			"--bzzapi", cluster.Nodes[0].URL,
+			"down",
+			"--recursive",
+			bzzLocator,
+			tmpDownload,
+		}
+
+		fmt.Println("downloading from swarm with recursive")
+		down := runSwarm(t, flagss...)
+		down.ExpectExit()
+
+		files, err := ioutil.ReadDir(tmpDownload)
+		for _, v := range files {
+			fi, err := os.Stat(path.Join(tmpDownload, v.Name()))
+			if err != nil {
+				t.Fatalf("got an error: %v", err)
+			}
+
+			switch mode := fi.Mode(); {
+			case mode.IsRegular():
+				if file, err := swarm.Open(path.Join(tmpDownload, v.Name())); err != nil {
+					t.Fatalf("encountered an error opening the file returned from the CLI: %v", err)
+				} else {
+					ff := make([]byte, len(data))
+					io.ReadFull(file, ff)
+					buf := bytes.NewBufferString(data)
+
+					if !bytes.Equal(ff, buf.Bytes()) {
+						t.Fatalf("retrieved data and posted data not equal!")
+					}
+				}
+			default:
+				t.Fatalf("this shouldn't happen")
+			}
+		}
+		if err != nil {
+			t.Fatalf("could not list files at: %v", files)
+		}
+	}
+}
@@ -29,6 +29,7 @@ import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core"
+	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/ethdb"
@@ -271,15 +272,13 @@ func ImportPreimages(db *ethdb.LDBDatabase, fn string) error {
		// Accumulate the preimages and flush when enough was gathered
		preimages[crypto.Keccak256Hash(blob)] = common.CopyBytes(blob)
		if len(preimages) > 1024 {
-			if err := core.WritePreimages(db, 0, preimages); err != nil {
-				return err
-			}
+			rawdb.WritePreimages(db, 0, preimages)
			preimages = make(map[common.Hash][]byte)
		}
	}
	// Flush the last batch preimage data
	if len(preimages) > 0 {
-		return core.WritePreimages(db, 0, preimages)
+		rawdb.WritePreimages(db, 0, preimages)
	}
	return nil
}
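Unlike core.WritePreimages, the rawdb variant returns nothing and hard-crashes internally on a database failure, which is why the error plumbing disappears from the loop. The resulting pattern, as a fragment:

	// Accumulate, then flush without error handling.
	preimages[crypto.Keccak256Hash(blob)] = common.CopyBytes(blob)
	if len(preimages) > 1024 {
		rawdb.WritePreimages(db, 0, preimages)
		preimages = make(map[common.Hash][]byte)
	}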
@@ -27,6 +27,7 @@ import (
	"runtime"
	"strconv"
	"strings"
+	"time"

	"github.com/ethereum/go-ethereum/accounts"
	"github.com/ethereum/go-ethereum/accounts/keystore"
@@ -48,6 +49,7 @@ import (
	"github.com/ethereum/go-ethereum/les"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/metrics"
+	"github.com/ethereum/go-ethereum/metrics/influxdb"
	"github.com/ethereum/go-ethereum/node"
	"github.com/ethereum/go-ethereum/p2p"
	"github.com/ethereum/go-ethereum/p2p/discover"
@@ -158,11 +160,11 @@ var (
	}
	FastSyncFlag = cli.BoolFlag{
		Name:  "fast",
-		Usage: "Enable fast syncing through state downloads",
+		Usage: "Enable fast syncing through state downloads (replaced by --syncmode)",
	}
	LightModeFlag = cli.BoolFlag{
		Name:  "light",
-		Usage: "Enable light client mode",
+		Usage: "Enable light client mode (replaced by --syncmode)",
	}
	defaultSyncMode = eth.DefaultConfig.SyncMode
	SyncModeFlag    = TextMarshalerFlag{
@@ -360,10 +362,6 @@ var (
		Name:  "ethstats",
		Usage: "Reporting URL of an ethstats service (nodename:secret@host:port)",
	}
-	MetricsEnabledFlag = cli.BoolFlag{
-		Name:  metrics.MetricsEnabledFlag,
-		Usage: "Enable metrics collection and reporting",
-	}
	FakePoWFlag = cli.BoolFlag{
		Name:  "fakepow",
		Usage: "Disables proof-of-work verification",
@@ -532,6 +530,45 @@ var (
		Usage: "Minimum POW accepted",
		Value: whisper.DefaultMinimumPoW,
	}

+	// Metrics flags
+	MetricsEnabledFlag = cli.BoolFlag{
+		Name:  metrics.MetricsEnabledFlag,
+		Usage: "Enable metrics collection and reporting",
+	}
+	MetricsEnableInfluxDBFlag = cli.BoolFlag{
+		Name:  "metrics.influxdb",
+		Usage: "Enable metrics export/push to an external InfluxDB database",
+	}
+	MetricsInfluxDBEndpointFlag = cli.StringFlag{
+		Name:  "metrics.influxdb.endpoint",
+		Usage: "InfluxDB API endpoint to report metrics to",
+		Value: "http://localhost:8086",
+	}
+	MetricsInfluxDBDatabaseFlag = cli.StringFlag{
+		Name:  "metrics.influxdb.database",
+		Usage: "InfluxDB database name to push reported metrics to",
+		Value: "geth",
+	}
+	MetricsInfluxDBUsernameFlag = cli.StringFlag{
+		Name:  "metrics.influxdb.username",
+		Usage: "Username to authorize access to the database",
+		Value: "test",
+	}
+	MetricsInfluxDBPasswordFlag = cli.StringFlag{
+		Name:  "metrics.influxdb.password",
+		Usage: "Password to authorize access to the database",
+		Value: "test",
+	}
+	// The `host` tag is part of every measurement sent to InfluxDB. Queries on tags are faster in InfluxDB.
+	// It is used so that we can group all nodes and average a measurement across all of them, but also so
+	// that we can select a specific node and inspect its measurements.
+	// https://docs.influxdata.com/influxdb/v1.4/concepts/key_concepts/#tag-key
+	MetricsInfluxDBHostTagFlag = cli.StringFlag{
+		Name:  "metrics.influxdb.host.tag",
+		Usage: "InfluxDB `host` tag attached to all measurements",
+		Value: "localhost",
+	}
)
// MakeDataDir retrieves the currently requested data directory, terminating
@@ -1084,6 +1121,9 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *eth.Config) {
		}
		cfg.Genesis = core.DefaultRinkebyGenesisBlock()
	case ctx.GlobalBool(DeveloperFlag.Name):
+		if !ctx.GlobalIsSet(NetworkIdFlag.Name) {
+			cfg.NetworkId = 1337
+		}
		// Create new developer account or reuse existing one
		var (
			developer accounts.Account
@@ -1181,6 +1221,27 @@ func SetupNetwork(ctx *cli.Context) {
	params.TargetGasLimit = ctx.GlobalUint64(TargetGasLimitFlag.Name)
}

+func SetupMetrics(ctx *cli.Context) {
+	if metrics.Enabled {
+		log.Info("Enabling metrics collection")
+		var (
+			enableExport = ctx.GlobalBool(MetricsEnableInfluxDBFlag.Name)
+			endpoint     = ctx.GlobalString(MetricsInfluxDBEndpointFlag.Name)
+			database     = ctx.GlobalString(MetricsInfluxDBDatabaseFlag.Name)
+			username     = ctx.GlobalString(MetricsInfluxDBUsernameFlag.Name)
+			password     = ctx.GlobalString(MetricsInfluxDBPasswordFlag.Name)
+			hosttag      = ctx.GlobalString(MetricsInfluxDBHostTagFlag.Name)
+		)
+
+		if enableExport {
+			log.Info("Enabling metrics export to InfluxDB")
+			go influxdb.InfluxDBWithTags(metrics.DefaultRegistry, 10*time.Second, endpoint, database, username, password, "geth.", map[string]string{
+				"host": hosttag,
+			})
+		}
+	}
+}
+
// MakeChainDatabase opens a LevelDB using the flags passed to the client and will hard crash if it fails.
func MakeChainDatabase(ctx *cli.Context, stack *node.Node) ethdb.Database {
	var (
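Wired together, the new flags feed a background InfluxDB reporter. A fragment equivalent to what SetupMetrics starts when export is enabled, with the default flag values from the hunk above substituted in:

	// Push the default registry to InfluxDB every 10s, tagging each
	// measurement with this node's host tag (defaults shown).
	go influxdb.InfluxDBWithTags(metrics.DefaultRegistry, 10*time.Second,
		"http://localhost:8086", "geth", "test", "test", "geth.",
		map[string]string{"host": "localhost"})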
@@ -140,8 +140,8 @@ func processArgs() {
	}

	if *asymmetricMode && len(*argPub) > 0 {
-		pub = crypto.ToECDSAPub(common.FromHex(*argPub))
-		if !isKeyValid(pub) {
+		var err error
+		if pub, err = crypto.UnmarshalPubkey(common.FromHex(*argPub)); err != nil {
			utils.Fatalf("invalid public key")
		}
	}
@@ -271,7 +271,9 @@ func initialize() {

	if *mailServerMode {
		shh.RegisterServer(&mailServer)
-		mailServer.Init(shh, *argDBPath, msPassword, *argServerPoW)
+		if err := mailServer.Init(shh, *argDBPath, msPassword, *argServerPoW); err != nil {
+			utils.Fatalf("Failed to init MailServer: %s", err)
+		}
	}

	server = &p2p.Server{
@@ -319,10 +321,6 @@ func startServer() error {
	return nil
}

-func isKeyValid(k *ecdsa.PublicKey) bool {
-	return k.X != nil && k.Y != nil
-}
-
func configureNode() {
	var err error
	var p2pAccept bool
@@ -338,9 +336,8 @@ func configureNode() {
		if b == nil {
			utils.Fatalf("Error: can not convert hexadecimal string")
		}
-		pub = crypto.ToECDSAPub(b)
-		if !isKeyValid(pub) {
-			utils.Fatalf("Error: invalid public key")
+		if pub, err = crypto.UnmarshalPubkey(b); err != nil {
+			utils.Fatalf("Error: invalid peer public key")
		}
	}
}
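A self-contained sketch of the replacement API: UnmarshalPubkey validates the encoding and returns an error, so the manual X/Y nil check is no longer needed.

	package main

	import (
		"fmt"

		"github.com/ethereum/go-ethereum/crypto"
	)

	func main() {
		// Generate a key just to have a valid uncompressed pubkey encoding.
		key, _ := crypto.GenerateKey()
		buf := crypto.FromECDSAPub(&key.PublicKey)

		// UnmarshalPubkey replaces the old ToECDSAPub + isKeyValid pair.
		pub, err := crypto.UnmarshalPubkey(buf)
		if err != nil {
			panic(err)
		}
		fmt.Println("round-trip ok:", pub.X.Cmp(key.PublicKey.X) == 0)
	}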
@@ -19,15 +19,20 @@ package common

import "encoding/hex"

+// ToHex returns the hex representation of b, prefixed with '0x'.
+// For empty slices, the return value is "0x0".
+//
+// Deprecated: use hexutil.Encode instead.
func ToHex(b []byte) string {
	hex := Bytes2Hex(b)
	// Prefer output of "0x0" instead of "0x"
	if len(hex) == 0 {
		hex = "0"
	}
	return "0x" + hex
}

+// FromHex returns the bytes represented by the hexadecimal string s.
+// s may be prefixed with "0x".
func FromHex(s string) []byte {
	if len(s) > 1 {
		if s[0:2] == "0x" || s[0:2] == "0X" {
@@ -40,9 +45,7 @@ func FromHex(s string) []byte {
	return Hex2Bytes(s)
}

-// Copy bytes
-//
-// Returns an exact copy of the provided bytes
+// CopyBytes returns an exact copy of the provided bytes.
func CopyBytes(b []byte) (copiedBytes []byte) {
	if b == nil {
		return nil
@@ -53,14 +56,17 @@ func CopyBytes(b []byte) (copiedBytes []byte) {
	return
}

+// hasHexPrefix validates str begins with '0x' or '0X'.
func hasHexPrefix(str string) bool {
	return len(str) >= 2 && str[0] == '0' && (str[1] == 'x' || str[1] == 'X')
}

+// isHexCharacter returns bool of c being a valid hexadecimal.
func isHexCharacter(c byte) bool {
	return ('0' <= c && c <= '9') || ('a' <= c && c <= 'f') || ('A' <= c && c <= 'F')
}

+// isHex validates whether each byte is valid hexadecimal string.
func isHex(str string) bool {
	if len(str)%2 != 0 {
		return false
@@ -73,31 +79,32 @@ func isHex(str string) bool {
	return true
}

+// Bytes2Hex returns the hexadecimal encoding of d.
func Bytes2Hex(d []byte) string {
	return hex.EncodeToString(d)
}

+// Hex2Bytes returns the bytes represented by the hexadecimal string str.
func Hex2Bytes(str string) []byte {
	h, _ := hex.DecodeString(str)
	return h
}

+// Hex2BytesFixed returns bytes of a specified fixed length flen.
func Hex2BytesFixed(str string, flen int) []byte {
	h, _ := hex.DecodeString(str)
	if len(h) == flen {
		return h
-	} else {
-		if len(h) > flen {
-			return h[len(h)-flen:]
-		} else {
-			hh := make([]byte, flen)
-			copy(hh[flen-len(h):flen], h[:])
-			return hh
-		}
	}
+	if len(h) > flen {
+		return h[len(h)-flen:]
+	}
+	hh := make([]byte, flen)
+	copy(hh[flen-len(h):flen], h[:])
+	return hh
}

+// RightPadBytes zero-pads slice to the right up to length l.
func RightPadBytes(slice []byte, l int) []byte {
	if l <= len(slice) {
		return slice
@@ -109,6 +116,7 @@ func RightPadBytes(slice []byte, l int) []byte {
	return padded
}

+// LeftPadBytes zero-pads slice to the left up to length l.
func LeftPadBytes(slice []byte, l int) []byte {
	if l <= len(slice) {
		return slice
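The newly documented helpers in action — a small self-contained example of the edge cases the comments call out:

	package main

	import (
		"fmt"

		"github.com/ethereum/go-ethereum/common"
	)

	func main() {
		fmt.Println(common.ToHex(nil))             // "0x0" – empty slice is special-cased
		fmt.Printf("%x\n", common.FromHex("0X2a")) // 2a – both 0x and 0X prefixes accepted
		// Hex2BytesFixed pads short input on the left and crops long input from the left.
		fmt.Printf("%x\n", common.Hex2BytesFixed("0102", 4))       // 00000102
		fmt.Printf("%x\n", common.Hex2BytesFixed("0102030405", 4)) // 02030405
	}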
@@ -31,11 +31,17 @@ import (

var versionRegexp = regexp.MustCompile(`([0-9]+)\.([0-9]+)\.([0-9]+)`)

+// Contract contains information about a compiled contract, alongside its code.
type Contract struct {
	Code string       `json:"code"`
	Info ContractInfo `json:"info"`
}

+// ContractInfo contains information about a compiled contract, including access
+// to the ABI definition, user and developer docs, and metadata.
+//
+// Depending on the source, language version, compiler version, and compiler
+// options will provide information about how the contract was compiled.
type ContractInfo struct {
	Source   string `json:"source"`
	Language string `json:"language"`
@@ -142,8 +148,22 @@ func (s *Solidity) run(cmd *exec.Cmd, source string) (map[string]*Contract, error) {
	if err := cmd.Run(); err != nil {
		return nil, fmt.Errorf("solc: %v\n%s", err, stderr.Bytes())
	}

+	return ParseCombinedJSON(stdout.Bytes(), source, s.Version, s.Version, strings.Join(s.makeArgs(), " "))
+}
+
+// ParseCombinedJSON takes the direct output of a solc --combined-output run and
+// parses it into a map of string contract name to Contract structs. The
+// provided source, language and compiler version, and compiler options are all
+// passed through into the Contract structs.
+//
+// The solc output is expected to contain ABI, user docs, and dev docs.
+//
+// Returns an error if the JSON is malformed or missing data, or if the JSON
+// embedded within the JSON is malformed.
+func ParseCombinedJSON(combinedJSON []byte, source string, languageVersion string, compilerVersion string, compilerOptions string) (map[string]*Contract, error) {
	var output solcOutput
-	if err := json.Unmarshal(stdout.Bytes(), &output); err != nil {
+	if err := json.Unmarshal(combinedJSON, &output); err != nil {
		return nil, err
	}

@@ -168,9 +188,9 @@ func (s *Solidity) run(cmd *exec.Cmd, source string) (map[string]*Contract, error) {
			Info: ContractInfo{
				Source:          source,
				Language:        "Solidity",
-				LanguageVersion: s.Version,
-				CompilerVersion: s.Version,
-				CompilerOptions: strings.Join(s.makeArgs(), " "),
+				LanguageVersion: languageVersion,
+				CompilerVersion: compilerVersion,
+				CompilerOptions: compilerOptions,
				AbiDefinition:   abi,
				UserDoc:         userdoc,
				DeveloperDoc:    devdoc,
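With the parsing step exported, callers that already hold solc output can skip re-running the compiler. A sketch — the solc invocation, flag list, and version strings here are assumptions for illustration only:

	// Assumed: a local solc producing combined-json output.
	out, err := exec.Command("solc", "--combined-json", "bin,abi,userdoc,devdoc", "contract.sol").Output()
	if err != nil {
		log.Fatal(err)
	}
	contracts, err := compiler.ParseCombinedJSON(out, "contract.sol",
		"0.4.24", "0.4.24", "--combined-json bin,abi,userdoc,devdoc")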
@@ -39,6 +39,7 @@ import (

const uintBits = 32 << (uint64(^uint(0)) >> 63)

+// Errors
var (
	ErrEmptyString = &decError{"empty hex string"}
	ErrSyntax      = &decError{"invalid hex string"}
@@ -22,12 +22,13 @@ import (
	"math/big"
)

+// Various big integer limit values.
var (
	tt255     = BigPow(2, 255)
	tt256     = BigPow(2, 256)
	tt256m1   = new(big.Int).Sub(tt256, big.NewInt(1))
-	MaxBig256 = new(big.Int).Set(tt256m1)
	tt63      = BigPow(2, 63)
+	MaxBig256 = new(big.Int).Set(tt256m1)
	MaxBig63  = new(big.Int).Sub(tt63, big.NewInt(1))
)

@@ -78,7 +79,7 @@ func ParseBig256(s string) (*big.Int, bool) {
	return bigint, ok
}

-// MustParseBig parses s as a 256 bit big integer and panics if the string is invalid.
+// MustParseBig256 parses s as a 256 bit big integer and panics if the string is invalid.
func MustParseBig256(s string) *big.Int {
	v, ok := ParseBig256(s)
	if !ok {
@@ -186,9 +187,8 @@ func U256(x *big.Int) *big.Int {
func S256(x *big.Int) *big.Int {
	if x.Cmp(tt255) < 0 {
		return x
-	} else {
-		return new(big.Int).Sub(x, tt256)
	}
+	return new(big.Int).Sub(x, tt256)
}

// Exp implements exponentiation by squaring.
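S256 reinterprets an unsigned 256-bit value as two's-complement signed, which a self-contained example makes concrete:

	package main

	import (
		"fmt"
		"math/big"

		"github.com/ethereum/go-ethereum/common/math"
	)

	func main() {
		// 2**256 - 1 read as a signed 256-bit value is -1.
		fmt.Println(math.S256(new(big.Int).Set(math.MaxBig256))) // -1
		// Values below 2**255 are returned unchanged.
		fmt.Println(math.S256(big.NewInt(7))) // 7
	}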
@@ -21,8 +21,8 @@ import (
	"strconv"
)

+// Integer limit values.
const (
-	// Integer limit values.
	MaxInt8  = 1<<7 - 1
	MinInt8  = -1 << 7
	MaxInt16 = 1<<15 - 1
@@ -14,7 +14,7 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

-// package mclock is a wrapper for a monotonic clock source
+// Package mclock is a wrapper for a monotonic clock source
package mclock

import (
@@ -23,8 +23,10 @@ import (
	"github.com/aristanetworks/goarista/monotime"
)

-type AbsTime time.Duration // absolute monotonic time
+// AbsTime represents absolute monotonic time.
+type AbsTime time.Duration

+// Now returns the current absolute monotonic time.
func Now() AbsTime {
	return AbsTime(monotime.Now())
}
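Since AbsTime is a defined type over time.Duration, differences between readings convert directly. A minimal usage sketch:

	package main

	import (
		"fmt"
		"time"

		"github.com/ethereum/go-ethereum/common/mclock"
	)

	func main() {
		start := mclock.Now()
		time.Sleep(10 * time.Millisecond)
		// Monotonic readings are immune to wall-clock adjustments.
		elapsed := time.Duration(mclock.Now() - start)
		fmt.Println("elapsed:", elapsed)
	}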
@@ -1,197 +0,0 @@ (file deleted)
// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package number

import (
	"math/big"

	"github.com/ethereum/go-ethereum/common"
)

var tt256 = new(big.Int).Lsh(big.NewInt(1), 256)
var tt256m1 = new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 256), big.NewInt(1))
var tt255 = new(big.Int).Lsh(big.NewInt(1), 255)

func limitUnsigned256(x *Number) *Number {
	x.num.And(x.num, tt256m1)
	return x
}

func limitSigned256(x *Number) *Number {
	if x.num.Cmp(tt255) < 0 {
		return x
	} else {
		x.num.Sub(x.num, tt256)
		return x
	}
}

// Number function
type Initialiser func(n int64) *Number

// A Number represents a generic integer with a bounding function limiter. Limit is called after each operations
// to give "fake" bounded integers. New types of Number can be created through NewInitialiser returning a lambda
// with the new Initialiser.
type Number struct {
	num   *big.Int
	limit func(n *Number) *Number
}

// Returns a new initialiser for a new *Number without having to expose certain fields
func NewInitialiser(limiter func(*Number) *Number) Initialiser {
	return func(n int64) *Number {
		return &Number{big.NewInt(n), limiter}
	}
}

// Return a Number with a UNSIGNED limiter up to 256 bits
func Uint256(n int64) *Number {
	return &Number{big.NewInt(n), limitUnsigned256}
}

// Return a Number with a SIGNED limiter up to 256 bits
func Int256(n int64) *Number {
	return &Number{big.NewInt(n), limitSigned256}
}

// Returns a Number with a SIGNED unlimited size
func Big(n int64) *Number {
	return &Number{big.NewInt(n), func(x *Number) *Number { return x }}
}

// Sets i to sum of x+y
func (i *Number) Add(x, y *Number) *Number {
	i.num.Add(x.num, y.num)
	return i.limit(i)
}

// Sets i to difference of x-y
func (i *Number) Sub(x, y *Number) *Number {
	i.num.Sub(x.num, y.num)
	return i.limit(i)
}

// Sets i to product of x*y
func (i *Number) Mul(x, y *Number) *Number {
	i.num.Mul(x.num, y.num)
	return i.limit(i)
}

// Sets i to the quotient prodject of x/y
func (i *Number) Div(x, y *Number) *Number {
	i.num.Div(x.num, y.num)
	return i.limit(i)
}

// Sets i to x % y
func (i *Number) Mod(x, y *Number) *Number {
	i.num.Mod(x.num, y.num)
	return i.limit(i)
}

// Sets i to x << s
func (i *Number) Lsh(x *Number, s uint) *Number {
	i.num.Lsh(x.num, s)
	return i.limit(i)
}

// Sets i to x^y
func (i *Number) Pow(x, y *Number) *Number {
	i.num.Exp(x.num, y.num, big.NewInt(0))
	return i.limit(i)
}

// Setters

// Set x to i
func (i *Number) Set(x *Number) *Number {
	i.num.Set(x.num)
	return i.limit(i)
}

// Set x bytes to i
func (i *Number) SetBytes(x []byte) *Number {
	i.num.SetBytes(x)
	return i.limit(i)
}

// Cmp compares x and y and returns:
//
//	-1 if x < y
//	 0 if x == y
//	+1 if x > y
func (i *Number) Cmp(x *Number) int {
	return i.num.Cmp(x.num)
}

// Getters

// Returns the string representation of i
func (i *Number) String() string {
	return i.num.String()
}

// Returns the byte representation of i
func (i *Number) Bytes() []byte {
	return i.num.Bytes()
}

// Uint64 returns the Uint64 representation of x. If x cannot be represented in an int64, the result is undefined.
func (i *Number) Uint64() uint64 {
	return i.num.Uint64()
}

// Int64 returns the int64 representation of x. If x cannot be represented in an int64, the result is undefined.
func (i *Number) Int64() int64 {
	return i.num.Int64()
}

// Returns the signed version of i
func (i *Number) Int256() *Number {
	return Int(0).Set(i)
}

// Returns the unsigned version of i
func (i *Number) Uint256() *Number {
	return Uint(0).Set(i)
}

// Returns the index of the first bit that's set to 1
func (i *Number) FirstBitSet() int {
	for j := 0; j < i.num.BitLen(); j++ {
		if i.num.Bit(j) > 0 {
			return j
		}
	}

	return i.num.BitLen()
}

// Variables

var (
	Zero       = Uint(0)
	One        = Uint(1)
	Two        = Uint(2)
	MaxUint256 = Uint(0).SetBytes(common.Hex2Bytes("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"))

	MinOne = Int(-1)

	// "typedefs"
	Uint = Uint256
	Int  = Int256
)
@@ -1,108 +0,0 @@ (file deleted)
// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package number

import (
	"math/big"
	"testing"

	"github.com/ethereum/go-ethereum/common"
)

func TestSet(t *testing.T) {
	a := Uint(0)
	b := Uint(10)
	a.Set(b)
	if a.num.Cmp(b.num) != 0 {
		t.Error("didn't compare", a, b)
	}

	c := Uint(0).SetBytes(common.Hex2Bytes("0a"))
	if c.num.Cmp(big.NewInt(10)) != 0 {
		t.Error("c set bytes failed.")
	}
}

func TestInitialiser(t *testing.T) {
	check := false
	init := NewInitialiser(func(x *Number) *Number {
		check = true
		return x
	})
	a := init(0).Add(init(1), init(2))
	if a.Cmp(init(3)) != 0 {
		t.Error("expected 3. got", a)
	}
	if !check {
		t.Error("expected limiter to be called")
	}
}

func TestGet(t *testing.T) {
	a := Uint(10)
	if a.Uint64() != 10 {
		t.Error("expected to get 10. got", a.Uint64())
	}

	a = Uint(10)
	if a.Int64() != 10 {
		t.Error("expected to get 10. got", a.Int64())
	}
}

func TestCmp(t *testing.T) {
	a := Uint(10)
	b := Uint(10)
	c := Uint(11)

	if a.Cmp(b) != 0 {
		t.Error("a b == 0 failed", a, b)
	}

	if a.Cmp(c) >= 0 {
		t.Error("a c < 0 failed", a, c)
	}

	if c.Cmp(b) <= 0 {
		t.Error("c b > 0 failed", c, b)
	}
}

func TestMaxArith(t *testing.T) {
	a := Uint(0).Add(MaxUint256, One)
	if a.Cmp(Zero) != 0 {
		t.Error("expected max256 + 1 = 0 got", a)
	}

	a = Uint(0).Sub(Uint(0), One)
	if a.Cmp(MaxUint256) != 0 {
		t.Error("expected 0 - 1 = max256 got", a)
	}

	a = Int(0).Sub(Int(0), One)
	if a.Cmp(MinOne) != 0 {
		t.Error("expected 0 - 1 = -1 got", a)
	}
}

func TestConversion(t *testing.T) {
	a := Int(-1)
	b := a.Uint256()
	if b.Cmp(MaxUint256) != 0 {
		t.Error("expected -1 => unsigned to return max. got", b)
	}
}
@@ -30,6 +30,7 @@ func MakeName(name, version string) string {
	return fmt.Sprintf("%s/v%s/%s/%s", name, version, runtime.GOOS, runtime.Version())
}

+// FileExist checks if a file exists at filePath.
func FileExist(filePath string) bool {
	_, err := os.Stat(filePath)
	if err != nil && os.IsNotExist(err) {
@@ -39,9 +40,10 @@ func FileExist(filePath string) bool {
	return true
}

-func AbsolutePath(Datadir string, filename string) string {
+// AbsolutePath returns datadir + filename, or filename if it is absolute.
+func AbsolutePath(datadir string, filename string) string {
	if filepath.IsAbs(filename) {
		return filename
	}
-	return filepath.Join(Datadir, filename)
+	return filepath.Join(datadir, filename)
}
@@ -29,6 +29,7 @@ import (
|
||||
"github.com/ethereum/go-ethereum/crypto/sha3"
|
||||
)
|
||||
|
||||
// Lengths of hashes and addresses in bytes.
|
||||
const (
|
||||
HashLength = 32
|
||||
AddressLength = 20
|
||||
@@ -42,19 +43,30 @@ var (
|
||||
// Hash represents the 32 byte Keccak256 hash of arbitrary data.
|
||||
type Hash [HashLength]byte
|
||||
|
||||
// BytesToHash sets b to hash.
|
||||
// If b is larger than len(h), b will be cropped from the left.
|
||||
func BytesToHash(b []byte) Hash {
|
||||
var h Hash
|
||||
h.SetBytes(b)
|
||||
return h
|
||||
}
|
||||
func BigToHash(b *big.Int) Hash { return BytesToHash(b.Bytes()) }
|
||||
func HexToHash(s string) Hash { return BytesToHash(FromHex(s)) }
|
||||
|
||||
// Get the string representation of the underlying hash
|
||||
func (h Hash) Str() string { return string(h[:]) }
|
||||
// BigToHash sets byte representation of b to hash.
|
||||
// If b is larger than len(h), b will be cropped from the left.
|
||||
func BigToHash(b *big.Int) Hash { return BytesToHash(b.Bytes()) }

+// HexToHash sets byte representation of s to hash.
+// If b is larger than len(h), b will be cropped from the left.
 func HexToHash(s string) Hash { return BytesToHash(FromHex(s)) }

+// Bytes gets the byte representation of the underlying hash.
 func (h Hash) Bytes() []byte { return h[:] }

+// Big converts a hash to a big integer.
 func (h Hash) Big() *big.Int { return new(big.Int).SetBytes(h[:]) }
-func (h Hash) Hex() string { return hexutil.Encode(h[:]) }

+// Hex converts a hash to a hex string.
+func (h Hash) Hex() string { return hexutil.Encode(h[:]) }

 // TerminalString implements log.TerminalStringer, formatting a string for console
 // output during logging.
@@ -89,7 +101,8 @@ func (h Hash) MarshalText() ([]byte, error) {
 	return hexutil.Bytes(h[:]).MarshalText()
 }

-// Sets the hash to the value of b. If b is larger than len(h), 'b' will be cropped (from the left).
+// SetBytes sets the hash to the value of b.
+// If b is larger than len(h), b will be cropped from the left.
 func (h *Hash) SetBytes(b []byte) {
 	if len(b) > len(h) {
 		b = b[len(b)-HashLength:]
@@ -98,16 +111,6 @@ func (h *Hash) SetBytes(b []byte) {
 	copy(h[HashLength-len(b):], b)
 }

-// Set string `s` to h. If s is larger than len(h) s will be cropped (from left) to fit.
-func (h *Hash) SetString(s string) { h.SetBytes([]byte(s)) }
-
-// Sets h to other
-func (h *Hash) Set(other Hash) {
-	for i, v := range other {
-		h[i] = v
-	}
-}
-
 // Generate implements testing/quick.Generator.
 func (h Hash) Generate(rand *rand.Rand, size int) reflect.Value {
 	m := rand.Intn(len(h))
@@ -117,10 +120,6 @@ func (h Hash) Generate(rand *rand.Rand, size int) reflect.Value {
 	return reflect.ValueOf(h)
 }

-func EmptyHash(h Hash) bool {
-	return h == Hash{}
-}
-
 // UnprefixedHash allows marshaling a Hash without 0x prefix.
 type UnprefixedHash Hash

@@ -139,13 +138,21 @@ func (h UnprefixedHash) MarshalText() ([]byte, error) {
 // Address represents the 20 byte address of an Ethereum account.
 type Address [AddressLength]byte

+// BytesToAddress returns Address with value b.
+// If b is larger than len(h), b will be cropped from the left.
 func BytesToAddress(b []byte) Address {
 	var a Address
 	a.SetBytes(b)
 	return a
 }

+// BigToAddress returns Address with byte values of b.
+// If b is larger than len(h), b will be cropped from the left.
 func BigToAddress(b *big.Int) Address { return BytesToAddress(b.Bytes()) }
-func HexToAddress(s string) Address { return BytesToAddress(FromHex(s)) }

+// HexToAddress returns Address with byte values of s.
+// If s is larger than len(h), s will be cropped from the left.
+func HexToAddress(s string) Address { return BytesToAddress(FromHex(s)) }

 // IsHexAddress verifies whether a string can represent a valid hex-encoded
 // Ethereum address or not.
@@ -156,11 +163,14 @@ func IsHexAddress(s string) bool {
 	return len(s) == 2*AddressLength && isHex(s)
 }

-// Get the string representation of the underlying address
-func (a Address) Str() string { return string(a[:]) }
+// Bytes gets the byte representation of the underlying address.
 func (a Address) Bytes() []byte { return a[:] }

+// Big converts an address to a big integer.
 func (a Address) Big() *big.Int { return new(big.Int).SetBytes(a[:]) }
-func (a Address) Hash() Hash { return BytesToHash(a[:]) }

+// Hash converts an address to a hash by left-padding it with zeros.
+func (a Address) Hash() Hash { return BytesToHash(a[:]) }

 // Hex returns an EIP55-compliant hex string representation of the address.
 func (a Address) Hex() string {
@@ -184,7 +194,7 @@ func (a Address) Hex() string {
 	return "0x" + string(result)
 }

-// String implements the stringer interface and is used also by the logger.
+// String implements fmt.Stringer.
 func (a Address) String() string {
 	return a.Hex()
 }
@@ -195,7 +205,8 @@ func (a Address) Format(s fmt.State, c rune) {
 	fmt.Fprintf(s, "%"+string(c), a[:])
 }

-// Sets the address to the value of b. If b is larger than len(a) it will panic
+// SetBytes sets the address to the value of b.
+// If b is larger than len(a) it will panic.
 func (a *Address) SetBytes(b []byte) {
 	if len(b) > len(a) {
 		b = b[len(b)-AddressLength:]
@@ -203,16 +214,6 @@ func (a *Address) SetBytes(b []byte) {
 	copy(a[AddressLength-len(b):], b)
 }

-// Set string `s` to a. If s is larger than len(a) it will panic
-func (a *Address) SetString(s string) { a.SetBytes([]byte(s)) }
-
-// Sets a to other
-func (a *Address) Set(other Address) {
-	for i, v := range other {
-		a[i] = v
-	}
-}
-
 // MarshalText returns the hex representation of a.
 func (a Address) MarshalText() ([]byte, error) {
 	return hexutil.Bytes(a[:]).MarshalText()
@@ -228,7 +229,7 @@ func (a *Address) UnmarshalJSON(input []byte) error {
 	return hexutil.UnmarshalFixedJSON(addressT, input, a[:])
 }

-// UnprefixedHash allows marshaling an Address without 0x prefix.
+// UnprefixedAddress allows marshaling an Address without 0x prefix.
 type UnprefixedAddress Address

// UnmarshalText decodes the address from hex. The 0x prefix is optional.
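
The cropping semantics documented above are easy to get wrong, so a quick illustration may help. This is a minimal standalone sketch, assuming only the `common` package from this tree:

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
)

func main() {
	// 33 bytes into a 32-byte Hash: the left-most byte is cropped.
	in := make([]byte, 33)
	in[0] = 0xaa // dropped by SetBytes
	in[32] = 0xbb
	h := common.BytesToHash(in)
	fmt.Println(h.Hex()) // ...00bb at the right end, no 0xaa anywhere

	// Short input is left-padded with zeros instead.
	a := common.BytesToAddress([]byte{0x01})
	fmt.Println(a.Hex()) // 0x0000000000000000000000000000000000000001
}
```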

@@ -1,64 +0,0 @@
-// Copyright 2015 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-// +build none
-//sed -e 's/_N_/Hash/g' -e 's/_S_/32/g' -e '1d' types_template.go | gofmt -w hash.go
-
-package common
-
-import "math/big"
-
-type _N_ [_S_]byte
-
-func BytesTo_N_(b []byte) _N_ {
-	var h _N_
-	h.SetBytes(b)
-	return h
-}
-func StringTo_N_(s string) _N_ { return BytesTo_N_([]byte(s)) }
-func BigTo_N_(b *big.Int) _N_  { return BytesTo_N_(b.Bytes()) }
-func HexTo_N_(s string) _N_    { return BytesTo_N_(FromHex(s)) }
-
-// Don't use the default 'String' method in case we want to overwrite
-
-// Get the string representation of the underlying hash
-func (h _N_) Str() string   { return string(h[:]) }
-func (h _N_) Bytes() []byte { return h[:] }
-func (h _N_) Big() *big.Int { return new(big.Int).SetBytes(h[:]) }
-func (h _N_) Hex() string   { return "0x" + Bytes2Hex(h[:]) }
-
-// Sets the hash to the value of b. If b is larger than len(h) it will panic
-func (h *_N_) SetBytes(b []byte) {
-	// Use the right most bytes
-	if len(b) > len(h) {
-		b = b[len(b)-_S_:]
-	}
-
-	// Reverse the loop
-	for i := len(b) - 1; i >= 0; i-- {
-		h[_S_-len(b)+i] = b[i]
-	}
-}
-
-// Set string `s` to h. If s is larger than len(h) it will panic
-func (h *_N_) SetString(s string) { h.SetBytes([]byte(s)) }
-
-// Sets h to other
-func (h *_N_) Set(other _N_) {
-	for i, v := range other {
-		h[i] = v
-	}
-}
@@ -383,7 +383,7 @@ func (c *Clique) snapshot(chain consensus.ChainReader, number uint64, hash commo
 		// If an on-disk checkpoint snapshot can be found, use that
 		if number%checkpointInterval == 0 {
 			if s, err := loadSnapshot(c.config, c.signatures, c.db, hash); err == nil {
-				log.Trace("Loaded voting snapshot form disk", "number", number, "hash", hash)
+				log.Trace("Loaded voting snapshot from disk", "number", number, "hash", hash)
 				snap = s
 				break
 			}

@@ -24,6 +24,7 @@ import (

 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/core"
+	"github.com/ethereum/go-ethereum/core/rawdb"
 	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/crypto"
 	"github.com/ethereum/go-ethereum/ethdb"
@@ -81,7 +82,7 @@ func (r *testerChainReader) GetBlock(common.Hash, uint64) *types.Block { panic
 func (r *testerChainReader) GetHeaderByHash(common.Hash) *types.Header { panic("not supported") }
 func (r *testerChainReader) GetHeaderByNumber(number uint64) *types.Header {
 	if number == 0 {
-		return core.GetHeader(r.db, core.GetCanonicalHash(r.db, 0), 0)
+		return rawdb.ReadHeader(r.db, rawdb.ReadCanonicalHash(r.db, 0), 0)
 	}
 	panic("not supported")
 }
@@ -351,7 +352,7 @@ func TestVoting(t *testing.T) {
 		copy(genesis.ExtraData[extraVanity+j*common.AddressLength:], signer[:])
 	}
 	// Create a pristine blockchain with the genesis injected
-	db, _ := ethdb.NewMemDatabase()
+	db := ethdb.NewMemDatabase()
 	genesis.Commit(db)

 	// Assemble a chain of headers from the cast votes
@@ -94,14 +94,25 @@ func calcDatasetSize(epoch int) uint64 {
 // reused between hash runs instead of requiring new ones to be created.
 type hasher func(dest []byte, data []byte)

-// makeHasher creates a repetitive hasher, allowing the same hash data structures
-// to be reused between hash runs instead of requiring new ones to be created.
-// The returned function is not thread safe!
+// makeHasher creates a repetitive hasher, allowing the same hash data structures to
+// be reused between hash runs instead of requiring new ones to be created. The returned
+// function is not thread safe!
 func makeHasher(h hash.Hash) hasher {
+	// sha3.state supports Read to get the sum, use it to avoid the overhead of Sum.
+	// Read alters the state but we reset the hash before every operation.
+	type readerHash interface {
+		hash.Hash
+		Read([]byte) (int, error)
+	}
+	rh, ok := h.(readerHash)
+	if !ok {
+		panic("can't find Read method on hash")
+	}
+	outputLen := rh.Size()
 	return func(dest []byte, data []byte) {
-		h.Write(data)
-		h.Sum(dest[:0])
-		h.Reset()
+		rh.Reset()
+		rh.Write(data)
+		rh.Read(dest[:outputLen])
	}
}
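
For context: `Sum` copies the sponge state and appends to a slice, while the sha3 digest's `Read` streams the output straight into `dest`, saving an allocation and a state copy per invocation. A rough standalone sketch of the same trick, written against `golang.org/x/crypto/sha3` for illustration (the assertion target and helper name are assumptions, not this PR's code):

```go
package main

import (
	"fmt"
	"hash"

	"golang.org/x/crypto/sha3"
)

// newHasher mirrors the makeHasher pattern above: keep one digest, reset it
// per call, and use Read instead of Sum to skip the internal state copy.
// The type assertion relies on the sha3 state exposing Read.
func newHasher() func(dest, data []byte) {
	h := sha3.NewLegacyKeccak256()
	rh := h.(interface {
		hash.Hash
		Read([]byte) (int, error)
	})
	n := rh.Size()
	return func(dest, data []byte) {
		rh.Reset()
		rh.Write(data)
		rh.Read(dest[:n])
	}
}

func main() {
	sum := make([]byte, 32)
	keccak := newHasher()
	keccak(sum, []byte("hello"))
	fmt.Printf("%x\n", sum)
}
```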

@@ -156,7 +156,7 @@ type lru struct {
 	futureItem interface{}
 }

-// newlru create a new least-recently-used cache for ither the verification caches
+// newlru create a new least-recently-used cache for either the verification caches
 // or the mining datasets.
 func newlru(what string, maxItems int, new func(epoch uint64) interface{}) *lru {
 	if maxItems <= 0 {
@@ -389,7 +389,7 @@ type Config struct {
 	PowMode Mode
 }

-// Ethash is a consensus engine based on proot-of-work implementing the ethash
+// Ethash is a consensus engine based on proof-of-work implementing the ethash
 // algorithm.
 type Ethash struct {
 	config Config

@@ -271,7 +271,7 @@ func (b *bridge) SleepBlocks(call otto.FunctionCall) (response otto.Value) {
 }

 type jsonrpcCall struct {
-	Id     int64
+	ID     int64
 	Method string
 	Params []interface{}
 }
@@ -304,7 +304,7 @@ func (b *bridge) Send(call otto.FunctionCall) (response otto.Value) {
 	resps, _ := call.Otto.Object("new Array()")
 	for _, req := range reqs {
 		resp, _ := call.Otto.Object(`({"jsonrpc":"2.0"})`)
-		resp.Set("id", req.Id)
+		resp.Set("id", req.ID)
 		var result json.RawMessage
 		err = b.client.Call(&result, req.Method, req.Params...)
switch err := err.(type) {
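
The `Id` → `ID` rename is pure Go naming style (initialisms are upper-cased) and stays wire-compatible: without a struct tag, `encoding/json` falls back to a case-insensitive field match, so a lower-case `"id"` key still lands in `ID`. A small sketch demonstrating that behaviour:

```go
package main

import (
	"encoding/json"
	"fmt"
)

type jsonrpcCall struct {
	ID     int64
	Method string
	Params []interface{}
}

func main() {
	// Lower-case "id" still populates the ID field via the
	// case-insensitive fallback match in encoding/json.
	var call jsonrpcCall
	json.Unmarshal([]byte(`{"id":7,"method":"eth_blockNumber","params":[]}`), &call)
	fmt.Println(call.ID, call.Method) // 7 eth_blockNumber
}
```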

@@ -60,7 +60,7 @@ type Config struct {
 	Preload  []string // Absolute paths to JavaScript files to preload
 }

-// Console is a JavaScript interpreted runtime environment. It is a fully fleged
+// Console is a JavaScript interpreted runtime environment. It is a fully fledged
 // JavaScript console attached to a running node via an external or in-process RPC
 // client.
 type Console struct {
@@ -73,6 +73,8 @@ type Console struct {
 	printer io.Writer // Output writer to serialize any display strings to
 }

+// New initializes a JavaScript interpreted runtime environment and sets defaults
+// with the config struct.
 func New(config Config) (*Console, error) {
 	// Handle unset config values gracefully
 	if config.Prompter == nil {

@@ -95,7 +95,7 @@ func ensParentNode(name string) (common.Hash, common.Hash) {
 	}
 }

-func ensNode(name string) common.Hash {
+func EnsNode(name string) common.Hash {
 	parentNode, parentLabel := ensParentNode(name)
 	return crypto.Keccak256Hash(parentNode[:], parentLabel[:])
 }
@@ -136,7 +136,7 @@ func (self *ENS) getRegistrar(node [32]byte) (*contract.FIFSRegistrarSession, er

 // Resolve is a non-transactional call that returns the content hash associated with a name.
 func (self *ENS) Resolve(name string) (common.Hash, error) {
-	node := ensNode(name)
+	node := EnsNode(name)

 	resolver, err := self.getResolver(node)
 	if err != nil {
@@ -165,7 +165,7 @@ func (self *ENS) Register(name string) (*types.Transaction, error) {
 // SetContentHash sets the content hash associated with a name. Only works if the caller
 // owns the name, and the associated resolver implements a `setContent` function.
 func (self *ENS) SetContentHash(name string, hash common.Hash) (*types.Transaction, error) {
-	node := ensNode(name)
+	node := EnsNode(name)

 	resolver, err := self.getResolver(node)
 	if err != nil {

@@ -55,7 +55,7 @@ func TestENS(t *testing.T) {
 	if err != nil {
 		t.Fatalf("can't deploy resolver: %v", err)
 	}
-	if _, err := ens.SetResolver(ensNode(name), resolverAddr); err != nil {
+	if _, err := ens.SetResolver(EnsNode(name), resolverAddr); err != nil {
 		t.Fatalf("can't set resolver: %v", err)
 	}
|

@@ -51,7 +51,7 @@ func NewCompiler(debug bool) *Compiler {
 // the compiler.
 //
 // feed is the first pass in the compile stage as it
-// collect the used labels in the program and keeps a
+// collects the used labels in the program and keeps a
 // program counter which is used to determine the locations
 // of the jump dests. The labels can then be used in the
 // second stage to push labels and determine the right
@@ -120,7 +120,7 @@ func (c *Compiler) next() token {
 	return token
 }

-// compile line compiles a single line instruction e.g.
+// compileLine compiles a single line instruction e.g.
 // "push 1", "jump @label".
 func (c *Compiler) compileLine() error {
 	n := c.next()

@@ -242,7 +242,7 @@ func lexLabel(l *lexer) stateFn {
 }

 // lexInsideString lexes the inside of a string until
-// until the state function finds the closing quote.
+// the state function finds the closing quote.
 // It returns the lex text state function.
 func lexInsideString(l *lexer) stateFn {
 	if l.acceptRunUntil('"') {
@@ -26,6 +26,7 @@ import (
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/common/math"
 	"github.com/ethereum/go-ethereum/consensus/ethash"
+	"github.com/ethereum/go-ethereum/core/rawdb"
 	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/core/vm"
 	"github.com/ethereum/go-ethereum/crypto"
@@ -148,7 +149,7 @@ func benchInsertChain(b *testing.B, disk bool, gen func(int, *BlockGen)) {
 	// Create the database in memory or in a temporary directory.
 	var db ethdb.Database
 	if !disk {
-		db, _ = ethdb.NewMemDatabase()
+		db = ethdb.NewMemDatabase()
 	} else {
 		dir, err := ioutil.TempDir("", "eth-core-bench")
 		if err != nil {
@@ -234,13 +235,15 @@ func makeChainForBench(db ethdb.Database, full bool, count uint64) {
 			ReceiptHash: types.EmptyRootHash,
 		}
 		hash = header.Hash()
-		WriteHeader(db, header)
-		WriteCanonicalHash(db, hash, n)
-		WriteTd(db, hash, n, big.NewInt(int64(n+1)))
+
+		rawdb.WriteHeader(db, header)
+		rawdb.WriteCanonicalHash(db, hash, n)
+		rawdb.WriteTd(db, hash, n, big.NewInt(int64(n+1)))
+
 		if full || n == 0 {
 			block := types.NewBlockWithHeader(header)
-			WriteBody(db, hash, n, block.Body())
-			WriteBlockReceipts(db, hash, n, nil)
+			rawdb.WriteBody(db, hash, n, block.Body())
+			rawdb.WriteReceipts(db, hash, n, nil)
 		}
 	}
}
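
The mechanical pattern running through this PR: the free `Write*`/`Get*` accessors on the `core` package move into `core/rawdb`, with readers renamed `Read*` and error returns dropped (the writers crit-log internally). A hedged sketch of the round-trip as these benchmarks now exercise it; the accessor names come from the diff, the surrounding `main` is illustrative only:

```go
package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/ethdb"
)

func main() {
	db := ethdb.NewMemDatabase() // no error return anymore on this branch

	header := &types.Header{Number: big.NewInt(1), Difficulty: big.NewInt(2)}
	hash := header.Hash()

	// Writers no longer hand back errors; they log.Crit on failure.
	rawdb.WriteHeader(db, header)
	rawdb.WriteCanonicalHash(db, hash, 1)

	// Readers mirror the writers: Read* instead of Get*.
	if h := rawdb.ReadHeader(db, hash, 1); h != nil {
		fmt.Println("round-tripped header", h.Number)
	}
	fmt.Println(rawdb.ReadCanonicalHash(db, 1) == hash) // true
}
```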

@@ -292,11 +295,10 @@ func benchReadChain(b *testing.B, full bool, count uint64) {
 		header := chain.GetHeaderByNumber(n)
 		if full {
 			hash := header.Hash()
-			GetBody(db, hash, n)
-			GetBlockReceipts(db, hash, n)
+			rawdb.ReadBody(db, hash, n)
+			rawdb.ReadReceipts(db, hash, n)
 		}
 	}

 	chain.Stop()
 	db.Close()
 }

@@ -32,7 +32,7 @@ import (
 func TestHeaderVerification(t *testing.T) {
 	// Create a simple chain to verify
 	var (
-		testdb, _ = ethdb.NewMemDatabase()
+		testdb    = ethdb.NewMemDatabase()
 		gspec     = &Genesis{Config: params.TestChainConfig}
 		genesis   = gspec.MustCommit(testdb)
 		blocks, _ = GenerateChain(params.TestChainConfig, genesis, ethash.NewFaker(), testdb, 8, nil)
@@ -84,7 +84,7 @@ func TestHeaderConcurrentVerification32(t *testing.T) { testHeaderConcurrentVeri
 func testHeaderConcurrentVerification(t *testing.T, threads int) {
 	// Create a simple chain to verify
 	var (
-		testdb, _ = ethdb.NewMemDatabase()
+		testdb    = ethdb.NewMemDatabase()
 		gspec     = &Genesis{Config: params.TestChainConfig}
 		genesis   = gspec.MustCommit(testdb)
 		blocks, _ = GenerateChain(params.TestChainConfig, genesis, ethash.NewFaker(), testdb, 8, nil)
@@ -156,7 +156,7 @@ func TestHeaderConcurrentAbortion32(t *testing.T) { testHeaderConcurrentAbortion
 func testHeaderConcurrentAbortion(t *testing.T, threads int) {
 	// Create a simple chain to verify
 	var (
-		testdb, _ = ethdb.NewMemDatabase()
+		testdb    = ethdb.NewMemDatabase()
 		gspec     = &Genesis{Config: params.TestChainConfig}
 		genesis   = gspec.MustCommit(testdb)
 		blocks, _ = GenerateChain(params.TestChainConfig, genesis, ethash.NewFaker(), testdb, 1024, nil)

@@ -30,6 +30,7 @@ import (
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/common/mclock"
 	"github.com/ethereum/go-ethereum/consensus"
+	"github.com/ethereum/go-ethereum/core/rawdb"
 	"github.com/ethereum/go-ethereum/core/state"
 	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/core/vm"
@@ -202,7 +203,7 @@ func (bc *BlockChain) getProcInterrupt() bool {
 // assumes that the chain manager mutex is held.
 func (bc *BlockChain) loadLastState() error {
 	// Restore the last known head block
-	head := GetHeadBlockHash(bc.db)
+	head := rawdb.ReadHeadBlockHash(bc.db)
 	if head == (common.Hash{}) {
 		// Corrupt or empty database, init from scratch
 		log.Warn("Empty database, resetting chain")
@@ -228,7 +229,7 @@ func (bc *BlockChain) loadLastState() error {

 	// Restore the last known head header
 	currentHeader := currentBlock.Header()
-	if head := GetHeadHeaderHash(bc.db); head != (common.Hash{}) {
+	if head := rawdb.ReadHeadHeaderHash(bc.db); head != (common.Hash{}) {
 		if header := bc.GetHeaderByHash(head); header != nil {
 			currentHeader = header
 		}
@@ -237,7 +238,7 @@ func (bc *BlockChain) loadLastState() error {

 	// Restore the last known head fast block
 	bc.currentFastBlock.Store(currentBlock)
-	if head := GetHeadFastBlockHash(bc.db); head != (common.Hash{}) {
+	if head := rawdb.ReadHeadFastBlockHash(bc.db); head != (common.Hash{}) {
 		if block := bc.GetBlockByHash(head); block != nil {
 			bc.currentFastBlock.Store(block)
 		}
@@ -268,8 +269,8 @@ func (bc *BlockChain) SetHead(head uint64) error {
 	defer bc.mu.Unlock()

 	// Rewind the header chain, deleting all block bodies until then
-	delFn := func(hash common.Hash, num uint64) {
-		DeleteBody(bc.db, hash, num)
+	delFn := func(db rawdb.DatabaseDeleter, hash common.Hash, num uint64) {
+		rawdb.DeleteBody(db, hash, num)
 	}
 	bc.hc.SetHead(head, delFn)
 	currentHeader := bc.hc.CurrentHeader()
@@ -303,12 +304,10 @@ func (bc *BlockChain) SetHead(head uint64) error {
 	}
 	currentBlock := bc.CurrentBlock()
 	currentFastBlock := bc.CurrentFastBlock()
-	if err := WriteHeadBlockHash(bc.db, currentBlock.Hash()); err != nil {
-		log.Crit("Failed to reset head full block", "err", err)
-	}
-	if err := WriteHeadFastBlockHash(bc.db, currentFastBlock.Hash()); err != nil {
-		log.Crit("Failed to reset head fast block", "err", err)
-	}
+
+	rawdb.WriteHeadBlockHash(bc.db, currentBlock.Hash())
+	rawdb.WriteHeadFastBlockHash(bc.db, currentFastBlock.Hash())

 	return bc.loadLastState()
 }
@@ -406,9 +405,8 @@ func (bc *BlockChain) ResetWithGenesisBlock(genesis *types.Block) error {
 	if err := bc.hc.WriteTd(genesis.Hash(), genesis.NumberU64(), genesis.Difficulty()); err != nil {
 		log.Crit("Failed to write genesis block TD", "err", err)
 	}
-	if err := WriteBlock(bc.db, genesis); err != nil {
-		log.Crit("Failed to write genesis block", "err", err)
-	}
+	rawdb.WriteBlock(bc.db, genesis)

 	bc.genesisBlock = genesis
 	bc.insert(bc.genesisBlock)
 	bc.currentBlock.Store(bc.genesisBlock)
@@ -474,24 +472,19 @@ func (bc *BlockChain) ExportN(w io.Writer, first uint64, last uint64) error {
 // Note, this function assumes that the `mu` mutex is held!
 func (bc *BlockChain) insert(block *types.Block) {
 	// If the block is on a side chain or an unknown one, force other heads onto it too
-	updateHeads := GetCanonicalHash(bc.db, block.NumberU64()) != block.Hash()
+	updateHeads := rawdb.ReadCanonicalHash(bc.db, block.NumberU64()) != block.Hash()

 	// Add the block to the canonical chain number scheme and mark as the head
-	if err := WriteCanonicalHash(bc.db, block.Hash(), block.NumberU64()); err != nil {
-		log.Crit("Failed to insert block number", "err", err)
-	}
-	if err := WriteHeadBlockHash(bc.db, block.Hash()); err != nil {
-		log.Crit("Failed to insert head block hash", "err", err)
-	}
+	rawdb.WriteCanonicalHash(bc.db, block.Hash(), block.NumberU64())
+	rawdb.WriteHeadBlockHash(bc.db, block.Hash())

 	bc.currentBlock.Store(block)

 	// If the block is better than our head or is on a different chain, force update heads
 	if updateHeads {
 		bc.hc.SetCurrentHeader(block.Header())
+		rawdb.WriteHeadFastBlockHash(bc.db, block.Hash())

-		if err := WriteHeadFastBlockHash(bc.db, block.Hash()); err != nil {
-			log.Crit("Failed to insert head fast block hash", "err", err)
-		}
 		bc.currentFastBlock.Store(block)
 	}
 }
@@ -509,7 +502,11 @@ func (bc *BlockChain) GetBody(hash common.Hash) *types.Body {
 		body := cached.(*types.Body)
 		return body
 	}
-	body := GetBody(bc.db, hash, bc.hc.GetBlockNumber(hash))
+	number := bc.hc.GetBlockNumber(hash)
+	if number == nil {
+		return nil
+	}
+	body := rawdb.ReadBody(bc.db, hash, *number)
 	if body == nil {
 		return nil
 	}
@@ -525,7 +522,11 @@ func (bc *BlockChain) GetBodyRLP(hash common.Hash) rlp.RawValue {
 	if cached, ok := bc.bodyRLPCache.Get(hash); ok {
 		return cached.(rlp.RawValue)
 	}
-	body := GetBodyRLP(bc.db, hash, bc.hc.GetBlockNumber(hash))
+	number := bc.hc.GetBlockNumber(hash)
+	if number == nil {
+		return nil
+	}
+	body := rawdb.ReadBodyRLP(bc.db, hash, *number)
 	if len(body) == 0 {
 		return nil
 	}
@@ -539,8 +540,7 @@ func (bc *BlockChain) HasBlock(hash common.Hash, number uint64) bool {
 	if bc.blockCache.Contains(hash) {
 		return true
 	}
-	ok, _ := bc.db.Has(blockBodyKey(hash, number))
-	return ok
+	return rawdb.HasBody(bc.db, hash, number)
 }

 // HasState checks if state trie is fully present in the database or not.
@@ -567,7 +567,7 @@ func (bc *BlockChain) GetBlock(hash common.Hash, number uint64) *types.Block {
 	if block, ok := bc.blockCache.Get(hash); ok {
 		return block.(*types.Block)
 	}
-	block := GetBlock(bc.db, hash, number)
+	block := rawdb.ReadBlock(bc.db, hash, number)
 	if block == nil {
 		return nil
 	}
@@ -578,13 +578,17 @@ func (bc *BlockChain) GetBlock(hash common.Hash, number uint64) *types.Block {

 // GetBlockByHash retrieves a block from the database by hash, caching it if found.
 func (bc *BlockChain) GetBlockByHash(hash common.Hash) *types.Block {
-	return bc.GetBlock(hash, bc.hc.GetBlockNumber(hash))
+	number := bc.hc.GetBlockNumber(hash)
+	if number == nil {
+		return nil
+	}
+	return bc.GetBlock(hash, *number)
}
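
The recurring shape here: `GetBlockNumber` now returns `*uint64` instead of `uint64`, so a missing hash is distinguishable from block 0, and every caller gains an explicit nil check before dereferencing. A minimal standalone sketch of the pattern (the lookup helper is hypothetical; only the nil-vs-zero idea is from the diff):

```go
package main

import "fmt"

// lookupNumber stands in for hc.GetBlockNumber: nil means "hash unknown",
// which a bare uint64 could not express, since 0 is a valid block number.
func lookupNumber(known map[string]uint64, hash string) *uint64 {
	if n, ok := known[hash]; ok {
		return &n
	}
	return nil
}

func main() {
	known := map[string]uint64{"genesis": 0}

	for _, h := range []string{"genesis", "bogus"} {
		number := lookupNumber(known, h)
		if number == nil {
			fmt.Println(h, "=> not found") // previously indistinguishable from 0
			continue
		}
		fmt.Println(h, "=>", *number)
	}
}
```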

 // GetBlockByNumber retrieves a block from the database by number, caching it
 // (associated with its hash) if found.
 func (bc *BlockChain) GetBlockByNumber(number uint64) *types.Block {
-	hash := GetCanonicalHash(bc.db, number)
+	hash := rawdb.ReadCanonicalHash(bc.db, number)
 	if hash == (common.Hash{}) {
 		return nil
 	}
@@ -593,21 +597,28 @@ func (bc *BlockChain) GetBlockByNumber(number uint64) *types.Block {

 // GetReceiptsByHash retrieves the receipts for all transactions in a given block.
 func (bc *BlockChain) GetReceiptsByHash(hash common.Hash) types.Receipts {
-	return GetBlockReceipts(bc.db, hash, GetBlockNumber(bc.db, hash))
+	number := rawdb.ReadHeaderNumber(bc.db, hash)
+	if number == nil {
+		return nil
+	}
+	return rawdb.ReadReceipts(bc.db, hash, *number)
 }

 // GetBlocksFromHash returns the block corresponding to hash and up to n-1 ancestors.
 // [deprecated by eth/62]
 func (bc *BlockChain) GetBlocksFromHash(hash common.Hash, n int) (blocks []*types.Block) {
 	number := bc.hc.GetBlockNumber(hash)
+	if number == nil {
+		return nil
+	}
 	for i := 0; i < n; i++ {
-		block := bc.GetBlock(hash, number)
+		block := bc.GetBlock(hash, *number)
 		if block == nil {
 			break
 		}
 		blocks = append(blocks, block)
 		hash = block.ParentHash()
-		number--
+		*number--
 	}
 	return
 }
@@ -661,9 +672,9 @@ func (bc *BlockChain) Stop() {
 		}
 	}
 	for !bc.triegc.Empty() {
-		triedb.Dereference(bc.triegc.PopItem().(common.Hash), common.Hash{})
+		triedb.Dereference(bc.triegc.PopItem().(common.Hash))
 	}
-	if size := triedb.Size(); size != 0 {
+	if size, _ := triedb.Size(); size != 0 {
 		log.Error("Dangling trie nodes after full cleanup")
 	}
}
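
Two trie-database signature changes surface in this hunk: `Dereference` loses its second (owner) argument, and `Size` now reports two figures, the node cache and the preimage cache. A hedged sketch of a caller adapting to both, assuming `trie.NewDatabase` and an in-memory backing store from this tree:

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/trie"
)

func main() {
	triedb := trie.NewDatabase(ethdb.NewMemDatabase())

	// Before this PR: triedb.Dereference(root, common.Hash{}).
	// Now the owner argument is gone; unknown roots are a no-op.
	triedb.Dereference(common.HexToHash("0xdead"))

	// Before: size := triedb.Size(). Now it returns two figures:
	// trie node cache usage and preimage cache usage.
	nodes, preimages := triedb.Size()
	fmt.Println(nodes, preimages)
}
```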

@@ -712,12 +723,12 @@ func (bc *BlockChain) Rollback(chain []common.Hash) {
 		if currentFastBlock := bc.CurrentFastBlock(); currentFastBlock.Hash() == hash {
 			newFastBlock := bc.GetBlock(currentFastBlock.ParentHash(), currentFastBlock.NumberU64()-1)
 			bc.currentFastBlock.Store(newFastBlock)
-			WriteHeadFastBlockHash(bc.db, newFastBlock.Hash())
+			rawdb.WriteHeadFastBlockHash(bc.db, newFastBlock.Hash())
 		}
 		if currentBlock := bc.CurrentBlock(); currentBlock.Hash() == hash {
 			newBlock := bc.GetBlock(currentBlock.ParentHash(), currentBlock.NumberU64()-1)
 			bc.currentBlock.Store(newBlock)
-			WriteHeadBlockHash(bc.db, newBlock.Hash())
+			rawdb.WriteHeadBlockHash(bc.db, newBlock.Hash())
 		}
 	}
 }
@@ -802,15 +813,10 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [
 			return i, fmt.Errorf("failed to set receipts data: %v", err)
 		}
 		// Write all the data out into the database
-		if err := WriteBody(batch, block.Hash(), block.NumberU64(), block.Body()); err != nil {
-			return i, fmt.Errorf("failed to write block body: %v", err)
-		}
-		if err := WriteBlockReceipts(batch, block.Hash(), block.NumberU64(), receipts); err != nil {
-			return i, fmt.Errorf("failed to write block receipts: %v", err)
-		}
-		if err := WriteTxLookupEntries(batch, block); err != nil {
-			return i, fmt.Errorf("failed to write lookup metadata: %v", err)
-		}
+		rawdb.WriteBody(batch, block.Hash(), block.NumberU64(), block.Body())
+		rawdb.WriteReceipts(batch, block.Hash(), block.NumberU64(), receipts)
+		rawdb.WriteTxLookupEntries(batch, block)

 		stats.processed++

 		if batch.ValueSize() >= ethdb.IdealBatchSize {
@@ -834,9 +840,7 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [
 	if td := bc.GetTd(head.Hash(), head.NumberU64()); td != nil { // Rewind may have occurred, skip in that case
 		currentFastBlock := bc.CurrentFastBlock()
 		if bc.GetTd(currentFastBlock.Hash(), currentFastBlock.NumberU64()).Cmp(td) < 0 {
-			if err := WriteHeadFastBlockHash(bc.db, head.Hash()); err != nil {
-				log.Crit("Failed to update head fast block hash", "err", err)
-			}
+			rawdb.WriteHeadFastBlockHash(bc.db, head.Hash())
 			bc.currentFastBlock.Store(head)
 		}
}
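
`InsertReceiptChain` stages everything through a write batch and flushes whenever `IdealBatchSize` is crossed, which is the standard `ethdb` pattern this PR leans on. A minimal reproduction of that loop shape; the keys and payloads are made up, the batch API is the one used above:

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/ethdb"
)

func main() {
	db := ethdb.NewMemDatabase()
	batch := db.NewBatch()

	for i := 0; i < 100000; i++ {
		// Stage writes in memory; nothing hits the database yet.
		batch.Put([]byte(fmt.Sprintf("key-%d", i)), make([]byte, 64))

		// Flush periodically so the batch does not grow without bound.
		if batch.ValueSize() >= ethdb.IdealBatchSize {
			if err := batch.Write(); err != nil {
				panic(err)
			}
			batch.Reset()
		}
	}
	batch.Write() // final partial batch
}
```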

@@ -864,9 +868,8 @@ func (bc *BlockChain) WriteBlockWithoutState(block *types.Block, td *big.Int) (e
 	if err := bc.hc.WriteTd(block.Hash(), block.NumberU64(), td); err != nil {
 		return err
 	}
-	if err := WriteBlock(bc.db, block); err != nil {
-		return err
-	}
+	rawdb.WriteBlock(bc.db, block)

 	return nil
 }
@@ -894,9 +897,8 @@ func (bc *BlockChain) WriteBlockWithState(block *types.Block, receipts []*types.
 	}
 	// Write other block data using a batch.
 	batch := bc.db.NewBatch()
-	if err := WriteBlock(batch, block); err != nil {
-		return NonStatTy, err
-	}
+	rawdb.WriteBlock(batch, block)

 	root, err := state.Commit(bc.chainConfig.IsEIP158(block.Number()))
 	if err != nil {
 		return NonStatTy, err
@@ -914,33 +916,29 @@ func (bc *BlockChain) WriteBlockWithState(block *types.Block, receipts []*types.
 		bc.triegc.Push(root, -float32(block.NumberU64()))

 		if current := block.NumberU64(); current > triesInMemory {
+			// If we exceeded our memory allowance, flush matured singleton nodes to disk
+			var (
+				nodes, imgs = triedb.Size()
+				limit       = common.StorageSize(bc.cacheConfig.TrieNodeLimit) * 1024 * 1024
+			)
+			if nodes > limit || imgs > 4*1024*1024 {
+				triedb.Cap(limit - ethdb.IdealBatchSize)
+			}
 			// Find the next state trie we need to commit
 			header := bc.GetHeaderByNumber(current - triesInMemory)
 			chosen := header.Number.Uint64()

-			// Only write to disk if we exceeded our memory allowance *and* also have at
-			// least a given number of tries gapped.
-			var (
-				size  = triedb.Size()
-				limit = common.StorageSize(bc.cacheConfig.TrieNodeLimit) * 1024 * 1024
-			)
-			if size > limit || bc.gcproc > bc.cacheConfig.TrieTimeLimit {
+			// If we exceeded out time allowance, flush an entire trie to disk
+			if bc.gcproc > bc.cacheConfig.TrieTimeLimit {
 				// If we're exceeding limits but haven't reached a large enough memory gap,
 				// warn the user that the system is becoming unstable.
-				if chosen < lastWrite+triesInMemory {
-					switch {
-					case size >= 2*limit:
-						log.Warn("State memory usage too high, committing", "size", size, "limit", limit, "optimum", float64(chosen-lastWrite)/triesInMemory)
-					case bc.gcproc >= 2*bc.cacheConfig.TrieTimeLimit:
-						log.Info("State in memory for too long, committing", "time", bc.gcproc, "allowance", bc.cacheConfig.TrieTimeLimit, "optimum", float64(chosen-lastWrite)/triesInMemory)
-					}
-				}
-				// If optimum or critical limits reached, write to disk
-				if chosen >= lastWrite+triesInMemory || size >= 2*limit || bc.gcproc >= 2*bc.cacheConfig.TrieTimeLimit {
-					triedb.Commit(header.Root, true)
-					lastWrite = chosen
-					bc.gcproc = 0
+				if chosen < lastWrite+triesInMemory && bc.gcproc >= 2*bc.cacheConfig.TrieTimeLimit {
+					log.Info("State in memory for too long, committing", "time", bc.gcproc, "allowance", bc.cacheConfig.TrieTimeLimit, "optimum", float64(chosen-lastWrite)/triesInMemory)
 				}
+				// Flush an entire trie and restart the counters
+				triedb.Commit(header.Root, true)
+				lastWrite = chosen
+				bc.gcproc = 0
 			}
 			// Garbage collect anything below our required write retention
 			for !bc.triegc.Empty() {
@@ -949,13 +947,12 @@ func (bc *BlockChain) WriteBlockWithState(block *types.Block, receipts []*types.
 				bc.triegc.Push(root, number)
 				break
 			}
-			triedb.Dereference(root.(common.Hash), common.Hash{})
+			triedb.Dereference(root.(common.Hash))
 		}
 	}
-	if err := WriteBlockReceipts(batch, block.Hash(), block.NumberU64(), receipts); err != nil {
-		return NonStatTy, err
-	}
+	rawdb.WriteReceipts(batch, block.Hash(), block.NumberU64(), receipts)

 	// If the total difficulty is higher than our known, add it to the canonical chain
 	// Second clause in the if statement reduces the vulnerability to selfish mining.
 	// Please refer to http://www.cs.cornell.edu/~ie53/publications/btcProcFC.pdf
@@ -972,14 +969,10 @@ func (bc *BlockChain) WriteBlockWithState(block *types.Block, receipts []*types.
 			return NonStatTy, err
 		}
 	}
-	// Write the positional metadata for transaction and receipt lookups
-	if err := WriteTxLookupEntries(batch, block); err != nil {
-		return NonStatTy, err
-	}
-	// Write hash preimages
-	if err := WritePreimages(bc.db, block.NumberU64(), state.Preimages()); err != nil {
-		return NonStatTy, err
-	}
+	// Write the positional metadata for transaction/receipt lookups and preimages
+	rawdb.WriteTxLookupEntries(batch, block)
+	rawdb.WritePreimages(batch, block.NumberU64(), state.Preimages())

 		status = CanonStatTy
 	} else {
status = SideStatTy
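
The rewritten GC branch splits the two pressure valves: memory pressure is now relieved incrementally via `triedb.Cap` on every block, while only the accumulated processing time (`gcproc`) forces a full trie commit. A toy sketch of that decision structure; all names here are illustrative stand-ins, not the blockchain code:

```go
package main

import (
	"fmt"
	"time"
)

// flushPolicy mirrors the reworked WriteBlockWithState branch: cap the node
// cache on memory pressure, commit a whole matured trie only when the time
// allowance is exceeded (which also resets the gcproc counter upstream).
func flushPolicy(nodes, imgs, limit int, gcproc, allowance time.Duration) []string {
	var actions []string
	if nodes > limit || imgs > 4*1024*1024 {
		actions = append(actions, "cap") // trim node cache toward the limit
	}
	if gcproc > allowance {
		actions = append(actions, "commit") // flush an entire trie to disk
	}
	return actions
}

func main() {
	limit := 256 * 1024 * 1024
	fmt.Println(flushPolicy(limit+1, 0, limit, time.Second, time.Hour)) // [cap]
	fmt.Println(flushPolicy(0, 0, limit, 2*time.Hour, time.Hour))       // [commit]
}
```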

@@ -1012,6 +1005,10 @@ func (bc *BlockChain) InsertChain(chain types.Blocks) (int, error) {
 // only reason this method exists as a separate one is to make locking cleaner
 // with deferred statements.
 func (bc *BlockChain) insertChain(chain types.Blocks) (int, []interface{}, []*types.Log, error) {
+	// Sanity check that we have something meaningful to import
+	if len(chain) == 0 {
+		return 0, nil, nil, nil
+	}
 	// Do a sanity check that the provided chain is actually ordered and linked
 	for i := 1; i < len(chain); i++ {
 		if chain[i].NumberU64() != chain[i-1].NumberU64()+1 || chain[i].ParentHash() != chain[i-1].Hash() {
@@ -1050,6 +1047,9 @@ func (bc *BlockChain) insertChain(chain types.Blocks) (int, []interface{}, []*ty
 	abort, results := bc.engine.VerifyHeaders(bc, headers, seals)
 	defer close(abort)

+	// Start a parallel signature recovery (signer will fluke on fork transition, minimal perf loss)
+	senderCacher.recoverFromBlocks(types.MakeSigner(bc.chainConfig, chain[0].Number()), chain)
+
 	// Iterate over the blocks and insert when the verifier permits
 	for i, block := range chain {
 		// If the chain is terminating, stop processing blocks
@@ -1184,7 +1184,9 @@ func (bc *BlockChain) insertChain(chain types.Blocks) (int, []interface{}, []*ty
 		}
 		stats.processed++
 		stats.usedGas += usedGas
-		stats.report(chain, i, bc.stateCache.TrieDB().Size())
+
+		cache, _ := bc.stateCache.TrieDB().Size()
+		stats.report(chain, i, cache)
 	}
 	// Append a single chain head event if we've progressed the chain
 	if lastCanon != nil && bc.CurrentBlock().Hash() == lastCanon.Hash() {
@@ -1256,9 +1258,13 @@ func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
 	// collectLogs collects the logs that were generated during the
 	// processing of the block that corresponds with the given hash.
 	// These logs are later announced as deleted.
-	collectLogs = func(h common.Hash) {
+	collectLogs = func(hash common.Hash) {
 		// Coalesce logs and set 'Removed'.
-		receipts := GetBlockReceipts(bc.db, h, bc.hc.GetBlockNumber(h))
+		number := bc.hc.GetBlockNumber(hash)
+		if number == nil {
+			return
+		}
+		receipts := rawdb.ReadReceipts(bc.db, hash, *number)
 		for _, receipt := range receipts {
 			for _, log := range receipt.Logs {
 				del := *log
@@ -1327,18 +1333,19 @@ func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
 		// insert the block in the canonical way, re-writing history
 		bc.insert(newChain[i])
 		// write lookup entries for hash based transaction/receipt searches
-		if err := WriteTxLookupEntries(bc.db, newChain[i]); err != nil {
-			return err
-		}
+		rawdb.WriteTxLookupEntries(bc.db, newChain[i])
 		addedTxs = append(addedTxs, newChain[i].Transactions()...)
 	}
 	// calculate the difference between deleted and added transactions
 	diff := types.TxDifference(deletedTxs, addedTxs)
 	// When transactions get deleted from the database that means the
 	// receipts that were created in the fork must also be deleted
+	batch := bc.db.NewBatch()
 	for _, tx := range diff {
-		DeleteTxLookupEntry(bc.db, tx.Hash())
+		rawdb.DeleteTxLookupEntry(batch, tx.Hash())
 	}
+	batch.Write()

 	if len(deletedLogs) > 0 {
 		go bc.rmLogsFeed.Send(RemovedLogsEvent{deletedLogs})
 	}
@@ -1388,27 +1395,21 @@ func (bc *BlockChain) update() {
 	}
 }

-// BadBlockArgs represents the entries in the list returned when bad blocks are queried.
-type BadBlockArgs struct {
-	Hash   common.Hash   `json:"hash"`
-	Header *types.Header `json:"header"`
-}
-
 // BadBlocks returns a list of the last 'bad blocks' that the client has seen on the network
-func (bc *BlockChain) BadBlocks() ([]BadBlockArgs, error) {
-	headers := make([]BadBlockArgs, 0, bc.badBlocks.Len())
+func (bc *BlockChain) BadBlocks() []*types.Block {
+	blocks := make([]*types.Block, 0, bc.badBlocks.Len())
 	for _, hash := range bc.badBlocks.Keys() {
-		if hdr, exist := bc.badBlocks.Peek(hash); exist {
-			header := hdr.(*types.Header)
-			headers = append(headers, BadBlockArgs{header.Hash(), header})
+		if blk, exist := bc.badBlocks.Peek(hash); exist {
+			block := blk.(*types.Block)
+			blocks = append(blocks, block)
 		}
 	}
-	return headers, nil
+	return blocks
 }

 // addBadBlock adds a bad block to the bad-block LRU cache
 func (bc *BlockChain) addBadBlock(block *types.Block) {
-	bc.badBlocks.Add(block.Header().Hash(), block.Header())
+	bc.badBlocks.Add(block.Hash(), block)
 }

// reportBlock logs a bad block error.
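
`BadBlocks` now hands back whole blocks rather than header-wrapping `BadBlockArgs` entries, and the error return disappears because nothing in the loop can fail. A hedged sketch of the consuming side (`bc` is assumed to be a `*core.BlockChain`; the helper is illustrative):

```go
package monitor

import (
	"fmt"

	"github.com/ethereum/go-ethereum/core"
)

// printBadBlocks sketches a consumer after the API change; previously this
// was `args, err := bc.BadBlocks()` yielding header-only entries.
func printBadBlocks(bc *core.BlockChain) {
	for _, block := range bc.BadBlocks() {
		fmt.Println("bad block", block.NumberU64(), block.Hash().Hex(),
			"txs", len(block.Transactions()))
	}
}
```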

@@ -1526,6 +1527,18 @@ func (bc *BlockChain) GetBlockHashesFromHash(hash common.Hash, max uint64) []com
 	return bc.hc.GetBlockHashesFromHash(hash, max)
 }

+// GetAncestor retrieves the Nth ancestor of a given block. It assumes that either the given block or
+// a close ancestor of it is canonical. maxNonCanonical points to a downwards counter limiting the
+// number of blocks to be individually checked before we reach the canonical chain.
+//
+// Note: ancestor == 0 returns the same block, 1 returns its parent and so on.
+func (bc *BlockChain) GetAncestor(hash common.Hash, number, ancestor uint64, maxNonCanonical *uint64) (common.Hash, uint64) {
+	bc.chainmu.Lock()
+	defer bc.chainmu.Unlock()
+
+	return bc.hc.GetAncestor(hash, number, ancestor, maxNonCanonical)
}
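
A usage sketch for the new accessor: walking 128 blocks back from a known head while allowing a bounded non-canonical prefix. `bc` is again assumed to be a `*core.BlockChain`, and the numbers are illustrative:

```go
package monitor

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core"
)

// ancestorOf fetches the hash/number of head's 128th ancestor. The counter
// bounds how many individual non-canonical blocks may be walked before the
// lookup falls back to the canonical-number index.
func ancestorOf(bc *core.BlockChain, head common.Hash, headNum uint64) (common.Hash, uint64) {
	maxNonCanonical := uint64(100)
	return bc.GetAncestor(head, headNum, 128, &maxNonCanonical)
}
```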

 // GetHeaderByNumber retrieves a block header from the database by number,
 // caching it (associated with its hash) if found.
 func (bc *BlockChain) GetHeaderByNumber(number uint64) *types.Header {

@@ -25,7 +25,9 @@ import (
 	"time"

 	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/consensus"
 	"github.com/ethereum/go-ethereum/consensus/ethash"
+	"github.com/ethereum/go-ethereum/core/rawdb"
 	"github.com/ethereum/go-ethereum/core/state"
 	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/core/vm"
@@ -34,6 +36,39 @@ import (
 	"github.com/ethereum/go-ethereum/params"
 )

+// So we can deterministically seed different blockchains
+var (
+	canonicalSeed = 1
+	forkSeed      = 2
+)
+
+// newCanonical creates a chain database, and injects a deterministic canonical
+// chain. Depending on the full flag, it creates either a full block chain or a
+// header only chain.
+func newCanonical(engine consensus.Engine, n int, full bool) (ethdb.Database, *BlockChain, error) {
+	var (
+		db      = ethdb.NewMemDatabase()
+		genesis = new(Genesis).MustCommit(db)
+	)
+
+	// Initialize a fresh chain with only a genesis block
+	blockchain, _ := NewBlockChain(db, nil, params.AllEthashProtocolChanges, engine, vm.Config{})
+	// Create and inject the requested chain
+	if n == 0 {
+		return db, blockchain, nil
+	}
+	if full {
+		// Full block-chain requested
+		blocks := makeBlockChain(genesis, n, engine, db, canonicalSeed)
+		_, err := blockchain.InsertChain(blocks)
+		return db, blockchain, err
+	}
+	// Header-only chain requested
+	headers := makeHeaderChain(genesis.Header(), n, engine, db, canonicalSeed)
+	_, err := blockchain.InsertHeaderChain(headers, 1)
+	return db, blockchain, err
+}
+
 // Test fork of length N starting from block i
 func testFork(t *testing.T, blockchain *BlockChain, i, n int, full bool, comparator func(td1, td2 *big.Int)) {
 	// Copy old chain up to #i into a new db
@@ -128,8 +163,8 @@ func testBlockChainImport(chain types.Blocks, blockchain *BlockChain) error {
 			return err
 		}
 		blockchain.mu.Lock()
-		WriteTd(blockchain.db, block.Hash(), block.NumberU64(), new(big.Int).Add(block.Difficulty(), blockchain.GetTdByHash(block.ParentHash())))
-		WriteBlock(blockchain.db, block)
+		rawdb.WriteTd(blockchain.db, block.Hash(), block.NumberU64(), new(big.Int).Add(block.Difficulty(), blockchain.GetTdByHash(block.ParentHash())))
+		rawdb.WriteBlock(blockchain.db, block)
 		statedb.Commit(false)
 		blockchain.mu.Unlock()
 	}
@@ -146,8 +181,8 @@ func testHeaderChainImport(chain []*types.Header, blockchain *BlockChain) error
 		}
 		// Manually insert the header into the database, but don't reorganise (allows subsequent testing)
 		blockchain.mu.Lock()
-		WriteTd(blockchain.db, header.Hash(), header.Number.Uint64(), new(big.Int).Add(header.Difficulty, blockchain.GetTdByHash(header.ParentHash)))
-		WriteHeader(blockchain.db, header)
+		rawdb.WriteTd(blockchain.db, header.Hash(), header.Number.Uint64(), new(big.Int).Add(header.Difficulty, blockchain.GetTdByHash(header.ParentHash)))
+		rawdb.WriteHeader(blockchain.db, header)
 		blockchain.mu.Unlock()
 	}
 	return nil
@@ -173,7 +208,7 @@ func TestLastBlock(t *testing.T) {
 	if _, err := blockchain.InsertChain(blocks); err != nil {
 		t.Fatalf("Failed to insert block: %v", err)
 	}
-	if blocks[len(blocks)-1].Hash() != GetHeadBlockHash(blockchain.db) {
+	if blocks[len(blocks)-1].Hash() != rawdb.ReadHeadBlockHash(blockchain.db) {
 		t.Fatalf("Write/Get HeadBlockHash failed")
 	}
 }
@@ -568,16 +603,16 @@ func testInsertNonceError(t *testing.T, full bool) {
 func TestFastVsFullChains(t *testing.T) {
 	// Configure and generate a sample block chain
 	var (
-		gendb, _ = ethdb.NewMemDatabase()
-		key, _   = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
-		address  = crypto.PubkeyToAddress(key.PublicKey)
-		funds    = big.NewInt(1000000000)
-		gspec    = &Genesis{
+		gendb   = ethdb.NewMemDatabase()
+		key, _  = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
+		address = crypto.PubkeyToAddress(key.PublicKey)
+		funds   = big.NewInt(1000000000)
+		gspec   = &Genesis{
 			Config: params.TestChainConfig,
 			Alloc:  GenesisAlloc{address: {Balance: funds}},
 		}
 		genesis = gspec.MustCommit(gendb)
-		signer  = types.NewEIP155Signer(gspec.Config.ChainId)
+		signer  = types.NewEIP155Signer(gspec.Config.ChainID)
 	)
 	blocks, receipts := GenerateChain(gspec.Config, genesis, ethash.NewFaker(), gendb, 1024, func(i int, block *BlockGen) {
block.SetCoinbase(common.Address{0x00})
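
`ChainId` → `ChainID` is a field rename on `params.ChainConfig`, so every signer construction site in these tests changes mechanically. A small standalone sketch of building an EIP-155 signer against the renamed field and signing with it (the transaction values are arbitrary):

```go
package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/params"
)

func main() {
	config := &params.ChainConfig{ChainID: big.NewInt(1)} // field was ChainId before this PR

	key, _ := crypto.GenerateKey()
	signer := types.NewEIP155Signer(config.ChainID)

	tx := types.NewTransaction(0, common.Address{}, big.NewInt(0), 21000, big.NewInt(1), nil)
	signed, err := types.SignTx(tx, signer, key)
	fmt.Println(signed.Hash().Hex(), err)
}
```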
|
||||
@@ -598,7 +633,7 @@ func TestFastVsFullChains(t *testing.T) {
|
||||
}
|
||||
})
|
||||
// Import the chain as an archive node for the comparison baseline
|
||||
archiveDb, _ := ethdb.NewMemDatabase()
|
||||
archiveDb := ethdb.NewMemDatabase()
|
||||
gspec.MustCommit(archiveDb)
|
||||
archive, _ := NewBlockChain(archiveDb, nil, gspec.Config, ethash.NewFaker(), vm.Config{})
|
||||
defer archive.Stop()
|
||||
@@ -607,7 +642,7 @@ func TestFastVsFullChains(t *testing.T) {
|
||||
t.Fatalf("failed to process block %d: %v", n, err)
|
||||
}
|
||||
// Fast import the chain as a non-archive node to test
|
||||
fastDb, _ := ethdb.NewMemDatabase()
|
||||
fastDb := ethdb.NewMemDatabase()
|
||||
gspec.MustCommit(fastDb)
|
||||
fast, _ := NewBlockChain(fastDb, nil, gspec.Config, ethash.NewFaker(), vm.Config{})
|
||||
defer fast.Stop()
|
||||
@@ -639,13 +674,13 @@ func TestFastVsFullChains(t *testing.T) {
|
||||
} else if types.CalcUncleHash(fblock.Uncles()) != types.CalcUncleHash(ablock.Uncles()) {
|
||||
t.Errorf("block #%d [%x]: uncles mismatch: have %v, want %v", num, hash, fblock.Uncles(), ablock.Uncles())
|
||||
}
|
||||
if freceipts, areceipts := GetBlockReceipts(fastDb, hash, GetBlockNumber(fastDb, hash)), GetBlockReceipts(archiveDb, hash, GetBlockNumber(archiveDb, hash)); types.DeriveSha(freceipts) != types.DeriveSha(areceipts) {
|
||||
if freceipts, areceipts := rawdb.ReadReceipts(fastDb, hash, *rawdb.ReadHeaderNumber(fastDb, hash)), rawdb.ReadReceipts(archiveDb, hash, *rawdb.ReadHeaderNumber(archiveDb, hash)); types.DeriveSha(freceipts) != types.DeriveSha(areceipts) {
|
||||
t.Errorf("block #%d [%x]: receipts mismatch: have %v, want %v", num, hash, freceipts, areceipts)
|
||||
}
|
||||
}
|
||||
// Check that the canonical chains are the same between the databases
|
||||
for i := 0; i < len(blocks)+1; i++ {
|
||||
if fhash, ahash := GetCanonicalHash(fastDb, uint64(i)), GetCanonicalHash(archiveDb, uint64(i)); fhash != ahash {
|
||||
if fhash, ahash := rawdb.ReadCanonicalHash(fastDb, uint64(i)), rawdb.ReadCanonicalHash(archiveDb, uint64(i)); fhash != ahash {
|
||||
t.Errorf("block #%d: canonical hash mismatch: have %v, want %v", i, fhash, ahash)
|
||||
}
|
||||
}
|
||||
@@ -656,12 +691,12 @@ func TestFastVsFullChains(t *testing.T) {
|
||||
func TestLightVsFastVsFullChainHeads(t *testing.T) {
|
||||
// Configure and generate a sample block chain
|
||||
var (
|
||||
gendb, _ = ethdb.NewMemDatabase()
|
||||
key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
|
||||
address = crypto.PubkeyToAddress(key.PublicKey)
|
||||
funds = big.NewInt(1000000000)
|
||||
gspec = &Genesis{Config: params.TestChainConfig, Alloc: GenesisAlloc{address: {Balance: funds}}}
|
||||
genesis = gspec.MustCommit(gendb)
|
||||
gendb = ethdb.NewMemDatabase()
|
||||
key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
|
||||
address = crypto.PubkeyToAddress(key.PublicKey)
|
||||
funds = big.NewInt(1000000000)
|
||||
gspec = &Genesis{Config: params.TestChainConfig, Alloc: GenesisAlloc{address: {Balance: funds}}}
|
||||
genesis = gspec.MustCommit(gendb)
|
||||
)
|
||||
height := uint64(1024)
|
||||
blocks, receipts := GenerateChain(gspec.Config, genesis, ethash.NewFaker(), gendb, int(height), nil)
|
||||
@@ -684,7 +719,7 @@ func TestLightVsFastVsFullChainHeads(t *testing.T) {
|
||||
}
|
||||
}
|
||||
// Import the chain as an archive node and ensure all pointers are updated
|
||||
archiveDb, _ := ethdb.NewMemDatabase()
|
||||
archiveDb := ethdb.NewMemDatabase()
|
||||
gspec.MustCommit(archiveDb)
|
||||
|
||||
archive, _ := NewBlockChain(archiveDb, nil, gspec.Config, ethash.NewFaker(), vm.Config{})
|
||||
@@ -698,7 +733,7 @@ func TestLightVsFastVsFullChainHeads(t *testing.T) {
|
||||
assert(t, "archive", archive, height/2, height/2, height/2)
|
||||
|
||||
// Import the chain as a non-archive node and ensure all pointers are updated
|
||||
fastDb, _ := ethdb.NewMemDatabase()
|
||||
fastDb := ethdb.NewMemDatabase()
|
||||
gspec.MustCommit(fastDb)
|
||||
fast, _ := NewBlockChain(fastDb, nil, gspec.Config, ethash.NewFaker(), vm.Config{})
|
||||
defer fast.Stop()
|
||||
@@ -718,7 +753,7 @@ func TestLightVsFastVsFullChainHeads(t *testing.T) {
|
||||
assert(t, "fast", fast, height/2, height/2, 0)
|
||||
|
||||
// Import the chain as a light node and ensure all pointers are updated
|
||||
lightDb, _ := ethdb.NewMemDatabase()
|
||||
lightDb := ethdb.NewMemDatabase()
|
||||
gspec.MustCommit(lightDb)
|
||||
|
||||
light, _ := NewBlockChain(lightDb, nil, gspec.Config, ethash.NewFaker(), vm.Config{})
|
||||
@@ -741,7 +776,7 @@ func TestChainTxReorgs(t *testing.T) {
|
||||
addr1 = crypto.PubkeyToAddress(key1.PublicKey)
|
||||
addr2 = crypto.PubkeyToAddress(key2.PublicKey)
|
||||
addr3 = crypto.PubkeyToAddress(key3.PublicKey)
|
||||
db, _ = ethdb.NewMemDatabase()
|
||||
db = ethdb.NewMemDatabase()
|
||||
gspec = &Genesis{
|
||||
Config: params.TestChainConfig,
|
||||
GasLimit: 3141592,
|
||||
@@ -752,7 +787,7 @@ func TestChainTxReorgs(t *testing.T) {
|
||||
},
|
||||
}
|
||||
genesis = gspec.MustCommit(db)
|
||||
signer = types.NewEIP155Signer(gspec.Config.ChainId)
|
||||
signer = types.NewEIP155Signer(gspec.Config.ChainID)
|
||||
)
|
||||
|
||||
// Create two transactions shared between the chains:
|
||||
@@ -821,28 +856,28 @@ func TestChainTxReorgs(t *testing.T) {
|
||||
|
||||
// removed tx
|
||||
for i, tx := range (types.Transactions{pastDrop, freshDrop}) {
|
||||
if txn, _, _, _ := GetTransaction(db, tx.Hash()); txn != nil {
|
||||
if txn, _, _, _ := rawdb.ReadTransaction(db, tx.Hash()); txn != nil {
|
||||
t.Errorf("drop %d: tx %v found while shouldn't have been", i, txn)
|
||||
}
|
||||
if rcpt, _, _, _ := GetReceipt(db, tx.Hash()); rcpt != nil {
|
||||
if rcpt, _, _, _ := rawdb.ReadReceipt(db, tx.Hash()); rcpt != nil {
|
||||
t.Errorf("drop %d: receipt %v found while shouldn't have been", i, rcpt)
|
||||
}
|
||||
}
|
||||
// added tx
|
||||
for i, tx := range (types.Transactions{pastAdd, freshAdd, futureAdd}) {
|
||||
if txn, _, _, _ := GetTransaction(db, tx.Hash()); txn == nil {
|
||||
if txn, _, _, _ := rawdb.ReadTransaction(db, tx.Hash()); txn == nil {
|
||||
t.Errorf("add %d: expected tx to be found", i)
|
||||
}
|
||||
if rcpt, _, _, _ := GetReceipt(db, tx.Hash()); rcpt == nil {
|
||||
if rcpt, _, _, _ := rawdb.ReadReceipt(db, tx.Hash()); rcpt == nil {
|
||||
t.Errorf("add %d: expected receipt to be found", i)
|
||||
}
|
||||
}
|
||||
// shared tx
|
||||
for i, tx := range (types.Transactions{postponed, swapped}) {
|
||||
if txn, _, _, _ := GetTransaction(db, tx.Hash()); txn == nil {
|
||||
if txn, _, _, _ := rawdb.ReadTransaction(db, tx.Hash()); txn == nil {
|
||||
t.Errorf("share %d: expected tx to be found", i)
|
||||
}
|
||||
if rcpt, _, _, _ := GetReceipt(db, tx.Hash()); rcpt == nil {
|
||||
if rcpt, _, _, _ := rawdb.ReadReceipt(db, tx.Hash()); rcpt == nil {
|
||||
t.Errorf("share %d: expected receipt to be found", i)
|
||||
}
|
||||
}
|
||||
@@ -853,12 +888,12 @@ func TestLogReorgs(t *testing.T) {
|
||||
var (
|
||||
key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
|
||||
addr1 = crypto.PubkeyToAddress(key1.PublicKey)
|
||||
db, _ = ethdb.NewMemDatabase()
|
||||
db = ethdb.NewMemDatabase()
|
||||
// this code generates a log
|
||||
code = common.Hex2Bytes("60606040525b7f24ec1d3ff24c2f6ff210738839dbc339cd45a5294d85c79361016243157aae7b60405180905060405180910390a15b600a8060416000396000f360606040526008565b00")
|
||||
gspec = &Genesis{Config: params.TestChainConfig, Alloc: GenesisAlloc{addr1: {Balance: big.NewInt(10000000000000)}}}
|
||||
genesis = gspec.MustCommit(db)
|
||||
signer = types.NewEIP155Signer(gspec.Config.ChainId)
|
||||
signer = types.NewEIP155Signer(gspec.Config.ChainID)
|
||||
)
|
||||
|
||||
blockchain, _ := NewBlockChain(db, nil, gspec.Config, ethash.NewFaker(), vm.Config{})
|
||||
@@ -897,7 +932,7 @@ func TestLogReorgs(t *testing.T) {
|
||||
|
||||
func TestReorgSideEvent(t *testing.T) {
|
||||
var (
|
||||
db, _ = ethdb.NewMemDatabase()
|
||||
db = ethdb.NewMemDatabase()
|
||||
key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
|
||||
addr1 = crypto.PubkeyToAddress(key1.PublicKey)
|
||||
gspec = &Genesis{
|
||||
@@ -905,7 +940,7 @@ func TestReorgSideEvent(t *testing.T) {
|
||||
Alloc: GenesisAlloc{addr1: {Balance: big.NewInt(10000000000000)}},
|
||||
}
|
||||
genesis = gspec.MustCommit(db)
|
||||
signer = types.NewEIP155Signer(gspec.Config.ChainId)
|
||||
signer = types.NewEIP155Signer(gspec.Config.ChainID)
|
||||
)
|
||||
|
||||
blockchain, _ := NewBlockChain(db, nil, gspec.Config, ethash.NewFaker(), vm.Config{})
|
||||
@@ -997,14 +1032,14 @@ func TestCanonicalBlockRetrieval(t *testing.T) {
|
||||
|
||||
// try to retrieve a block by its canonical hash and see if the block data can be retrieved.
|
||||
for {
|
||||
ch := GetCanonicalHash(blockchain.db, block.NumberU64())
|
||||
ch := rawdb.ReadCanonicalHash(blockchain.db, block.NumberU64())
|
||||
if ch == (common.Hash{}) {
|
||||
continue // busy wait for canonical hash to be written
|
||||
}
|
||||
if ch != block.Hash() {
|
||||
t.Fatalf("unknown canonical hash, want %s, got %s", block.Hash().Hex(), ch.Hex())
|
||||
}
|
||||
fb := GetBlock(blockchain.db, ch, block.NumberU64())
|
||||
fb := rawdb.ReadBlock(blockchain.db, ch, block.NumberU64())
|
||||
if fb == nil {
|
||||
t.Fatalf("unable to retrieve block %d for canonical hash: %s", block.NumberU64(), ch.Hex())
|
||||
}
|
||||
@@ -1025,13 +1060,13 @@ func TestCanonicalBlockRetrieval(t *testing.T) {
|
||||
func TestEIP155Transition(t *testing.T) {
|
||||
// Configure and generate a sample block chain
|
||||
var (
|
||||
db, _ = ethdb.NewMemDatabase()
|
||||
db = ethdb.NewMemDatabase()
|
||||
key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
|
||||
address = crypto.PubkeyToAddress(key.PublicKey)
|
||||
funds = big.NewInt(1000000000)
|
||||
deleteAddr = common.Address{1}
|
||||
gspec = &Genesis{
|
||||
Config: ¶ms.ChainConfig{ChainId: big.NewInt(1), EIP155Block: big.NewInt(2), HomesteadBlock: new(big.Int)},
|
||||
Config: ¶ms.ChainConfig{ChainID: big.NewInt(1), EIP155Block: big.NewInt(2), HomesteadBlock: new(big.Int)},
|
||||
Alloc: GenesisAlloc{address: {Balance: funds}, deleteAddr: {Balance: new(big.Int)}},
|
||||
}
|
||||
genesis = gspec.MustCommit(db)
|
||||
@@ -1062,7 +1097,7 @@ func TestEIP155Transition(t *testing.T) {
|
||||
}
|
||||
block.AddTx(tx)
|
||||
|
||||
tx, err = basicTx(types.NewEIP155Signer(gspec.Config.ChainId))
|
||||
tx, err = basicTx(types.NewEIP155Signer(gspec.Config.ChainID))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@@ -1074,7 +1109,7 @@ func TestEIP155Transition(t *testing.T) {
|
||||
}
|
||||
block.AddTx(tx)
|
||||
|
||||
tx, err = basicTx(types.NewEIP155Signer(gspec.Config.ChainId))
|
||||
tx, err = basicTx(types.NewEIP155Signer(gspec.Config.ChainID))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@@ -1102,7 +1137,7 @@ func TestEIP155Transition(t *testing.T) {
|
||||
}
|
||||
|
||||
// generate an invalid chain id transaction
|
||||
config := ¶ms.ChainConfig{ChainId: big.NewInt(2), EIP155Block: big.NewInt(2), HomesteadBlock: new(big.Int)}
|
||||
config := ¶ms.ChainConfig{ChainID: big.NewInt(2), EIP155Block: big.NewInt(2), HomesteadBlock: new(big.Int)}
|
||||
blocks, _ = GenerateChain(config, blocks[len(blocks)-1], ethash.NewFaker(), db, 4, func(i int, block *BlockGen) {
|
||||
var (
|
||||
tx *types.Transaction
|
||||
@@ -1129,14 +1164,14 @@ func TestEIP155Transition(t *testing.T) {
|
||||
func TestEIP161AccountRemoval(t *testing.T) {
|
||||
// Configure and generate a sample block chain
|
||||
var (
|
||||
db, _ = ethdb.NewMemDatabase()
|
||||
db = ethdb.NewMemDatabase()
|
||||
key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
|
||||
address = crypto.PubkeyToAddress(key.PublicKey)
|
||||
funds = big.NewInt(1000000000)
|
||||
theAddr = common.Address{1}
|
||||
gspec = &Genesis{
|
||||
Config: ¶ms.ChainConfig{
|
||||
ChainId: big.NewInt(1),
|
||||
ChainID: big.NewInt(1),
|
||||
HomesteadBlock: new(big.Int),
|
||||
EIP155Block: new(big.Int),
|
||||
EIP158Block: big.NewInt(2),
|
||||
@@ -1152,7 +1187,7 @@ func TestEIP161AccountRemoval(t *testing.T) {
|
||||
var (
|
||||
tx *types.Transaction
|
||||
err error
|
||||
signer = types.NewEIP155Signer(gspec.Config.ChainId)
|
||||
signer = types.NewEIP155Signer(gspec.Config.ChainID)
|
||||
)
|
||||
switch i {
|
||||
case 0:
|
||||
@@ -1201,7 +1236,7 @@ func TestBlockchainHeaderchainReorgConsistency(t *testing.T) {
 	// Generate a canonical chain to act as the main dataset
 	engine := ethash.NewFaker()
 
-	db, _ := ethdb.NewMemDatabase()
+	db := ethdb.NewMemDatabase()
 	genesis := new(Genesis).MustCommit(db)
 	blocks, _ := GenerateChain(params.TestChainConfig, genesis, engine, db, 64, func(i int, b *BlockGen) { b.SetCoinbase(common.Address{1}) })
 
@@ -1217,7 +1252,7 @@ func TestBlockchainHeaderchainReorgConsistency(t *testing.T) {
 	}
 	// Import the canonical and fork chain side by side, verifying the current block
 	// and current header consistency
-	diskdb, _ := ethdb.NewMemDatabase()
+	diskdb := ethdb.NewMemDatabase()
 	new(Genesis).MustCommit(diskdb)
 
 	chain, err := NewBlockChain(diskdb, nil, params.TestChainConfig, engine, vm.Config{})
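These reorg tests all share one scaffold: commit a genesis into a fresh memory database, generate blocks from it, then open a BlockChain over a second memdb seeded with the same genesis and import. A condensed sketch of that pattern, using the constructors exactly as they appear in the hunks above (block count and coinbase are arbitrary):

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/consensus/ethash"
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/core/vm"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/params"
)

func main() {
	engine := ethash.NewFaker()

	// Generator database: holds the genesis plus the derived blocks.
	db := ethdb.NewMemDatabase()
	genesis := new(core.Genesis).MustCommit(db)
	blocks, _ := core.GenerateChain(params.TestChainConfig, genesis, engine, db, 8,
		func(i int, b *core.BlockGen) { b.SetCoinbase(common.Address{1}) })

	// Import database: a second fresh memdb seeded with the same genesis.
	diskdb := ethdb.NewMemDatabase()
	new(core.Genesis).MustCommit(diskdb)

	chain, err := core.NewBlockChain(diskdb, nil, params.TestChainConfig, engine, vm.Config{})
	if err != nil {
		fmt.Println("failed to create chain:", err)
		return
	}
	if _, err := chain.InsertChain(blocks); err != nil {
		fmt.Println("failed to insert chain:", err)
		return
	}
	// The consistency the test asserts: block head and header head agree.
	fmt.Println("heads consistent:", chain.CurrentBlock().Hash() == chain.CurrentHeader().Hash())
}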
@@ -1246,7 +1281,7 @@ func TestTrieForkGC(t *testing.T) {
 	// Generate a canonical chain to act as the main dataset
 	engine := ethash.NewFaker()
 
-	db, _ := ethdb.NewMemDatabase()
+	db := ethdb.NewMemDatabase()
 	genesis := new(Genesis).MustCommit(db)
 	blocks, _ := GenerateChain(params.TestChainConfig, genesis, engine, db, 2*triesInMemory, func(i int, b *BlockGen) { b.SetCoinbase(common.Address{1}) })
 
@@ -1261,7 +1296,7 @@ func TestTrieForkGC(t *testing.T) {
 		forks[i] = fork[0]
 	}
 	// Import the canonical and fork chain side by side, forcing the trie cache to cache both
-	diskdb, _ := ethdb.NewMemDatabase()
+	diskdb := ethdb.NewMemDatabase()
 	new(Genesis).MustCommit(diskdb)
 
 	chain, err := NewBlockChain(diskdb, nil, params.TestChainConfig, engine, vm.Config{})
@@ -1278,8 +1313,8 @@ func TestTrieForkGC(t *testing.T) {
 	}
 	// Dereference all the recent tries and ensure no past trie is left in
 	for i := 0; i < triesInMemory; i++ {
-		chain.stateCache.TrieDB().Dereference(blocks[len(blocks)-1-i].Root(), common.Hash{})
-		chain.stateCache.TrieDB().Dereference(forks[len(blocks)-1-i].Root(), common.Hash{})
+		chain.stateCache.TrieDB().Dereference(blocks[len(blocks)-1-i].Root())
+		chain.stateCache.TrieDB().Dereference(forks[len(blocks)-1-i].Root())
 	}
 	if len(chain.stateCache.TrieDB().Nodes()) > 0 {
 		t.Fatalf("stale tries still alive after garbage collection")
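On the trie side, `TrieDB().Dereference` loses its second argument: callers now pass only the root to unpin. A small self-contained sketch against the `trie` package, assuming the one-argument form introduced here; the toy key/value trie exists only to give the database something to collect:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/trie"
)

func main() {
	diskdb := ethdb.NewMemDatabase()
	triedb := trie.NewDatabase(diskdb)

	// Build a tiny trie and commit it into the in-memory trie database.
	tr, _ := trie.New(common.Hash{}, triedb)
	tr.Update([]byte("key"), []byte("value"))
	root, _ := tr.Commit(nil)

	// Pin the root, then release it with the new one-argument Dereference;
	// the trie's nodes are garbage collected once the root's last external
	// reference is dropped.
	triedb.Reference(root, common.Hash{})
	triedb.Dereference(root)

	fmt.Println("cached nodes left:", len(triedb.Nodes()))
}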
@@ -1292,7 +1327,7 @@ func TestLargeReorgTrieGC(t *testing.T) {
 	// Generate the original common chain segment and the two competing forks
 	engine := ethash.NewFaker()
 
-	db, _ := ethdb.NewMemDatabase()
+	db := ethdb.NewMemDatabase()
 	genesis := new(Genesis).MustCommit(db)
 
 	shared, _ := GenerateChain(params.TestChainConfig, genesis, engine, db, 64, func(i int, b *BlockGen) { b.SetCoinbase(common.Address{1}) })
@@ -1300,7 +1335,7 @@ func TestLargeReorgTrieGC(t *testing.T) {
 	competitor, _ := GenerateChain(params.TestChainConfig, shared[len(shared)-1], engine, db, 2*triesInMemory+1, func(i int, b *BlockGen) { b.SetCoinbase(common.Address{3}) })
 
 	// Import the shared chain and the original canonical one
-	diskdb, _ := ethdb.NewMemDatabase()
+	diskdb := ethdb.NewMemDatabase()
 	new(Genesis).MustCommit(diskdb)
 
 	chain, err := NewBlockChain(diskdb, nil, params.TestChainConfig, engine, vm.Config{})
@@ -1360,7 +1395,7 @@ func benchmarkLargeNumberOfValueToNonexisting(b *testing.B, numTxs, numBlocks in
 	)
 	// Generate the original common chain segment and the two competing forks
 	engine := ethash.NewFaker()
-	db, _ := ethdb.NewMemDatabase()
+	db := ethdb.NewMemDatabase()
 	genesis := gspec.MustCommit(db)
 
 	blockGenerator := func(i int, block *BlockGen) {
@@ -1382,7 +1417,7 @@ func benchmarkLargeNumberOfValueToNonexisting(b *testing.B, numTxs, numBlocks in
 	b.ResetTimer()
 	for i := 0; i < b.N; i++ {
 		// Import the shared chain and the original canonical one
-		diskdb, _ := ethdb.NewMemDatabase()
+		diskdb := ethdb.NewMemDatabase()
 		gspec.MustCommit(diskdb)
 
 		chain, err := NewBlockChain(diskdb, nil, params.TestChainConfig, engine, vm.Config{})
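The benchmark hunk follows the standard Go idiom: all block generation happens before `b.ResetTimer()`, so only the `b.N` import loop is timed. A hedged sketch of that scaffold with arbitrary block counts; the benchmark name and package are hypothetical:

package corebench // hypothetical package, for illustration only

import (
	"testing"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/consensus/ethash"
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/core/vm"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/params"
)

func BenchmarkChainImport(b *testing.B) {
	// Setup outside the timed region: generate the blocks once.
	engine := ethash.NewFaker()
	db := ethdb.NewMemDatabase()
	genesis := new(core.Genesis).MustCommit(db)
	blocks, _ := core.GenerateChain(params.TestChainConfig, genesis, engine, db, 16,
		func(i int, bg *core.BlockGen) { bg.SetCoinbase(common.Address{1}) })

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		// Each iteration imports into a fresh database, as in the hunk above.
		diskdb := ethdb.NewMemDatabase()
		new(core.Genesis).MustCommit(diskdb)

		chain, err := core.NewBlockChain(diskdb, nil, params.TestChainConfig, engine, vm.Config{})
		if err != nil {
			b.Fatalf("failed to create chain: %v", err)
		}
		if _, err := chain.InsertChain(blocks); err != nil {
			b.Fatalf("failed to insert chain: %v", err)
		}
	}
}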
@@ -24,6 +24,7 @@ import (
 	"time"
 
 	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/core/rawdb"
 	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/ethdb"
 	"github.com/ethereum/go-ethereum/event"
@@ -206,7 +207,7 @@ func (c *ChainIndexer) eventLoop(currentHeader *types.Header, events chan ChainE
 
 			// TODO(karalabe): This operation is expensive and might block, causing the event system to
 			// potentially also lock up. We need to do this on a different thread somehow.
-			if h := FindCommonAncestor(c.chainDb, prevHeader, header); h != nil {
+			if h := rawdb.FindCommonAncestor(c.chainDb, prevHeader, header); h != nil {
 				c.newHead(h.Number.Uint64(), true)
 			}
 		}
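In `eventLoop`, the free function `FindCommonAncestor` moves from package `core` into `core/rawdb` along with the other raw database accessors. A contrived sketch of the relocated helper, assuming `rawdb.WriteHeader` and a two-sibling fork whose `Extra` bytes exist only to give the headers distinct hashes:

package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/ethdb"
)

func main() {
	db := ethdb.NewMemDatabase()

	// A tiny ancestry: one parent with two children at the same height.
	parent := &types.Header{Number: big.NewInt(0)}
	rawdb.WriteHeader(db, parent)

	a := &types.Header{Number: big.NewInt(1), ParentHash: parent.Hash(), Extra: []byte("a")}
	b := &types.Header{Number: big.NewInt(1), ParentHash: parent.Hash(), Extra: []byte("b")}
	rawdb.WriteHeader(db, a)
	rawdb.WriteHeader(db, b)

	// The relocated helper walks both branches back to their fork point.
	if h := rawdb.FindCommonAncestor(db, a, b); h != nil {
		fmt.Println("common ancestor at block", h.Number)
	}
}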
@@ -349,11 +350,11 @@ func (c *ChainIndexer) processSection(section uint64, lastHead common.Hash) (com
 	}
 
 	for number := section * c.sectionSize; number < (section+1)*c.sectionSize; number++ {
-		hash := GetCanonicalHash(c.chainDb, number)
+		hash := rawdb.ReadCanonicalHash(c.chainDb, number)
 		if hash == (common.Hash{}) {
 			return common.Hash{}, fmt.Errorf("canonical block #%d unknown", number)
 		}
-		header := GetHeader(c.chainDb, hash, number)
+		header := rawdb.ReadHeader(c.chainDb, hash, number)
 		if header == nil {
 			return common.Hash{}, fmt.Errorf("block #%d [%x…] not found", number, hash[:4])
 		} else if header.ParentHash != lastHead {
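The read side is the same migration: the old package-level `GetCanonicalHash`/`GetHeader` helpers become `rawdb.ReadCanonicalHash`/`rawdb.ReadHeader`. A minimal sketch mirroring the loop in `processSection`; the `sectionSize` parameter is a stand-in for the indexer's field, and the empty database simply exercises the error path:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/ethdb"
)

// readSectionHeads resolves each canonical block of a section to its header,
// using the relocated rawdb accessors.
func readSectionHeads(db ethdb.Database, section, sectionSize uint64) error {
	for number := section * sectionSize; number < (section+1)*sectionSize; number++ {
		hash := rawdb.ReadCanonicalHash(db, number)
		if hash == (common.Hash{}) {
			return fmt.Errorf("canonical block #%d unknown", number)
		}
		header := rawdb.ReadHeader(db, hash, number)
		if header == nil {
			return fmt.Errorf("block #%d [%x…] not found", number, hash[:4])
		}
		fmt.Printf("block #%d -> parent %x\n", number, header.ParentHash[:4])
	}
	return nil
}

func main() {
	db := ethdb.NewMemDatabase()
	// Empty database: the first lookup fails, demonstrating the error path.
	if err := readSectionHeads(db, 0, 4); err != nil {
		fmt.Println(err)
	}
}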