Compare commits

149 Commits

| SHA1 |
| --- |
| 37685930d9 |
| 51df1c1f20 |
| f524ec4326 |
| eb794af833 |
| 67a7857124 |
| c73b654fd1 |
| 9da128db70 |
| 4e5d1f1c39 |
| d57e85ecc9 |
| 1990c9e621 |
| 319098cc1c |
| 223d943481 |
| 8974e2e5e0 |
| a4a2343cdc |
| fdfd6d3c39 |
| b5537c5601 |
| e916f9786d |
| 909e968ebb |
| 598f786aab |
| 461291882e |
| 1f0f6f0272 |
| 0a22ae5572 |
| b0cfd9c786 |
| 6d8a1bfb08 |
| 4895665670 |
| eaff89291c |
| e187711c65 |
| d926bf2c7e |
| 8db8d074e2 |
| 1a70338734 |
| 61a5976368 |
| 88c42ab4e7 |
| 4210dd1500 |
| c971ab617d |
| f1986f86f2 |
| 28aca90716 |
| 9b1536b26a |
| 3e57c33147 |
| baa7eb901e |
| 574378edb5 |
| c95e4a80d1 |
| 897ea01d5f |
| ec192f18b4 |
| aa34173f13 |
| c343f75c26 |
| 52b1d09457 |
| 9402f96597 |
| d0fd8d6fc2 |
| cfde0b5f52 |
| de06185fc3 |
| 3fb5f3ae11 |
| e75d0a6e4c |
| 947e0afeb3 |
| 1836366ac1 |
| 591cef17d4 |
| e33a5de454 |
| f04c0e341e |
| ea89f40f0d |
| 1fc54d92ec |
| 8c4a7fa8d3 |
| 423d4254f5 |
| dea1ce052a |
| 25982375a8 |
| 049f5b3572 |
| 0255951587 |
| 85cd64df0e |
| 9608ccf106 |
| 3f06da7b5f |
| 546d42179e |
| 90829a04bf |
| f991995918 |
| aab7ab04b0 |
| 43b940ec5a |
| b487bdf0ba |
| a3267ed929 |
| 9f7592c802 |
| 99483e85b9 |
| 1d666cf27e |
| eac16f9824 |
| 69c52bde3f |
| 2977538ac0 |
| 7f0726f706 |
| 13af276418 |
| ea06da0892 |
| feb6620c34 |
| 90b22773e9 |
| 9e4f96a1a6 |
| 01a7e267dc |
| e8ea5aa0d5 |
| 5bee5d69d7 |
| cbfb40b0aa |
| 4cf2b4110e |
| 0029a869f0 |
| 2ab24a2a8f |
| 400332b99d |
| a5237a27ea |
| 7a22e89080 |
| e3a993d774 |
| ed40767355 |
| a20cc75b4a |
| b659718fd0 |
| be2aec092d |
| 17f80cc2e2 |
| 143c4341d8 |
| 3f33a7c8ce |
| c8dcb9584e |
| af28d12847 |
| 0ad32d3be7 |
| 68b0d30d4a |
| eae63c511c |
| ca34e8230e |
| 342ec83d67 |
| 38c7eb0f26 |
| d51faee240 |
| 426f62f1a8 |
| 7677ec1f34 |
| d258eee211 |
| 84f8c0cc1f |
| 998f6564b2 |
| 40a2c52397 |
| a9c6ef6905 |
| ccc0debb63 |
| ff9b14617e |
| d6ed2f67a8 |
| 54294b45b1 |
| d31802312a |
| 55b579e02c |
| be22ee8dda |
| 56de337e57 |
| c934c06cc1 |
| fbf57d53e2 |
| 6ce21a4744 |
| 9af364e42b |
| 09d44247f7 |
| 0fe47e98c4 |
| 415969f534 |
| d9cee2c172 |
| ab6bdbd9b0 |
| 953b5ac015 |
| f2fdb75dd9 |
| f9c456e02d |
| 579bd0f9fb |
| 49719e21bc |
| a2e43d28d0 |
| 6286c255f1 |
| f6bc65fc68 |
| ff8a033f18 |
| 247b5f0369 |
| 49ec4f0cd1 |
.github/CODEOWNERS (vendored, 38 changes)
@@ -1,12 +1,32 @@
 # Lines starting with '#' are comments.
 # Each line is a file pattern followed by one or more owners.

 accounts/usbwallet @karalabe
 consensus @karalabe
 core/ @karalabe @holiman
 eth/ @karalabe
 les/ @zsfelfoldi
 light/ @zsfelfoldi
 mobile/ @karalabe
 p2p/ @fjl @zsfelfoldi
+swarm/bmt @zelig
+swarm/dev @lmars
+swarm/fuse @jmozah @holisticode
+swarm/grafana_dashboards @nonsense
+swarm/metrics @nonsense @holisticode
+swarm/multihash @nolash
+swarm/network/bitvector @zelig @janos @gbalint
+swarm/network/priorityqueue @zelig @janos @gbalint
+swarm/network/simulations @zelig
+swarm/network/stream @janos @zelig @gbalint @holisticode @justelad
+swarm/network/stream/intervals @janos
+swarm/network/stream/testing @zelig
+swarm/pot @zelig
+swarm/pss @nolash @zelig @nonsense
+swarm/services @zelig
+swarm/state @justelad
+swarm/storage/encryption @gbalint @zelig @nagydani
+swarm/storage/mock @janos
+swarm/storage/mru @nolash
+swarm/testutil @lmars
 whisper/ @gballet @gluk256
.travis.yml (12 changes)
@@ -126,7 +126,7 @@ matrix:

     # This builder does the Android Maven and Azure uploads
     - os: linux
-      dist: precise # Needed for the android tools
+      dist: trusty
      addons:
        apt:
          packages:
@@ -146,16 +146,16 @@ matrix:
      git:
        submodules: false # avoid cloning ethereum/tests
      before_install:
-        - curl https://storage.googleapis.com/golang/go1.10.1.linux-amd64.tar.gz | tar -xz
+        - curl https://storage.googleapis.com/golang/go1.10.3.linux-amd64.tar.gz | tar -xz
        - export PATH=`pwd`/go/bin:$PATH
        - export GOROOT=`pwd`/go
        - export GOPATH=$HOME/go
      script:
        # Build the Android archive and upload it to Maven Central and Azure
-        - curl https://dl.google.com/android/repository/android-ndk-r16b-linux-x86_64.zip -o android-ndk-r16b.zip
-        - unzip -q android-ndk-r16b.zip && rm android-ndk-r16b.zip
-        - mv android-ndk-r16b $HOME
-        - export ANDROID_NDK=$HOME/android-ndk-r16b
+        - curl https://dl.google.com/android/repository/android-ndk-r17b-linux-x86_64.zip -o android-ndk-r17b.zip
+        - unzip -q android-ndk-r17b.zip && rm android-ndk-r17b.zip
+        - mv android-ndk-r17b $HOME
+        - export ANDROID_NDK=$HOME/android-ndk-r17b

        - mkdir -p $GOPATH/src/github.com/ethereum
        - ln -s `pwd` $GOPATH/src/github.com/ethereum
@@ -111,9 +111,14 @@ func (arguments Arguments) unpackTuple(v interface{}, marshalledValues []interfa
     if err := requireUnpackKind(value, typ, kind, arguments); err != nil {
         return err
     }
-    // If the output interface is a struct, make sure names don't collide
+
+    // If the interface is a struct, get of abi->struct_field mapping
+
+    var abi2struct map[string]string
     if kind == reflect.Struct {
-        if err := requireUniqueStructFieldNames(arguments); err != nil {
+        var err error
+        abi2struct, err = mapAbiToStructFields(arguments, value)
+        if err != nil {
             return err
         }
     }
@@ -123,9 +128,10 @@ func (arguments Arguments) unpackTuple(v interface{}, marshalledValues []interfa

     switch kind {
     case reflect.Struct:
-        err := unpackStruct(value, reflectValue, arg)
-        if err != nil {
-            return err
+        if structField, ok := abi2struct[arg.Name]; ok {
+            if err := set(value.FieldByName(structField), reflectValue, arg); err != nil {
+                return err
+            }
         }
     case reflect.Slice, reflect.Array:
         if value.Len() < i {
@@ -151,17 +157,22 @@ func (arguments Arguments) unpackAtomic(v interface{}, marshalledValues []interf
     if len(marshalledValues) != 1 {
         return fmt.Errorf("abi: wrong length, expected single value, got %d", len(marshalledValues))
     }

     elem := reflect.ValueOf(v).Elem()
     kind := elem.Kind()
     reflectValue := reflect.ValueOf(marshalledValues[0])
+
+    var abi2struct map[string]string
     if kind == reflect.Struct {
-        //make sure names don't collide
-        if err := requireUniqueStructFieldNames(arguments); err != nil {
+        var err error
+        if abi2struct, err = mapAbiToStructFields(arguments, elem); err != nil {
             return err
         }
-
-        return unpackStruct(elem, reflectValue, arguments[0])
+        arg := arguments.NonIndexed()[0]
+        if structField, ok := abi2struct[arg.Name]; ok {
+            return set(elem.FieldByName(structField), reflectValue, arg)
+        }
+        return nil
     }

     return set(elem, reflectValue, arguments.NonIndexed()[0])
@@ -277,18 +288,3 @@ func capitalise(input string) string {
     }
     return strings.ToUpper(input[:1]) + input[1:]
 }
-
-//unpackStruct extracts each argument into its corresponding struct field
-func unpackStruct(value, reflectValue reflect.Value, arg Argument) error {
-    name := capitalise(arg.Name)
-    typ := value.Type()
-    for j := 0; j < typ.NumField(); j++ {
-        // TODO read tags: `abi:"fieldName"`
-        if typ.Field(j).Name == name {
-            if err := set(value.Field(j), reflectValue, arg); err != nil {
-                return err
-            }
-        }
-    }
-    return nil
-}
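The hunks above replace the name-based `unpackStruct` lookup with an explicit abi-to-struct field mapping, so a struct field can claim an ABI output through an `abi:"..."` tag instead of its capitalised name. A minimal sketch of the resulting caller-side behaviour; the event definition, the `Amount` field name and the single-word payload are illustrative assumptions, only the tag mechanics come from this diff:

```go
package main

import (
	"fmt"
	"math/big"
	"strings"

	"github.com/ethereum/go-ethereum/accounts/abi"
	"github.com/ethereum/go-ethereum/common"
)

func main() {
	// Hypothetical event with one non-indexed uint256 argument named "value".
	const def = `[{"anonymous":false,"inputs":[{"indexed":false,"name":"value","type":"uint256"}],"name":"Transfer","type":"event"}]`

	parsed, err := abi.JSON(strings.NewReader(def))
	if err != nil {
		panic(err)
	}

	// One left-padded 32-byte word encoding value = 1000000.
	data := common.Hex2Bytes("00000000000000000000000000000000000000000000000000000000000f4240")

	// The `abi:"value"` tag routes the "value" output into Amount even
	// though the field name does not match the argument name.
	var out struct {
		Amount *big.Int `abi:"value"`
	}
	if err := parsed.Unpack(&out, "Transfer", data); err != nil {
		panic(err)
	}
	fmt.Println(out.Amount) // 1000000
}
```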
@@ -454,7 +454,7 @@ func (fb *filterBackend) GetLogs(ctx context.Context, hash common.Hash) ([][]*ty
     return logs, nil
 }

-func (fb *filterBackend) SubscribeTxPreEvent(ch chan<- core.TxPreEvent) event.Subscription {
+func (fb *filterBackend) SubscribeNewTxsEvent(ch chan<- core.NewTxsEvent) event.Subscription {
     return event.NewSubscription(func(quit <-chan struct{}) error {
         <-quit
         return nil
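The stub above is built with `event.NewSubscription`, which runs a producer function in its own goroutine and hands it a quit channel that closes on `Unsubscribe`. A self-contained sketch of that contract; the integer feed is an illustrative stand-in for transaction events:

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/event"
)

func main() {
	ch := make(chan int)
	// The producer must return once quit closes; NewSubscription then
	// closes the Err channel to signal that everything has shut down.
	sub := event.NewSubscription(func(quit <-chan struct{}) error {
		for i := 0; ; i++ {
			select {
			case ch <- i:
			case <-quit:
				return nil
			}
		}
	})
	fmt.Println(<-ch, <-ch) // consume two events
	sub.Unsubscribe()       // closes quit, producer exits
	<-sub.Err()             // drained once the producer has returned
}
```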
@@ -58,12 +58,28 @@ var jsonEventPledge = []byte(`{
     "type": "event"
 }`)

+var jsonEventMixedCase = []byte(`{
+    "anonymous": false,
+    "inputs": [{
+        "indexed": false, "name": "value", "type": "uint256"
+    }, {
+        "indexed": false, "name": "_value", "type": "uint256"
+    }, {
+        "indexed": false, "name": "Value", "type": "uint256"
+    }],
+    "name": "MixedCase",
+    "type": "event"
+}`)
+
 // 1000000
 var transferData1 = "00000000000000000000000000000000000000000000000000000000000f4240"

 // "0x00Ce0d46d924CC8437c806721496599FC3FFA268", 2218516807680, "usd"
 var pledgeData1 = "00000000000000000000000000ce0d46d924cc8437c806721496599fc3ffa2680000000000000000000000000000000000000000000000000000020489e800007573640000000000000000000000000000000000000000000000000000000000"

+// 1000000,2218516807680,1000001
+var mixedCaseData1 = "00000000000000000000000000000000000000000000000000000000000f42400000000000000000000000000000000000000000000000000000020489e8000000000000000000000000000000000000000000000000000000000000000f4241"
+
 func TestEventId(t *testing.T) {
     var table = []struct {
         definition string
@@ -121,6 +137,27 @@ func TestEventTupleUnpack(t *testing.T) {
         Value *big.Int
     }

+    type EventTransferWithTag struct {
+        // this is valid because `value` is not exportable,
+        // so value is only unmarshalled into `Value1`.
+        value  *big.Int
+        Value1 *big.Int `abi:"value"`
+    }
+
+    type BadEventTransferWithSameFieldAndTag struct {
+        Value  *big.Int
+        Value1 *big.Int `abi:"value"`
+    }
+
+    type BadEventTransferWithDuplicatedTag struct {
+        Value1 *big.Int `abi:"value"`
+        Value2 *big.Int `abi:"value"`
+    }
+
+    type BadEventTransferWithEmptyTag struct {
+        Value *big.Int `abi:""`
+    }
+
     type EventPledge struct {
         Who      common.Address
         Wad      *big.Int
@@ -133,9 +170,16 @@ func TestEventTupleUnpack(t *testing.T) {
         Currency [3]byte
     }

+    type EventMixedCase struct {
+        Value1 *big.Int `abi:"value"`
+        Value2 *big.Int `abi:"_value"`
+        Value3 *big.Int `abi:"Value"`
+    }
+
     bigint := new(big.Int)
     bigintExpected := big.NewInt(1000000)
     bigintExpected2 := big.NewInt(2218516807680)
+    bigintExpected3 := big.NewInt(1000001)
     addr := common.HexToAddress("0x00Ce0d46d924CC8437c806721496599FC3FFA268")
     var testCases = []struct {
         data     string
@@ -158,6 +202,34 @@ func TestEventTupleUnpack(t *testing.T) {
         jsonEventTransfer,
         "",
         "Can unpack ERC20 Transfer event into slice",
+    }, {
+        transferData1,
+        &EventTransferWithTag{},
+        &EventTransferWithTag{Value1: bigintExpected},
+        jsonEventTransfer,
+        "",
+        "Can unpack ERC20 Transfer event into structure with abi: tag",
+    }, {
+        transferData1,
+        &BadEventTransferWithDuplicatedTag{},
+        &BadEventTransferWithDuplicatedTag{},
+        jsonEventTransfer,
+        "struct: abi tag in 'Value2' already mapped",
+        "Can not unpack ERC20 Transfer event with duplicated abi tag",
+    }, {
+        transferData1,
+        &BadEventTransferWithSameFieldAndTag{},
+        &BadEventTransferWithSameFieldAndTag{},
+        jsonEventTransfer,
+        "abi: multiple variables maps to the same abi field 'value'",
+        "Can not unpack ERC20 Transfer event with a field and a tag mapping to the same abi variable",
+    }, {
+        transferData1,
+        &BadEventTransferWithEmptyTag{},
+        &BadEventTransferWithEmptyTag{},
+        jsonEventTransfer,
+        "struct: abi tag in 'Value' is empty",
+        "Can not unpack ERC20 Transfer event with an empty tag",
     }, {
         pledgeData1,
         &EventPledge{},
@@ -216,6 +288,13 @@ func TestEventTupleUnpack(t *testing.T) {
         jsonEventPledge,
         "abi: cannot unmarshal tuple into map[string]interface {}",
         "Can not unpack Pledge event into map",
+    }, {
+        mixedCaseData1,
+        &EventMixedCase{},
+        &EventMixedCase{Value1: bigintExpected, Value2: bigintExpected2, Value3: bigintExpected3},
+        jsonEventMixedCase,
+        "",
+        "Can unpack abi variables with mixed case",
     }}

     for _, tc := range testCases {
@@ -227,7 +306,7 @@ func TestEventTupleUnpack(t *testing.T) {
             assert.Nil(err, "Should be able to unpack event data.")
             assert.Equal(tc.expected, tc.dest, tc.name)
         } else {
-            assert.EqualError(err, tc.error)
+            assert.EqualError(err, tc.error, tc.name)
         }
     })
 }
@@ -31,29 +31,14 @@ var (
     uint16T  = reflect.TypeOf(uint16(0))
     uint32T  = reflect.TypeOf(uint32(0))
     uint64T  = reflect.TypeOf(uint64(0))
     intT     = reflect.TypeOf(int(0))
     int8T    = reflect.TypeOf(int8(0))
     int16T   = reflect.TypeOf(int16(0))
     int32T   = reflect.TypeOf(int32(0))
     int64T   = reflect.TypeOf(int64(0))
     addressT = reflect.TypeOf(common.Address{})
-    intTS    = reflect.TypeOf([]int(nil))
-    int8TS   = reflect.TypeOf([]int8(nil))
-    int16TS  = reflect.TypeOf([]int16(nil))
-    int32TS  = reflect.TypeOf([]int32(nil))
-    int64TS  = reflect.TypeOf([]int64(nil))
 )

 // U256 converts a big Int into a 256bit EVM number.
 func U256(n *big.Int) []byte {
     return math.PaddedBigBytes(math.U256(n), 32)
 }
-
-// checks whether the given reflect value is signed. This also works for slices with a number type
-func isSigned(v reflect.Value) bool {
-    switch v.Type() {
-    case intTS, int8TS, int16TS, int32TS, int64TS, intT, int8T, int16T, int32T, int64T:
-        return true
-    }
-    return false
-}
@@ -19,7 +19,6 @@ package abi

 import (
     "bytes"
     "math/big"
-    "reflect"
     "testing"
 )

@@ -32,13 +31,3 @@ func TestNumberTypes(t *testing.T) {
         t.Errorf("expected %x got %x", ubytes, unsigned)
     }
 }
-
-func TestSigned(t *testing.T) {
-    if isSigned(reflect.ValueOf(uint(10))) {
-        t.Error("signed")
-    }
-
-    if !isSigned(reflect.ValueOf(int(10))) {
-        t.Error("not signed")
-    }
-}
@@ -19,6 +19,7 @@ package abi

 import (
     "fmt"
     "reflect"
+    "strings"
 )

 // indirect recursively dereferences the value until it either gets the value
@@ -111,18 +112,101 @@ func requireUnpackKind(v reflect.Value, t reflect.Type, k reflect.Kind,
     return nil
 }

-// requireUniqueStructFieldNames makes sure field names don't collide
-func requireUniqueStructFieldNames(args Arguments) error {
-    exists := make(map[string]bool)
-    for _, arg := range args {
-        field := capitalise(arg.Name)
-        if field == "" {
-            return fmt.Errorf("abi: purely underscored output cannot unpack to struct")
-        }
-        if exists[field] {
-            return fmt.Errorf("abi: multiple outputs mapping to the same struct field '%s'", field)
-        }
-        exists[field] = true
-    }
-    return nil
-}
+// mapAbiToStringField maps abi to struct fields.
+// first round: for each Exportable field that contains a `abi:""` tag
+//   and this field name exists in the arguments, pair them together.
+// second round: for each argument field that has not been already linked,
+//   find what variable is expected to be mapped into, if it exists and has not been
+//   used, pair them.
+func mapAbiToStructFields(args Arguments, value reflect.Value) (map[string]string, error) {
+
+    typ := value.Type()
+
+    abi2struct := make(map[string]string)
+    struct2abi := make(map[string]string)
+
+    // first round ~~~
+    for i := 0; i < typ.NumField(); i++ {
+        structFieldName := typ.Field(i).Name
+
+        // skip private struct fields.
+        if structFieldName[:1] != strings.ToUpper(structFieldName[:1]) {
+            continue
+        }
+
+        // skip fields that have no abi:"" tag.
+        var ok bool
+        var tagName string
+        if tagName, ok = typ.Field(i).Tag.Lookup("abi"); !ok {
+            continue
+        }
+
+        // check if tag is empty.
+        if tagName == "" {
+            return nil, fmt.Errorf("struct: abi tag in '%s' is empty", structFieldName)
+        }
+
+        // check which argument field matches with the abi tag.
+        found := false
+        for _, abiField := range args.NonIndexed() {
+            if abiField.Name == tagName {
+                if abi2struct[abiField.Name] != "" {
+                    return nil, fmt.Errorf("struct: abi tag in '%s' already mapped", structFieldName)
+                }
+                // pair them
+                abi2struct[abiField.Name] = structFieldName
+                struct2abi[structFieldName] = abiField.Name
+                found = true
+            }
+        }
+
+        // check if this tag has been mapped.
+        if !found {
+            return nil, fmt.Errorf("struct: abi tag '%s' defined but not found in abi", tagName)
+        }
+
+    }
+
+    // second round ~~~
+    for _, arg := range args {
+
+        abiFieldName := arg.Name
+        structFieldName := capitalise(abiFieldName)
+
+        if structFieldName == "" {
+            return nil, fmt.Errorf("abi: purely underscored output cannot unpack to struct")
+        }
+
+        // this abi has already been paired, skip it... unless there exists another, yet unassigned
+        // struct field with the same field name. If so, raise an error:
+        //    abi: [ { "name": "value" } ]
+        //    struct { Value *big.Int , Value1 *big.Int `abi:"value"`}
+        if abi2struct[abiFieldName] != "" {
+            if abi2struct[abiFieldName] != structFieldName &&
+                struct2abi[structFieldName] == "" &&
+                value.FieldByName(structFieldName).IsValid() {
+                return nil, fmt.Errorf("abi: multiple variables maps to the same abi field '%s'", abiFieldName)
+            }
+            continue
+        }
+
+        // return an error if this struct field has already been paired.
+        if struct2abi[structFieldName] != "" {
+            return nil, fmt.Errorf("abi: multiple outputs mapping to the same struct field '%s'", structFieldName)
+        }
+
+        if value.FieldByName(structFieldName).IsValid() {
+            // pair them
+            abi2struct[abiFieldName] = structFieldName
+            struct2abi[structFieldName] = abiFieldName
+        } else {
+            // not paired, but annotate as used, to detect cases like
+            //    abi : [ { "name": "value" }, { "name": "_value" } ]
+            //    struct { Value *big.Int }
+            struct2abi[structFieldName] = abiFieldName
+        }
+
+    }
+
+    return abi2struct, nil
+}
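The first round above keys off `reflect.StructTag.Lookup`. A self-contained toy of that scan, not the library code: it uses `PkgPath` to detect unexported fields where the code above compares the first letter against its upper-case form, and the `out` struct is purely illustrative:

```go
package main

import (
	"fmt"
	"reflect"
)

// A toy mirror of the first-round tag scan: exported fields carrying an
// `abi:"..."` tag claim their ABI argument by name.
type out struct {
	value  int // unexported: skipped by the scan
	Value1 int `abi:"value"`
	Plain  int // no tag: left for the second, name-based round
}

func main() {
	typ := reflect.TypeOf(out{})
	for i := 0; i < typ.NumField(); i++ {
		f := typ.Field(i)
		if f.PkgPath != "" { // non-empty PkgPath means unexported
			continue
		}
		if tag, ok := f.Tag.Lookup("abi"); ok {
			fmt.Printf("field %s is pinned to abi argument %q\n", f.Name, tag)
		}
	}
}
```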
@@ -50,7 +50,7 @@ var (
 var KeyStoreType = reflect.TypeOf(&KeyStore{})

 // KeyStoreScheme is the protocol scheme prefixing account and wallet URLs.
-var KeyStoreScheme = "keystore"
+const KeyStoreScheme = "keystore"

 // Maximum time between wallet refreshes (if filesystem notifications don't work).
 const walletRefreshCycle = 3 * time.Second
@@ -36,7 +36,7 @@ func Type(msg proto.Message) uint16 {
 }

 // Name returns the friendly message type name of a specific protocol buffer
-// type numbers.
+// type number.
 func Name(kind uint16) string {
     name := MessageType_name[int32(kind)]
     if len(name) < 12 {
@@ -302,7 +302,7 @@ func (w *ledgerDriver) ledgerSign(derivationPath []uint32, tx *types.Transaction
     for i, component := range derivationPath {
         binary.BigEndian.PutUint32(path[1+4*i:], component)
     }
-    // Create the transaction RLP based on whether legacy or EIP155 signing was requeste
+    // Create the transaction RLP based on whether legacy or EIP155 signing was requested
     var (
         txrlp []byte
         err   error
@@ -23,8 +23,8 @@ environment:
 install:
   - git submodule update --init
   - rmdir C:\go /s /q
-  - appveyor DownloadFile https://storage.googleapis.com/golang/go1.10.1.windows-%GETH_ARCH%.zip
-  - 7z x go1.10.1.windows-%GETH_ARCH%.zip -y -oC:\ > NUL
+  - appveyor DownloadFile https://storage.googleapis.com/golang/go1.10.3.windows-%GETH_ARCH%.zip
+  - 7z x go1.10.3.windows-%GETH_ARCH%.zip -y -oC:\ > NUL
   - go version
   - gcc --version

bmt/bmt.go (560 deletions)
@@ -1,560 +0,0 @@
-// Copyright 2017 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-// Package bmt provides a binary merkle tree implementation
-package bmt
-
-import (
-    "fmt"
-    "hash"
-    "io"
-    "strings"
-    "sync"
-    "sync/atomic"
-)
-
-/*
-Binary Merkle Tree Hash is a hash function over arbitrary datachunks of limited size
-It is defined as the root hash of the binary merkle tree built over fixed size segments
-of the underlying chunk using any base hash function (e.g keccak 256 SHA3)
-
-It is used as the chunk hash function in swarm which in turn is the basis for the
-128 branching swarm hash http://swarm-guide.readthedocs.io/en/latest/architecture.html#swarm-hash
-
-The BMT is optimal for providing compact inclusion proofs, i.e. prove that a
-segment is a substring of a chunk starting at a particular offset
-The size of the underlying segments is fixed at 32 bytes (called the resolution
-of the BMT hash), the EVM word size to optimize for on-chain BMT verification
-as well as the hash size optimal for inclusion proofs in the merkle tree of the swarm hash.
-
-Two implementations are provided:
-
-* RefHasher is optimized for code simplicity and meant as a reference implementation
-* Hasher is optimized for speed taking advantage of concurrency with minimalistic
-  control structure to coordinate the concurrent routines
-  It implements the ChunkHash interface as well as the go standard hash.Hash interface
-
-*/
-
-const (
-    // DefaultSegmentCount is the maximum number of segments of the underlying chunk
-    DefaultSegmentCount = 128 // Should be equal to storage.DefaultBranches
-    // DefaultPoolSize is the maximum number of bmt trees used by the hashers, i.e,
-    // the maximum number of concurrent BMT hashing operations performed by the same hasher
-    DefaultPoolSize = 8
-)
-
-// BaseHasher is a hash.Hash constructor function used for the base hash of the BMT.
-type BaseHasher func() hash.Hash
-
-// Hasher a reusable hasher for fixed maximum size chunks representing a BMT
-// implements the hash.Hash interface
-// reuse pool of Tree-s for amortised memory allocation and resource control
-// supports order-agnostic concurrent segment writes
-// as well as sequential read and write
-// can not be called concurrently on more than one chunk
-// can be further appended after Sum
-// Reset gives back the Tree to the pool and guaranteed to leave
-// the tree and itself in a state reusable for hashing a new chunk
-type Hasher struct {
-    pool        *TreePool   // BMT resource pool
-    bmt         *Tree       // prebuilt BMT resource for flowcontrol and proofs
-    blocksize   int         // segment size (size of hash) also for hash.Hash
-    count       int         // segment count
-    size        int         // for hash.Hash same as hashsize
-    cur         int         // cursor position for rightmost currently open chunk
-    segment     []byte      // the rightmost open segment (not complete)
-    depth       int         // index of last level
-    result      chan []byte // result channel
-    hash        []byte      // to record the result
-    max         int32       // max segments for SegmentWriter interface
-    blockLength []byte      // The block length that needes to be added in Sum
-}
-
-// New creates a reusable Hasher
-// implements the hash.Hash interface
-// pulls a new Tree from a resource pool for hashing each chunk
-func New(p *TreePool) *Hasher {
-    return &Hasher{
-        pool:      p,
-        depth:     depth(p.SegmentCount),
-        size:      p.SegmentSize,
-        blocksize: p.SegmentSize,
-        count:     p.SegmentCount,
-        result:    make(chan []byte),
-    }
-}
-
-// Node is a reuseable segment hasher representing a node in a BMT
-// it allows for continued writes after a Sum
-// and is left in completely reusable state after Reset
-type Node struct {
-    level, index int   // position of node for information/logging only
-    initial      bool  // first and last node
-    root         bool  // whether the node is root to a smaller BMT
-    isLeft       bool  // whether it is left side of the parent double segment
-    unbalanced   bool  // indicates if a node has only the left segment
-    parent       *Node // BMT connections
-    state        int32 // atomic increment impl concurrent boolean toggle
-    left, right  []byte
-}
-
-// NewNode constructor for segment hasher nodes in the BMT
-func NewNode(level, index int, parent *Node) *Node {
-    return &Node{
-        parent:  parent,
-        level:   level,
-        index:   index,
-        initial: index == 0,
-        isLeft:  index%2 == 0,
-    }
-}
-
-// TreePool provides a pool of Trees used as resources by Hasher
-// a Tree popped from the pool is guaranteed to have clean state
-// for hashing a new chunk
-// Hasher Reset releases the Tree to the pool
-type TreePool struct {
-    lock         sync.Mutex
-    c            chan *Tree
-    hasher       BaseHasher
-    SegmentSize  int
-    SegmentCount int
-    Capacity     int
-    count        int
-}
-
-// NewTreePool creates a Tree pool with hasher, segment size, segment count and capacity
-// on GetTree it reuses free Trees or creates a new one if size is not reached
-func NewTreePool(hasher BaseHasher, segmentCount, capacity int) *TreePool {
-    return &TreePool{
-        c:            make(chan *Tree, capacity),
-        hasher:       hasher,
-        SegmentSize:  hasher().Size(),
-        SegmentCount: segmentCount,
-        Capacity:     capacity,
-    }
-}
-
-// Drain drains the pool until it has no more than n resources
-func (p *TreePool) Drain(n int) {
-    p.lock.Lock()
-    defer p.lock.Unlock()
-    for len(p.c) > n {
-        <-p.c
-        p.count--
-    }
-}
-
-// Reserve is blocking until it returns an available Tree
-// it reuses free Trees or creates a new one if size is not reached
-func (p *TreePool) Reserve() *Tree {
-    p.lock.Lock()
-    defer p.lock.Unlock()
-    var t *Tree
-    if p.count == p.Capacity {
-        return <-p.c
-    }
-    select {
-    case t = <-p.c:
-    default:
-        t = NewTree(p.hasher, p.SegmentSize, p.SegmentCount)
-        p.count++
-    }
-    return t
-}
-
-// Release gives back a Tree to the pool.
-// This Tree is guaranteed to be in reusable state
-// does not need locking
-func (p *TreePool) Release(t *Tree) {
-    p.c <- t // can never fail but...
-}
-
-// Tree is a reusable control structure representing a BMT
-// organised in a binary tree
-// Hasher uses a TreePool to pick one for each chunk hash
-// the Tree is 'locked' while not in the pool
-type Tree struct {
-    leaves []*Node
-}
-
-// Draw draws the BMT (badly)
-func (t *Tree) Draw(hash []byte, d int) string {
-    var left, right []string
-    var anc []*Node
-    for i, n := range t.leaves {
-        left = append(left, fmt.Sprintf("%v", hashstr(n.left)))
-        if i%2 == 0 {
-            anc = append(anc, n.parent)
-        }
-        right = append(right, fmt.Sprintf("%v", hashstr(n.right)))
-    }
-    anc = t.leaves
-    var hashes [][]string
-    for l := 0; len(anc) > 0; l++ {
-        var nodes []*Node
-        hash := []string{""}
-        for i, n := range anc {
-            hash = append(hash, fmt.Sprintf("%v|%v", hashstr(n.left), hashstr(n.right)))
-            if i%2 == 0 && n.parent != nil {
-                nodes = append(nodes, n.parent)
-            }
-        }
-        hash = append(hash, "")
-        hashes = append(hashes, hash)
-        anc = nodes
-    }
-    hashes = append(hashes, []string{"", fmt.Sprintf("%v", hashstr(hash)), ""})
-    total := 60
-    del := "                             "
-    var rows []string
-    for i := len(hashes) - 1; i >= 0; i-- {
-        var textlen int
-        hash := hashes[i]
-        for _, s := range hash {
-            textlen += len(s)
-        }
-        if total < textlen {
-            total = textlen + len(hash)
-        }
-        delsize := (total - textlen) / (len(hash) - 1)
-        if delsize > len(del) {
-            delsize = len(del)
-        }
-        row := fmt.Sprintf("%v: %v", len(hashes)-i-1, strings.Join(hash, del[:delsize]))
-        rows = append(rows, row)
-
-    }
-    rows = append(rows, strings.Join(left, "  "))
-    rows = append(rows, strings.Join(right, "  "))
-    return strings.Join(rows, "\n") + "\n"
-}
-
-// NewTree initialises the Tree by building up the nodes of a BMT
-// segment size is stipulated to be the size of the hash
-// segmentCount needs to be positive integer and does not need to be
-// a power of two and can even be an odd number
-// segmentSize * segmentCount determines the maximum chunk size
-// hashed using the tree
-func NewTree(hasher BaseHasher, segmentSize, segmentCount int) *Tree {
-    n := NewNode(0, 0, nil)
-    n.root = true
-    prevlevel := []*Node{n}
-    // iterate over levels and creates 2^level nodes
-    level := 1
-    count := 2
-    for d := 1; d <= depth(segmentCount); d++ {
-        nodes := make([]*Node, count)
-        for i := 0; i < len(nodes); i++ {
-            parent := prevlevel[i/2]
-            t := NewNode(level, i, parent)
-            nodes[i] = t
-        }
-        prevlevel = nodes
-        level++
-        count *= 2
-    }
-    // the datanode level is the nodes on the last level where
-    return &Tree{
-        leaves: prevlevel,
-    }
-}
-
-// methods needed by hash.Hash
-
-// Size returns the size
-func (h *Hasher) Size() int {
-    return h.size
-}
-
-// BlockSize returns the block size
-func (h *Hasher) BlockSize() int {
-    return h.blocksize
-}
-
-// Sum returns the hash of the buffer
-// hash.Hash interface Sum method appends the byte slice to the underlying
-// data before it calculates and returns the hash of the chunk
-func (h *Hasher) Sum(b []byte) (r []byte) {
-    t := h.bmt
-    i := h.cur
-    n := t.leaves[i]
-    j := i
-    // must run strictly before all nodes calculate
-    // datanodes are guaranteed to have a parent
-    if len(h.segment) > h.size && i > 0 && n.parent != nil {
-        n = n.parent
-    } else {
-        i *= 2
-    }
-    d := h.finalise(n, i)
-    h.writeSegment(j, h.segment, d)
-    c := <-h.result
-    h.releaseTree()
-
-    // sha3(length + BMT(pure_chunk))
-    if h.blockLength == nil {
-        return c
-    }
-    res := h.pool.hasher()
-    res.Reset()
-    res.Write(h.blockLength)
-    res.Write(c)
-    return res.Sum(nil)
-}
-
-// Hasher implements the SwarmHash interface
-
-// Hash waits for the hasher result and returns it
-// caller must call this on a BMT Hasher being written to
-func (h *Hasher) Hash() []byte {
-    return <-h.result
-}
-
-// Hasher implements the io.Writer interface
-
-// Write fills the buffer to hash
-// with every full segment complete launches a hasher go routine
-// that shoots up the BMT
-func (h *Hasher) Write(b []byte) (int, error) {
-    l := len(b)
-    if l <= 0 {
-        return 0, nil
-    }
-    s := h.segment
-    i := h.cur
-    count := (h.count + 1) / 2
-    need := h.count*h.size - h.cur*2*h.size
-    size := h.size
-    if need > size {
-        size *= 2
-    }
-    if l < need {
-        need = l
-    }
-    // calculate missing bit to complete current open segment
-    rest := size - len(s)
-    if need < rest {
-        rest = need
-    }
-    s = append(s, b[:rest]...)
-    need -= rest
-    // read full segments and the last possibly partial segment
-    for need > 0 && i < count-1 {
-        // push all finished chunks we read
-        h.writeSegment(i, s, h.depth)
-        need -= size
-        if need < 0 {
-            size += need
-        }
-        s = b[rest : rest+size]
-        rest += size
-        i++
-    }
-    h.segment = s
-    h.cur = i
-    // otherwise, we can assume len(s) == 0, so all buffer is read and chunk is not yet full
-    return l, nil
-}
-
-// Hasher implements the io.ReaderFrom interface
-
-// ReadFrom reads from io.Reader and appends to the data to hash using Write
-// it reads so that chunk to hash is maximum length or reader reaches EOF
-// caller must Reset the hasher prior to call
-func (h *Hasher) ReadFrom(r io.Reader) (m int64, err error) {
-    bufsize := h.size*h.count - h.size*h.cur - len(h.segment)
-    buf := make([]byte, bufsize)
-    var read int
-    for {
-        var n int
-        n, err = r.Read(buf)
-        read += n
-        if err == io.EOF || read == len(buf) {
-            hash := h.Sum(buf[:n])
-            if read == len(buf) {
-                err = NewEOC(hash)
-            }
-            break
-        }
-        if err != nil {
-            break
-        }
-        n, err = h.Write(buf[:n])
-        if err != nil {
-            break
-        }
-    }
-    return int64(read), err
-}
-
-// Reset needs to be called before writing to the hasher
-func (h *Hasher) Reset() {
-    h.getTree()
-    h.blockLength = nil
-}
-
-// Hasher implements the SwarmHash interface
-
-// ResetWithLength needs to be called before writing to the hasher
-// the argument is supposed to be the byte slice binary representation of
-// the length of the data subsumed under the hash
-func (h *Hasher) ResetWithLength(l []byte) {
-    h.Reset()
-    h.blockLength = l
-}
-
-// Release gives back the Tree to the pool whereby it unlocks
-// it resets tree, segment and index
-func (h *Hasher) releaseTree() {
-    if h.bmt != nil {
-        n := h.bmt.leaves[h.cur]
-        for ; n != nil; n = n.parent {
-            n.unbalanced = false
-            if n.parent != nil {
-                n.root = false
-            }
-        }
-        h.pool.Release(h.bmt)
-        h.bmt = nil
-
-    }
-    h.cur = 0
-    h.segment = nil
-}
-
-func (h *Hasher) writeSegment(i int, s []byte, d int) {
-    hash := h.pool.hasher()
-    n := h.bmt.leaves[i]
-
-    if len(s) > h.size && n.parent != nil {
-        go func() {
-            hash.Reset()
-            hash.Write(s)
-            s = hash.Sum(nil)
-
-            if n.root {
-                h.result <- s
-                return
-            }
-            h.run(n.parent, hash, d, n.index, s)
-        }()
-        return
-    }
-    go h.run(n, hash, d, i*2, s)
-}
-
-func (h *Hasher) run(n *Node, hash hash.Hash, d int, i int, s []byte) {
-    isLeft := i%2 == 0
-    for {
-        if isLeft {
-            n.left = s
-        } else {
-            n.right = s
-        }
-        if !n.unbalanced && n.toggle() {
-            return
-        }
-        if !n.unbalanced || !isLeft || i == 0 && d == 0 {
-            hash.Reset()
-            hash.Write(n.left)
-            hash.Write(n.right)
-            s = hash.Sum(nil)
-
-        } else {
-            s = append(n.left, n.right...)
-        }
-
-        h.hash = s
-        if n.root {
-            h.result <- s
-            return
-        }
-
-        isLeft = n.isLeft
-        n = n.parent
-        i++
-    }
-}
-
-// getTree obtains a BMT resource by reserving one from the pool
-func (h *Hasher) getTree() *Tree {
-    if h.bmt != nil {
-        return h.bmt
-    }
-    t := h.pool.Reserve()
-    h.bmt = t
-    return t
-}
-
-// atomic bool toggle implementing a concurrent reusable 2-state object
-// atomic addint with %2 implements atomic bool toggle
-// it returns true if the toggler just put it in the active/waiting state
-func (n *Node) toggle() bool {
-    return atomic.AddInt32(&n.state, 1)%2 == 1
-}
-
-func hashstr(b []byte) string {
-    end := len(b)
-    if end > 4 {
-        end = 4
-    }
-    return fmt.Sprintf("%x", b[:end])
-}
-
-func depth(n int) (d int) {
-    for l := (n - 1) / 2; l > 0; l /= 2 {
-        d++
-    }
-    return d
-}
-
-// finalise is following the zigzags on the tree belonging
-// to the final datasegment
-func (h *Hasher) finalise(n *Node, i int) (d int) {
-    isLeft := i%2 == 0
-    for {
-        // when the final segment's path is going via left segments
-        // the incoming data is pushed to the parent upon pulling the left
-        // we do not need toggle the state since this condition is
-        // detectable
-        n.unbalanced = isLeft
-        n.right = nil
-        if n.initial {
-            n.root = true
-            return d
-        }
-        isLeft = n.isLeft
-        n = n.parent
-        d++
-    }
-}
-
-// EOC (end of chunk) implements the error interface
-type EOC struct {
-    Hash []byte // read the hash of the chunk off the error
-}
-
-// Error returns the error string
-func (e *EOC) Error() string {
-    return fmt.Sprintf("hasher limit reached, chunk hash: %x", e.Hash)
-}
-
-// NewEOC creates new end of chunk error with the hash
-func NewEOC(hash []byte) *EOC {
-    return &EOC{hash}
-}
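Worth pausing on `toggle` in the deleted file above: `atomic.AddInt32` taken modulo 2 acts as a lock-free boolean flip, so of the two goroutines delivering child hashes to a node, exactly the second to arrive carries the combined hash toward the root. A standalone sketch of the idea; the printed strings are illustrative:

```go
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

func main() {
	// Two goroutines deliver one child hash each to a shared node; the
	// AddInt32 %2 toggle lets exactly one of them (the second arrival)
	// continue up the tree, with no mutex involved.
	var state int32
	var wg sync.WaitGroup
	for _, side := range []string{"left", "right"} {
		wg.Add(1)
		go func(side string) {
			defer wg.Done()
			first := atomic.AddInt32(&state, 1)%2 == 1
			if first {
				fmt.Println(side, "arrived first: park and wait")
				return
			}
			fmt.Println(side, "arrived second: combine and move to parent")
		}(side)
	}
	wg.Wait()
}
```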
bmt/bmt_r.go (85 deletions)
@@ -1,85 +0,0 @@
-// Copyright 2017 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-// simple nonconcurrent reference implementation for hashsize segment based
-// Binary Merkle tree hash on arbitrary but fixed maximum chunksize
-//
-// This implementation does not take advantage of any paralellisms and uses
-// far more memory than necessary, but it is easy to see that it is correct.
-// It can be used for generating test cases for optimized implementations.
-// see testBMTHasherCorrectness function in bmt_test.go
-package bmt
-
-import (
-    "hash"
-)
-
-// RefHasher is the non-optimized easy to read reference implementation of BMT
-type RefHasher struct {
-    span    int
-    section int
-    cap     int
-    h       hash.Hash
-}
-
-// NewRefHasher returns a new RefHasher
-func NewRefHasher(hasher BaseHasher, count int) *RefHasher {
-    h := hasher()
-    hashsize := h.Size()
-    maxsize := hashsize * count
-    c := 2
-    for ; c < count; c *= 2 {
-    }
-    if c > 2 {
-        c /= 2
-    }
-    return &RefHasher{
-        section: 2 * hashsize,
-        span:    c * hashsize,
-        cap:     maxsize,
-        h:       h,
-    }
-}
-
-// Hash returns the BMT hash of the byte slice
-// implements the SwarmHash interface
-func (rh *RefHasher) Hash(d []byte) []byte {
-    if len(d) > rh.cap {
-        d = d[:rh.cap]
-    }
-
-    return rh.hash(d, rh.span)
-}
-
-func (rh *RefHasher) hash(d []byte, s int) []byte {
-    l := len(d)
-    left := d
-    var right []byte
-    if l > rh.section {
-        for ; s >= l; s /= 2 {
-        }
-        left = rh.hash(d[:s], s)
-        right = d[s:]
-        if l-s > rh.section/2 {
-            right = rh.hash(right, s)
-        }
-    }
-    defer rh.h.Reset()
-    rh.h.Write(left)
-    rh.h.Write(right)
-    h := rh.h.Sum(nil)
-    return h
-}
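The deleted `hash` method above splits the data at a power-of-two span and recurses on each side. A simplified sketch of that recursion, assuming SHA-256 in place of Keccak-256 (so outputs will not match swarm hashes) and always hashing the right side, whereas `RefHasher` leaves a short trailing remainder unhashed:

```go
package main

import (
	"crypto/sha256"
	"fmt"
)

const segmentSize = 32 // one base-hash output, as in the deleted package

// refBMT mirrors the core recursion: pick the largest power-of-two span of
// segments below the data length, hash each half, then hash the
// concatenation. Leaves of up to two segments are hashed whole.
func refBMT(data []byte) []byte {
	if len(data) <= 2*segmentSize {
		sum := sha256.Sum256(data)
		return sum[:]
	}
	span := segmentSize
	for span*2 < len(data) {
		span *= 2
	}
	left := refBMT(data[:span])
	right := refBMT(data[span:])
	sum := sha256.Sum256(append(left, right...))
	return sum[:]
}

func main() {
	data := make([]byte, 128) // four zero segments: H(H(d[:64]) || H(d[64:]))
	fmt.Printf("%x\n", refBMT(data))
}
```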
bmt/bmt_test.go (481 deletions)
@@ -1,481 +0,0 @@
|
||||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package bmt
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
crand "crypto/rand"
|
||||
"fmt"
|
||||
"hash"
|
||||
"io"
|
||||
"math/rand"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/crypto/sha3"
|
||||
)
|
||||
|
||||
const (
|
||||
maxproccnt = 8
|
||||
)
|
||||
|
||||
// TestRefHasher tests that the RefHasher computes the expected BMT hash for
|
||||
// all data lengths between 0 and 256 bytes
|
||||
func TestRefHasher(t *testing.T) {
|
||||
hashFunc := sha3.NewKeccak256
|
||||
|
||||
sha3 := func(data ...[]byte) []byte {
|
||||
h := hashFunc()
|
||||
for _, v := range data {
|
||||
h.Write(v)
|
||||
}
|
||||
return h.Sum(nil)
|
||||
}
|
||||
|
||||
// the test struct is used to specify the expected BMT hash for data
|
||||
// lengths between "from" and "to"
|
||||
type test struct {
|
||||
from int64
|
||||
to int64
|
||||
expected func([]byte) []byte
|
||||
}
|
||||
|
||||
var tests []*test
|
||||
|
||||
// all lengths in [0,64] should be:
|
||||
//
|
||||
// sha3(data)
|
||||
//
|
||||
tests = append(tests, &test{
|
||||
from: 0,
|
||||
to: 64,
|
||||
expected: func(data []byte) []byte {
|
||||
return sha3(data)
|
||||
},
|
||||
})
|
||||
|
||||
// all lengths in [65,96] should be:
|
||||
//
|
||||
// sha3(
|
||||
// sha3(data[:64])
|
||||
// data[64:]
|
||||
// )
|
||||
//
|
||||
tests = append(tests, &test{
|
||||
from: 65,
|
||||
to: 96,
|
||||
expected: func(data []byte) []byte {
|
||||
return sha3(sha3(data[:64]), data[64:])
|
||||
},
|
||||
})
|
||||
|
||||
// all lengths in [97,128] should be:
|
||||
//
|
||||
// sha3(
|
||||
// sha3(data[:64])
|
||||
// sha3(data[64:])
|
||||
// )
|
||||
//
|
||||
tests = append(tests, &test{
|
||||
from: 97,
|
||||
to: 128,
|
||||
expected: func(data []byte) []byte {
|
||||
return sha3(sha3(data[:64]), sha3(data[64:]))
|
||||
},
|
||||
})
|
||||
|
||||
// all lengths in [129,160] should be:
|
||||
//
|
||||
// sha3(
|
||||
// sha3(
|
||||
// sha3(data[:64])
|
||||
// sha3(data[64:128])
|
||||
// )
|
||||
// data[128:]
|
||||
// )
|
||||
//
|
||||
tests = append(tests, &test{
|
||||
from: 129,
|
||||
to: 160,
|
||||
expected: func(data []byte) []byte {
|
||||
return sha3(sha3(sha3(data[:64]), sha3(data[64:128])), data[128:])
|
||||
},
|
||||
})
|
||||
|
||||
// all lengths in [161,192] should be:
|
||||
//
|
||||
// sha3(
|
||||
// sha3(
|
||||
// sha3(data[:64])
|
||||
// sha3(data[64:128])
|
||||
// )
|
||||
// sha3(data[128:])
|
||||
// )
|
||||
//
|
||||
tests = append(tests, &test{
|
||||
from: 161,
|
||||
to: 192,
|
||||
expected: func(data []byte) []byte {
|
||||
return sha3(sha3(sha3(data[:64]), sha3(data[64:128])), sha3(data[128:]))
|
||||
},
|
||||
})
|
||||
|
||||
// all lengths in [193,224] should be:
|
||||
//
|
||||
// sha3(
|
||||
// sha3(
|
||||
// sha3(data[:64])
|
||||
// sha3(data[64:128])
|
||||
// )
|
||||
// sha3(
|
||||
// sha3(data[128:192])
|
||||
// data[192:]
|
||||
// )
|
||||
// )
|
||||
//
|
||||
tests = append(tests, &test{
|
||||
from: 193,
|
||||
to: 224,
|
||||
expected: func(data []byte) []byte {
|
||||
return sha3(sha3(sha3(data[:64]), sha3(data[64:128])), sha3(sha3(data[128:192]), data[192:]))
|
||||
},
|
||||
})
|
||||
|
||||
// all lengths in [225,256] should be:
|
||||
//
|
||||
// sha3(
|
||||
// sha3(
|
||||
// sha3(data[:64])
|
||||
// sha3(data[64:128])
|
||||
// )
|
||||
// sha3(
|
||||
// sha3(data[128:192])
|
||||
// sha3(data[192:])
|
||||
// )
|
||||
// )
|
||||
//
|
||||
tests = append(tests, &test{
|
||||
from: 225,
|
||||
to: 256,
|
||||
expected: func(data []byte) []byte {
|
||||
return sha3(sha3(sha3(data[:64]), sha3(data[64:128])), sha3(sha3(data[128:192]), sha3(data[192:])))
|
||||
},
|
||||
})
|
||||
|
||||
// run the tests
|
||||
for _, x := range tests {
|
||||
for length := x.from; length <= x.to; length++ {
|
||||
t.Run(fmt.Sprintf("%d_bytes", length), func(t *testing.T) {
|
||||
data := make([]byte, length)
|
||||
if _, err := io.ReadFull(crand.Reader, data); err != nil && err != io.EOF {
|
||||
t.Fatal(err)
|
||||
}
|
||||
expected := x.expected(data)
|
||||
actual := NewRefHasher(hashFunc, 128).Hash(data)
|
||||
if !bytes.Equal(actual, expected) {
|
||||
t.Fatalf("expected %x, got %x", expected, actual)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func testDataReader(l int) (r io.Reader) {
|
||||
return io.LimitReader(crand.Reader, int64(l))
|
||||
}
|
||||
|
||||
func TestHasherCorrectness(t *testing.T) {
|
||||
err := testHasher(testBaseHasher)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func testHasher(f func(BaseHasher, []byte, int, int) error) error {
|
||||
tdata := testDataReader(4128)
|
||||
data := make([]byte, 4128)
|
||||
tdata.Read(data)
|
||||
hasher := sha3.NewKeccak256
|
||||
size := hasher().Size()
|
||||
counts := []int{1, 2, 3, 4, 5, 8, 16, 32, 64, 128}
|
||||
|
||||
var err error
|
||||
for _, count := range counts {
|
||||
max := count * size
|
||||
incr := 1
|
||||
for n := 0; n <= max+incr; n += incr {
|
||||
err = f(hasher, data, n, count)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func TestHasherReuseWithoutRelease(t *testing.T) {
|
||||
testHasherReuse(1, t)
|
||||
}
|
||||
|
||||
func TestHasherReuseWithRelease(t *testing.T) {
|
||||
testHasherReuse(maxproccnt, t)
|
||||
}
|
||||
|
||||
func testHasherReuse(i int, t *testing.T) {
|
||||
hasher := sha3.NewKeccak256
|
||||
pool := NewTreePool(hasher, 128, i)
|
||||
defer pool.Drain(0)
|
||||
bmt := New(pool)
|
||||
|
||||
for i := 0; i < 500; i++ {
|
||||
n := rand.Intn(4096)
|
||||
tdata := testDataReader(n)
|
||||
data := make([]byte, n)
|
||||
tdata.Read(data)
|
||||
|
||||
err := testHasherCorrectness(bmt, hasher, data, n, 128)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestHasherConcurrency(t *testing.T) {
|
||||
hasher := sha3.NewKeccak256
|
||||
pool := NewTreePool(hasher, 128, maxproccnt)
|
||||
defer pool.Drain(0)
|
||||
wg := sync.WaitGroup{}
|
||||
cycles := 100
|
||||
wg.Add(maxproccnt * cycles)
|
||||
errc := make(chan error)
|
||||
|
||||
for p := 0; p < maxproccnt; p++ {
|
||||
for i := 0; i < cycles; i++ {
|
||||
go func() {
|
||||
bmt := New(pool)
|
||||
n := rand.Intn(4096)
|
||||
tdata := testDataReader(n)
|
||||
data := make([]byte, n)
|
||||
tdata.Read(data)
|
||||
err := testHasherCorrectness(bmt, hasher, data, n, 128)
|
||||
wg.Done()
|
||||
if err != nil {
|
||||
errc <- err
|
||||
}
|
||||
}()
|
||||
}
|
||||
}
|
||||
go func() {
|
||||
wg.Wait()
|
||||
close(errc)
|
||||
}()
|
||||
var err error
|
||||
select {
|
||||
case <-time.NewTimer(5 * time.Second).C:
|
||||
err = fmt.Errorf("timed out")
|
||||
case err = <-errc:
|
||||
}
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func testBaseHasher(hasher BaseHasher, d []byte, n, count int) error {
|
||||
pool := NewTreePool(hasher, count, 1)
|
||||
defer pool.Drain(0)
|
||||
bmt := New(pool)
|
||||
return testHasherCorrectness(bmt, hasher, d, n, count)
|
||||
}
|
||||
|
||||
func testHasherCorrectness(bmt hash.Hash, hasher BaseHasher, d []byte, n, count int) (err error) {
|
||||
data := d[:n]
|
||||
rbmt := NewRefHasher(hasher, count)
|
||||
exp := rbmt.Hash(data)
|
||||
timeout := time.NewTimer(time.Second)
|
||||
c := make(chan error)
|
||||
|
||||
go func() {
|
||||
bmt.Reset()
|
||||
bmt.Write(data)
|
||||
got := bmt.Sum(nil)
|
||||
if !bytes.Equal(got, exp) {
|
||||
c <- fmt.Errorf("wrong hash: expected %x, got %x", exp, got)
|
||||
}
|
||||
close(c)
|
||||
}()
|
||||
select {
|
||||
case <-timeout.C:
|
||||
err = fmt.Errorf("BMT hash calculation timed out")
|
||||
case err = <-c:
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func BenchmarkSHA3_4k(t *testing.B) { benchmarkSHA3(4096, t) }
|
||||
func BenchmarkSHA3_2k(t *testing.B) { benchmarkSHA3(4096/2, t) }
|
||||
func BenchmarkSHA3_1k(t *testing.B) { benchmarkSHA3(4096/4, t) }
|
||||
func BenchmarkSHA3_512b(t *testing.B) { benchmarkSHA3(4096/8, t) }
|
||||
func BenchmarkSHA3_256b(t *testing.B) { benchmarkSHA3(4096/16, t) }
|
||||
func BenchmarkSHA3_128b(t *testing.B) { benchmarkSHA3(4096/32, t) }
|
||||
|
||||
func BenchmarkBMTBaseline_4k(t *testing.B) { benchmarkBMTBaseline(4096, t) }
|
||||
func BenchmarkBMTBaseline_2k(t *testing.B) { benchmarkBMTBaseline(4096/2, t) }
|
||||
func BenchmarkBMTBaseline_1k(t *testing.B) { benchmarkBMTBaseline(4096/4, t) }
|
||||
func BenchmarkBMTBaseline_512b(t *testing.B) { benchmarkBMTBaseline(4096/8, t) }
|
||||
func BenchmarkBMTBaseline_256b(t *testing.B) { benchmarkBMTBaseline(4096/16, t) }
|
||||
func BenchmarkBMTBaseline_128b(t *testing.B) { benchmarkBMTBaseline(4096/32, t) }
|
||||
|
||||
func BenchmarkRefHasher_4k(t *testing.B) { benchmarkRefHasher(4096, t) }
|
||||
func BenchmarkRefHasher_2k(t *testing.B) { benchmarkRefHasher(4096/2, t) }
|
||||
func BenchmarkRefHasher_1k(t *testing.B) { benchmarkRefHasher(4096/4, t) }
|
||||
func BenchmarkRefHasher_512b(t *testing.B) { benchmarkRefHasher(4096/8, t) }
|
||||
func BenchmarkRefHasher_256b(t *testing.B) { benchmarkRefHasher(4096/16, t) }
|
||||
func BenchmarkRefHasher_128b(t *testing.B) { benchmarkRefHasher(4096/32, t) }
|
||||
|
||||
func BenchmarkHasher_4k(t *testing.B) { benchmarkHasher(4096, t) }
|
||||
func BenchmarkHasher_2k(t *testing.B) { benchmarkHasher(4096/2, t) }
|
||||
func BenchmarkHasher_1k(t *testing.B) { benchmarkHasher(4096/4, t) }
|
||||
func BenchmarkHasher_512b(t *testing.B) { benchmarkHasher(4096/8, t) }
|
||||
func BenchmarkHasher_256b(t *testing.B) { benchmarkHasher(4096/16, t) }
|
||||
func BenchmarkHasher_128b(t *testing.B) { benchmarkHasher(4096/32, t) }

func BenchmarkHasherNoReuse_4k(t *testing.B)   { benchmarkHasherReuse(1, 4096, t) }
func BenchmarkHasherNoReuse_2k(t *testing.B)   { benchmarkHasherReuse(1, 4096/2, t) }
func BenchmarkHasherNoReuse_1k(t *testing.B)   { benchmarkHasherReuse(1, 4096/4, t) }
func BenchmarkHasherNoReuse_512b(t *testing.B) { benchmarkHasherReuse(1, 4096/8, t) }
func BenchmarkHasherNoReuse_256b(t *testing.B) { benchmarkHasherReuse(1, 4096/16, t) }
func BenchmarkHasherNoReuse_128b(t *testing.B) { benchmarkHasherReuse(1, 4096/32, t) }

func BenchmarkHasherReuse_4k(t *testing.B)   { benchmarkHasherReuse(16, 4096, t) }
func BenchmarkHasherReuse_2k(t *testing.B)   { benchmarkHasherReuse(16, 4096/2, t) }
func BenchmarkHasherReuse_1k(t *testing.B)   { benchmarkHasherReuse(16, 4096/4, t) }
func BenchmarkHasherReuse_512b(t *testing.B) { benchmarkHasherReuse(16, 4096/8, t) }
func BenchmarkHasherReuse_256b(t *testing.B) { benchmarkHasherReuse(16, 4096/16, t) }
func BenchmarkHasherReuse_128b(t *testing.B) { benchmarkHasherReuse(16, 4096/32, t) }

// benchmarkBMTBaseline benchmarks the minimum hashing time for a balanced (for simplicity) BMT
// by doing count/segmentsize parallel hashings of 2*segmentsize bytes,
// running them on maxproccnt goroutines, each reusing its base hasher.
// The premise is that this is the minimum computation needed for a BMT,
// so it serves as a theoretical optimum for concurrent implementations.
func benchmarkBMTBaseline(n int, t *testing.B) {
	tdata := testDataReader(64)
	data := make([]byte, 64)
	tdata.Read(data)
	hasher := sha3.NewKeccak256

	t.ReportAllocs()
	t.ResetTimer()
	for i := 0; i < t.N; i++ {
		count := int32((n-1)/hasher().Size() + 1)
		wg := sync.WaitGroup{}
		wg.Add(maxproccnt)
		var i int32
		for j := 0; j < maxproccnt; j++ {
			go func() {
				defer wg.Done()
				h := hasher()
				for atomic.AddInt32(&i, 1) < count {
					h.Reset()
					h.Write(data)
					h.Sum(nil)
				}
			}()
		}
		wg.Wait()
	}
}

func benchmarkHasher(n int, t *testing.B) {
	tdata := testDataReader(n)
	data := make([]byte, n)
	tdata.Read(data)

	size := 1
	hasher := sha3.NewKeccak256
	segmentCount := 128
	pool := NewTreePool(hasher, segmentCount, size)
	bmt := New(pool)

	t.ReportAllocs()
	t.ResetTimer()
	for i := 0; i < t.N; i++ {
		bmt.Reset()
		bmt.Write(data)
		bmt.Sum(nil)
	}
}

func benchmarkHasherReuse(poolsize, n int, t *testing.B) {
	tdata := testDataReader(n)
	data := make([]byte, n)
	tdata.Read(data)

	hasher := sha3.NewKeccak256
	segmentCount := 128
	pool := NewTreePool(hasher, segmentCount, poolsize)
	cycles := 200

	t.ReportAllocs()
	t.ResetTimer()
	for i := 0; i < t.N; i++ {
		wg := sync.WaitGroup{}
		wg.Add(cycles)
		for j := 0; j < cycles; j++ {
			bmt := New(pool)
			go func() {
				defer wg.Done()
				bmt.Reset()
				bmt.Write(data)
				bmt.Sum(nil)
			}()
		}
		wg.Wait()
	}
}

func benchmarkSHA3(n int, t *testing.B) {
	data := make([]byte, n)
	tdata := testDataReader(n)
	tdata.Read(data)
	hasher := sha3.NewKeccak256
	h := hasher()

	t.ReportAllocs()
	t.ResetTimer()
	for i := 0; i < t.N; i++ {
		h.Reset()
		h.Write(data)
		h.Sum(nil)
	}
}

func benchmarkRefHasher(n int, t *testing.B) {
	data := make([]byte, n)
	tdata := testDataReader(n)
	tdata.Read(data)
	hasher := sha3.NewKeccak256
	rbmt := NewRefHasher(hasher, 128)

	t.ReportAllocs()
	t.ResetTimer()
	for i := 0; i < t.N; i++ {
		rbmt.Hash(data)
	}
}
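All of the benchmarks above drive the same write/sum cycle. A minimal stand-alone sketch of that cycle, assuming the bmt package API they exercise (NewTreePool, New, and testDataReader are taken from the code above):

	// Minimal usage sketch of the BMT hasher exercised by the benchmarks above.
	pool := NewTreePool(sha3.NewKeccak256, 128, 1) // 128 segments, pool capacity 1
	hasher := New(pool)
	data := make([]byte, 4096)
	testDataReader(4096).Read(data)
	hasher.Reset()
	hasher.Write(data)
	digest := hasher.Sum(nil) // 32-byte BMT root
	_ = digest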
@@ -330,6 +330,7 @@ func doLint(cmdline []string) {
	configs := []string{
		"--vendor",
		"--tests",
		"--deadline=2m",
		"--disable-all",
		"--enable=goimports",
		"--enable=varcheck",
@@ -1,18 +1,18 @@
#!/usr/bin/env bash
#!/bin/sh

find_files() {
  find . -not \( \
  find . ! \( \
    \( \
      -wholename '.github' \
      -o -wholename './build/_workspace' \
      -o -wholename './build/bin' \
      -o -wholename './crypto/bn256' \
      -o -wholename '*/vendor/*' \
      -path '.github' \
      -o -path './build/_workspace' \
      -o -path './build/bin' \
      -o -path './crypto/bn256' \
      -o -path '*/vendor/*' \
    \) -prune \
  \) -name '*.go'
}

GOFMT="gofmt -s -w";
GOIMPORTS="goimports -w";
find_files | xargs $GOFMT;
find_files | xargs $GOIMPORTS;
GOFMT="gofmt -s -w"
GOIMPORTS="goimports -w"
find_files | xargs $GOFMT
find_files | xargs $GOIMPORTS
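A plausible reading of the script change above: -not and -wholename are GNU find extensions, while ! and -path are defined by POSIX, so the rewritten find_files (together with the #!/bin/sh shebang) should also work on systems without GNU findutils; dropping the trailing semicolons is purely cosmetic in sh.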
@@ -29,7 +29,7 @@ import (
)

var (
	abiFlag = flag.String("abi", "", "Path to the Ethereum contract ABI json to bind")
	abiFlag = flag.String("abi", "", "Path to the Ethereum contract ABI json to bind, - for STDIN")
	binFlag = flag.String("bin", "", "Path to the Ethereum contract bytecode (generate deploy method)")
	typFlag = flag.String("type", "", "Struct name for the binding (default = package name)")

@@ -75,16 +75,27 @@ func main() {
		bins  []string
		types []string
	)
	if *solFlag != "" {
	if *solFlag != "" || *abiFlag == "-" {
		// Generate the list of types to exclude from binding
		exclude := make(map[string]bool)
		for _, kind := range strings.Split(*excFlag, ",") {
			exclude[strings.ToLower(kind)] = true
		}
		contracts, err := compiler.CompileSolidity(*solcFlag, *solFlag)
		if err != nil {
			fmt.Printf("Failed to build Solidity contract: %v\n", err)
			os.Exit(-1)

		var contracts map[string]*compiler.Contract
		var err error
		if *solFlag != "" {
			contracts, err = compiler.CompileSolidity(*solcFlag, *solFlag)
			if err != nil {
				fmt.Printf("Failed to build Solidity contract: %v\n", err)
				os.Exit(-1)
			}
		} else {
			contracts, err = contractsFromStdin()
			if err != nil {
				fmt.Printf("Failed to read input ABIs from STDIN: %v\n", err)
				os.Exit(-1)
			}
		}
		// Gather all non-excluded contracts for binding
		for name, contract := range contracts {
@@ -138,3 +149,12 @@ func main() {
		os.Exit(-1)
	}
}

func contractsFromStdin() (map[string]*compiler.Contract, error) {
	bytes, err := ioutil.ReadAll(os.Stdin)
	if err != nil {
		return nil, err
	}

	return compiler.ParseCombinedJSON(bytes, "", "", "", "")
}
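A sketch of the STDIN path introduced above; the pipeline in the first comment is a hypothetical invocation, and the parsing mirrors contractsFromStdin:

	// Hypothetical pipeline: solc --combined-json abi,bin Contract.sol | abigen --abi -
	// The '-' value of --abi routes main() through contractsFromStdin above.
	input, err := ioutil.ReadAll(os.Stdin)
	if err != nil {
		fmt.Printf("Failed to read STDIN: %v\n", err)
		os.Exit(-1)
	}
	contracts, err := compiler.ParseCombinedJSON(input, "", "", "", "")
	if err != nil {
		fmt.Printf("Failed to parse combined JSON: %v\n", err)
		os.Exit(-1)
	}
	for name := range contracts {
		fmt.Println("parsed contract:", name)
	}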
72 cmd/ethkey/changepassphrase.go Normal file
@@ -0,0 +1,72 @@
package main

import (
	"fmt"
	"io/ioutil"
	"strings"

	"github.com/ethereum/go-ethereum/accounts/keystore"
	"github.com/ethereum/go-ethereum/cmd/utils"
	"gopkg.in/urfave/cli.v1"
)

var newPassphraseFlag = cli.StringFlag{
	Name:  "newpasswordfile",
	Usage: "the file that contains the new passphrase for the keyfile",
}

var commandChangePassphrase = cli.Command{
	Name:      "changepassphrase",
	Usage:     "change the passphrase on a keyfile",
	ArgsUsage: "<keyfile>",
	Description: `
Change the passphrase of a keyfile.`,
	Flags: []cli.Flag{
		passphraseFlag,
		newPassphraseFlag,
	},
	Action: func(ctx *cli.Context) error {
		keyfilepath := ctx.Args().First()

		// Read key from file.
		keyjson, err := ioutil.ReadFile(keyfilepath)
		if err != nil {
			utils.Fatalf("Failed to read the keyfile at '%s': %v", keyfilepath, err)
		}

		// Decrypt key with passphrase.
		passphrase := getPassphrase(ctx)
		key, err := keystore.DecryptKey(keyjson, passphrase)
		if err != nil {
			utils.Fatalf("Error decrypting key: %v", err)
		}

		// Get a new passphrase.
		fmt.Println("Please provide a new passphrase")
		var newPhrase string
		if passFile := ctx.String(newPassphraseFlag.Name); passFile != "" {
			content, err := ioutil.ReadFile(passFile)
			if err != nil {
				utils.Fatalf("Failed to read new passphrase file '%s': %v", passFile, err)
			}
			newPhrase = strings.TrimRight(string(content), "\r\n")
		} else {
			newPhrase = promptPassphrase(true)
		}

		// Encrypt the key with the new passphrase.
		newJson, err := keystore.EncryptKey(key, newPhrase, keystore.StandardScryptN, keystore.StandardScryptP)
		if err != nil {
			utils.Fatalf("Error encrypting with new passphrase: %v", err)
		}

		// Then write the new keyfile in place of the old one.
		// Note: 0600 (owner read/write); the bare literal 600 in the original is decimal, not the intended octal mode.
		if err := ioutil.WriteFile(keyfilepath, newJson, 0600); err != nil {
			utils.Fatalf("Error writing new keyfile to disk: %v", err)
		}

		// Don't print anything. Just return successfully,
		// producing a positive exit code.
		return nil
	},
}
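A hedged usage sketch, assuming the passphrase flag registered elsewhere in this package is named passwordfile (as the helper comment further below suggests) and using placeholder file names: ethkey changepassphrase --passwordfile old-pass.txt --newpasswordfile new-pass.txt key.json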
@@ -90,7 +90,7 @@ If you want to encrypt an existing private key, it can be specified by setting
	}

	// Encrypt key with passphrase.
	passphrase := getPassPhrase(ctx, true)
	passphrase := promptPassphrase(true)
	keyjson, err := keystore.EncryptKey(key, passphrase, keystore.StandardScryptN, keystore.StandardScryptP)
	if err != nil {
		utils.Fatalf("Error encrypting key: %v", err)

@@ -60,7 +60,7 @@ make sure to use this feature with great caution!`,
	}

	// Decrypt key with passphrase.
	passphrase := getPassPhrase(ctx, false)
	passphrase := getPassphrase(ctx)
	key, err := keystore.DecryptKey(keyjson, passphrase)
	if err != nil {
		utils.Fatalf("Error decrypting key: %v", err)

@@ -38,6 +38,7 @@ func init() {
	app.Commands = []cli.Command{
		commandGenerate,
		commandInspect,
		commandChangePassphrase,
		commandSignMessage,
		commandVerifyMessage,
	}

@@ -62,7 +62,7 @@ To sign a message contained in a file, use the --msgfile flag.
	}

	// Decrypt key with passphrase.
	passphrase := getPassPhrase(ctx, false)
	passphrase := getPassphrase(ctx)
	key, err := keystore.DecryptKey(keyjson, passphrase)
	if err != nil {
		utils.Fatalf("Error decrypting key: %v", err)

@@ -28,11 +28,32 @@ import (
	"gopkg.in/urfave/cli.v1"
)

// getPassPhrase obtains a passphrase given by the user. It first checks the
// --passphrase command line flag and ultimately prompts the user for a
// promptPassphrase prompts the user for a passphrase. Set confirmation to true
// to require the user to confirm the passphrase.
func promptPassphrase(confirmation bool) string {
	passphrase, err := console.Stdin.PromptPassword("Passphrase: ")
	if err != nil {
		utils.Fatalf("Failed to read passphrase: %v", err)
	}

	if confirmation {
		confirm, err := console.Stdin.PromptPassword("Repeat passphrase: ")
		if err != nil {
			utils.Fatalf("Failed to read passphrase confirmation: %v", err)
		}
		if passphrase != confirm {
			utils.Fatalf("Passphrases do not match")
		}
	}

	return passphrase
}

// getPassphrase obtains a passphrase given by the user. It first checks the
// --passfile command line flag and ultimately prompts the user for a
// passphrase.
func getPassPhrase(ctx *cli.Context, confirmation bool) string {
	// Look for the --passphrase flag.
func getPassphrase(ctx *cli.Context) string {
	// Look for the --passwordfile flag.
	passphraseFile := ctx.String(passphraseFlag.Name)
	if passphraseFile != "" {
		content, err := ioutil.ReadFile(passphraseFile)
@@ -44,20 +65,7 @@ func getPassPhrase(ctx *cli.Context, confirmation bool) string {
	}

	// Otherwise prompt the user for the passphrase.
	passphrase, err := console.Stdin.PromptPassword("Passphrase: ")
	if err != nil {
		utils.Fatalf("Failed to read passphrase: %v", err)
	}
	if confirmation {
		confirm, err := console.Stdin.PromptPassword("Repeat passphrase: ")
		if err != nil {
			utils.Fatalf("Failed to read passphrase confirmation: %v", err)
		}
		if passphrase != confirm {
			utils.Fatalf("Passphrases do not match")
		}
	}
	return passphrase
	return promptPassphrase(false)
}

// signHash is a helper function that calculates a hash for the given message
@@ -77,9 +77,6 @@ var (
	accJSONFlag = flag.String("account.json", "", "Key json file to fund user requests with")
	accPassFlag = flag.String("account.pass", "", "Decryption password to access faucet funds")

	githubUser  = flag.String("github.user", "", "GitHub user to authenticate with for Gist access")
	githubToken = flag.String("github.token", "", "GitHub personal token to access Gists with")

	captchaToken  = flag.String("captcha.token", "", "Recaptcha site key to authenticate client side")
	captchaSecret = flag.String("captcha.secret", "", "Recaptcha secret key to authenticate server side")

@@ -474,7 +471,7 @@ func (f *faucet) apiHandler(conn *websocket.Conn) {
	amount = new(big.Int).Div(amount, new(big.Int).Exp(big.NewInt(2), big.NewInt(int64(msg.Tier)), nil))

	tx := types.NewTransaction(f.nonce+uint64(len(f.reqs)), address, amount, 21000, f.price, nil)
	signed, err := f.keystore.SignTx(f.account, tx, f.config.ChainId)
	signed, err := f.keystore.SignTx(f.account, tx, f.config.ChainID)
	if err != nil {
		f.lock.Unlock()
		if err = sendError(conn, err); err != nil {
@@ -638,59 +635,6 @@ func sendSuccess(conn *websocket.Conn, msg string) error {
	return send(conn, map[string]string{"success": msg}, time.Second)
}

// authGitHub tries to authenticate a faucet request using GitHub gists, returning
// the username, avatar URL and Ethereum address to fund on success.
func authGitHub(url string) (string, string, common.Address, error) {
	// Retrieve the gist from the GitHub Gist APIs
	parts := strings.Split(url, "/")
	req, _ := http.NewRequest("GET", "https://api.github.com/gists/"+parts[len(parts)-1], nil)
	if *githubUser != "" {
		req.SetBasicAuth(*githubUser, *githubToken)
	}
	res, err := http.DefaultClient.Do(req)
	if err != nil {
		return "", "", common.Address{}, err
	}
	var gist struct {
		Owner struct {
			Login string `json:"login"`
		} `json:"owner"`
		Files map[string]struct {
			Content string `json:"content"`
		} `json:"files"`
	}
	err = json.NewDecoder(res.Body).Decode(&gist)
	res.Body.Close()
	if err != nil {
		return "", "", common.Address{}, err
	}
	if gist.Owner.Login == "" {
		return "", "", common.Address{}, errors.New("Anonymous Gists not allowed")
	}
	// Iterate over all the files and look for Ethereum addresses
	var address common.Address
	for _, file := range gist.Files {
		content := strings.TrimSpace(file.Content)
		if len(content) == 2+common.AddressLength*2 {
			address = common.HexToAddress(content)
		}
	}
	if address == (common.Address{}) {
		return "", "", common.Address{}, errors.New("No Ethereum address found to fund")
	}
	// Validate the user's existence since the API is unhelpful here
	if res, err = http.Head("https://github.com/" + gist.Owner.Login); err != nil {
		return "", "", common.Address{}, err
	}
	res.Body.Close()

	if res.StatusCode != 200 {
		return "", "", common.Address{}, errors.New("Invalid user... boom!")
	}
	// Everything passed validation, return the gathered infos
	return gist.Owner.Login + "@github", fmt.Sprintf("https://github.com/%s.png?size=64", gist.Owner.Login), address, nil
}

// authTwitter tries to authenticate a faucet request using Twitter posts, returning
// the username, avatar URL and Ethereum address to fund on success.
func authTwitter(url string) (string, string, common.Address, error) {
@@ -77,7 +77,7 @@ var customGenesisTests = []struct {
			"homesteadBlock" : 314,
			"daoForkBlock"   : 141,
			"daoForkSupport" : true
		},
	}
}`,
		query:  "eth.getBlock(0).nonce",
		result: "0x0000000000000042",
@@ -19,12 +19,16 @@ package main

import (
	"fmt"
	"math"
	"os"
	"runtime"
	godebug "runtime/debug"
	"sort"
	"strconv"
	"strings"
	"time"

	"github.com/elastic/gosigar"
	"github.com/ethereum/go-ethereum/accounts"
	"github.com/ethereum/go-ethereum/accounts/keystore"
	"github.com/ethereum/go-ethereum/cmd/utils"
@@ -140,6 +144,15 @@ var (
		utils.WhisperMaxMessageSizeFlag,
		utils.WhisperMinPOWFlag,
	}

	metricsFlags = []cli.Flag{
		utils.MetricsEnableInfluxDBFlag,
		utils.MetricsInfluxDBEndpointFlag,
		utils.MetricsInfluxDBDatabaseFlag,
		utils.MetricsInfluxDBUsernameFlag,
		utils.MetricsInfluxDBPasswordFlag,
		utils.MetricsInfluxDBHostTagFlag,
	}
)

func init() {
@@ -182,12 +195,32 @@ func init() {
	app.Flags = append(app.Flags, consoleFlags...)
	app.Flags = append(app.Flags, debug.Flags...)
	app.Flags = append(app.Flags, whisperFlags...)
	app.Flags = append(app.Flags, metricsFlags...)

	app.Before = func(ctx *cli.Context) error {
		runtime.GOMAXPROCS(runtime.NumCPU())
		if err := debug.Setup(ctx); err != nil {
			return err
		}
		// Cap the cache allowance and tune the garbage collector
		var mem gosigar.Mem
		if err := mem.Get(); err == nil {
			allowance := int(mem.Total / 1024 / 1024 / 3)
			if cache := ctx.GlobalInt(utils.CacheFlag.Name); cache > allowance {
				log.Warn("Sanitizing cache to Go's GC limits", "provided", cache, "updated", allowance)
				ctx.GlobalSet(utils.CacheFlag.Name, strconv.Itoa(allowance))
			}
		}
		// Ensure Go's GC ignores the database cache for trigger percentage
		cache := ctx.GlobalInt(utils.CacheFlag.Name)
		gogc := math.Max(20, math.Min(100, 100/(float64(cache)/1024)))

		log.Debug("Sanitizing Go's GC trigger", "percent", int(gogc))
		godebug.SetGCPercent(int(gogc))

		// Start metrics export if enabled
		utils.SetupMetrics(ctx)

		// Start system runtime metrics collection
		go metrics.CollectProcessMetrics(3 * time.Second)
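As a worked check on the trigger formula above: with a 1024 MB cache, gogc = max(20, min(100, 100/1)) = 100, the stock Go default; with a 4096 MB cache it becomes max(20, min(100, 25)) = 25, so the GC triggers sooner relative to the large live heap; and for caches beyond 5 GB the floor of 20 percent applies.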
@@ -206,11 +206,22 @@ var AppHelpFlagGroups = []flagGroup{
	{
		Name: "LOGGING AND DEBUGGING",
		Flags: append([]cli.Flag{
			utils.MetricsEnabledFlag,
			utils.FakePoWFlag,
			utils.NoCompactionFlag,
		}, debug.Flags...),
	},
	{
		Name: "METRICS AND STATS",
		Flags: []cli.Flag{
			utils.MetricsEnabledFlag,
			utils.MetricsEnableInfluxDBFlag,
			utils.MetricsInfluxDBEndpointFlag,
			utils.MetricsInfluxDBDatabaseFlag,
			utils.MetricsInfluxDBUsernameFlag,
			utils.MetricsInfluxDBPasswordFlag,
			utils.MetricsInfluxDBHostTagFlag,
		},
	},
	{
		Name:  "WHISPER (EXPERIMENTAL)",
		Flags: whisperFlags,
@@ -180,7 +180,10 @@ func main() {
			},
		},
	}
	app.Run(os.Args)
	if err := app.Run(os.Args); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}

func showNetwork(ctx *cli.Context) error {
@@ -275,9 +278,8 @@ func createNode(ctx *cli.Context) error {
	if len(ctx.Args()) != 0 {
		return cli.ShowCommandHelp(ctx, ctx.Command.Name)
	}
	config := &adapters.NodeConfig{
		Name: ctx.String("name"),
	}
	config := adapters.RandomNodeConfig()
	config.Name = ctx.String("name")
	if key := ctx.String("key"); key != "" {
		privKey, err := crypto.HexToECDSA(key)
		if err != nil {
@@ -103,8 +103,8 @@ func newCppEthereumGenesisSpec(network string, genesis *core.Genesis) (*cppEther
	spec.Params.ByzantiumForkBlock = (hexutil.Uint64)(genesis.Config.ByzantiumBlock.Uint64())
	spec.Params.ConstantinopleForkBlock = (hexutil.Uint64)(math.MaxUint64)

	spec.Params.NetworkID = (hexutil.Uint64)(genesis.Config.ChainId.Uint64())
	spec.Params.ChainID = (hexutil.Uint64)(genesis.Config.ChainId.Uint64())
	spec.Params.NetworkID = (hexutil.Uint64)(genesis.Config.ChainID.Uint64())
	spec.Params.ChainID = (hexutil.Uint64)(genesis.Config.ChainID.Uint64())

	spec.Params.MaximumExtraDataSize = (hexutil.Uint64)(params.MaximumExtraDataSize)
	spec.Params.MinGasLimit = (hexutil.Uint64)(params.MinGasLimit)
@@ -284,7 +284,7 @@ func newParityChainSpec(network string, genesis *core.Genesis, bootnodes []strin
	spec.Params.MaximumExtraDataSize = (hexutil.Uint64)(params.MaximumExtraDataSize)
	spec.Params.MinGasLimit = (hexutil.Uint64)(params.MinGasLimit)
	spec.Params.GasLimitBoundDivisor = (hexutil.Uint64)(params.GasLimitBoundDivisor)
	spec.Params.NetworkID = (hexutil.Uint64)(genesis.Config.ChainId.Uint64())
	spec.Params.NetworkID = (hexutil.Uint64)(genesis.Config.ChainID.Uint64())
	spec.Params.MaxCodeSize = params.MaxCodeSize
	spec.Params.EIP155Transition = genesis.Config.EIP155Block.Uint64()
	spec.Params.EIP98Transition = math.MaxUint64
@@ -609,7 +609,7 @@ func deployDashboard(client *sshClient, network string, conf *config, config *da
	}
	template.Must(template.New("").Parse(dashboardContent)).Execute(indexfile, map[string]interface{}{
		"Network":      network,
		"NetworkID":    conf.Genesis.Config.ChainId,
		"NetworkID":    conf.Genesis.Config.ChainID,
		"NetworkTitle": strings.Title(network),
		"EthstatsPage": config.ethstats,
		"ExplorerPage": config.explorer,

@@ -49,7 +49,7 @@ func (w *wizard) deployFaucet() {
	existed := err == nil

	infos.node.genesis, _ = json.MarshalIndent(w.conf.Genesis, "", " ")
	infos.node.network = w.conf.Genesis.Config.ChainId.Int64()
	infos.node.network = w.conf.Genesis.Config.ChainID.Int64()

	// Figure out which port to listen on
	fmt.Println()

@@ -121,7 +121,7 @@ func (w *wizard) makeGenesis() {
	// Query the user for some custom extras
	fmt.Println()
	fmt.Println("Specify your chain/network ID if you want an explicit one (default = random)")
	genesis.Config.ChainId = new(big.Int).SetUint64(uint64(w.readDefaultInt(rand.Intn(65536))))
	genesis.Config.ChainID = new(big.Int).SetUint64(uint64(w.readDefaultInt(rand.Intn(65536))))

	// All done, store the genesis and flush to disk
	log.Info("Configured new genesis block")

@@ -276,13 +276,3 @@ func (stats serverStats) render() {
	}
	table.Render()
}

// protips contains a collection of network infos to report pro-tips
// based on.
type protips struct {
	genesis   string
	network   int64
	bootFull  []string
	bootLight []string
	ethstats  string
}

@@ -56,7 +56,7 @@ func (w *wizard) deployNode(boot bool) {
	existed := err == nil

	infos.genesis, _ = json.MarshalIndent(w.conf.Genesis, "", " ")
	infos.network = w.conf.Genesis.Config.ChainId.Int64()
	infos.network = w.conf.Genesis.Config.ChainID.Int64()

	// Figure out where the user wants to store the persistent data
	fmt.Println()
@@ -107,7 +107,7 @@ func (w *wizard) deployNode(boot bool) {
	// Ethash based miners only need an etherbase to mine against
	fmt.Println()
	if infos.etherbase == "" {
		fmt.Printf("What address should the miner user?\n")
		fmt.Printf("What address should the miner use?\n")
		for {
			if address := w.readAddress(); address != nil {
				infos.etherbase = address.Hex()
@@ -115,7 +115,7 @@ func (w *wizard) deployNode(boot bool) {
			}
		}
	} else {
		fmt.Printf("What address should the miner user? (default = %s)\n", infos.etherbase)
		fmt.Printf("What address should the miner use? (default = %s)\n", infos.etherbase)
		infos.etherbase = w.readDefaultAddress(common.HexToAddress(infos.etherbase)).Hex()
	}
} else if w.conf.Genesis.Config.Clique != nil {

@@ -52,7 +52,7 @@ func (w *wizard) deployWallet() {
	existed := err == nil

	infos.genesis, _ = json.MarshalIndent(w.conf.Genesis, "", " ")
	infos.network = w.conf.Genesis.Config.ChainId.Int64()
	infos.network = w.conf.Genesis.Config.ChainID.Int64()

	// Figure out which port to listen on
	fmt.Println()
@@ -24,6 +24,7 @@ import (
	"reflect"
	"strconv"
	"strings"
	"time"
	"unicode"

	cli "gopkg.in/urfave/cli.v1"
@@ -37,6 +38,8 @@ import (
	bzzapi "github.com/ethereum/go-ethereum/swarm/api"
)

const SWARM_VERSION = "0.3"

var (
	//flag definition for the dumpconfig command
	DumpConfigCommand = cli.Command{
@@ -58,19 +61,25 @@ var (

//constants for environment variables
const (
	SWARM_ENV_CHEQUEBOOK_ADDR = "SWARM_CHEQUEBOOK_ADDR"
	SWARM_ENV_ACCOUNT         = "SWARM_ACCOUNT"
	SWARM_ENV_LISTEN_ADDR     = "SWARM_LISTEN_ADDR"
	SWARM_ENV_PORT            = "SWARM_PORT"
	SWARM_ENV_NETWORK_ID      = "SWARM_NETWORK_ID"
	SWARM_ENV_SWAP_ENABLE     = "SWARM_SWAP_ENABLE"
	SWARM_ENV_SWAP_API        = "SWARM_SWAP_API"
	SWARM_ENV_SYNC_ENABLE     = "SWARM_SYNC_ENABLE"
	SWARM_ENV_ENS_API         = "SWARM_ENS_API"
	SWARM_ENV_ENS_ADDR        = "SWARM_ENS_ADDR"
	SWARM_ENV_CORS            = "SWARM_CORS"
	SWARM_ENV_BOOTNODES       = "SWARM_BOOTNODES"
	GETH_ENV_DATADIR          = "GETH_DATADIR"
	SWARM_ENV_CHEQUEBOOK_ADDR      = "SWARM_CHEQUEBOOK_ADDR"
	SWARM_ENV_ACCOUNT              = "SWARM_ACCOUNT"
	SWARM_ENV_LISTEN_ADDR          = "SWARM_LISTEN_ADDR"
	SWARM_ENV_PORT                 = "SWARM_PORT"
	SWARM_ENV_NETWORK_ID           = "SWARM_NETWORK_ID"
	SWARM_ENV_SWAP_ENABLE          = "SWARM_SWAP_ENABLE"
	SWARM_ENV_SWAP_API             = "SWARM_SWAP_API"
	SWARM_ENV_SYNC_DISABLE         = "SWARM_SYNC_DISABLE"
	SWARM_ENV_SYNC_UPDATE_DELAY    = "SWARM_ENV_SYNC_UPDATE_DELAY"
	SWARM_ENV_DELIVERY_SKIP_CHECK  = "SWARM_DELIVERY_SKIP_CHECK"
	SWARM_ENV_ENS_API              = "SWARM_ENS_API"
	SWARM_ENV_ENS_ADDR             = "SWARM_ENS_ADDR"
	SWARM_ENV_CORS                 = "SWARM_CORS"
	SWARM_ENV_BOOTNODES            = "SWARM_BOOTNODES"
	SWARM_ENV_PSS_ENABLE           = "SWARM_PSS_ENABLE"
	SWARM_ENV_STORE_PATH           = "SWARM_STORE_PATH"
	SWARM_ENV_STORE_CAPACITY       = "SWARM_STORE_CAPACITY"
	SWARM_ENV_STORE_CACHE_CAPACITY = "SWARM_STORE_CACHE_CAPACITY"
	GETH_ENV_DATADIR               = "GETH_DATADIR"
)

// These settings ensure that TOML keys use the same names as Go struct fields.
@@ -92,10 +101,8 @@ var tomlSettings = toml.Config{

//before booting the swarm node, build the configuration
func buildConfig(ctx *cli.Context) (config *bzzapi.Config, err error) {
	//check for deprecated flags
	checkDeprecated(ctx)
	//start by creating a default config
	config = bzzapi.NewDefaultConfig()
	config = bzzapi.NewConfig()
	//first load settings from config file (if provided)
	config, err = configFileOverride(config, ctx)
	if err != nil {
@@ -168,7 +175,7 @@ func cmdLineOverride(currentConfig *bzzapi.Config, ctx *cli.Context) *bzzapi.Con

	if networkid := ctx.GlobalString(SwarmNetworkIdFlag.Name); networkid != "" {
		if id, _ := strconv.Atoi(networkid); id != 0 {
			currentConfig.NetworkId = uint64(id)
			currentConfig.NetworkID = uint64(id)
		}
	}

@@ -191,12 +198,20 @@ func cmdLineOverride(currentConfig *bzzapi.Config, ctx *cli.Context) *bzzapi.Con
		currentConfig.SwapEnabled = true
	}

	if ctx.GlobalIsSet(SwarmSyncEnabledFlag.Name) {
		currentConfig.SyncEnabled = true
	if ctx.GlobalIsSet(SwarmSyncDisabledFlag.Name) {
		currentConfig.SyncEnabled = false
	}

	currentConfig.SwapApi = ctx.GlobalString(SwarmSwapAPIFlag.Name)
	if currentConfig.SwapEnabled && currentConfig.SwapApi == "" {
	if d := ctx.GlobalDuration(SwarmSyncUpdateDelay.Name); d > 0 {
		currentConfig.SyncUpdateDelay = d
	}

	if ctx.GlobalIsSet(SwarmDeliverySkipCheckFlag.Name) {
		currentConfig.DeliverySkipCheck = true
	}

	currentConfig.SwapAPI = ctx.GlobalString(SwarmSwapAPIFlag.Name)
	if currentConfig.SwapEnabled && currentConfig.SwapAPI == "" {
		utils.Fatalf(SWARM_ERR_SWAP_SET_NO_API)
	}

@@ -209,10 +224,6 @@ func cmdLineOverride(currentConfig *bzzapi.Config, ctx *cli.Context) *bzzapi.Con
		currentConfig.EnsAPIs = ensAPIs
	}

	if ensaddr := ctx.GlobalString(DeprecatedEnsAddrFlag.Name); ensaddr != "" {
		currentConfig.EnsRoot = common.HexToAddress(ensaddr)
	}

	if cors := ctx.GlobalString(CorsStringFlag.Name); cors != "" {
		currentConfig.Cors = cors
	}
@@ -221,6 +232,18 @@ func cmdLineOverride(currentConfig *bzzapi.Config, ctx *cli.Context) *bzzapi.Con
		currentConfig.BootNodes = ctx.GlobalString(utils.BootnodesFlag.Name)
	}

	if storePath := ctx.GlobalString(SwarmStorePath.Name); storePath != "" {
		currentConfig.LocalStoreParams.ChunkDbPath = storePath
	}

	if storeCapacity := ctx.GlobalUint64(SwarmStoreCapacity.Name); storeCapacity != 0 {
		currentConfig.LocalStoreParams.DbCapacity = storeCapacity
	}

	if storeCacheCapacity := ctx.GlobalUint(SwarmStoreCacheCapacity.Name); storeCacheCapacity != 0 {
		currentConfig.LocalStoreParams.CacheCapacity = storeCacheCapacity
	}

	return currentConfig

}
@@ -239,7 +262,7 @@ func envVarsOverride(currentConfig *bzzapi.Config) (config *bzzapi.Config) {

	if networkid := os.Getenv(SWARM_ENV_NETWORK_ID); networkid != "" {
		if id, _ := strconv.Atoi(networkid); id != 0 {
			currentConfig.NetworkId = uint64(id)
			currentConfig.NetworkID = uint64(id)
		}
	}

@@ -262,17 +285,29 @@ func envVarsOverride(currentConfig *bzzapi.Config) (config *bzzapi.Config) {
		}
	}

	if syncenable := os.Getenv(SWARM_ENV_SYNC_ENABLE); syncenable != "" {
		if sync, err := strconv.ParseBool(syncenable); err != nil {
			currentConfig.SyncEnabled = sync
	if syncdisable := os.Getenv(SWARM_ENV_SYNC_DISABLE); syncdisable != "" {
		if sync, err := strconv.ParseBool(syncdisable); err == nil { // err == nil: apply only when the value parses
			currentConfig.SyncEnabled = !sync
		}
	}

	if v := os.Getenv(SWARM_ENV_DELIVERY_SKIP_CHECK); v != "" {
		if skipCheck, err := strconv.ParseBool(v); err == nil { // err == nil: apply only when the value parses
			currentConfig.DeliverySkipCheck = skipCheck
		}
	}

	if v := os.Getenv(SWARM_ENV_SYNC_UPDATE_DELAY); v != "" {
		if d, err := time.ParseDuration(v); err == nil { // err == nil: apply only when the value parses
			currentConfig.SyncUpdateDelay = d
		}
	}

	if swapapi := os.Getenv(SWARM_ENV_SWAP_API); swapapi != "" {
		currentConfig.SwapApi = swapapi
		currentConfig.SwapAPI = swapapi
	}

	if currentConfig.SwapEnabled && currentConfig.SwapApi == "" {
	if currentConfig.SwapEnabled && currentConfig.SwapAPI == "" {
		utils.Fatalf(SWARM_ERR_SWAP_SET_NO_API)
	}
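The environment overrides above should only apply when parsing succeeds; note the corrected err == nil guards (the page as extracted showed err != nil, which would apply zero values exactly when parsing failed and skip valid ones). The idiom in isolation, with a hypothetical cfg variable:

	// Apply a boolean env override only when it parses cleanly.
	if v := os.Getenv(SWARM_ENV_DELIVERY_SKIP_CHECK); v != "" {
		if skip, err := strconv.ParseBool(v); err == nil {
			cfg.DeliverySkipCheck = skip
		}
	}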
@@ -312,18 +347,6 @@ func dumpConfig(ctx *cli.Context) error {
	return nil
}

//deprecated flags checked here
func checkDeprecated(ctx *cli.Context) {
	// exit if the deprecated --ethapi flag is set
	if ctx.GlobalString(DeprecatedEthAPIFlag.Name) != "" {
		utils.Fatalf("--ethapi is no longer a valid command line flag, please use --ens-api and/or --swap-api.")
	}
	// warn if --ens-api flag is set
	if ctx.GlobalString(DeprecatedEnsAddrFlag.Name) != "" {
		log.Warn("--ens-addr is no longer a valid command line flag, please use --ens-api to specify contract address.")
	}
}

//validate configuration parameters
func validateConfig(cfg *bzzapi.Config) (err error) {
	for _, ensAPI := range cfg.EnsAPIs {
@@ -34,7 +34,7 @@ import (

func TestDumpConfig(t *testing.T) {
	swarm := runSwarm(t, "dumpconfig")
	defaultConf := api.NewDefaultConfig()
	defaultConf := api.NewConfig()
	out, err := tomlSettings.Marshal(&defaultConf)
	if err != nil {
		t.Fatal(err)
@@ -43,7 +43,7 @@ func TestDumpConfig(t *testing.T) {
	swarm.ExpectExit()
}

func TestFailsSwapEnabledNoSwapApi(t *testing.T) {
func TestConfigFailsSwapEnabledNoSwapApi(t *testing.T) {
	flags := []string{
		fmt.Sprintf("--%s", SwarmNetworkIdFlag.Name), "42",
		fmt.Sprintf("--%s", SwarmPortFlag.Name), "54545",
@@ -55,7 +55,7 @@ func TestFailsSwapEnabledNoSwapApi(t *testing.T) {
	swarm.ExpectExit()
}

func TestFailsNoBzzAccount(t *testing.T) {
func TestConfigFailsNoBzzAccount(t *testing.T) {
	flags := []string{
		fmt.Sprintf("--%s", SwarmNetworkIdFlag.Name), "42",
		fmt.Sprintf("--%s", SwarmPortFlag.Name), "54545",
@@ -66,7 +66,7 @@ func TestFailsNoBzzAccount(t *testing.T) {
	swarm.ExpectExit()
}

func TestCmdLineOverrides(t *testing.T) {
func TestConfigCmdLineOverrides(t *testing.T) {
	dir, err := ioutil.TempDir("", "bzztest")
	if err != nil {
		t.Fatal(err)
@@ -85,9 +85,10 @@ func TestCmdLineOverrides(t *testing.T) {
	flags := []string{
		fmt.Sprintf("--%s", SwarmNetworkIdFlag.Name), "42",
		fmt.Sprintf("--%s", SwarmPortFlag.Name), httpPort,
		fmt.Sprintf("--%s", SwarmSyncEnabledFlag.Name),
		fmt.Sprintf("--%s", SwarmSyncDisabledFlag.Name),
		fmt.Sprintf("--%s", CorsStringFlag.Name), "*",
		fmt.Sprintf("--%s", SwarmAccountFlag.Name), account.Address.String(),
		fmt.Sprintf("--%s", SwarmDeliverySkipCheckFlag.Name),
		fmt.Sprintf("--%s", EnsAPIFlag.Name), "",
		"--datadir", dir,
		"--ipcpath", conf.IPCPath,
@@ -120,12 +121,16 @@ func TestCmdLineOverrides(t *testing.T) {
		t.Fatalf("Expected port to be %s, got %s", httpPort, info.Port)
	}

	if info.NetworkId != 42 {
		t.Fatalf("Expected network ID to be %d, got %d", 42, info.NetworkId)
	if info.NetworkID != 42 {
		t.Fatalf("Expected network ID to be %d, got %d", 42, info.NetworkID)
	}

	if !info.SyncEnabled {
		t.Fatal("Expected Sync to be enabled, but is false")
	if info.SyncEnabled {
		t.Fatal("Expected Sync to be disabled, but is true")
	}

	if !info.DeliverySkipCheck {
		t.Fatal("Expected DeliverySkipCheck to be enabled, but it is not")
	}

	if info.Cors != "*" {
@@ -135,7 +140,7 @@ func TestCmdLineOverrides(t *testing.T) {
	node.Shutdown()
}

func TestFileOverrides(t *testing.T) {
func TestConfigFileOverrides(t *testing.T) {

	// assign ports
	httpPort, err := assignTCPPort()
@@ -145,16 +150,16 @@ func TestFileOverrides(t *testing.T) {

	//create a config file
	//first, create a default conf
	defaultConf := api.NewDefaultConfig()
	defaultConf := api.NewConfig()
	//change some values in order to test if they have been loaded
	defaultConf.SyncEnabled = true
	defaultConf.NetworkId = 54
	defaultConf.SyncEnabled = false
	defaultConf.DeliverySkipCheck = true
	defaultConf.NetworkID = 54
	defaultConf.Port = httpPort
	defaultConf.StoreParams.DbCapacity = 9000000
	defaultConf.ChunkerParams.Branches = 64
	defaultConf.HiveParams.CallInterval = 6000000000
	defaultConf.DbCapacity = 9000000
	defaultConf.HiveParams.KeepAliveInterval = 6000000000
	defaultConf.Swap.Params.Strategy.AutoCashInterval = 600 * time.Second
	defaultConf.SyncParams.KeyBufferSize = 512
	//defaultConf.SyncParams.KeyBufferSize = 512
	//create a TOML string
	out, err := tomlSettings.Marshal(&defaultConf)
	if err != nil {
@@ -215,38 +220,38 @@ func TestFileOverrides(t *testing.T) {
		t.Fatalf("Expected port to be %s, got %s", httpPort, info.Port)
	}

	if info.NetworkId != 54 {
		t.Fatalf("Expected network ID to be %d, got %d", 54, info.NetworkId)
	if info.NetworkID != 54 {
		t.Fatalf("Expected network ID to be %d, got %d", 54, info.NetworkID)
	}

	if !info.SyncEnabled {
		t.Fatal("Expected Sync to be enabled, but is false")
	if info.SyncEnabled {
		t.Fatal("Expected Sync to be disabled, but is true")
	}

	if info.StoreParams.DbCapacity != 9000000 {
		t.Fatalf("Expected network ID to be %d, got %d", 54, info.NetworkId)
	if !info.DeliverySkipCheck {
		t.Fatal("Expected DeliverySkipCheck to be enabled, but it is not")
	}

	if info.ChunkerParams.Branches != 64 {
		t.Fatalf("Expected chunker params branches to be %d, got %d", 64, info.ChunkerParams.Branches)
	if info.DbCapacity != 9000000 {
		t.Fatalf("Expected DbCapacity to be %d, got %d", 9000000, info.DbCapacity)
	}

	if info.HiveParams.CallInterval != 6000000000 {
		t.Fatalf("Expected HiveParams CallInterval to be %d, got %d", uint64(6000000000), uint64(info.HiveParams.CallInterval))
	if info.HiveParams.KeepAliveInterval != 6000000000 {
		t.Fatalf("Expected HiveParams KeepAliveInterval to be %d, got %d", uint64(6000000000), uint64(info.HiveParams.KeepAliveInterval))
	}

	if info.Swap.Params.Strategy.AutoCashInterval != 600*time.Second {
		t.Fatalf("Expected SwapParams AutoCashInterval to be %ds, got %d", 600, info.Swap.Params.Strategy.AutoCashInterval)
	}

	if info.SyncParams.KeyBufferSize != 512 {
		t.Fatalf("Expected info.SyncParams.KeyBufferSize to be %d, got %d", 512, info.SyncParams.KeyBufferSize)
	}
	// if info.SyncParams.KeyBufferSize != 512 {
	// 	t.Fatalf("Expected info.SyncParams.KeyBufferSize to be %d, got %d", 512, info.SyncParams.KeyBufferSize)
	// }

	node.Shutdown()
}

func TestEnvVars(t *testing.T) {
func TestConfigEnvVars(t *testing.T) {
	// assign ports
	httpPort, err := assignTCPPort()
	if err != nil {
@@ -257,7 +262,8 @@ func TestEnvVars(t *testing.T) {
	envVars = append(envVars, fmt.Sprintf("%s=%s", SwarmPortFlag.EnvVar, httpPort))
	envVars = append(envVars, fmt.Sprintf("%s=%s", SwarmNetworkIdFlag.EnvVar, "999"))
	envVars = append(envVars, fmt.Sprintf("%s=%s", CorsStringFlag.EnvVar, "*"))
	envVars = append(envVars, fmt.Sprintf("%s=%s", SwarmSyncEnabledFlag.EnvVar, "true"))
	envVars = append(envVars, fmt.Sprintf("%s=%s", SwarmSyncDisabledFlag.EnvVar, "true"))
	envVars = append(envVars, fmt.Sprintf("%s=%s", SwarmDeliverySkipCheckFlag.EnvVar, "true"))

	dir, err := ioutil.TempDir("", "bzztest")
	if err != nil {
@@ -326,23 +332,27 @@ func TestEnvVars(t *testing.T) {
		t.Fatalf("Expected port to be %s, got %s", httpPort, info.Port)
	}

	if info.NetworkId != 999 {
		t.Fatalf("Expected network ID to be %d, got %d", 999, info.NetworkId)
	if info.NetworkID != 999 {
		t.Fatalf("Expected network ID to be %d, got %d", 999, info.NetworkID)
	}

	if info.Cors != "*" {
		t.Fatalf("Expected Cors flag to be set to %s, got %s", "*", info.Cors)
	}

	if !info.SyncEnabled {
		t.Fatal("Expected Sync to be enabled, but is false")
	if info.SyncEnabled {
		t.Fatal("Expected Sync to be disabled, but is true")
	}

	if !info.DeliverySkipCheck {
		t.Fatal("Expected DeliverySkipCheck to be enabled, but it is not")
	}

	node.Shutdown()
	cmd.Process.Kill()
}

func TestCmdLineOverridesFile(t *testing.T) {
func TestConfigCmdLineOverridesFile(t *testing.T) {

	// assign ports
	httpPort, err := assignTCPPort()
@@ -352,26 +362,27 @@ func TestCmdLineOverridesFile(t *testing.T) {

	//create a config file
	//first, create a default conf
	defaultConf := api.NewDefaultConfig()
	defaultConf := api.NewConfig()
	//change some values in order to test if they have been loaded
	defaultConf.SyncEnabled = false
	defaultConf.NetworkId = 54
	defaultConf.SyncEnabled = true
	defaultConf.NetworkID = 54
	defaultConf.Port = "8588"
	defaultConf.StoreParams.DbCapacity = 9000000
	defaultConf.ChunkerParams.Branches = 64
	defaultConf.HiveParams.CallInterval = 6000000000
	defaultConf.DbCapacity = 9000000
	defaultConf.HiveParams.KeepAliveInterval = 6000000000
	defaultConf.Swap.Params.Strategy.AutoCashInterval = 600 * time.Second
	defaultConf.SyncParams.KeyBufferSize = 512
	//defaultConf.SyncParams.KeyBufferSize = 512
	//create a TOML file
	out, err := tomlSettings.Marshal(&defaultConf)
	if err != nil {
		t.Fatalf("Error creating TOML file in TestFileOverride: %v", err)
	}
	//write file
	f, err := ioutil.TempFile("", "testconfig.toml")
	fname := "testconfig.toml"
	f, err := ioutil.TempFile("", fname)
	if err != nil {
		t.Fatalf("Error writing TOML file in TestFileOverride: %v", err)
	}
	defer os.Remove(fname)
	//write file
	_, err = f.WriteString(string(out))
	if err != nil {
@@ -392,7 +403,7 @@ func TestCmdLineOverridesFile(t *testing.T) {
	flags := []string{
		fmt.Sprintf("--%s", SwarmNetworkIdFlag.Name), "77",
		fmt.Sprintf("--%s", SwarmPortFlag.Name), httpPort,
		fmt.Sprintf("--%s", SwarmSyncEnabledFlag.Name),
		fmt.Sprintf("--%s", SwarmSyncDisabledFlag.Name),
		fmt.Sprintf("--%s", SwarmTomlConfigPathFlag.Name), f.Name(),
		fmt.Sprintf("--%s", SwarmAccountFlag.Name), account.Address.String(),
		"--ens-api", "",
@@ -427,33 +438,29 @@ func TestCmdLineOverridesFile(t *testing.T) {
		t.Fatalf("Expected port to be %s, got %s", httpPort, info.Port)
	}

	if info.NetworkId != expectNetworkId {
		t.Fatalf("Expected network ID to be %d, got %d", expectNetworkId, info.NetworkId)
	if info.NetworkID != expectNetworkId {
		t.Fatalf("Expected network ID to be %d, got %d", expectNetworkId, info.NetworkID)
	}

	if !info.SyncEnabled {
		t.Fatal("Expected Sync to be enabled, but is false")
	if info.SyncEnabled {
		t.Fatal("Expected Sync to be disabled, but is true")
	}

	if info.StoreParams.DbCapacity != 9000000 {
		t.Fatalf("Expected network ID to be %d, got %d", 54, info.NetworkId)
	if info.LocalStoreParams.DbCapacity != 9000000 {
		t.Fatalf("Expected Capacity to be %d, got %d", 9000000, info.LocalStoreParams.DbCapacity)
	}

	if info.ChunkerParams.Branches != 64 {
		t.Fatalf("Expected chunker params branches to be %d, got %d", 64, info.ChunkerParams.Branches)
	}

	if info.HiveParams.CallInterval != 6000000000 {
		t.Fatalf("Expected HiveParams CallInterval to be %d, got %d", uint64(6000000000), uint64(info.HiveParams.CallInterval))
	if info.HiveParams.KeepAliveInterval != 6000000000 {
		t.Fatalf("Expected HiveParams KeepAliveInterval to be %d, got %d", uint64(6000000000), uint64(info.HiveParams.KeepAliveInterval))
	}

	if info.Swap.Params.Strategy.AutoCashInterval != 600*time.Second {
		t.Fatalf("Expected SwapParams AutoCashInterval to be %ds, got %d", 600, info.Swap.Params.Strategy.AutoCashInterval)
	}

	if info.SyncParams.KeyBufferSize != 512 {
		t.Fatalf("Expected info.SyncParams.KeyBufferSize to be %d, got %d", 512, info.SyncParams.KeyBufferSize)
	}
	// if info.SyncParams.KeyBufferSize != 512 {
	// 	t.Fatalf("Expected info.SyncParams.KeyBufferSize to be %d, got %d", 512, info.SyncParams.KeyBufferSize)
	// }

	node.Shutdown()
}
@@ -23,6 +23,7 @@ import (
	"path/filepath"

	"github.com/ethereum/go-ethereum/cmd/utils"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/swarm/storage"
	"gopkg.in/urfave/cli.v1"
@@ -30,11 +31,11 @@ import (

func dbExport(ctx *cli.Context) {
	args := ctx.Args()
	if len(args) != 2 {
		utils.Fatalf("invalid arguments, please specify both <chunkdb> (path to a local chunk database) and <file> (path to write the tar archive to, - for stdout)")
	if len(args) != 3 {
		utils.Fatalf("invalid arguments, please specify both <chunkdb> (path to a local chunk database), <file> (path to write the tar archive to, - for stdout) and the base key")
	}

	store, err := openDbStore(args[0])
	store, err := openLDBStore(args[0], common.Hex2Bytes(args[2]))
	if err != nil {
		utils.Fatalf("error opening local chunk database: %s", err)
	}
@@ -62,11 +63,11 @@ func dbExport(ctx *cli.Context) {

func dbImport(ctx *cli.Context) {
	args := ctx.Args()
	if len(args) != 2 {
		utils.Fatalf("invalid arguments, please specify both <chunkdb> (path to a local chunk database) and <file> (path to read the tar archive from, - for stdin)")
	if len(args) != 3 {
		utils.Fatalf("invalid arguments, please specify both <chunkdb> (path to a local chunk database), <file> (path to read the tar archive from, - for stdin) and the base key")
	}

	store, err := openDbStore(args[0])
	store, err := openLDBStore(args[0], common.Hex2Bytes(args[2]))
	if err != nil {
		utils.Fatalf("error opening local chunk database: %s", err)
	}
@@ -94,11 +95,11 @@ func dbImport(ctx *cli.Context) {

func dbClean(ctx *cli.Context) {
	args := ctx.Args()
	if len(args) != 1 {
		utils.Fatalf("invalid arguments, please specify <chunkdb> (path to a local chunk database)")
	if len(args) != 2 {
		utils.Fatalf("invalid arguments, please specify <chunkdb> (path to a local chunk database) and the base key")
	}

	store, err := openDbStore(args[0])
	store, err := openLDBStore(args[0], common.Hex2Bytes(args[1]))
	if err != nil {
		utils.Fatalf("error opening local chunk database: %s", err)
	}
@@ -107,10 +108,13 @@ func dbClean(ctx *cli.Context) {
	store.Cleanup()
}

func openDbStore(path string) (*storage.DbStore, error) {
func openLDBStore(path string, basekey []byte) (*storage.LDBStore, error) {
	if _, err := os.Stat(filepath.Join(path, "CURRENT")); err != nil {
		return nil, fmt.Errorf("invalid chunkdb path: %s", err)
	}
	hash := storage.MakeHashFunc("SHA3")
	return storage.NewDbStore(path, hash, 10000000, 0)

	storeparams := storage.NewDefaultStoreParams()
	ldbparams := storage.NewLDBStoreParams(storeparams, path)
	ldbparams.BaseKey = basekey
	return storage.NewLDBStore(ldbparams)
}
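A usage sketch of the updated commands, with placeholder paths and the base key passed as unprefixed hex (compare the export/import test further below): swarm db export <datadir>/chunks export.tar <basekey-hex>, then swarm db import <datadir>/chunks export.tar <basekey-hex>.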
85 cmd/swarm/download.go Normal file
@@ -0,0 +1,85 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"

	"github.com/ethereum/go-ethereum/cmd/utils"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/swarm/api"
	swarm "github.com/ethereum/go-ethereum/swarm/api/client"
	"gopkg.in/urfave/cli.v1"
)

func download(ctx *cli.Context) {
	log.Debug("downloading content using swarm down")
	args := ctx.Args()
	dest := "."

	switch len(args) {
	case 0:
		utils.Fatalf("Usage: swarm down [options] <bzz locator> [<destination path>]")
	case 1:
		log.Trace(fmt.Sprintf("swarm down: no destination path - assuming working dir"))
	default:
		log.Trace(fmt.Sprintf("destination path arg: %s", args[1]))
		if absDest, err := filepath.Abs(args[1]); err == nil {
			dest = absDest
		} else {
			utils.Fatalf("could not get download path: %v", err)
		}
	}

	var (
		bzzapi      = strings.TrimRight(ctx.GlobalString(SwarmApiFlag.Name), "/")
		isRecursive = ctx.Bool(SwarmRecursiveFlag.Name)
		client      = swarm.NewClient(bzzapi)
	)

	if fi, err := os.Stat(dest); err == nil {
		if isRecursive && !fi.Mode().IsDir() {
			utils.Fatalf("destination path is not a directory!")
		}
	} else {
		if !os.IsNotExist(err) {
			utils.Fatalf("could not stat path: %v", err)
		}
	}

	uri, err := api.Parse(args[0])
	if err != nil {
		utils.Fatalf("could not parse uri argument: %v", err)
	}

	// assume behaviour according to --recursive switch
	if isRecursive {
		if err := client.DownloadDirectory(uri.Addr, uri.Path, dest); err != nil {
			utils.Fatalf("encountered an error while downloading directory: %v", err)
		}
	} else {
		// we are downloading a file
		log.Debug(fmt.Sprintf("downloading file/path from a manifest. hash: %s, path:%s", uri.Addr, uri.Path))

		err := client.DownloadFile(uri.Addr, uri.Path, dest)
		if err != nil {
			utils.Fatalf("could not download %s from given address: %s. error: %v", uri.Path, uri.Addr, err)
		}
	}
}
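Hedged example invocations of the command above, with a placeholder hash: swarm down bzz:/<hash> ./file for a single file, or swarm down --recursive bzz:/<hash>/ ./dir when fetching a whole manifest (the destination must then be a directory, per the check above).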
139 cmd/swarm/export_test.go Normal file
@@ -0,0 +1,139 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.

package main

import (
	"bytes"
	"crypto/md5"
	"crypto/rand"
	"io"
	"io/ioutil"
	"net/http"
	"os"
	"strings"
	"testing"

	"github.com/ethereum/go-ethereum/swarm"
)

// TestCLISwarmExportImport performs the following test:
// 1. runs swarm node
// 2. uploads a random file
// 3. runs an export of the local datastore
// 4. runs a second swarm node
// 5. imports the exported datastore
// 6. fetches the uploaded random file from the second node
func TestCLISwarmExportImport(t *testing.T) {
	cluster := newTestCluster(t, 1)

	// generate random 10mb file
	f, cleanup := generateRandomFile(t, 10000000)
	defer cleanup()

	// upload the file with 'swarm up' and expect a hash
	up := runSwarm(t, "--bzzapi", cluster.Nodes[0].URL, "up", f.Name())
	_, matches := up.ExpectRegexp(`[a-f\d]{64}`)
	up.ExpectExit()
	hash := matches[0]

	var info swarm.Info
	if err := cluster.Nodes[0].Client.Call(&info, "bzz_info"); err != nil {
		t.Fatal(err)
	}

	cluster.Stop()
	defer cluster.Cleanup()

	// generate an export.tar
	exportCmd := runSwarm(t, "db", "export", info.Path+"/chunks", info.Path+"/export.tar", strings.TrimPrefix(info.BzzKey, "0x"))
	exportCmd.ExpectExit()

	// start second cluster
	cluster2 := newTestCluster(t, 1)

	var info2 swarm.Info
	if err := cluster2.Nodes[0].Client.Call(&info2, "bzz_info"); err != nil {
		t.Fatal(err)
	}

	// stop second cluster, so that we close LevelDB
	cluster2.Stop()
	defer cluster2.Cleanup()

	// import the export.tar
	importCmd := runSwarm(t, "db", "import", info2.Path+"/chunks", info.Path+"/export.tar", strings.TrimPrefix(info2.BzzKey, "0x"))
	importCmd.ExpectExit()

	// spin second cluster back up
	cluster2.StartExistingNodes(t, 1, strings.TrimPrefix(info2.BzzAccount, "0x"))

	// try to fetch imported file
	res, err := http.Get(cluster2.Nodes[0].URL + "/bzz:/" + hash)
	if err != nil {
		t.Fatal(err)
	}

	if res.StatusCode != 200 {
		t.Fatalf("expected HTTP status %d, got %s", 200, res.Status)
	}

	// compare downloaded file with the generated random file
	mustEqualFiles(t, f, res.Body)
}

func mustEqualFiles(t *testing.T, up io.Reader, down io.Reader) {
	h := md5.New()
	upLen, err := io.Copy(h, up)
	if err != nil {
		t.Fatal(err)
	}
	upHash := h.Sum(nil)
	h.Reset()
	downLen, err := io.Copy(h, down)
	if err != nil {
		t.Fatal(err)
	}
	downHash := h.Sum(nil)

	if !bytes.Equal(upHash, downHash) || upLen != downLen {
		t.Fatalf("downloaded imported file md5=%x (length %v) is not the same as the generated one md5=%x (length %v)", downHash, downLen, upHash, upLen)
	}
}

func generateRandomFile(t *testing.T, size int) (f *os.File, teardown func()) {
	// create a tmp file
	tmp, err := ioutil.TempFile("", "swarm-test")
	if err != nil {
		t.Fatal(err)
	}

	// callback for tmp file cleanup
	teardown = func() {
		tmp.Close()
		os.Remove(tmp.Name())
	}

	// write `size` bytes of random data to the file (the original hardcoded
	// 10000000 here, ignoring the size parameter)
	buf := make([]byte, size)
	_, err = rand.Read(buf)
	if err != nil {
		t.Fatal(err)
	}
	ioutil.WriteFile(tmp.Name(), buf, 0755)

	return tmp, teardown
}
127 cmd/swarm/fs.go Normal file
@@ -0,0 +1,127 @@
|
||||
// Copyright 2018 The go-ethereum Authors
|
||||
// This file is part of go-ethereum.
|
||||
//
|
||||
// go-ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// go-ethereum is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package main

import (
	"context"
	"fmt"
	"path/filepath"
	"strings"
	"time"

	"github.com/ethereum/go-ethereum/cmd/utils"
	"github.com/ethereum/go-ethereum/node"
	"github.com/ethereum/go-ethereum/rpc"
	"github.com/ethereum/go-ethereum/swarm/fuse"
	"gopkg.in/urfave/cli.v1"
)

func mount(cliContext *cli.Context) {
	args := cliContext.Args()
	if len(args) < 2 {
		utils.Fatalf("Usage: swarm fs mount --ipcpath <path to bzzd.ipc> <manifestHash> <file name>")
	}

	client, err := dialRPC(cliContext)
	if err != nil {
		utils.Fatalf("had an error dialing to RPC endpoint: %v", err)
	}
	defer client.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	mf := &fuse.MountInfo{}
	mountPoint, err := filepath.Abs(filepath.Clean(args[1]))
	if err != nil {
		utils.Fatalf("error expanding path for mount point: %v", err)
	}
	err = client.CallContext(ctx, mf, "swarmfs_mount", args[0], mountPoint)
	if err != nil {
		utils.Fatalf("had an error calling the RPC endpoint while mounting: %v", err)
	}
}

func unmount(cliContext *cli.Context) {
	args := cliContext.Args()

	if len(args) < 1 {
		utils.Fatalf("Usage: swarm fs unmount --ipcpath <path to bzzd.ipc> <mount path>")
	}
	client, err := dialRPC(cliContext)
	if err != nil {
		utils.Fatalf("had an error dialing to RPC endpoint: %v", err)
	}
	defer client.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	mf := fuse.MountInfo{}
	err = client.CallContext(ctx, &mf, "swarmfs_unmount", args[0])
	if err != nil {
		utils.Fatalf("encountered an error calling the RPC endpoint while unmounting: %v", err)
	}
	fmt.Printf("%s\n", mf.LatestManifest) // print the latest manifest hash for user reference
}

func listMounts(cliContext *cli.Context) {
	client, err := dialRPC(cliContext)
	if err != nil {
		utils.Fatalf("had an error dialing to RPC endpoint: %v", err)
	}
	defer client.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	mf := []fuse.MountInfo{}
	err = client.CallContext(ctx, &mf, "swarmfs_listmounts")
	if err != nil {
		utils.Fatalf("encountered an error calling the RPC endpoint while listing mounts: %v", err)
	}
	if len(mf) == 0 {
		fmt.Print("Could not find any swarmfs mounts. Please make sure you've specified the correct RPC endpoint\n")
	} else {
		fmt.Printf("Found %d swarmfs mount(s):\n", len(mf))
		for i, mountInfo := range mf {
			fmt.Printf("%d:\n", i)
			fmt.Printf("\tMount point: %s\n", mountInfo.MountPoint)
			fmt.Printf("\tLatest Manifest: %s\n", mountInfo.LatestManifest)
			fmt.Printf("\tStart Manifest: %s\n", mountInfo.StartManifest)
		}
	}
}

func dialRPC(ctx *cli.Context) (*rpc.Client, error) {
	var endpoint string

	if ctx.IsSet(utils.IPCPathFlag.Name) {
		endpoint = ctx.String(utils.IPCPathFlag.Name)
	} else {
		utils.Fatalf("swarm ipc endpoint not specified")
	}

	if endpoint == "" {
		endpoint = node.DefaultIPCEndpoint(clientIdentifier)
	} else if strings.HasPrefix(endpoint, "rpc:") || strings.HasPrefix(endpoint, "ipc:") {
		// Backwards compatibility with geth < 1.5 which required
		// these prefixes.
		endpoint = endpoint[4:]
	}
	return rpc.Dial(endpoint)
}
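The three commands above are thin wrappers around the node's swarmfs_* RPC methods, so the same operations can be scripted directly over IPC. A minimal sketch (the IPC path is an assumption; use your node's actual bzzd.ipc location):

// List active swarmfs mounts, mirroring `swarm fs list`.
client, err := rpc.Dial("/home/user/.ethereum/bzzd.ipc") // assumed path
if err != nil {
	utils.Fatalf("had an error dialing to RPC endpoint: %v", err)
}
defer client.Close()

ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()

mounts := []fuse.MountInfo{}
if err := client.CallContext(ctx, &mounts, "swarmfs_listmounts"); err != nil {
	utils.Fatalf("%v", err)
}
for _, m := range mounts {
	fmt.Println(m.MountPoint, m.LatestManifest)
}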
234
cmd/swarm/fs_test.go
Normal file
@@ -0,0 +1,234 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.

package main

import (
	"bytes"
	"io"
	"io/ioutil"
	"os"
	"path/filepath"
	"strings"
	"testing"
	"time"

	"github.com/ethereum/go-ethereum/log"
	colorable "github.com/mattn/go-colorable"
)

func init() {
	log.PrintOrigins(true)
	log.Root().SetHandler(log.LvlFilterHandler(log.Lvl(*loglevel), log.StreamHandler(colorable.NewColorableStderr(), log.TerminalFormat(true))))
}

type testFile struct {
	filePath string
	content  string
}

// TestCLISwarmFs is a high-level test of swarmfs
func TestCLISwarmFs(t *testing.T) {
	cluster := newTestCluster(t, 3)
	defer cluster.Shutdown()

	// create a tmp dir
	mountPoint, err := ioutil.TempDir("", "swarm-test")
	log.Debug("swarmfs cli test", "1st mount", mountPoint)
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(mountPoint)

	handlingNode := cluster.Nodes[0]
	mhash := doUploadEmptyDir(t, handlingNode)
	log.Debug("swarmfs cli test: mounting first run", "ipc path", filepath.Join(handlingNode.Dir, handlingNode.IpcPath))

	mount := runSwarm(t, []string{
		"fs",
		"mount",
		"--ipcpath", filepath.Join(handlingNode.Dir, handlingNode.IpcPath),
		mhash,
		mountPoint,
	}...)
	mount.ExpectExit()

	filesToAssert := []*testFile{}

	dirPath, err := createDirInDir(mountPoint, "testSubDir")
	if err != nil {
		t.Fatal(err)
	}
	dirPath2, err := createDirInDir(dirPath, "AnotherTestSubDir")
	if err != nil {
		t.Fatal(err)
	}

	dummyContent := "somerandomtestcontentthatshouldbeasserted"
	dirs := []string{
		mountPoint,
		dirPath,
		dirPath2,
	}
	files := []string{"f1.tmp", "f2.tmp"}
	for _, d := range dirs {
		for _, entry := range files {
			tFile, err := createTestFileInPath(d, entry, dummyContent)
			if err != nil {
				t.Fatal(err)
			}
			filesToAssert = append(filesToAssert, tFile)
		}
	}
	if len(filesToAssert) != len(dirs)*len(files) {
		t.Fatalf("should have %d files to assert now, got %d", len(dirs)*len(files), len(filesToAssert))
	}
	hashRegexp := `[a-f\d]{64}`
	log.Debug("swarmfs cli test: unmounting first run...", "ipc path", filepath.Join(handlingNode.Dir, handlingNode.IpcPath))

	unmount := runSwarm(t, []string{
		"fs",
		"unmount",
		"--ipcpath", filepath.Join(handlingNode.Dir, handlingNode.IpcPath),
		mountPoint,
	}...)
	_, matches := unmount.ExpectRegexp(hashRegexp)
	unmount.ExpectExit()

	hash := matches[0]
	if hash == mhash {
		t.Fatal("this should not be equal")
	}
	log.Debug("swarmfs cli test: asserting no files in mount point")

	//check that there's nothing in the mount folder
	filesInDir, err := ioutil.ReadDir(mountPoint)
	if err != nil {
		t.Fatalf("had an error reading the directory: %v", err)
	}

	if len(filesInDir) != 0 {
		t.Fatal("there shouldn't be anything here")
	}

	secondMountPoint, err := ioutil.TempDir("", "swarm-test")
	log.Debug("swarmfs cli test", "2nd mount point at", secondMountPoint)
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(secondMountPoint)

	log.Debug("swarmfs cli test: remounting at second mount point", "ipc path", filepath.Join(handlingNode.Dir, handlingNode.IpcPath))

	//remount, check files
	newMount := runSwarm(t, []string{
		"fs",
		"mount",
		"--ipcpath", filepath.Join(handlingNode.Dir, handlingNode.IpcPath),
		hash, // the latest hash
		secondMountPoint,
	}...)

	newMount.ExpectExit()
	time.Sleep(1 * time.Second)

	filesInDir, err = ioutil.ReadDir(secondMountPoint)
	if err != nil {
		t.Fatal(err)
	}

	if len(filesInDir) == 0 {
		t.Fatal("there should be something here")
	}

	log.Debug("swarmfs cli test: traversing file tree to see it matches previous mount")

	for _, file := range filesToAssert {
		file.filePath = strings.Replace(file.filePath, mountPoint, secondMountPoint, -1)
		fileBytes, err := ioutil.ReadFile(file.filePath)

		if err != nil {
			t.Fatal(err)
		}
		if !bytes.Equal(fileBytes, bytes.NewBufferString(file.content).Bytes()) {
			t.Fatal("this should be equal")
		}
	}

	log.Debug("swarmfs cli test: unmounting second run", "ipc path", filepath.Join(handlingNode.Dir, handlingNode.IpcPath))

	unmountSec := runSwarm(t, []string{
		"fs",
		"unmount",
		"--ipcpath", filepath.Join(handlingNode.Dir, handlingNode.IpcPath),
		secondMountPoint,
	}...)

	_, matches = unmountSec.ExpectRegexp(hashRegexp)
	unmountSec.ExpectExit()

	if matches[0] != hash {
		t.Fatal("these should be equal - no changes made")
	}
}

func doUploadEmptyDir(t *testing.T, node *testNode) string {
	// create a tmp dir
	tmpDir, err := ioutil.TempDir("", "swarm-test")
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(tmpDir)

	hashRegexp := `[a-f\d]{64}`

	flags := []string{
		"--bzzapi", node.URL,
		"--recursive",
		"up",
		tmpDir}

	log.Info("swarmfs cli test: uploading dir with 'swarm up'")
	up := runSwarm(t, flags...)
	_, matches := up.ExpectRegexp(hashRegexp)
	up.ExpectExit()
	hash := matches[0]
	log.Info("swarmfs cli test: dir uploaded", "hash", hash)
	return hash
}

func createDirInDir(createInDir string, dirToCreate string) (string, error) {
	fullpath := filepath.Join(createInDir, dirToCreate)
	err := os.MkdirAll(fullpath, 0777)
	if err != nil {
		return "", err
	}
	return fullpath, nil
}

func createTestFileInPath(dir, filename, content string) (*testFile, error) {
	tFile := &testFile{}
	filePath := filepath.Join(dir, filename)
	file, err := os.Create(filePath)
	if err != nil {
		// propagate creation failures instead of silently returning an empty testFile
		return nil, err
	}
	tFile.content = content
	tFile.filePath = filePath

	_, err = io.WriteString(file, content)
	if err != nil {
		return nil, err
	}
	file.Close()

	return tFile, nil
}
@@ -38,11 +38,11 @@ func hash(ctx *cli.Context) {
	defer f.Close()

	stat, _ := f.Stat()
-	chunker := storage.NewTreeChunker(storage.NewChunkerParams())
-	key, err := chunker.Split(f, stat.Size(), nil, nil, nil)
+	fileStore := storage.NewFileStore(storage.NewMapChunkStore(), storage.NewFileStoreParams())
+	addr, _, err := fileStore.Store(f, stat.Size(), false)
	if err != nil {
		utils.Fatalf("%v\n", err)
	} else {
-		fmt.Printf("%v\n", key)
+		fmt.Printf("%v\n", addr)
	}
}
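The hunk above replaces the old TreeChunker plumbing with the FileStore API. A minimal sketch of the new call shape, hashing a reader through an in-memory chunk store without touching disk (reader and size are placeholders):

fileStore := storage.NewFileStore(storage.NewMapChunkStore(), storage.NewFileStoreParams())
addr, _, err := fileStore.Store(reader, size, false) // false: plaintext, true: encrypted upload
if err != nil {
	utils.Fatalf("%v\n", err)
}
fmt.Printf("%v\n", addr) // the swarm hash of the content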
@@ -34,7 +34,6 @@ import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/console"
	"github.com/ethereum/go-ethereum/crypto"
-	"github.com/ethereum/go-ethereum/ethclient"
	"github.com/ethereum/go-ethereum/internal/debug"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/node"
@@ -49,6 +48,22 @@ import (
)

const clientIdentifier = "swarm"
+const helpTemplate = `NAME:
+{{.HelpName}} - {{.Usage}}
+
+USAGE:
+{{if .UsageText}}{{.UsageText}}{{else}}{{.HelpName}}{{if .VisibleFlags}} [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}}{{end}}{{if .Category}}
+
+CATEGORY:
+{{.Category}}{{end}}{{if .Description}}
+
+DESCRIPTION:
+{{.Description}}{{end}}{{if .VisibleFlags}}
+
+OPTIONS:
+{{range .VisibleFlags}}{{.}}
+{{end}}{{end}}
+`

var (
	gitCommit string // Git SHA1 commit hash of the release (set via linker flags)
@@ -87,10 +102,6 @@ var (
		Usage:  "Network identifier (integer, default 3=swarm testnet)",
		EnvVar: SWARM_ENV_NETWORK_ID,
	}
-	SwarmConfigPathFlag = cli.StringFlag{
-		Name:  "bzzconfig",
-		Usage: "DEPRECATED: please use --config path/to/TOML-file",
-	}
	SwarmSwapEnabledFlag = cli.BoolFlag{
		Name:  "swap",
		Usage: "Swarm SWAP enabled (default false)",
@@ -101,10 +112,20 @@ var (
		Usage:  "URL of the Ethereum API provider to use to settle SWAP payments",
		EnvVar: SWARM_ENV_SWAP_API,
	}
-	SwarmSyncEnabledFlag = cli.BoolTFlag{
-		Name:   "sync",
-		Usage:  "Swarm Syncing enabled (default true)",
-		EnvVar: SWARM_ENV_SYNC_ENABLE,
-	}
+	SwarmSyncDisabledFlag = cli.BoolTFlag{
+		Name:   "nosync",
+		Usage:  "Disable swarm syncing",
+		EnvVar: SWARM_ENV_SYNC_DISABLE,
+	}
+	SwarmSyncUpdateDelay = cli.DurationFlag{
+		Name:   "sync-update-delay",
+		Usage:  "Duration for sync subscriptions update after no new peers are added (default 15s)",
+		EnvVar: SWARM_ENV_SYNC_UPDATE_DELAY,
+	}
+	SwarmDeliverySkipCheckFlag = cli.BoolFlag{
+		Name:   "delivery-skip-check",
+		Usage:  "Skip chunk delivery check (default false)",
+		EnvVar: SWARM_ENV_DELIVERY_SKIP_CHECK,
+	}
	EnsAPIFlag = cli.StringSliceFlag{
		Name: "ens-api",
@@ -116,7 +137,7 @@ var (
		Usage: "Swarm HTTP endpoint",
		Value: "http://127.0.0.1:8500",
	}
-	SwarmRecursiveUploadFlag = cli.BoolFlag{
+	SwarmRecursiveFlag = cli.BoolFlag{
		Name:  "recursive",
		Usage: "Upload directories recursively",
	}
@@ -136,20 +157,29 @@ var (
		Name:  "mime",
		Usage: "force mime type",
	}
+	SwarmEncryptedFlag = cli.BoolFlag{
+		Name:  "encrypt",
+		Usage: "use encrypted upload",
+	}
	CorsStringFlag = cli.StringFlag{
		Name:   "corsdomain",
		Usage:  "Domain on which to send Access-Control-Allow-Origin header (multiple domains can be supplied separated by a ',')",
		EnvVar: SWARM_ENV_CORS,
	}
-
-	// the following flags are deprecated and should be removed in the future
-	DeprecatedEthAPIFlag = cli.StringFlag{
-		Name:  "ethapi",
-		Usage: "DEPRECATED: please use --ens-api and --swap-api",
-	}
-	DeprecatedEnsAddrFlag = cli.StringFlag{
-		Name:  "ens-addr",
-		Usage: "DEPRECATED: ENS contract address, please use --ens-api with contract address according to its format",
-	}
+	SwarmStorePath = cli.StringFlag{
+		Name:   "store.path",
+		Usage:  "Path to leveldb chunk DB (default <$GETH_ENV_DIR>/swarm/bzz-<$BZZ_KEY>/chunks)",
+		EnvVar: SWARM_ENV_STORE_PATH,
+	}
+	SwarmStoreCapacity = cli.Uint64Flag{
+		Name:   "store.size",
+		Usage:  "Number of chunks (5M is roughly 20-25GB) (default 5000000)",
+		EnvVar: SWARM_ENV_STORE_CAPACITY,
+	}
+	SwarmStoreCacheCapacity = cli.UintFlag{
+		Name:   "store.cache.size",
+		Usage:  "Number of recent chunks cached in memory (default 5000)",
+		EnvVar: SWARM_ENV_STORE_CACHE_CAPACITY,
+	}
)

@@ -180,91 +210,130 @@ func init() {
	app.Copyright = "Copyright 2013-2016 The go-ethereum Authors"
	app.Commands = []cli.Command{
		{
-			Action:    version,
-			Name:      "version",
-			Usage:     "Print version numbers",
-			ArgsUsage: " ",
-			Description: `
-The output of this command is supposed to be machine-readable.
-`,
+			Action:             version,
+			CustomHelpTemplate: helpTemplate,
+			Name:               "version",
+			Usage:              "Print version numbers",
+			Description:        "The output of this command is supposed to be machine-readable",
		},
		{
-			Action:    upload,
-			Name:      "up",
-			Usage:     "upload a file or directory to swarm using the HTTP API",
-			ArgsUsage: " <file>",
-			Description: `
-"upload a file or directory to swarm using the HTTP API and prints the root hash",
-`,
+			Action:             upload,
+			CustomHelpTemplate: helpTemplate,
+			Name:               "up",
+			Usage:              "uploads a file or directory to swarm using the HTTP API",
+			ArgsUsage:          "<file>",
+			Flags:              []cli.Flag{SwarmEncryptedFlag},
+			Description:        "uploads a file or directory to swarm using the HTTP API and prints the root hash",
		},
		{
-			Action:    list,
-			Name:      "ls",
-			Usage:     "list files and directories contained in a manifest",
-			ArgsUsage: " <manifest> [<prefix>]",
-			Description: `
-Lists files and directories contained in a manifest.
-`,
+			Action:             list,
+			CustomHelpTemplate: helpTemplate,
+			Name:               "ls",
+			Usage:              "list files and directories contained in a manifest",
+			ArgsUsage:          "<manifest> [<prefix>]",
+			Description:        "Lists files and directories contained in a manifest",
		},
		{
-			Action:    hash,
-			Name:      "hash",
-			Usage:     "print the swarm hash of a file or directory",
-			ArgsUsage: " <file>",
-			Description: `
-Prints the swarm hash of file or directory.
-`,
+			Action:             hash,
+			CustomHelpTemplate: helpTemplate,
+			Name:               "hash",
+			Usage:              "print the swarm hash of a file or directory",
+			ArgsUsage:          "<file>",
+			Description:        "Prints the swarm hash of file or directory",
		},
		{
-			Name:      "manifest",
-			Usage:     "update a MANIFEST",
-			ArgsUsage: "manifest COMMAND",
-			Description: `
-Updates a MANIFEST by adding/removing/updating the hash of a path.
-`,
+			Action:    download,
+			Name:      "down",
+			Flags:     []cli.Flag{SwarmRecursiveFlag},
+			Usage:     "downloads a swarm manifest or a file inside a manifest",
+			ArgsUsage: " <uri> [<dir>]",
+			Description: `
+Downloads a swarm bzz uri to the given dir. When no dir is provided, working directory is assumed. --recursive flag is expected when downloading a manifest with multiple entries.
+`,
		},
+
+		{
+			Name:               "manifest",
+			CustomHelpTemplate: helpTemplate,
+			Usage:              "perform operations on swarm manifests",
+			ArgsUsage:          "COMMAND",
+			Description:        "Updates a MANIFEST by adding/removing/updating the hash of a path.\nCOMMAND could be: add, update, remove",
			Subcommands: []cli.Command{
				{
-					Action:    add,
-					Name:      "add",
-					Usage:     "add a new path to the manifest",
-					ArgsUsage: "<MANIFEST> <path> <hash> [<content-type>]",
-					Description: `
-Adds a new path to the manifest
-`,
+					Action:             add,
+					CustomHelpTemplate: helpTemplate,
+					Name:               "add",
+					Usage:              "add a new path to the manifest",
+					ArgsUsage:          "<MANIFEST> <path> <hash> [<content-type>]",
+					Description:        "Adds a new path to the manifest",
				},
				{
-					Action:    update,
-					Name:      "update",
-					Usage:     "update the hash for an already existing path in the manifest",
-					ArgsUsage: "<MANIFEST> <path> <newhash> [<newcontent-type>]",
-					Description: `
-Update the hash for an already existing path in the manifest
-`,
+					Action:             update,
+					CustomHelpTemplate: helpTemplate,
+					Name:               "update",
+					Usage:              "update the hash for an already existing path in the manifest",
+					ArgsUsage:          "<MANIFEST> <path> <newhash> [<newcontent-type>]",
+					Description:        "Update the hash for an already existing path in the manifest",
				},
				{
-					Action:    remove,
-					Name:      "remove",
-					Usage:     "removes a path from the manifest",
-					ArgsUsage: "<MANIFEST> <path>",
-					Description: `
-Removes a path from the manifest
-`,
+					Action:             remove,
+					CustomHelpTemplate: helpTemplate,
+					Name:               "remove",
+					Usage:              "removes a path from the manifest",
+					ArgsUsage:          "<MANIFEST> <path>",
+					Description:        "Removes a path from the manifest",
				},
			},
		},
		{
-			Name:      "db",
-			Usage:     "manage the local chunk database",
-			ArgsUsage: "db COMMAND",
-			Description: `
-Manage the local chunk database.
-`,
+			Name:               "fs",
+			CustomHelpTemplate: helpTemplate,
+			Usage:              "perform FUSE operations",
+			ArgsUsage:          "fs COMMAND",
+			Description:        "Performs FUSE operations by mounting/unmounting/listing mount points. This assumes you already have a Swarm node running locally. For all operation you must reference the correct path to bzzd.ipc in order to communicate with the node",
			Subcommands: []cli.Command{
				{
-					Action:    dbExport,
-					Name:      "export",
-					Usage:     "export a local chunk database as a tar archive (use - to send to stdout)",
-					ArgsUsage: "<chunkdb> <file>",
+					Action:             mount,
+					CustomHelpTemplate: helpTemplate,
+					Name:               "mount",
+					Flags:              []cli.Flag{utils.IPCPathFlag},
+					Usage:              "mount a swarm hash to a mount point",
+					ArgsUsage:          "swarm fs mount --ipcpath <path to bzzd.ipc> <manifest hash> <mount point>",
+					Description:        "Mounts a Swarm manifest hash to a given mount point. This assumes you already have a Swarm node running locally. You must reference the correct path to your bzzd.ipc file",
				},
+				{
+					Action:             unmount,
+					CustomHelpTemplate: helpTemplate,
+					Name:               "unmount",
+					Flags:              []cli.Flag{utils.IPCPathFlag},
+					Usage:              "unmount a swarmfs mount",
+					ArgsUsage:          "swarm fs unmount --ipcpath <path to bzzd.ipc> <mount point>",
+					Description:        "Unmounts a swarmfs mount residing at <mount point>. This assumes you already have a Swarm node running locally. You must reference the correct path to your bzzd.ipc file",
+				},
+				{
+					Action:             listMounts,
+					CustomHelpTemplate: helpTemplate,
+					Name:               "list",
+					Flags:              []cli.Flag{utils.IPCPathFlag},
+					Usage:              "list swarmfs mounts",
+					ArgsUsage:          "swarm fs list --ipcpath <path to bzzd.ipc>",
+					Description:        "Lists all mounted swarmfs volumes. This assumes you already have a Swarm node running locally. You must reference the correct path to your bzzd.ipc file",
+				},
+			},
+		},
+		{
+			Name:               "db",
+			CustomHelpTemplate: helpTemplate,
+			Usage:              "manage the local chunk database",
+			ArgsUsage:          "db COMMAND",
+			Description:        "Manage the local chunk database",
			Subcommands: []cli.Command{
				{
					Action:             dbExport,
+					CustomHelpTemplate: helpTemplate,
					Name:               "export",
					Usage:              "export a local chunk database as a tar archive (use - to send to stdout)",
					ArgsUsage:          "<chunkdb> <file>",
					Description: `
Export a local chunk database as a tar archive (use - to send to stdout).

@@ -277,10 +346,11 @@ pv(1) tool to get a progress bar:
`,
				},
				{
-					Action:    dbImport,
-					Name:      "import",
-					Usage:     "import chunks from a tar archive into a local chunk database (use - to read from stdin)",
-					ArgsUsage: "<chunkdb> <file>",
+					Action:             dbImport,
+					CustomHelpTemplate: helpTemplate,
+					Name:               "import",
+					Usage:              "import chunks from a tar archive into a local chunk database (use - to read from stdin)",
+					ArgsUsage:          "<chunkdb> <file>",
					Description: `
Import chunks from a tar archive into a local chunk database (use - to read from stdin).

@@ -293,27 +363,16 @@ pv(1) tool to get a progress bar:
`,
				},
				{
-					Action:    dbClean,
-					Name:      "clean",
-					Usage:     "remove corrupt entries from a local chunk database",
-					ArgsUsage: "<chunkdb>",
-					Description: `
-Remove corrupt entries from a local chunk database.
-`,
+					Action:             dbClean,
+					CustomHelpTemplate: helpTemplate,
+					Name:               "clean",
+					Usage:              "remove corrupt entries from a local chunk database",
+					ArgsUsage:          "<chunkdb>",
+					Description:        "Remove corrupt entries from a local chunk database",
				},
			},
		},
-		{
-			Action: func(ctx *cli.Context) {
-				utils.Fatalf("ERROR: 'swarm cleandb' has been removed, please use 'swarm db clean'.")
-			},
-			Name:      "cleandb",
-			Usage:     "DEPRECATED: use 'swarm db clean'",
-			ArgsUsage: " ",
-			Description: `
-DEPRECATED: use 'swarm db clean'.
-`,
-		},

		// See config.go
		DumpConfigCommand,
	}
@@ -339,10 +398,11 @@ DEPRECATED: use 'swarm db clean'.
		CorsStringFlag,
		EnsAPIFlag,
		SwarmTomlConfigPathFlag,
-		SwarmConfigPathFlag,
		SwarmSwapEnabledFlag,
		SwarmSwapAPIFlag,
-		SwarmSyncEnabledFlag,
+		SwarmSyncDisabledFlag,
+		SwarmSyncUpdateDelay,
+		SwarmDeliverySkipCheckFlag,
		SwarmListenAddrFlag,
		SwarmPortFlag,
		SwarmAccountFlag,
@@ -350,15 +410,24 @@ DEPRECATED: use 'swarm db clean'.
		ChequebookAddrFlag,
		// upload flags
		SwarmApiFlag,
-		SwarmRecursiveUploadFlag,
+		SwarmRecursiveFlag,
		SwarmWantManifestFlag,
		SwarmUploadDefaultPath,
		SwarmUpFromStdinFlag,
		SwarmUploadMimeType,
-		//deprecated flags
-		DeprecatedEthAPIFlag,
-		DeprecatedEnsAddrFlag,
+		// storage flags
+		SwarmStorePath,
+		SwarmStoreCapacity,
+		SwarmStoreCacheCapacity,
	}
+	rpcFlags := []cli.Flag{
+		utils.WSEnabledFlag,
+		utils.WSListenAddrFlag,
+		utils.WSPortFlag,
+		utils.WSApiFlag,
+		utils.WSAllowedOriginsFlag,
+	}
+	app.Flags = append(app.Flags, rpcFlags...)
	app.Flags = append(app.Flags, debug.Flags...)
	app.Flags = append(app.Flags, swarmmetrics.Flags...)
	app.Before = func(ctx *cli.Context) error {
@@ -383,16 +452,12 @@ func main() {
}

func version(ctx *cli.Context) error {
	fmt.Println(strings.Title(clientIdentifier))
-	fmt.Println("Version:", params.Version)
+	fmt.Println("Version:", SWARM_VERSION)
	if gitCommit != "" {
		fmt.Println("Git Commit:", gitCommit)
	}
-	fmt.Println("Network Id:", ctx.GlobalInt(utils.NetworkIdFlag.Name))
	fmt.Println("Go Version:", runtime.Version())
	fmt.Println("OS:", runtime.GOOS)
-	fmt.Printf("GOPATH=%s\n", os.Getenv("GOPATH"))
-	fmt.Printf("GOROOT=%s\n", runtime.GOROOT())
	return nil
}

@@ -405,6 +470,10 @@ func bzzd(ctx *cli.Context) error {
	}

	cfg := defaultNodeConfig
+
+	//pss operates on ws
+	cfg.WSModules = append(cfg.WSModules, "pss")
+
	//geth only supports --datadir via command line
	//in order to be consistent within swarm, if we pass --datadir via environment variable
	//or via config file, we get the same directory for geth and swarm
@@ -421,7 +490,7 @@ func bzzd(ctx *cli.Context) error {
	//due to overriding behavior
	initSwarmNode(bzzconfig, stack, ctx)
	//register BZZ as node.Service in the ethereum node
-	registerBzzService(bzzconfig, ctx, stack)
+	registerBzzService(bzzconfig, stack)
	//start the node
	utils.StartNode(stack)

@@ -439,7 +508,7 @@ func bzzd(ctx *cli.Context) error {
		bootnodes := strings.Split(bzzconfig.BootNodes, ",")
		injectBootnodes(stack.Server(), bootnodes)
	} else {
-		if bzzconfig.NetworkId == 3 {
+		if bzzconfig.NetworkID == 3 {
			injectBootnodes(stack.Server(), testbetBootNodes)
		}
	}
@@ -448,21 +517,11 @@ func bzzd(ctx *cli.Context) error {
	return nil
}

-func registerBzzService(bzzconfig *bzzapi.Config, ctx *cli.Context, stack *node.Node) {
-
+func registerBzzService(bzzconfig *bzzapi.Config, stack *node.Node) {
	//define the swarm service boot function
-	boot := func(ctx *node.ServiceContext) (node.Service, error) {
-		var swapClient *ethclient.Client
-		var err error
-		if bzzconfig.SwapApi != "" {
-			log.Info("connecting to SWAP API", "url", bzzconfig.SwapApi)
-			swapClient, err = ethclient.Dial(bzzconfig.SwapApi)
-			if err != nil {
-				return nil, fmt.Errorf("error connecting to SWAP API %s: %s", bzzconfig.SwapApi, err)
-			}
-		}
-
-		return swarm.NewSwarm(ctx, swapClient, bzzconfig)
+	boot := func(_ *node.ServiceContext) (node.Service, error) {
+		// In production, mockStore must be always nil.
+		return swarm.NewSwarm(bzzconfig, nil)
	}
	//register within the ethereum node
	if err := stack.Register(boot); err != nil {
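The boot closure handed to stack.Register must return a node.Service implementation; swarm.NewSwarm provides the real one. For orientation, a minimal sketch of what that interface requires (the type name is illustrative and not part of the diff; assumes the p2p and rpc packages are imported):

// noopService is a hypothetical, do-nothing node.Service, shown only to
// illustrate the contract the registered boot function must satisfy.
type noopService struct{}

func (s *noopService) Protocols() []p2p.Protocol   { return nil }
func (s *noopService) APIs() []rpc.API             { return nil }
func (s *noopService) Start(srv *p2p.Server) error { return nil }
func (s *noopService) Stop() error                 { return nil }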
@@ -131,13 +131,13 @@ func addEntryToManifest(ctx *cli.Context, mhash, path, hash, ctype string) string {
		longestPathEntry = api.ManifestEntry{}
	)

-	mroot, err := client.DownloadManifest(mhash)
+	mroot, isEncrypted, err := client.DownloadManifest(mhash)
	if err != nil {
		utils.Fatalf("Manifest download failed: %v", err)
	}

	//TODO: check if the "hash" to add is valid and present in swarm
-	_, err = client.DownloadManifest(hash)
+	_, _, err = client.DownloadManifest(hash)
	if err != nil {
		utils.Fatalf("Hash to add is not present: %v", err)
	}
@@ -180,7 +180,7 @@ func addEntryToManifest(ctx *cli.Context, mhash, path, hash, ctype string) string {
		mroot.Entries = append(mroot.Entries, newEntry)
	}

-	newManifestHash, err := client.UploadManifest(mroot)
+	newManifestHash, err := client.UploadManifest(mroot, isEncrypted)
	if err != nil {
		utils.Fatalf("Manifest upload failed: %v", err)
	}
@@ -197,7 +197,7 @@ func updateEntryInManifest(ctx *cli.Context, mhash, path, hash, ctype string) string {
		longestPathEntry = api.ManifestEntry{}
	)

-	mroot, err := client.DownloadManifest(mhash)
+	mroot, isEncrypted, err := client.DownloadManifest(mhash)
	if err != nil {
		utils.Fatalf("Manifest download failed: %v", err)
	}
@@ -257,7 +257,7 @@ func updateEntryInManifest(ctx *cli.Context, mhash, path, hash, ctype string) string {
		mroot = newMRoot
	}

-	newManifestHash, err := client.UploadManifest(mroot)
+	newManifestHash, err := client.UploadManifest(mroot, isEncrypted)
	if err != nil {
		utils.Fatalf("Manifest upload failed: %v", err)
	}
@@ -273,7 +273,7 @@ func removeEntryFromManifest(ctx *cli.Context, mhash, path string) string {
		longestPathEntry = api.ManifestEntry{}
	)

-	mroot, err := client.DownloadManifest(mhash)
+	mroot, isEncrypted, err := client.DownloadManifest(mhash)
	if err != nil {
		utils.Fatalf("Manifest download failed: %v", err)
	}
@@ -323,7 +323,7 @@ func removeEntryFromManifest(ctx *cli.Context, mhash, path string) string {
		mroot = newMRoot
	}

-	newManifestHash, err := client.UploadManifest(mroot)
+	newManifestHash, err := client.UploadManifest(mroot, isEncrypted)
	if err != nil {
		utils.Fatalf("Manifest upload failed: %v", err)
	}
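All three manifest operations now follow the same round trip: download the manifest, learn whether it is encrypted, edit it, and re-upload with the same encryption mode. A condensed sketch (mhash and the appended entry are placeholders):

mroot, isEncrypted, err := client.DownloadManifest(mhash)
if err != nil {
	utils.Fatalf("Manifest download failed: %v", err)
}
mroot.Entries = append(mroot.Entries, newEntry) // placeholder edit

newManifestHash, err := client.UploadManifest(mroot, isEncrypted)
if err != nil {
	utils.Fatalf("Manifest upload failed: %v", err)
}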
@@ -81,6 +81,7 @@ type testCluster struct {
//
// When starting more than one node, they are connected together using the
// admin SetPeer RPC method.

func newTestCluster(t *testing.T, size int) *testCluster {
	cluster := &testCluster{}
	defer func() {
@@ -96,18 +97,7 @@ func newTestCluster(t *testing.T, size int) *testCluster {
	cluster.TmpDir = tmpdir

	// start the nodes
-	cluster.Nodes = make([]*testNode, 0, size)
-	for i := 0; i < size; i++ {
-		dir := filepath.Join(cluster.TmpDir, fmt.Sprintf("swarm%02d", i))
-		if err := os.Mkdir(dir, 0700); err != nil {
-			t.Fatal(err)
-		}
-
-		node := newTestNode(t, dir)
-		node.Name = fmt.Sprintf("swarm%02d", i)
-
-		cluster.Nodes = append(cluster.Nodes, node)
-	}
+	cluster.StartNewNodes(t, size)

	if size == 1 {
		return cluster
@@ -145,14 +135,51 @@ func (c *testCluster) Shutdown() {
	os.RemoveAll(c.TmpDir)
}

+func (c *testCluster) Stop() {
+	for _, node := range c.Nodes {
+		node.Shutdown()
+	}
+}
+
+func (c *testCluster) StartNewNodes(t *testing.T, size int) {
+	c.Nodes = make([]*testNode, 0, size)
+	for i := 0; i < size; i++ {
+		dir := filepath.Join(c.TmpDir, fmt.Sprintf("swarm%02d", i))
+		if err := os.Mkdir(dir, 0700); err != nil {
+			t.Fatal(err)
+		}
+
+		node := newTestNode(t, dir)
+		node.Name = fmt.Sprintf("swarm%02d", i)
+
+		c.Nodes = append(c.Nodes, node)
+	}
+}
+
+func (c *testCluster) StartExistingNodes(t *testing.T, size int, bzzaccount string) {
+	c.Nodes = make([]*testNode, 0, size)
+	for i := 0; i < size; i++ {
+		dir := filepath.Join(c.TmpDir, fmt.Sprintf("swarm%02d", i))
+		node := existingTestNode(t, dir, bzzaccount)
+		node.Name = fmt.Sprintf("swarm%02d", i)
+
+		c.Nodes = append(c.Nodes, node)
+	}
+}
+
+func (c *testCluster) Cleanup() {
+	os.RemoveAll(c.TmpDir)
+}
+
type testNode struct {
-	Name   string
-	Addr   string
-	URL    string
-	Enode  string
-	Dir    string
-	Client *rpc.Client
-	Cmd    *cmdtest.TestCmd
+	Name    string
+	Addr    string
+	URL     string
+	Enode   string
+	Dir     string
+	IpcPath string
+	Client  *rpc.Client
+	Cmd     *cmdtest.TestCmd
}

const testPassphrase = "swarm-test-passphrase"

@@ -181,6 +208,72 @@ func getTestAccount(t *testing.T, dir string) (conf *node.Config, account accounts.Account) {
	return conf, account
}

+func existingTestNode(t *testing.T, dir string, bzzaccount string) *testNode {
+	conf, _ := getTestAccount(t, dir)
+	node := &testNode{Dir: dir}
+
+	// use a unique IPCPath when running tests on Windows
+	if runtime.GOOS == "windows" {
+		conf.IPCPath = fmt.Sprintf("bzzd-%s.ipc", bzzaccount)
+	}
+
+	// assign ports
+	httpPort, err := assignTCPPort()
+	if err != nil {
+		t.Fatal(err)
+	}
+	p2pPort, err := assignTCPPort()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// start the node
+	node.Cmd = runSwarm(t,
+		"--port", p2pPort,
+		"--nodiscover",
+		"--datadir", dir,
+		"--ipcpath", conf.IPCPath,
+		"--ens-api", "",
+		"--bzzaccount", bzzaccount,
+		"--bzznetworkid", "321",
+		"--bzzport", httpPort,
+		"--verbosity", "6",
+	)
+	node.Cmd.InputLine(testPassphrase)
+	defer func() {
+		if t.Failed() {
+			node.Shutdown()
+		}
+	}()
+
+	// wait for the node to start
+	for start := time.Now(); time.Since(start) < 10*time.Second; time.Sleep(50 * time.Millisecond) {
+		node.Client, err = rpc.Dial(conf.IPCEndpoint())
+		if err == nil {
+			break
+		}
+	}
+	if node.Client == nil {
+		t.Fatal(err)
+	}
+
+	// load info
+	var info swarm.Info
+	if err := node.Client.Call(&info, "bzz_info"); err != nil {
+		t.Fatal(err)
+	}
+	node.Addr = net.JoinHostPort("127.0.0.1", info.Port)
+	node.URL = "http://" + node.Addr
+
+	var nodeInfo p2p.NodeInfo
+	if err := node.Client.Call(&nodeInfo, "admin_nodeInfo"); err != nil {
+		t.Fatal(err)
+	}
+	node.Enode = fmt.Sprintf("enode://%s@127.0.0.1:%s", nodeInfo.ID, p2pPort)
+
+	return node
+}
+
func newTestNode(t *testing.T, dir string) *testNode {

	conf, account := getTestAccount(t, dir)
@@ -239,6 +332,7 @@ func newTestNode(t *testing.T, dir string) *testNode {
		t.Fatal(err)
	}
	node.Enode = fmt.Sprintf("enode://%s@127.0.0.1:%s", nodeInfo.ID, p2pPort)
+	node.IpcPath = conf.IPCPath

	return node
}
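Together, Stop and StartExistingNodes let a test restart a cluster against the same data directories, which is what the IpcPath bookkeeping above enables. A sketch of the intended call pattern (the account string is a placeholder):

func TestClusterRestart(t *testing.T) {
	cluster := newTestCluster(t, 2)
	defer cluster.Shutdown()

	// ... exercise the running nodes here ...

	cluster.Stop()
	// Reuse the surviving data dirs under the same account.
	cluster.StartExistingNodes(t, 2, "0xdeadbeef") // placeholder bzzaccount
}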
101
cmd/swarm/swarm-smoke/main.go
Normal file
@@ -0,0 +1,101 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.

package main

import (
	"os"
	"sort"

	"github.com/ethereum/go-ethereum/log"
	colorable "github.com/mattn/go-colorable"

	cli "gopkg.in/urfave/cli.v1"
)

var (
	endpoints        []string
	includeLocalhost bool
	cluster          string
	scheme           string
	filesize         int
	from             int
	to               int
)

func main() {
	log.PrintOrigins(true)
	log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(colorable.NewColorableStderr(), log.TerminalFormat(true))))

	app := cli.NewApp()
	app.Name = "smoke-test"
	app.Usage = ""

	app.Flags = []cli.Flag{
		cli.StringFlag{
			Name:        "cluster-endpoint",
			Value:       "testing",
			Usage:       "cluster to point to (open, or testing)",
			Destination: &cluster,
		},
		cli.IntFlag{
			Name:        "cluster-from",
			Value:       8501,
			Usage:       "swarm node (from)",
			Destination: &from,
		},
		cli.IntFlag{
			Name:        "cluster-to",
			Value:       8512,
			Usage:       "swarm node (to)",
			Destination: &to,
		},
		cli.StringFlag{
			Name:        "cluster-scheme",
			Value:       "http",
			Usage:       "http or https",
			Destination: &scheme,
		},
		cli.BoolFlag{
			Name:        "include-localhost",
			Usage:       "whether to include localhost:8500 as an endpoint",
			Destination: &includeLocalhost,
		},
		cli.IntFlag{
			Name:        "filesize",
			Value:       1,
			Usage:       "file size for generated random file in MB",
			Destination: &filesize,
		},
	}

	app.Commands = []cli.Command{
		{
			Name:    "upload_and_sync",
			Aliases: []string{"c"},
			Usage:   "upload and sync",
			Action:  cliUploadAndSync,
		},
	}

	sort.Sort(cli.FlagsByName(app.Flags))
	sort.Sort(cli.CommandsByName(app.Commands))

	err := app.Run(os.Args)
	if err != nil {
		log.Error(err.Error())
	}
}
184
cmd/swarm/swarm-smoke/upload_and_sync.go
Normal file
@@ -0,0 +1,184 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.

package main

import (
	"bytes"
	"crypto/md5"
	"crypto/rand"
	"fmt"
	"io"
	"io/ioutil"
	"net/http"
	"os"
	"os/exec"
	"strings"
	"sync"
	"time"

	"github.com/ethereum/go-ethereum/log"
	"github.com/pborman/uuid"

	cli "gopkg.in/urfave/cli.v1"
)

func generateEndpoints(scheme string, cluster string, from int, to int) {
	for port := from; port <= to; port++ {
		endpoints = append(endpoints, fmt.Sprintf("%s://%v.%s.swarm-gateways.net", scheme, port, cluster))
	}

	if includeLocalhost {
		endpoints = append(endpoints, "http://localhost:8500")
	}
}

func cliUploadAndSync(c *cli.Context) error {
	defer func(now time.Time) { log.Info("total time", "time", time.Since(now), "size", filesize) }(time.Now())

	generateEndpoints(scheme, cluster, from, to)

	log.Info("uploading to " + endpoints[0] + " and syncing")

	f, cleanup := generateRandomFile(filesize * 1000000)
	defer cleanup()

	hash, err := upload(f, endpoints[0])
	if err != nil {
		log.Error(err.Error())
		return err
	}

	fhash, err := digest(f)
	if err != nil {
		log.Error(err.Error())
		return err
	}

	log.Info("uploaded successfully", "hash", hash, "digest", fmt.Sprintf("%x", fhash))

	if filesize < 10 {
		time.Sleep(15 * time.Second)
	} else {
		time.Sleep(2 * time.Duration(filesize) * time.Second)
	}

	wg := sync.WaitGroup{}
	for _, endpoint := range endpoints {
		endpoint := endpoint
		ruid := uuid.New()[:8]
		wg.Add(1)
		go func(endpoint string, ruid string) {
			for {
				err := fetch(hash, endpoint, fhash, ruid)
				if err != nil {
					continue
				}

				wg.Done()
				return
			}
		}(endpoint, ruid)
	}
	wg.Wait()
	log.Info("all endpoints synced random file successfully")

	return nil
}

// fetch gets the requested `hash` from the `endpoint` and compares it with the `original` file
func fetch(hash string, endpoint string, original []byte, ruid string) error {
	log.Trace("sleeping", "ruid", ruid)
	time.Sleep(1 * time.Second)

	log.Trace("http get request", "ruid", ruid, "api", endpoint, "hash", hash)
	res, err := http.Get(endpoint + "/bzz:/" + hash + "/")
	if err != nil {
		log.Warn(err.Error(), "ruid", ruid)
		return err
	}
	log.Trace("http get response", "ruid", ruid, "api", endpoint, "hash", hash, "code", res.StatusCode, "len", res.ContentLength)

	if res.StatusCode != 200 {
		err := fmt.Errorf("expected status code %d, got %v", 200, res.StatusCode)
		log.Warn(err.Error(), "ruid", ruid)
		return err
	}

	defer res.Body.Close()

	rdigest, err := digest(res.Body)
	if err != nil {
		log.Warn(err.Error(), "ruid", ruid)
		return err
	}

	if !bytes.Equal(rdigest, original) {
		err := fmt.Errorf("downloaded imported file md5=%x is not the same as the generated one=%x", rdigest, original)
		log.Warn(err.Error(), "ruid", ruid)
		return err
	}

	log.Trace("downloaded file matches random file", "ruid", ruid, "len", res.ContentLength)

	return nil
}

// upload uploads a file `f` to `endpoint` via the `swarm up` command
func upload(f *os.File, endpoint string) (string, error) {
	var out bytes.Buffer
	cmd := exec.Command("swarm", "--bzzapi", endpoint, "up", f.Name())
	cmd.Stdout = &out
	err := cmd.Run()
	if err != nil {
		return "", err
	}
	hash := strings.TrimRight(out.String(), "\r\n")
	return hash, nil
}

func digest(r io.Reader) ([]byte, error) {
	h := md5.New()
	_, err := io.Copy(h, r)
	if err != nil {
		return nil, err
	}
	return h.Sum(nil), nil
}

// generateRandomFile creates a temporary file with the requested byte size
func generateRandomFile(size int) (f *os.File, teardown func()) {
	// create a tmp file
	tmp, err := ioutil.TempFile("", "swarm-test")
	if err != nil {
		panic(err)
	}

	// callback for tmp file cleanup
	teardown = func() {
		tmp.Close()
		os.Remove(tmp.Name())
	}

	buf := make([]byte, size)
	_, err = rand.Read(buf)
	if err != nil {
		panic(err)
	}
	if err := ioutil.WriteFile(tmp.Name(), buf, 0755); err != nil {
		panic(err)
	}

	return tmp, teardown
}
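The helpers above compose in the order cliUploadAndSync uses them: generate a random file, hash it, and keep the digest for later comparison against each endpoint. A minimal sketch:

// Generate a 1 MB random file and compute its md5 before uploading.
f, cleanup := generateRandomFile(1 * 1000000)
defer cleanup()

fhash, err := digest(f)
if err != nil {
	log.Error(err.Error())
	return err
}
log.Info("generated file", "digest", fmt.Sprintf("%x", fhash))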
@@ -40,12 +40,13 @@ func upload(ctx *cli.Context) {
	args := ctx.Args()
	var (
		bzzapi       = strings.TrimRight(ctx.GlobalString(SwarmApiFlag.Name), "/")
-		recursive    = ctx.GlobalBool(SwarmRecursiveUploadFlag.Name)
+		recursive    = ctx.GlobalBool(SwarmRecursiveFlag.Name)
		wantManifest = ctx.GlobalBoolT(SwarmWantManifestFlag.Name)
		defaultPath  = ctx.GlobalString(SwarmUploadDefaultPath.Name)
		fromStdin    = ctx.GlobalBool(SwarmUpFromStdinFlag.Name)
		mimeType     = ctx.GlobalString(SwarmUploadMimeType.Name)
		client       = swarm.NewClient(bzzapi)
+		toEncrypt    = ctx.Bool(SwarmEncryptedFlag.Name)
		file         string
	)

@@ -76,7 +77,7 @@ func upload(ctx *cli.Context) {
		utils.Fatalf("Error opening file: %s", err)
	}
	defer f.Close()
-	hash, err := client.UploadRaw(f, f.Size)
+	hash, err := client.UploadRaw(f, f.Size, toEncrypt)
	if err != nil {
		utils.Fatalf("Upload failed: %s", err)
	}
@@ -97,7 +98,7 @@ func upload(ctx *cli.Context) {
			if !recursive {
				return "", errors.New("Argument is a directory and recursive upload is disabled")
			}
-			return client.UploadDirectory(file, defaultPath, "")
+			return client.UploadDirectory(file, defaultPath, "", toEncrypt)
		}
	} else {
		doUpload = func() (string, error) {
@@ -110,7 +111,7 @@ func upload(ctx *cli.Context) {
				mimeType = detectMimeType(file)
			}
			f.ContentType = mimeType
-			return client.Upload(f, "")
+			return client.Upload(f, "", toEncrypt)
		}
	}
	hash, err := doUpload()
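Every upload path in the client API now takes the encryption choice as its final argument. A compact sketch of the three variants touched by this hunk (file, dir, and defaultPath are placeholders):

// Raw upload: bare content, no manifest metadata.
hash, err := client.UploadRaw(f, f.Size, toEncrypt)

// Recursive directory upload with an optional default entry.
hash, err = client.UploadDirectory(dir, defaultPath, "", toEncrypt)

// Single-file upload with an explicit content type already set on f.
hash, err = client.Upload(f, "", toEncrypt)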
@@ -17,60 +17,259 @@
package main

import (
+	"bytes"
+	"flag"
+	"fmt"
	"io"
	"io/ioutil"
	"net/http"
	"os"
+	"path"
+	"path/filepath"
+	"strings"
	"testing"
+	"time"
+
+	"github.com/ethereum/go-ethereum/log"
+	swarm "github.com/ethereum/go-ethereum/swarm/api/client"
+	colorable "github.com/mattn/go-colorable"
)

+var loglevel = flag.Int("loglevel", 3, "verbosity of logs")
+
+func init() {
+	log.PrintOrigins(true)
+	log.Root().SetHandler(log.LvlFilterHandler(log.Lvl(*loglevel), log.StreamHandler(colorable.NewColorableStderr(), log.TerminalFormat(true))))
+}
+
// TestCLISwarmUp tests that running 'swarm up' makes the resulting file
// available from all nodes via the HTTP API
func TestCLISwarmUp(t *testing.T) {
-	// start 3 node cluster
-	t.Log("starting 3 node cluster")
+	testCLISwarmUp(false, t)
+}
+func TestCLISwarmUpRecursive(t *testing.T) {
+	testCLISwarmUpRecursive(false, t)
+}
+
+// TestCLISwarmUpEncrypted tests that running 'swarm encrypted-up' makes the resulting file
+// available from all nodes via the HTTP API
+func TestCLISwarmUpEncrypted(t *testing.T) {
+	testCLISwarmUp(true, t)
+}
+func TestCLISwarmUpEncryptedRecursive(t *testing.T) {
+	testCLISwarmUpRecursive(true, t)
+}
+
+func testCLISwarmUp(toEncrypt bool, t *testing.T) {
+	log.Info("starting 3 node cluster")
	cluster := newTestCluster(t, 3)
	defer cluster.Shutdown()

	// create a tmp file
	tmp, err := ioutil.TempFile("", "swarm-test")
-	assertNil(t, err)
+	if err != nil {
+		t.Fatal(err)
+	}
	defer tmp.Close()
	defer os.Remove(tmp.Name())
-	_, err = io.WriteString(tmp, "data")
-	assertNil(t, err)

-	// upload the file with 'swarm up' and expect a hash
-	t.Log("uploading file with 'swarm up'")
-	up := runSwarm(t, "--bzzapi", cluster.Nodes[0].URL, "up", tmp.Name())
-	_, matches := up.ExpectRegexp(`[a-f\d]{64}`)
+	// write data to file
+	data := "notsorandomdata"
+	_, err = io.WriteString(tmp, data)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	hashRegexp := `[a-f\d]{64}`
+	flags := []string{
+		"--bzzapi", cluster.Nodes[0].URL,
+		"up",
+		tmp.Name()}
+	if toEncrypt {
+		hashRegexp = `[a-f\d]{128}`
+		flags = []string{
+			"--bzzapi", cluster.Nodes[0].URL,
+			"up",
+			"--encrypt",
+			tmp.Name()}
+	}
+	// upload the file with 'swarm up' and expect a hash
+	log.Info(fmt.Sprintf("uploading file with 'swarm up'"))
+	up := runSwarm(t, flags...)
+	_, matches := up.ExpectRegexp(hashRegexp)
	up.ExpectExit()
	hash := matches[0]
-	t.Logf("file uploaded with hash %s", hash)
+	log.Info("file uploaded", "hash", hash)

	// get the file from the HTTP API of each node
	for _, node := range cluster.Nodes {
-		t.Logf("getting file from %s", node.Name)
+		log.Info("getting file from node", "node", node.Name)
+
		res, err := http.Get(node.URL + "/bzz:/" + hash)
-		assertNil(t, err)
-		assertHTTPResponse(t, res, http.StatusOK, "data")
-	}
-}
+		if err != nil {
+			t.Fatal(err)
+		}
+		defer res.Body.Close()

-func assertNil(t *testing.T, err error) {
-	if err != nil {
-		t.Fatal(err)
-	}
-}
+		reply, err := ioutil.ReadAll(res.Body)
+		if err != nil {
+			t.Fatal(err)
+		}
+		if res.StatusCode != 200 {
+			t.Fatalf("expected HTTP status 200, got %s", res.Status)
+		}
+		if string(reply) != data {
+			t.Fatalf("expected HTTP body %q, got %q", data, reply)
+		}
+		log.Debug("verifying uploaded file using `swarm down`")
+		//try to get the content with `swarm down`
+		tmpDownload, err := ioutil.TempDir("", "swarm-test")
+		tmpDownload = path.Join(tmpDownload, "tmpfile.tmp")
+		if err != nil {
+			t.Fatal(err)
+		}
+		defer os.RemoveAll(tmpDownload)
+
+		bzzLocator := "bzz:/" + hash
+		flags = []string{
+			"--bzzapi", cluster.Nodes[0].URL,
+			"down",
+			bzzLocator,
+			tmpDownload,
+		}
+
+		down := runSwarm(t, flags...)
+		down.ExpectExit()
+
+		fi, err := os.Stat(tmpDownload)
+		if err != nil {
+			t.Fatalf("could not stat path: %v", err)
+		}
+
+		switch mode := fi.Mode(); {
+		case mode.IsRegular():
+			downloadedBytes, err := ioutil.ReadFile(tmpDownload)
+			if err != nil {
+				t.Fatalf("had an error reading the downloaded file: %v", err)
+			}
+			if !bytes.Equal(downloadedBytes, bytes.NewBufferString(data).Bytes()) {
+				t.Fatalf("retrieved data and posted data not equal!")
+			}
+
+		default:
+			t.Fatalf("expected to download regular file, got %s", fi.Mode())
+		}
+	}
+
+	timeout := time.Duration(2 * time.Second)
+	httpClient := http.Client{
+		Timeout: timeout,
+	}
+
+	// try to squeeze a timeout by getting an non-existent hash from each node
+	for _, node := range cluster.Nodes {
+		_, err := httpClient.Get(node.URL + "/bzz:/1023e8bae0f70be7d7b5f74343088ba408a218254391490c85ae16278e230340")
+		// we're speeding up the timeout here since netstore has a 60 seconds timeout on a request
+		if err != nil && !strings.Contains(err.Error(), "Client.Timeout exceeded while awaiting headers") {
+			t.Fatal(err)
+		}
+		// this is disabled since it takes 60s due to netstore timeout
+		// if res.StatusCode != 404 {
+		// 	t.Fatalf("expected HTTP status 404, got %s", res.Status)
+		// }
+	}
+}

-func assertHTTPResponse(t *testing.T, res *http.Response, expectedStatus int, expectedBody string) {
-	defer res.Body.Close()
-	if res.StatusCode != expectedStatus {
-		t.Fatalf("expected HTTP status %d, got %s", expectedStatus, res.Status)
-	}
-	data, err := ioutil.ReadAll(res.Body)
-	assertNil(t, err)
-	if string(data) != expectedBody {
-		t.Fatalf("expected HTTP body %q, got %q", expectedBody, data)
-	}
-}
+func testCLISwarmUpRecursive(toEncrypt bool, t *testing.T) {
+	fmt.Println("starting 3 node cluster")
+	cluster := newTestCluster(t, 3)
+	defer cluster.Shutdown()
+
+	tmpUploadDir, err := ioutil.TempDir("", "swarm-test")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(tmpUploadDir)
+	// create tmp files
+	data := "notsorandomdata"
+	for _, path := range []string{"tmp1", "tmp2"} {
+		if err := ioutil.WriteFile(filepath.Join(tmpUploadDir, path), bytes.NewBufferString(data).Bytes(), 0644); err != nil {
+			t.Fatal(err)
+		}
+	}
+
+	hashRegexp := `[a-f\d]{64}`
+	flags := []string{
+		"--bzzapi", cluster.Nodes[0].URL,
+		"--recursive",
+		"up",
+		tmpUploadDir}
+	if toEncrypt {
+		hashRegexp = `[a-f\d]{128}`
+		flags = []string{
+			"--bzzapi", cluster.Nodes[0].URL,
+			"--recursive",
+			"up",
+			"--encrypt",
+			tmpUploadDir}
+	}
+	// upload the file with 'swarm up' and expect a hash
+	log.Info(fmt.Sprintf("uploading file with 'swarm up'"))
+	up := runSwarm(t, flags...)
+	_, matches := up.ExpectRegexp(hashRegexp)
+	up.ExpectExit()
+	hash := matches[0]
+	log.Info("dir uploaded", "hash", hash)
+
+	// get the file from the HTTP API of each node
+	for _, node := range cluster.Nodes {
+		log.Info("getting file from node", "node", node.Name)
+		//try to get the content with `swarm down`
+		tmpDownload, err := ioutil.TempDir("", "swarm-test")
+		if err != nil {
+			t.Fatal(err)
+		}
+		defer os.RemoveAll(tmpDownload)
+		bzzLocator := "bzz:/" + hash
+		flagss := []string{}
+		flagss = []string{
+			"--bzzapi", cluster.Nodes[0].URL,
+			"down",
+			"--recursive",
+			bzzLocator,
+			tmpDownload,
+		}
+
+		fmt.Println("downloading from swarm with recursive")
+		down := runSwarm(t, flagss...)
+		down.ExpectExit()
+
+		files, err := ioutil.ReadDir(tmpDownload)
+		for _, v := range files {
+			fi, err := os.Stat(path.Join(tmpDownload, v.Name()))
+			if err != nil {
+				t.Fatalf("got an error: %v", err)
+			}
+
+			switch mode := fi.Mode(); {
+			case mode.IsRegular():
+				if file, err := swarm.Open(path.Join(tmpDownload, v.Name())); err != nil {
+					t.Fatalf("encountered an error opening the file returned from the CLI: %v", err)
+				} else {
+					ff := make([]byte, len(data))
+					io.ReadFull(file, ff)
+					buf := bytes.NewBufferString(data)
+
+					if !bytes.Equal(ff, buf.Bytes()) {
+						t.Fatalf("retrieved data and posted data not equal!")
+					}
+				}
+			default:
+				t.Fatalf("this shouldn't happen")
+			}
+		}
+		if err != nil {
+			t.Fatalf("could not list files at: %v", files)
+		}
+	}
+}
@@ -27,6 +27,7 @@ import (
	"runtime"
	"strconv"
	"strings"
	"time"

	"github.com/ethereum/go-ethereum/accounts"
	"github.com/ethereum/go-ethereum/accounts/keystore"
@@ -48,6 +49,7 @@ import (
	"github.com/ethereum/go-ethereum/les"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/metrics"
	"github.com/ethereum/go-ethereum/metrics/influxdb"
	"github.com/ethereum/go-ethereum/node"
	"github.com/ethereum/go-ethereum/p2p"
	"github.com/ethereum/go-ethereum/p2p/discover"
@@ -360,10 +362,6 @@ var (
		Name:  "ethstats",
		Usage: "Reporting URL of a ethstats service (nodename:secret@host:port)",
	}
	MetricsEnabledFlag = cli.BoolFlag{
		Name:  metrics.MetricsEnabledFlag,
		Usage: "Enable metrics collection and reporting",
	}
	FakePoWFlag = cli.BoolFlag{
		Name:  "fakepow",
		Usage: "Disables proof-of-work verification",
@@ -532,6 +530,45 @@ var (
		Usage: "Minimum POW accepted",
		Value: whisper.DefaultMinimumPoW,
	}

	// Metrics flags
	MetricsEnabledFlag = cli.BoolFlag{
		Name:  metrics.MetricsEnabledFlag,
		Usage: "Enable metrics collection and reporting",
	}
	MetricsEnableInfluxDBFlag = cli.BoolFlag{
		Name:  "metrics.influxdb",
		Usage: "Enable metrics export/push to an external InfluxDB database",
	}
	MetricsInfluxDBEndpointFlag = cli.StringFlag{
		Name:  "metrics.influxdb.endpoint",
		Usage: "InfluxDB API endpoint to report metrics to",
		Value: "http://localhost:8086",
	}
	MetricsInfluxDBDatabaseFlag = cli.StringFlag{
		Name:  "metrics.influxdb.database",
		Usage: "InfluxDB database name to push reported metrics to",
		Value: "geth",
	}
	MetricsInfluxDBUsernameFlag = cli.StringFlag{
		Name:  "metrics.influxdb.username",
		Usage: "Username to authorize access to the database",
		Value: "test",
	}
	MetricsInfluxDBPasswordFlag = cli.StringFlag{
		Name:  "metrics.influxdb.password",
		Usage: "Password to authorize access to the database",
		Value: "test",
	}
	// The `host` tag is part of every measurement sent to InfluxDB. Queries on tags are faster in InfluxDB.
	// It is used so that we can group all nodes and average a measurement across all of them, but also so
	// that we can select a specific node and inspect its measurements.
	// https://docs.influxdata.com/influxdb/v1.4/concepts/key_concepts/#tag-key
	MetricsInfluxDBHostTagFlag = cli.StringFlag{
		Name:  "metrics.influxdb.host.tag",
		Usage: "InfluxDB `host` tag attached to all measurements",
		Value: "localhost",
	}
)

// MakeDataDir retrieves the currently requested data directory, terminating
@@ -1084,6 +1121,9 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *eth.Config) {
		}
		cfg.Genesis = core.DefaultRinkebyGenesisBlock()
	case ctx.GlobalBool(DeveloperFlag.Name):
		if !ctx.GlobalIsSet(NetworkIdFlag.Name) {
			cfg.NetworkId = 1337
		}
		// Create new developer account or reuse existing one
		var (
			developer accounts.Account
@@ -1181,6 +1221,27 @@ func SetupNetwork(ctx *cli.Context) {
	params.TargetGasLimit = ctx.GlobalUint64(TargetGasLimitFlag.Name)
}

func SetupMetrics(ctx *cli.Context) {
	if metrics.Enabled {
		log.Info("Enabling metrics collection")
		var (
			enableExport = ctx.GlobalBool(MetricsEnableInfluxDBFlag.Name)
			endpoint     = ctx.GlobalString(MetricsInfluxDBEndpointFlag.Name)
			database     = ctx.GlobalString(MetricsInfluxDBDatabaseFlag.Name)
			username     = ctx.GlobalString(MetricsInfluxDBUsernameFlag.Name)
			password     = ctx.GlobalString(MetricsInfluxDBPasswordFlag.Name)
			hosttag      = ctx.GlobalString(MetricsInfluxDBHostTagFlag.Name)
		)

		if enableExport {
			log.Info("Enabling metrics export to InfluxDB")
			go influxdb.InfluxDBWithTags(metrics.DefaultRegistry, 10*time.Second, endpoint, database, username, password, "geth.", map[string]string{
				"host": hosttag,
			})
		}
	}
}
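The exporter goroutine above is the whole integration surface: a registry, a poll interval, and connection details. A minimal standalone sketch of the same call, with placeholder endpoint, credentials and host tag (illustrative values, not project defaults beyond those in the flags above):

	go influxdb.InfluxDBWithTags(metrics.DefaultRegistry, 10*time.Second,
		"http://localhost:8086", "geth", "test", "test", "geth.",
		map[string]string{"host": "node-1"})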

// MakeChainDatabase open an LevelDB using the flags passed to the client and will hard crash if it fails.
func MakeChainDatabase(ctx *cli.Context, stack *node.Node) ethdb.Database {
	var (

@@ -140,8 +140,8 @@ func processArgs() {
	}

	if *asymmetricMode && len(*argPub) > 0 {
		pub = crypto.ToECDSAPub(common.FromHex(*argPub))
		if !isKeyValid(pub) {
		var err error
		if pub, err = crypto.UnmarshalPubkey(common.FromHex(*argPub)); err != nil {
			utils.Fatalf("invalid public key")
		}
	}
@@ -321,10 +321,6 @@ func startServer() error {
	return nil
}

func isKeyValid(k *ecdsa.PublicKey) bool {
	return k.X != nil && k.Y != nil
}

func configureNode() {
	var err error
	var p2pAccept bool
@@ -340,9 +336,8 @@ func configureNode() {
		if b == nil {
			utils.Fatalf("Error: can not convert hexadecimal string")
		}
		pub = crypto.ToECDSAPub(b)
		if !isKeyValid(pub) {
			utils.Fatalf("Error: invalid public key")
		if pub, err = crypto.UnmarshalPubkey(b); err != nil {
			utils.Fatalf("Error: invalid peer public key")
		}
	}
}

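Both hunks replace the unchecked crypto.ToECDSAPub conversion with crypto.UnmarshalPubkey, which validates the encoded point while decoding. A hedged usage sketch (hexKey is a hypothetical variable):

	pub, err := crypto.UnmarshalPubkey(common.FromHex(hexKey))
	if err != nil {
		utils.Fatalf("invalid public key") // malformed or off-curve input is rejected here
	}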
@@ -19,15 +19,20 @@ package common

import "encoding/hex"

// ToHex returns the hex representation of b, prefixed with '0x'.
// For empty slices, the return value is "0x0".
//
// Deprecated: use hexutil.Encode instead.
func ToHex(b []byte) string {
	hex := Bytes2Hex(b)
	// Prefer output of "0x0" instead of "0x"
	if len(hex) == 0 {
		hex = "0"
	}
	return "0x" + hex
}

// FromHex returns the bytes represented by the hexadecimal string s.
// s may be prefixed with "0x".
func FromHex(s string) []byte {
	if len(s) > 1 {
		if s[0:2] == "0x" || s[0:2] == "0X" {
@@ -40,9 +45,7 @@ func FromHex(s string) []byte {
	return Hex2Bytes(s)
}
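A quick sketch of the documented edge cases, assuming the common package as above:

	common.ToHex(nil)        // "0x0" rather than "0x"
	common.FromHex("0x1a2b") // []byte{0x1a, 0x2b}; the 0x prefix is stripped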

// Copy bytes
//
// Returns an exact copy of the provided bytes
// CopyBytes returns an exact copy of the provided bytes.
func CopyBytes(b []byte) (copiedBytes []byte) {
	if b == nil {
		return nil
@@ -53,14 +56,17 @@ func CopyBytes(b []byte) (copiedBytes []byte) {
	return
}

// hasHexPrefix validates str begins with '0x' or '0X'.
func hasHexPrefix(str string) bool {
	return len(str) >= 2 && str[0] == '0' && (str[1] == 'x' || str[1] == 'X')
}

// isHexCharacter returns bool of c being a valid hexadecimal.
func isHexCharacter(c byte) bool {
	return ('0' <= c && c <= '9') || ('a' <= c && c <= 'f') || ('A' <= c && c <= 'F')
}

// isHex validates whether each byte is valid hexadecimal string.
func isHex(str string) bool {
	if len(str)%2 != 0 {
		return false
@@ -73,16 +79,18 @@ func isHex(str string) bool {
	return true
}

// Bytes2Hex returns the hexadecimal encoding of d.
func Bytes2Hex(d []byte) string {
	return hex.EncodeToString(d)
}

// Hex2Bytes returns the bytes represented by the hexadecimal string str.
func Hex2Bytes(str string) []byte {
	h, _ := hex.DecodeString(str)

	return h
}

// Hex2BytesFixed returns bytes of a specified fixed length flen.
func Hex2BytesFixed(str string, flen int) []byte {
	h, _ := hex.DecodeString(str)
	if len(h) == flen {
@@ -96,6 +104,7 @@ func Hex2BytesFixed(str string, flen int) []byte {
	return hh
}

// RightPadBytes zero-pads slice to the right up to length l.
func RightPadBytes(slice []byte, l int) []byte {
	if l <= len(slice) {
		return slice
@@ -107,6 +116,7 @@ func RightPadBytes(slice []byte, l int) []byte {
	return padded
}

// LeftPadBytes zero-pads slice to the left up to length l.
func LeftPadBytes(slice []byte, l int) []byte {
	if l <= len(slice) {
		return slice

@@ -31,11 +31,17 @@ import (

var versionRegexp = regexp.MustCompile(`([0-9]+)\.([0-9]+)\.([0-9]+)`)

// Contract contains information about a compiled contract, alongside its code.
type Contract struct {
	Code string       `json:"code"`
	Info ContractInfo `json:"info"`
}

// ContractInfo contains information about a compiled contract, including access
// to the ABI definition, user and developer docs, and metadata.
//
// Depending on the source, language version, compiler version, and compiler
// options will provide information about how the contract was compiled.
type ContractInfo struct {
	Source   string `json:"source"`
	Language string `json:"language"`
@@ -142,8 +148,22 @@ func (s *Solidity) run(cmd *exec.Cmd, source string) (map[string]*Contract, erro
	if err := cmd.Run(); err != nil {
		return nil, fmt.Errorf("solc: %v\n%s", err, stderr.Bytes())
	}

	return ParseCombinedJSON(stdout.Bytes(), source, s.Version, s.Version, strings.Join(s.makeArgs(), " "))
}

// ParseCombinedJSON takes the direct output of a solc --combined-output run and
// parses it into a map of string contract name to Contract structs. The
// provided source, language and compiler version, and compiler options are all
// passed through into the Contract structs.
//
// The solc output is expected to contain ABI, user docs, and dev docs.
//
// Returns an error if the JSON is malformed or missing data, or if the JSON
// embedded within the JSON is malformed.
func ParseCombinedJSON(combinedJSON []byte, source string, languageVersion string, compilerVersion string, compilerOptions string) (map[string]*Contract, error) {
	var output solcOutput
	if err := json.Unmarshal(stdout.Bytes(), &output); err != nil {
	if err := json.Unmarshal(combinedJSON, &output); err != nil {
		return nil, err
	}

@@ -168,9 +188,9 @@ func (s *Solidity) run(cmd *exec.Cmd, source string) (map[string]*Contract, erro
			Info: ContractInfo{
				Source:          source,
				Language:        "Solidity",
				LanguageVersion: s.Version,
				CompilerVersion: s.Version,
				CompilerOptions: strings.Join(s.makeArgs(), " "),
				LanguageVersion: languageVersion,
				CompilerVersion: compilerVersion,
				CompilerOptions: compilerOptions,
				AbiDefinition:   abi,
				UserDoc:         userdoc,
				DeveloperDoc:    devdoc,

@@ -39,6 +39,7 @@ import (

const uintBits = 32 << (uint64(^uint(0)) >> 63)

// Errors
var (
	ErrEmptyString = &decError{"empty hex string"}
	ErrSyntax      = &decError{"invalid hex string"}

@@ -22,12 +22,13 @@ import (
	"math/big"
)

// Various big integer limit values.
var (
	tt255     = BigPow(2, 255)
	tt256     = BigPow(2, 256)
	tt256m1   = new(big.Int).Sub(tt256, big.NewInt(1))
	MaxBig256 = new(big.Int).Set(tt256m1)
	tt63      = BigPow(2, 63)
	MaxBig256 = new(big.Int).Set(tt256m1)
	MaxBig63  = new(big.Int).Sub(tt63, big.NewInt(1))
)

@@ -78,7 +79,7 @@ func ParseBig256(s string) (*big.Int, bool) {
	return bigint, ok
}

// MustParseBig parses s as a 256 bit big integer and panics if the string is invalid.
// MustParseBig256 parses s as a 256 bit big integer and panics if the string is invalid.
func MustParseBig256(s string) *big.Int {
	v, ok := ParseBig256(s)
	if !ok {
@@ -186,9 +187,8 @@ func U256(x *big.Int) *big.Int {
func S256(x *big.Int) *big.Int {
	if x.Cmp(tt255) < 0 {
		return x
	} else {
		return new(big.Int).Sub(x, tt256)
	}
	return new(big.Int).Sub(x, tt256)
}
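S256 reinterprets an unsigned 256-bit value in two's complement. A worked sketch using this package's own BigPow helper (math here is common/math):

	x := math.BigPow(2, 256)
	x.Sub(x, big.NewInt(1))               // x = 2**256 - 1
	fmt.Println(math.S256(x))             // -1, since x >= 2**255
	fmt.Println(math.S256(big.NewInt(7))) // 7, below the sign boundary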

// Exp implements exponentiation by squaring.

@@ -21,8 +21,8 @@ import (
	"strconv"
)

// Integer limit values.
const (
	// Integer limit values.
	MaxInt8  = 1<<7 - 1
	MinInt8  = -1 << 7
	MaxInt16 = 1<<15 - 1

@@ -14,7 +14,7 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// package mclock is a wrapper for a monotonic clock source
// Package mclock is a wrapper for a monotonic clock source
package mclock

import (
@@ -23,8 +23,10 @@ import (
	"github.com/aristanetworks/goarista/monotime"
)

type AbsTime time.Duration // absolute monotonic time
// AbsTime represents absolute monotonic time.
type AbsTime time.Duration

// Now returns the current absolute monotonic time.
func Now() AbsTime {
	return AbsTime(monotime.Now())
}
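Because AbsTime is monotonic, it is safe for measuring durations across wall-clock adjustments. A usage sketch (doWork is a placeholder for the timed operation):

	start := mclock.Now()
	doWork()
	elapsed := time.Duration(mclock.Now() - start)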
@@ -1,197 +0,0 @@
// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package number

import (
	"math/big"

	"github.com/ethereum/go-ethereum/common"
)

var tt256 = new(big.Int).Lsh(big.NewInt(1), 256)
var tt256m1 = new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 256), big.NewInt(1))
var tt255 = new(big.Int).Lsh(big.NewInt(1), 255)

func limitUnsigned256(x *Number) *Number {
	x.num.And(x.num, tt256m1)
	return x
}

func limitSigned256(x *Number) *Number {
	if x.num.Cmp(tt255) < 0 {
		return x
	} else {
		x.num.Sub(x.num, tt256)
		return x
	}
}

// Number function
type Initialiser func(n int64) *Number

// A Number represents a generic integer with a bounding function limiter. Limit is called after each operations
// to give "fake" bounded integers. New types of Number can be created through NewInitialiser returning a lambda
// with the new Initialiser.
type Number struct {
	num   *big.Int
	limit func(n *Number) *Number
}

// Returns a new initialiser for a new *Number without having to expose certain fields
func NewInitialiser(limiter func(*Number) *Number) Initialiser {
	return func(n int64) *Number {
		return &Number{big.NewInt(n), limiter}
	}
}

// Return a Number with a UNSIGNED limiter up to 256 bits
func Uint256(n int64) *Number {
	return &Number{big.NewInt(n), limitUnsigned256}
}

// Return a Number with a SIGNED limiter up to 256 bits
func Int256(n int64) *Number {
	return &Number{big.NewInt(n), limitSigned256}
}

// Returns a Number with a SIGNED unlimited size
func Big(n int64) *Number {
	return &Number{big.NewInt(n), func(x *Number) *Number { return x }}
}

// Sets i to sum of x+y
func (i *Number) Add(x, y *Number) *Number {
	i.num.Add(x.num, y.num)
	return i.limit(i)
}

// Sets i to difference of x-y
func (i *Number) Sub(x, y *Number) *Number {
	i.num.Sub(x.num, y.num)
	return i.limit(i)
}

// Sets i to product of x*y
func (i *Number) Mul(x, y *Number) *Number {
	i.num.Mul(x.num, y.num)
	return i.limit(i)
}

// Sets i to the quotient prodject of x/y
func (i *Number) Div(x, y *Number) *Number {
	i.num.Div(x.num, y.num)
	return i.limit(i)
}

// Sets i to x % y
func (i *Number) Mod(x, y *Number) *Number {
	i.num.Mod(x.num, y.num)
	return i.limit(i)
}

// Sets i to x << s
func (i *Number) Lsh(x *Number, s uint) *Number {
	i.num.Lsh(x.num, s)
	return i.limit(i)
}

// Sets i to x^y
func (i *Number) Pow(x, y *Number) *Number {
	i.num.Exp(x.num, y.num, big.NewInt(0))
	return i.limit(i)
}

// Setters

// Set x to i
func (i *Number) Set(x *Number) *Number {
	i.num.Set(x.num)
	return i.limit(i)
}

// Set x bytes to i
func (i *Number) SetBytes(x []byte) *Number {
	i.num.SetBytes(x)
	return i.limit(i)
}

// Cmp compares x and y and returns:
//
//	-1 if x < y
//	 0 if x == y
//	+1 if x > y
func (i *Number) Cmp(x *Number) int {
	return i.num.Cmp(x.num)
}

// Getters

// Returns the string representation of i
func (i *Number) String() string {
	return i.num.String()
}

// Returns the byte representation of i
func (i *Number) Bytes() []byte {
	return i.num.Bytes()
}

// Uint64 returns the Uint64 representation of x. If x cannot be represented in an int64, the result is undefined.
func (i *Number) Uint64() uint64 {
	return i.num.Uint64()
}

// Int64 returns the int64 representation of x. If x cannot be represented in an int64, the result is undefined.
func (i *Number) Int64() int64 {
	return i.num.Int64()
}

// Returns the signed version of i
func (i *Number) Int256() *Number {
	return Int(0).Set(i)
}

// Returns the unsigned version of i
func (i *Number) Uint256() *Number {
	return Uint(0).Set(i)
}

// Returns the index of the first bit that's set to 1
func (i *Number) FirstBitSet() int {
	for j := 0; j < i.num.BitLen(); j++ {
		if i.num.Bit(j) > 0 {
			return j
		}
	}

	return i.num.BitLen()
}

// Variables

var (
	Zero       = Uint(0)
	One        = Uint(1)
	Two        = Uint(2)
	MaxUint256 = Uint(0).SetBytes(common.Hex2Bytes("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"))

	MinOne = Int(-1)

	// "typedefs"
	Uint = Uint256
	Int  = Int256
)
@@ -1,108 +0,0 @@
// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package number

import (
	"math/big"
	"testing"

	"github.com/ethereum/go-ethereum/common"
)

func TestSet(t *testing.T) {
	a := Uint(0)
	b := Uint(10)
	a.Set(b)
	if a.num.Cmp(b.num) != 0 {
		t.Error("didn't compare", a, b)
	}

	c := Uint(0).SetBytes(common.Hex2Bytes("0a"))
	if c.num.Cmp(big.NewInt(10)) != 0 {
		t.Error("c set bytes failed.")
	}
}

func TestInitialiser(t *testing.T) {
	check := false
	init := NewInitialiser(func(x *Number) *Number {
		check = true
		return x
	})
	a := init(0).Add(init(1), init(2))
	if a.Cmp(init(3)) != 0 {
		t.Error("expected 3. got", a)
	}
	if !check {
		t.Error("expected limiter to be called")
	}
}

func TestGet(t *testing.T) {
	a := Uint(10)
	if a.Uint64() != 10 {
		t.Error("expected to get 10. got", a.Uint64())
	}

	a = Uint(10)
	if a.Int64() != 10 {
		t.Error("expected to get 10. got", a.Int64())
	}
}

func TestCmp(t *testing.T) {
	a := Uint(10)
	b := Uint(10)
	c := Uint(11)

	if a.Cmp(b) != 0 {
		t.Error("a b == 0 failed", a, b)
	}

	if a.Cmp(c) >= 0 {
		t.Error("a c < 0 failed", a, c)
	}

	if c.Cmp(b) <= 0 {
		t.Error("c b > 0 failed", c, b)
	}
}

func TestMaxArith(t *testing.T) {
	a := Uint(0).Add(MaxUint256, One)
	if a.Cmp(Zero) != 0 {
		t.Error("expected max256 + 1 = 0 got", a)
	}

	a = Uint(0).Sub(Uint(0), One)
	if a.Cmp(MaxUint256) != 0 {
		t.Error("expected 0 - 1 = max256 got", a)
	}

	a = Int(0).Sub(Int(0), One)
	if a.Cmp(MinOne) != 0 {
		t.Error("expected 0 - 1 = -1 got", a)
	}
}

func TestConversion(t *testing.T) {
	a := Int(-1)
	b := a.Uint256()
	if b.Cmp(MaxUint256) != 0 {
		t.Error("expected -1 => unsigned to return max. got", b)
	}
}
@@ -30,6 +30,7 @@ func MakeName(name, version string) string {
	return fmt.Sprintf("%s/v%s/%s/%s", name, version, runtime.GOOS, runtime.Version())
}

// FileExist checks if a file exists at filePath.
func FileExist(filePath string) bool {
	_, err := os.Stat(filePath)
	if err != nil && os.IsNotExist(err) {
@@ -39,9 +40,10 @@ func FileExist(filePath string) bool {
	return true
}

func AbsolutePath(Datadir string, filename string) string {
// AbsolutePath returns datadir + filename, or filename if it is absolute.
func AbsolutePath(datadir string, filename string) string {
	if filepath.IsAbs(filename) {
		return filename
	}
	return filepath.Join(Datadir, filename)
	return filepath.Join(datadir, filename)
}
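Behaviour sketch of the renamed helper:

	common.AbsolutePath("/data", "keystore") // "/data/keystore"
	common.AbsolutePath("/data", "/tmp/ks")  // "/tmp/ks"; absolute names pass through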
@@ -29,6 +29,7 @@ import (
	"github.com/ethereum/go-ethereum/crypto/sha3"
)

// Lengths of hashes and addresses in bytes.
const (
	HashLength    = 32
	AddressLength = 20
@@ -42,19 +43,30 @@ var (
// Hash represents the 32 byte Keccak256 hash of arbitrary data.
type Hash [HashLength]byte

// BytesToHash sets b to hash.
// If b is larger than len(h), b will be cropped from the left.
func BytesToHash(b []byte) Hash {
	var h Hash
	h.SetBytes(b)
	return h
}
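The left-cropping convention drops the most significant bytes of oversized input, e.g.:

	b := make([]byte, 40)      // 8 bytes longer than a hash
	b[0] = 0xff                // cropped away
	h := common.BytesToHash(b) // keeps the rightmost 32 bytes only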
func BigToHash(b *big.Int) Hash { return BytesToHash(b.Bytes()) }
func HexToHash(s string) Hash   { return BytesToHash(FromHex(s)) }

// Get the string representation of the underlying hash
func (h Hash) Str() string { return string(h[:]) }
// BigToHash sets byte representation of b to hash.
// If b is larger than len(h), b will be cropped from the left.
func BigToHash(b *big.Int) Hash { return BytesToHash(b.Bytes()) }

// HexToHash sets byte representation of s to hash.
// If b is larger than len(h), b will be cropped from the left.
func HexToHash(s string) Hash { return BytesToHash(FromHex(s)) }

// Bytes gets the byte representation of the underlying hash.
func (h Hash) Bytes() []byte { return h[:] }

// Big converts a hash to a big integer.
func (h Hash) Big() *big.Int { return new(big.Int).SetBytes(h[:]) }
func (h Hash) Hex() string   { return hexutil.Encode(h[:]) }

// Hex converts a hash to a hex string.
func (h Hash) Hex() string { return hexutil.Encode(h[:]) }

// TerminalString implements log.TerminalStringer, formatting a string for console
// output during logging.
@@ -89,7 +101,8 @@ func (h Hash) MarshalText() ([]byte, error) {
	return hexutil.Bytes(h[:]).MarshalText()
}

// Sets the hash to the value of b. If b is larger than len(h), 'b' will be cropped (from the left).
// SetBytes sets the hash to the value of b.
// If b is larger than len(h), b will be cropped from the left.
func (h *Hash) SetBytes(b []byte) {
	if len(b) > len(h) {
		b = b[len(b)-HashLength:]
@@ -98,16 +111,6 @@ func (h *Hash) SetBytes(b []byte) {
	copy(h[HashLength-len(b):], b)
}

// Set string `s` to h. If s is larger than len(h) s will be cropped (from left) to fit.
func (h *Hash) SetString(s string) { h.SetBytes([]byte(s)) }

// Sets h to other
func (h *Hash) Set(other Hash) {
	for i, v := range other {
		h[i] = v
	}
}

// Generate implements testing/quick.Generator.
func (h Hash) Generate(rand *rand.Rand, size int) reflect.Value {
	m := rand.Intn(len(h))
@@ -117,10 +120,6 @@ func (h Hash) Generate(rand *rand.Rand, size int) reflect.Value {
	return reflect.ValueOf(h)
}

func EmptyHash(h Hash) bool {
	return h == Hash{}
}

// UnprefixedHash allows marshaling a Hash without 0x prefix.
type UnprefixedHash Hash

@@ -139,13 +138,21 @@ func (h UnprefixedHash) MarshalText() ([]byte, error) {
// Address represents the 20 byte address of an Ethereum account.
type Address [AddressLength]byte

// BytesToAddress returns Address with value b.
// If b is larger than len(h), b will be cropped from the left.
func BytesToAddress(b []byte) Address {
	var a Address
	a.SetBytes(b)
	return a
}

// BigToAddress returns Address with byte values of b.
// If b is larger than len(h), b will be cropped from the left.
func BigToAddress(b *big.Int) Address { return BytesToAddress(b.Bytes()) }
func HexToAddress(s string) Address   { return BytesToAddress(FromHex(s)) }

// HexToAddress returns Address with byte values of s.
// If s is larger than len(h), s will be cropped from the left.
func HexToAddress(s string) Address { return BytesToAddress(FromHex(s)) }

// IsHexAddress verifies whether a string can represent a valid hex-encoded
// Ethereum address or not.
@@ -156,11 +163,14 @@ func IsHexAddress(s string) bool {
	return len(s) == 2*AddressLength && isHex(s)
}

// Get the string representation of the underlying address
func (a Address) Str() string { return string(a[:]) }
// Bytes gets the string representation of the underlying address.
func (a Address) Bytes() []byte { return a[:] }

// Big converts an address to a big integer.
func (a Address) Big() *big.Int { return new(big.Int).SetBytes(a[:]) }
func (a Address) Hash() Hash    { return BytesToHash(a[:]) }

// Hash converts an address to a hash by left-padding it with zeros.
func (a Address) Hash() Hash { return BytesToHash(a[:]) }

// Hex returns an EIP55-compliant hex string representation of the address.
func (a Address) Hex() string {
@@ -184,7 +194,7 @@ func (a Address) Hex() string {
	return "0x" + string(result)
}

// String implements the stringer interface and is used also by the logger.
// String implements fmt.Stringer.
func (a Address) String() string {
	return a.Hex()
}
@@ -195,7 +205,8 @@ func (a Address) Format(s fmt.State, c rune) {
	fmt.Fprintf(s, "%"+string(c), a[:])
}

// Sets the address to the value of b. If b is larger than len(a) it will panic
// SetBytes sets the address to the value of b.
// If b is larger than len(a) it will panic.
func (a *Address) SetBytes(b []byte) {
	if len(b) > len(a) {
		b = b[len(b)-AddressLength:]
@@ -203,16 +214,6 @@ func (a *Address) SetBytes(b []byte) {
	copy(a[AddressLength-len(b):], b)
}

// Set string `s` to a. If s is larger than len(a) it will panic
func (a *Address) SetString(s string) { a.SetBytes([]byte(s)) }

// Sets a to other
func (a *Address) Set(other Address) {
	for i, v := range other {
		a[i] = v
	}
}

// MarshalText returns the hex representation of a.
func (a Address) MarshalText() ([]byte, error) {
	return hexutil.Bytes(a[:]).MarshalText()
@@ -228,7 +229,7 @@ func (a *Address) UnmarshalJSON(input []byte) error {
	return hexutil.UnmarshalFixedJSON(addressT, input, a[:])
}

// UnprefixedHash allows marshaling an Address without 0x prefix.
// UnprefixedAddress allows marshaling an Address without 0x prefix.
type UnprefixedAddress Address

// UnmarshalText decodes the address from hex. The 0x prefix is optional.

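With Address.Hex now EIP55-compliant, round-tripping a lowercase address yields the checksummed mixed case; the vector below is the first example from the EIP55 spec, quoted from memory:

	a := common.HexToAddress("0x5aaeb6053f3e94c9b9a09f33669435e7ef1beaed")
	fmt.Println(a.Hex()) // 0x5aAeb6053F3E94C9b9A09f33669435E7Ef1BeAed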
@@ -1,64 +0,0 @@
// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// +build none
//sed -e 's/_N_/Hash/g' -e 's/_S_/32/g' -e '1d' types_template.go | gofmt -w hash.go

package common

import "math/big"

type _N_ [_S_]byte

func BytesTo_N_(b []byte) _N_ {
	var h _N_
	h.SetBytes(b)
	return h
}
func StringTo_N_(s string) _N_ { return BytesTo_N_([]byte(s)) }
func BigTo_N_(b *big.Int) _N_  { return BytesTo_N_(b.Bytes()) }
func HexTo_N_(s string) _N_    { return BytesTo_N_(FromHex(s)) }

// Don't use the default 'String' method in case we want to overwrite

// Get the string representation of the underlying hash
func (h _N_) Str() string   { return string(h[:]) }
func (h _N_) Bytes() []byte { return h[:] }
func (h _N_) Big() *big.Int { return new(big.Int).SetBytes(h[:]) }
func (h _N_) Hex() string   { return "0x" + Bytes2Hex(h[:]) }

// Sets the hash to the value of b. If b is larger than len(h) it will panic
func (h *_N_) SetBytes(b []byte) {
	// Use the right most bytes
	if len(b) > len(h) {
		b = b[len(b)-_S_:]
	}

	// Reverse the loop
	for i := len(b) - 1; i >= 0; i-- {
		h[_S_-len(b)+i] = b[i]
	}
}

// Set string `s` to h. If s is larger than len(h) it will panic
func (h *_N_) SetString(s string) { h.SetBytes([]byte(s)) }

// Sets h to other
func (h *_N_) Set(other _N_) {
	for i, v := range other {
		h[i] = v
	}
}
@@ -383,7 +383,7 @@ func (c *Clique) snapshot(chain consensus.ChainReader, number uint64, hash commo
		// If an on-disk checkpoint snapshot can be found, use that
		if number%checkpointInterval == 0 {
			if s, err := loadSnapshot(c.config, c.signatures, c.db, hash); err == nil {
				log.Trace("Loaded voting snapshot form disk", "number", number, "hash", hash)
				log.Trace("Loaded voting snapshot from disk", "number", number, "hash", hash)
				snap = s
				break
			}

@@ -94,14 +94,25 @@ func calcDatasetSize(epoch int) uint64 {
// reused between hash runs instead of requiring new ones to be created.
type hasher func(dest []byte, data []byte)

// makeHasher creates a repetitive hasher, allowing the same hash data structures
// to be reused between hash runs instead of requiring new ones to be created.
// The returned function is not thread safe!
// makeHasher creates a repetitive hasher, allowing the same hash data structures to
// be reused between hash runs instead of requiring new ones to be created. The returned
// function is not thread safe!
func makeHasher(h hash.Hash) hasher {
	// sha3.state supports Read to get the sum, use it to avoid the overhead of Sum.
	// Read alters the state but we reset the hash before every operation.
	type readerHash interface {
		hash.Hash
		Read([]byte) (int, error)
	}
	rh, ok := h.(readerHash)
	if !ok {
		panic("can't find Read method on hash")
	}
	outputLen := rh.Size()
	return func(dest []byte, data []byte) {
		h.Write(data)
		h.Sum(dest[:0])
		h.Reset()
		rh.Reset()
		rh.Write(data)
		rh.Read(dest[:outputLen])
	}
}
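Read avoids the allocation in Sum's append path. A usage sketch, assuming the Keccak-256 constructor from the sha3 package referenced in the comment (whose state implements Read):

	hasher := makeHasher(sha3.NewKeccak256())
	dest := make([]byte, 32)
	hasher(dest, []byte("first"))
	hasher(dest, []byte("second")) // the state is reset and reused; not safe concurrently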
@@ -389,7 +389,7 @@ type Config struct {
	PowMode Mode
}

// Ethash is a consensus engine based on proot-of-work implementing the ethash
// Ethash is a consensus engine based on proof-of-work implementing the ethash
// algorithm.
type Ethash struct {
	config Config

@@ -271,7 +271,7 @@ func (b *bridge) SleepBlocks(call otto.FunctionCall) (response otto.Value) {
}

type jsonrpcCall struct {
	Id     int64
	ID     int64
	Method string
	Params []interface{}
}
@@ -304,7 +304,7 @@ func (b *bridge) Send(call otto.FunctionCall) (response otto.Value) {
	resps, _ := call.Otto.Object("new Array()")
	for _, req := range reqs {
		resp, _ := call.Otto.Object(`({"jsonrpc":"2.0"})`)
		resp.Set("id", req.Id)
		resp.Set("id", req.ID)
		var result json.RawMessage
		err = b.client.Call(&result, req.Method, req.Params...)
		switch err := err.(type) {

@@ -60,7 +60,7 @@ type Config struct {
	Preload []string // Absolute paths to JavaScript files to preload
}

// Console is a JavaScript interpreted runtime environment. It is a fully fleged
// Console is a JavaScript interpreted runtime environment. It is a fully fledged
// JavaScript console attached to a running node via an external or in-process RPC
// client.
type Console struct {
@@ -73,6 +73,8 @@ type Console struct {
	printer io.Writer // Output writer to serialize any display strings to
}

// New initializes a JavaScript interpreted runtime environment and sets defaults
// with the config struct.
func New(config Config) (*Console, error) {
	// Handle unset config values gracefully
	if config.Prompter == nil {

@@ -95,7 +95,7 @@ func ensParentNode(name string) (common.Hash, common.Hash) {
	}
}

func ensNode(name string) common.Hash {
func EnsNode(name string) common.Hash {
	parentNode, parentLabel := ensParentNode(name)
	return crypto.Keccak256Hash(parentNode[:], parentLabel[:])
}
@@ -136,7 +136,7 @@ func (self *ENS) getRegistrar(node [32]byte) (*contract.FIFSRegistrarSession, er

// Resolve is a non-transactional call that returns the content hash associated with a name.
func (self *ENS) Resolve(name string) (common.Hash, error) {
	node := ensNode(name)
	node := EnsNode(name)

	resolver, err := self.getResolver(node)
	if err != nil {
@@ -165,7 +165,7 @@ func (self *ENS) Register(name string) (*types.Transaction, error) {
// SetContentHash sets the content hash associated with a name. Only works if the caller
// owns the name, and the associated resolver implements a `setContent` function.
func (self *ENS) SetContentHash(name string, hash common.Hash) (*types.Transaction, error) {
	node := ensNode(name)
	node := EnsNode(name)

	resolver, err := self.getResolver(node)
	if err != nil {

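Exporting EnsNode lets callers outside the package compute the recursive namehash directly; a sketch assuming this package is imported as ens:

	node := ens.EnsNode("example.eth") // Keccak256-based namehash of the full name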
@@ -55,7 +55,7 @@ func TestENS(t *testing.T) {
	if err != nil {
		t.Fatalf("can't deploy resolver: %v", err)
	}
	if _, err := ens.SetResolver(ensNode(name), resolverAddr); err != nil {
	if _, err := ens.SetResolver(EnsNode(name), resolverAddr); err != nil {
		t.Fatalf("can't set resolver: %v", err)
	}
	contractBackend.Commit()

@@ -51,7 +51,7 @@ func NewCompiler(debug bool) *Compiler {
// the compiler.
//
// feed is the first pass in the compile stage as it
// collect the used labels in the program and keeps a
// collects the used labels in the program and keeps a
// program counter which is used to determine the locations
// of the jump dests. The labels can than be used in the
// second stage to push labels and determine the right
@@ -120,7 +120,7 @@ func (c *Compiler) next() token {
	return token
}

// compile line compiles a single line instruction e.g.
// compileLine compiles a single line instruction e.g.
// "push 1", "jump @label".
func (c *Compiler) compileLine() error {
	n := c.next()

@@ -242,7 +242,7 @@ func lexLabel(l *lexer) stateFn {
}

// lexInsideString lexes the inside of a string until
// until the state function finds the closing quote.
// the state function finds the closing quote.
// It returns the lex text state function.
func lexInsideString(l *lexer) stateFn {
	if l.acceptRunUntil('"') {

@@ -269,8 +269,8 @@ func (bc *BlockChain) SetHead(head uint64) error {
	defer bc.mu.Unlock()

	// Rewind the header chain, deleting all block bodies until then
	delFn := func(hash common.Hash, num uint64) {
		rawdb.DeleteBody(bc.db, hash, num)
	delFn := func(db rawdb.DatabaseDeleter, hash common.Hash, num uint64) {
		rawdb.DeleteBody(db, hash, num)
	}
	bc.hc.SetHead(head, delFn)
	currentHeader := bc.hc.CurrentHeader()
@@ -672,9 +672,9 @@ func (bc *BlockChain) Stop() {
		}
	}
	for !bc.triegc.Empty() {
		triedb.Dereference(bc.triegc.PopItem().(common.Hash), common.Hash{})
		triedb.Dereference(bc.triegc.PopItem().(common.Hash))
	}
	if size := triedb.Size(); size != 0 {
	if size, _ := triedb.Size(); size != 0 {
		log.Error("Dangling trie nodes after full cleanup")
	}
}
@@ -916,33 +916,29 @@ func (bc *BlockChain) WriteBlockWithState(block *types.Block, receipts []*types.
		bc.triegc.Push(root, -float32(block.NumberU64()))

		if current := block.NumberU64(); current > triesInMemory {
			// If we exceeded our memory allowance, flush matured singleton nodes to disk
			var (
				nodes, imgs = triedb.Size()
				limit       = common.StorageSize(bc.cacheConfig.TrieNodeLimit) * 1024 * 1024
			)
			if nodes > limit || imgs > 4*1024*1024 {
				triedb.Cap(limit - ethdb.IdealBatchSize)
			}
			// Find the next state trie we need to commit
			header := bc.GetHeaderByNumber(current - triesInMemory)
			chosen := header.Number.Uint64()

			// Only write to disk if we exceeded our memory allowance *and* also have at
			// least a given number of tries gapped.
			var (
				size  = triedb.Size()
				limit = common.StorageSize(bc.cacheConfig.TrieNodeLimit) * 1024 * 1024
			)
			if size > limit || bc.gcproc > bc.cacheConfig.TrieTimeLimit {
			// If we exceeded out time allowance, flush an entire trie to disk
			if bc.gcproc > bc.cacheConfig.TrieTimeLimit {
				// If we're exceeding limits but haven't reached a large enough memory gap,
				// warn the user that the system is becoming unstable.
				if chosen < lastWrite+triesInMemory {
					switch {
					case size >= 2*limit:
						log.Warn("State memory usage too high, committing", "size", size, "limit", limit, "optimum", float64(chosen-lastWrite)/triesInMemory)
					case bc.gcproc >= 2*bc.cacheConfig.TrieTimeLimit:
						log.Info("State in memory for too long, committing", "time", bc.gcproc, "allowance", bc.cacheConfig.TrieTimeLimit, "optimum", float64(chosen-lastWrite)/triesInMemory)
					}
				}
				// If optimum or critical limits reached, write to disk
				if chosen >= lastWrite+triesInMemory || size >= 2*limit || bc.gcproc >= 2*bc.cacheConfig.TrieTimeLimit {
					triedb.Commit(header.Root, true)
					lastWrite = chosen
					bc.gcproc = 0
				if chosen < lastWrite+triesInMemory && bc.gcproc >= 2*bc.cacheConfig.TrieTimeLimit {
					log.Info("State in memory for too long, committing", "time", bc.gcproc, "allowance", bc.cacheConfig.TrieTimeLimit, "optimum", float64(chosen-lastWrite)/triesInMemory)
				}
				// Flush an entire trie and restart the counters
				triedb.Commit(header.Root, true)
				lastWrite = chosen
				bc.gcproc = 0
			}
			// Garbage collect anything below our required write retention
			for !bc.triegc.Empty() {
@@ -951,7 +947,7 @@ func (bc *BlockChain) WriteBlockWithState(block *types.Block, receipts []*types.
					bc.triegc.Push(root, number)
					break
				}
				triedb.Dereference(root.(common.Hash), common.Hash{})
				triedb.Dereference(root.(common.Hash))
			}
		}
	}
@@ -1009,6 +1005,10 @@ func (bc *BlockChain) InsertChain(chain types.Blocks) (int, error) {
// only reason this method exists as a separate one is to make locking cleaner
// with deferred statements.
func (bc *BlockChain) insertChain(chain types.Blocks) (int, []interface{}, []*types.Log, error) {
	// Sanity check that we have something meaningful to import
	if len(chain) == 0 {
		return 0, nil, nil, nil
	}
	// Do a sanity check that the provided chain is actually ordered and linked
	for i := 1; i < len(chain); i++ {
		if chain[i].NumberU64() != chain[i-1].NumberU64()+1 || chain[i].ParentHash() != chain[i-1].Hash() {
@@ -1047,6 +1047,9 @@ func (bc *BlockChain) insertChain(chain types.Blocks) (int, []interface{}, []*ty
	abort, results := bc.engine.VerifyHeaders(bc, headers, seals)
	defer close(abort)

	// Start a parallel signature recovery (signer will fluke on fork transition, minimal perf loss)
	senderCacher.recoverFromBlocks(types.MakeSigner(bc.chainConfig, chain[0].Number()), chain)

	// Iterate over the blocks and insert when the verifier permits
	for i, block := range chain {
		// If the chain is terminating, stop processing blocks
@@ -1181,7 +1184,9 @@ func (bc *BlockChain) insertChain(chain types.Blocks) (int, []interface{}, []*ty
		}
		stats.processed++
		stats.usedGas += usedGas
		stats.report(chain, i, bc.stateCache.TrieDB().Size())

		cache, _ := bc.stateCache.TrieDB().Size()
		stats.report(chain, i, cache)
	}
	// Append a single chain head event if we've progressed the chain
	if lastCanon != nil && bc.CurrentBlock().Hash() == lastCanon.Hash() {
@@ -1335,9 +1340,12 @@ func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
	diff := types.TxDifference(deletedTxs, addedTxs)
	// When transactions get deleted from the database that means the
	// receipts that were created in the fork must also be deleted
	batch := bc.db.NewBatch()
	for _, tx := range diff {
		rawdb.DeleteTxLookupEntry(bc.db, tx.Hash())
		rawdb.DeleteTxLookupEntry(batch, tx.Hash())
	}
	batch.Write()

	if len(deletedLogs) > 0 {
		go bc.rmLogsFeed.Send(RemovedLogsEvent{deletedLogs})
	}
@@ -1387,27 +1395,21 @@ func (bc *BlockChain) update() {
	}
}

// BadBlockArgs represents the entries in the list returned when bad blocks are queried.
type BadBlockArgs struct {
	Hash   common.Hash   `json:"hash"`
	Header *types.Header `json:"header"`
}

// BadBlocks returns a list of the last 'bad blocks' that the client has seen on the network
func (bc *BlockChain) BadBlocks() ([]BadBlockArgs, error) {
	headers := make([]BadBlockArgs, 0, bc.badBlocks.Len())
func (bc *BlockChain) BadBlocks() []*types.Block {
	blocks := make([]*types.Block, 0, bc.badBlocks.Len())
	for _, hash := range bc.badBlocks.Keys() {
		if hdr, exist := bc.badBlocks.Peek(hash); exist {
			header := hdr.(*types.Header)
			headers = append(headers, BadBlockArgs{header.Hash(), header})
		if blk, exist := bc.badBlocks.Peek(hash); exist {
			block := blk.(*types.Block)
			blocks = append(blocks, block)
		}
	}
	return headers, nil
	return blocks
}

// addBadBlock adds a bad block to the bad-block LRU cache
func (bc *BlockChain) addBadBlock(block *types.Block) {
	bc.badBlocks.Add(block.Header().Hash(), block.Header())
	bc.badBlocks.Add(block.Hash(), block)
}

// reportBlock logs a bad block error.
@@ -1525,6 +1527,18 @@ func (bc *BlockChain) GetBlockHashesFromHash(hash common.Hash, max uint64) []com
	return bc.hc.GetBlockHashesFromHash(hash, max)
}

// GetAncestor retrieves the Nth ancestor of a given block. It assumes that either the given block or
// a close ancestor of it is canonical. maxNonCanonical points to a downwards counter limiting the
// number of blocks to be individually checked before we reach the canonical chain.
//
// Note: ancestor == 0 returns the same block, 1 returns its parent and so on.
func (bc *BlockChain) GetAncestor(hash common.Hash, number, ancestor uint64, maxNonCanonical *uint64) (common.Hash, uint64) {
	bc.chainmu.Lock()
	defer bc.chainmu.Unlock()

	return bc.hc.GetAncestor(hash, number, ancestor, maxNonCanonical)
}

// GetHeaderByNumber retrieves a block header from the database by number,
// caching it (associated with its hash) if found.
func (bc *BlockChain) GetHeaderByNumber(number uint64) *types.Header {

@@ -25,6 +25,7 @@ import (
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/consensus"
	"github.com/ethereum/go-ethereum/consensus/ethash"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/core/state"
@@ -35,6 +36,39 @@ import (
	"github.com/ethereum/go-ethereum/params"
)

// So we can deterministically seed different blockchains
var (
	canonicalSeed = 1
	forkSeed      = 2
)

// newCanonical creates a chain database, and injects a deterministic canonical
// chain. Depending on the full flag, if creates either a full block chain or a
// header only chain.
func newCanonical(engine consensus.Engine, n int, full bool) (ethdb.Database, *BlockChain, error) {
	var (
		db      = ethdb.NewMemDatabase()
		genesis = new(Genesis).MustCommit(db)
	)

	// Initialize a fresh chain with only a genesis block
	blockchain, _ := NewBlockChain(db, nil, params.AllEthashProtocolChanges, engine, vm.Config{})
	// Create and inject the requested chain
	if n == 0 {
		return db, blockchain, nil
	}
	if full {
		// Full block-chain requested
		blocks := makeBlockChain(genesis, n, engine, db, canonicalSeed)
		_, err := blockchain.InsertChain(blocks)
		return db, blockchain, err
	}
	// Header-only chain requested
	headers := makeHeaderChain(genesis.Header(), n, engine, db, canonicalSeed)
	_, err := blockchain.InsertHeaderChain(headers, 1)
	return db, blockchain, err
}

// Test fork of length N starting from block i
func testFork(t *testing.T, blockchain *BlockChain, i, n int, full bool, comparator func(td1, td2 *big.Int)) {
	// Copy old chain up to #i into a new db
@@ -578,7 +612,7 @@ func TestFastVsFullChains(t *testing.T) {
			Alloc:  GenesisAlloc{address: {Balance: funds}},
		}
		genesis = gspec.MustCommit(gendb)
		signer  = types.NewEIP155Signer(gspec.Config.ChainId)
		signer  = types.NewEIP155Signer(gspec.Config.ChainID)
	)
	blocks, receipts := GenerateChain(gspec.Config, genesis, ethash.NewFaker(), gendb, 1024, func(i int, block *BlockGen) {
		block.SetCoinbase(common.Address{0x00})
@@ -753,7 +787,7 @@ func TestChainTxReorgs(t *testing.T) {
			},
		}
		genesis = gspec.MustCommit(db)
		signer  = types.NewEIP155Signer(gspec.Config.ChainId)
		signer  = types.NewEIP155Signer(gspec.Config.ChainID)
	)

	// Create two transactions shared between the chains:
@@ -859,7 +893,7 @@ func TestLogReorgs(t *testing.T) {
		code    = common.Hex2Bytes("60606040525b7f24ec1d3ff24c2f6ff210738839dbc339cd45a5294d85c79361016243157aae7b60405180905060405180910390a15b600a8060416000396000f360606040526008565b00")
		gspec   = &Genesis{Config: params.TestChainConfig, Alloc: GenesisAlloc{addr1: {Balance: big.NewInt(10000000000000)}}}
		genesis = gspec.MustCommit(db)
		signer  = types.NewEIP155Signer(gspec.Config.ChainId)
		signer  = types.NewEIP155Signer(gspec.Config.ChainID)
	)

	blockchain, _ := NewBlockChain(db, nil, gspec.Config, ethash.NewFaker(), vm.Config{})
@@ -906,7 +940,7 @@ func TestReorgSideEvent(t *testing.T) {
			Alloc:  GenesisAlloc{addr1: {Balance: big.NewInt(10000000000000)}},
		}
		genesis = gspec.MustCommit(db)
		signer  = types.NewEIP155Signer(gspec.Config.ChainId)
		signer  = types.NewEIP155Signer(gspec.Config.ChainID)
	)

	blockchain, _ := NewBlockChain(db, nil, gspec.Config, ethash.NewFaker(), vm.Config{})
@@ -1032,7 +1066,7 @@ func TestEIP155Transition(t *testing.T) {
		funds      = big.NewInt(1000000000)
		deleteAddr = common.Address{1}
		gspec      = &Genesis{
			Config: &params.ChainConfig{ChainId: big.NewInt(1), EIP155Block: big.NewInt(2), HomesteadBlock: new(big.Int)},
			Config: &params.ChainConfig{ChainID: big.NewInt(1), EIP155Block: big.NewInt(2), HomesteadBlock: new(big.Int)},
			Alloc:  GenesisAlloc{address: {Balance: funds}, deleteAddr: {Balance: new(big.Int)}},
		}
		genesis = gspec.MustCommit(db)
@@ -1063,7 +1097,7 @@ func TestEIP155Transition(t *testing.T) {
		}
		block.AddTx(tx)

		tx, err = basicTx(types.NewEIP155Signer(gspec.Config.ChainId))
		tx, err = basicTx(types.NewEIP155Signer(gspec.Config.ChainID))
		if err != nil {
			t.Fatal(err)
		}
@@ -1075,7 +1109,7 @@ func TestEIP155Transition(t *testing.T) {
		}
		block.AddTx(tx)

		tx, err = basicTx(types.NewEIP155Signer(gspec.Config.ChainId))
		tx, err = basicTx(types.NewEIP155Signer(gspec.Config.ChainID))
		if err != nil {
			t.Fatal(err)
		}
@@ -1103,7 +1137,7 @@ func TestEIP155Transition(t *testing.T) {
	}

	// generate an invalid chain id transaction
	config := &params.ChainConfig{ChainId: big.NewInt(2), EIP155Block: big.NewInt(2), HomesteadBlock: new(big.Int)}
	config := &params.ChainConfig{ChainID: big.NewInt(2), EIP155Block: big.NewInt(2), HomesteadBlock: new(big.Int)}
	blocks, _ = GenerateChain(config, blocks[len(blocks)-1], ethash.NewFaker(), db, 4, func(i int, block *BlockGen) {
		var (
			tx *types.Transaction
@@ -1137,7 +1171,7 @@ func TestEIP161AccountRemoval(t *testing.T) {
		theAddr = common.Address{1}
		gspec   = &Genesis{
			Config: &params.ChainConfig{
				ChainId:        big.NewInt(1),
				ChainID:        big.NewInt(1),
				HomesteadBlock: new(big.Int),
				EIP155Block:    new(big.Int),
				EIP158Block:    big.NewInt(2),
@@ -1153,7 +1187,7 @@ func TestEIP161AccountRemoval(t *testing.T) {
		var (
			tx     *types.Transaction
			err    error
			signer = types.NewEIP155Signer(gspec.Config.ChainId)
			signer = types.NewEIP155Signer(gspec.Config.ChainID)
		)
		switch i {
		case 0:
@@ -1279,8 +1313,8 @@ func TestTrieForkGC(t *testing.T) {
	}
	// Dereference all the recent tries and ensure no past trie is left in
	for i := 0; i < triesInMemory; i++ {
		chain.stateCache.TrieDB().Dereference(blocks[len(blocks)-1-i].Root(), common.Hash{})
		chain.stateCache.TrieDB().Dereference(forks[len(blocks)-1-i].Root(), common.Hash{})
		chain.stateCache.TrieDB().Dereference(blocks[len(blocks)-1-i].Root())
		chain.stateCache.TrieDB().Dereference(forks[len(blocks)-1-i].Root())
	}
	if len(chain.stateCache.TrieDB().Nodes()) > 0 {
		t.Fatalf("stale tries still alive after garbase collection")

@@ -30,12 +30,6 @@ import (
	"github.com/ethereum/go-ethereum/params"
)

// So we can deterministically seed different blockchains
var (
	canonicalSeed = 1
	forkSeed      = 2
)

// BlockGen creates blocks for testing.
// See GenerateChain for a detailed explanation.
type BlockGen struct {
@@ -252,33 +246,6 @@ func makeHeader(chain consensus.ChainReader, parent *types.Block, state *state.S
	}
}

// newCanonical creates a chain database, and injects a deterministic canonical
// chain. Depending on the full flag, if creates either a full block chain or a
// header only chain.
func newCanonical(engine consensus.Engine, n int, full bool) (ethdb.Database, *BlockChain, error) {
	var (
		db      = ethdb.NewMemDatabase()
		genesis = new(Genesis).MustCommit(db)
	)

	// Initialize a fresh chain with only a genesis block
	blockchain, _ := NewBlockChain(db, nil, params.AllEthashProtocolChanges, engine, vm.Config{})
	// Create and inject the requested chain
	if n == 0 {
		return db, blockchain, nil
	}
	if full {
		// Full block-chain requested
		blocks := makeBlockChain(genesis, n, engine, db, canonicalSeed)
		_, err := blockchain.InsertChain(blocks)
		return db, blockchain, err
	}
	// Header-only chain requested
	headers := makeHeaderChain(genesis.Header(), n, engine, db, canonicalSeed)
	_, err := blockchain.InsertHeaderChain(headers, 1)
	return db, blockchain, err
}

// makeHeaderChain creates a deterministic chain of headers rooted at parent.
func makeHeaderChain(parent *types.Header, n int, engine consensus.Engine, db ethdb.Database, seed int) []*types.Header {
	blocks := makeBlockChain(types.NewBlockWithHeader(parent), n, engine, db, seed)

@@ -21,8 +21,8 @@ import (
	"github.com/ethereum/go-ethereum/core/types"
)

// TxPreEvent is posted when a transaction enters the transaction pool.
type TxPreEvent struct{ Tx *types.Transaction }
// NewTxsEvent is posted when a batch of transactions enter the transaction pool.
type NewTxsEvent struct{ Txs []*types.Transaction }
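Consumers now receive transactions in batches. A hedged subscription sketch, assuming the accompanying SubscribeNewTxsEvent hook on the transaction pool (not shown in this diff):

	ch := make(chan core.NewTxsEvent, 16)
	sub := txpool.SubscribeNewTxsEvent(ch)
	defer sub.Unsubscribe()
	for ev := range ch {
		log.Info("new transactions", "count", len(ev.Txs))
	}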

// PendingLogsEvent is posted pre mining and notifies of pending logs.
type PendingLogsEvent struct {
@@ -35,9 +35,6 @@ type PendingStateEvent struct{}
// NewMinedBlockEvent is posted when a block has been imported.
type NewMinedBlockEvent struct{ Block *types.Block }

// RemovedTransactionEvent is posted when a reorg happens
type RemovedTransactionEvent struct{ Txs types.Transactions }

// RemovedLogsEvent is posted when a reorg happens
type RemovedLogsEvent struct{ Logs []*types.Log }

|
@@ -156,13 +156,16 @@ func (hc *HeaderChain) WriteHeader(header *types.Header) (status WriteStatus, er
|
||||
// Please refer to http://www.cs.cornell.edu/~ie53/publications/btcProcFC.pdf
|
||||
if externTd.Cmp(localTd) > 0 || (externTd.Cmp(localTd) == 0 && mrand.Float64() < 0.5) {
|
||||
// Delete any canonical number assignments above the new head
|
||||
batch := hc.chainDb.NewBatch()
|
||||
for i := number + 1; ; i++ {
|
||||
hash := rawdb.ReadCanonicalHash(hc.chainDb, i)
|
||||
if hash == (common.Hash{}) {
|
||||
break
|
||||
}
|
||||
rawdb.DeleteCanonicalHash(hc.chainDb, i)
|
||||
rawdb.DeleteCanonicalHash(batch, i)
|
||||
}
|
||||
batch.Write()
|
||||
|
||||
// Overwrite any stale canonical number assignments
|
||||
var (
|
||||
headHash = header.ParentHash
|
||||
@@ -307,6 +310,43 @@ func (hc *HeaderChain) GetBlockHashesFromHash(hash common.Hash, max uint64) []co
 	return chain
 }
 
+// GetAncestor retrieves the Nth ancestor of a given block. It assumes that either the given block or
+// a close ancestor of it is canonical. maxNonCanonical points to a downwards counter limiting the
+// number of blocks to be individually checked before we reach the canonical chain.
+//
+// Note: ancestor == 0 returns the same block, 1 returns its parent and so on.
+func (hc *HeaderChain) GetAncestor(hash common.Hash, number, ancestor uint64, maxNonCanonical *uint64) (common.Hash, uint64) {
+	if ancestor > number {
+		return common.Hash{}, 0
+	}
+	if ancestor == 1 {
+		// in this case it is cheaper to just read the header
+		if header := hc.GetHeader(hash, number); header != nil {
+			return header.ParentHash, number - 1
+		} else {
+			return common.Hash{}, 0
+		}
+	}
+	for ancestor != 0 {
+		if rawdb.ReadCanonicalHash(hc.chainDb, number) == hash {
+			number -= ancestor
+			return rawdb.ReadCanonicalHash(hc.chainDb, number), number
+		}
+		if *maxNonCanonical == 0 {
+			return common.Hash{}, 0
+		}
+		*maxNonCanonical--
+		ancestor--
+		header := hc.GetHeader(hash, number)
+		if header == nil {
+			return common.Hash{}, 0
+		}
+		hash = header.ParentHash
+		number--
+	}
+	return hash, number
+}
+
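To make the contract concrete: ancestor == 0 is the block itself, and the walk only pays per-header reads while off the canonical chain. A hypothetical caller resolving the hash 128 blocks below a head header might look like this (head and the bound of 16 are assumptions, not part of the diff):

	// Allow at most 16 individually loaded non-canonical headers before
	// giving up; once on the canonical chain the lookup is a single read.
	maxNonCanonical := uint64(16)
	hash, num := hc.GetAncestor(head.Hash(), head.Number.Uint64(), 128, &maxNonCanonical)
	if hash == (common.Hash{}) {
		// Chain shorter than 128 blocks, or we strayed too far off-canonical.
	}
	fmt.Println("ancestor", hash, "at height", num)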
 // GetTd retrieves a block's total difficulty in the canonical chain from the
 // database by hash and number, caching it if found.
 func (hc *HeaderChain) GetTd(hash common.Hash, number uint64) *big.Int {
@@ -401,7 +441,7 @@ func (hc *HeaderChain) SetCurrentHeader(head *types.Header) {
 
 // DeleteCallback is a callback function that is called by SetHead before
 // each header is deleted.
-type DeleteCallback func(common.Hash, uint64)
+type DeleteCallback func(rawdb.DatabaseDeleter, common.Hash, uint64)
 
 // SetHead rewinds the local chain to a new head. Everything above the new head
 // will be deleted and the new one set.
@@ -411,22 +451,24 @@ func (hc *HeaderChain) SetHead(head uint64, delFn DeleteCallback) {
 	if hdr := hc.CurrentHeader(); hdr != nil {
 		height = hdr.Number.Uint64()
 	}
 
+	batch := hc.chainDb.NewBatch()
 	for hdr := hc.CurrentHeader(); hdr != nil && hdr.Number.Uint64() > head; hdr = hc.CurrentHeader() {
 		hash := hdr.Hash()
 		num := hdr.Number.Uint64()
 		if delFn != nil {
-			delFn(hash, num)
+			delFn(batch, hash, num)
 		}
-		rawdb.DeleteHeader(hc.chainDb, hash, num)
-		rawdb.DeleteTd(hc.chainDb, hash, num)
+		rawdb.DeleteHeader(batch, hash, num)
+		rawdb.DeleteTd(batch, hash, num)
 
 		hc.currentHeader.Store(hc.GetHeader(hdr.ParentHash, hdr.Number.Uint64()-1))
 	}
 	// Roll back the canonical chain numbering
 	for i := height; i > head; i-- {
-		rawdb.DeleteCanonicalHash(hc.chainDb, i)
+		rawdb.DeleteCanonicalHash(batch, i)
 	}
+	batch.Write()
+
 	// Clear out any stale content from the caches
 	hc.headerCache.Purge()
 	hc.tdCache.Purge()
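The rewrite above funnels every delete through one ethdb batch and flushes once, instead of issuing a random write per key. The same pattern applies to any caller of the rawdb deleters; a small sketch (deleteCanonicalRange is a hypothetical helper, not part of the diff):

	// Delete a contiguous range of canonical-hash mappings atomically.
	func deleteCanonicalRange(db ethdb.Database, from, to uint64) error {
		batch := db.NewBatch()
		for i := from; i <= to; i++ {
			rawdb.DeleteCanonicalHash(batch, i) // batch satisfies DatabaseDeleter
		}
		return batch.Write() // single flush to disk
	}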
@@ -29,7 +29,7 @@ import (
 
 // ReadCanonicalHash retrieves the hash assigned to a canonical block number.
 func ReadCanonicalHash(db DatabaseReader, number uint64) common.Hash {
-	data, _ := db.Get(append(append(headerPrefix, encodeBlockNumber(number)...), headerHashSuffix...))
+	data, _ := db.Get(headerHashKey(number))
 	if len(data) == 0 {
 		return common.Hash{}
 	}
@@ -38,22 +38,21 @@ func ReadCanonicalHash(db DatabaseReader, number uint64) common.Hash {
 
 // WriteCanonicalHash stores the hash assigned to a canonical block number.
 func WriteCanonicalHash(db DatabaseWriter, hash common.Hash, number uint64) {
-	key := append(append(headerPrefix, encodeBlockNumber(number)...), headerHashSuffix...)
-	if err := db.Put(key, hash.Bytes()); err != nil {
+	if err := db.Put(headerHashKey(number), hash.Bytes()); err != nil {
 		log.Crit("Failed to store number to hash mapping", "err", err)
 	}
 }
 
 // DeleteCanonicalHash removes the number to hash canonical mapping.
 func DeleteCanonicalHash(db DatabaseDeleter, number uint64) {
-	if err := db.Delete(append(append(headerPrefix, encodeBlockNumber(number)...), headerHashSuffix...)); err != nil {
+	if err := db.Delete(headerHashKey(number)); err != nil {
 		log.Crit("Failed to delete number to hash mapping", "err", err)
 	}
 }
 
 // ReadHeaderNumber returns the header number assigned to a hash.
 func ReadHeaderNumber(db DatabaseReader, hash common.Hash) *uint64 {
-	data, _ := db.Get(append(headerNumberPrefix, hash.Bytes()...))
+	data, _ := db.Get(headerNumberKey(hash))
 	if len(data) != 8 {
 		return nil
 	}
@@ -129,14 +128,13 @@ func WriteFastTrieProgress(db DatabaseWriter, count uint64) {
 
 // ReadHeaderRLP retrieves a block header in its raw RLP database encoding.
 func ReadHeaderRLP(db DatabaseReader, hash common.Hash, number uint64) rlp.RawValue {
-	data, _ := db.Get(append(append(headerPrefix, encodeBlockNumber(number)...), hash.Bytes()...))
+	data, _ := db.Get(headerKey(number, hash))
 	return data
 }
 
 // HasHeader verifies the existence of a block header corresponding to the hash.
 func HasHeader(db DatabaseReader, hash common.Hash, number uint64) bool {
-	key := append(append(append(headerPrefix, encodeBlockNumber(number)...), hash.Bytes()...))
-	if has, err := db.Has(key); !has || err != nil {
+	if has, err := db.Has(headerKey(number, hash)); !has || err != nil {
 		return false
 	}
 	return true
@@ -161,11 +159,11 @@ func ReadHeader(db DatabaseReader, hash common.Hash, number uint64) *types.Heade
 func WriteHeader(db DatabaseWriter, header *types.Header) {
 	// Write the hash -> number mapping
 	var (
-		hash    = header.Hash().Bytes()
+		hash    = header.Hash()
 		number  = header.Number.Uint64()
 		encoded = encodeBlockNumber(number)
 	)
-	key := append(headerNumberPrefix, hash...)
+	key := headerNumberKey(hash)
 	if err := db.Put(key, encoded); err != nil {
 		log.Crit("Failed to store hash to number mapping", "err", err)
 	}
@@ -174,7 +172,7 @@ func WriteHeader(db DatabaseWriter, header *types.Header) {
 	if err != nil {
 		log.Crit("Failed to RLP encode header", "err", err)
 	}
-	key = append(append(headerPrefix, encoded...), hash...)
+	key = headerKey(number, hash)
 	if err := db.Put(key, data); err != nil {
 		log.Crit("Failed to store header", "err", err)
 	}
@@ -182,32 +180,30 @@ func WriteHeader(db DatabaseWriter, header *types.Header) {
 
 // DeleteHeader removes all block header data associated with a hash.
 func DeleteHeader(db DatabaseDeleter, hash common.Hash, number uint64) {
-	if err := db.Delete(append(append(headerPrefix, encodeBlockNumber(number)...), hash.Bytes()...)); err != nil {
+	if err := db.Delete(headerKey(number, hash)); err != nil {
 		log.Crit("Failed to delete header", "err", err)
 	}
-	if err := db.Delete(append(headerNumberPrefix, hash.Bytes()...)); err != nil {
+	if err := db.Delete(headerNumberKey(hash)); err != nil {
 		log.Crit("Failed to delete hash to number mapping", "err", err)
 	}
 }
 
 // ReadBodyRLP retrieves the block body (transactions and uncles) in RLP encoding.
 func ReadBodyRLP(db DatabaseReader, hash common.Hash, number uint64) rlp.RawValue {
-	data, _ := db.Get(append(append(blockBodyPrefix, encodeBlockNumber(number)...), hash.Bytes()...))
+	data, _ := db.Get(blockBodyKey(number, hash))
 	return data
 }
 
 // WriteBodyRLP stores an RLP encoded block body into the database.
 func WriteBodyRLP(db DatabaseWriter, hash common.Hash, number uint64, rlp rlp.RawValue) {
-	key := append(append(blockBodyPrefix, encodeBlockNumber(number)...), hash.Bytes()...)
-	if err := db.Put(key, rlp); err != nil {
+	if err := db.Put(blockBodyKey(number, hash), rlp); err != nil {
 		log.Crit("Failed to store block body", "err", err)
 	}
 }
 
 // HasBody verifies the existence of a block body corresponding to the hash.
 func HasBody(db DatabaseReader, hash common.Hash, number uint64) bool {
-	key := append(append(blockBodyPrefix, encodeBlockNumber(number)...), hash.Bytes()...)
-	if has, err := db.Has(key); !has || err != nil {
+	if has, err := db.Has(blockBodyKey(number, hash)); !has || err != nil {
 		return false
 	}
 	return true
@@ -238,14 +234,14 @@ func WriteBody(db DatabaseWriter, hash common.Hash, number uint64, body *types.B
 
 // DeleteBody removes all block body data associated with a hash.
 func DeleteBody(db DatabaseDeleter, hash common.Hash, number uint64) {
-	if err := db.Delete(append(append(blockBodyPrefix, encodeBlockNumber(number)...), hash.Bytes()...)); err != nil {
+	if err := db.Delete(blockBodyKey(number, hash)); err != nil {
 		log.Crit("Failed to delete block body", "err", err)
 	}
}
 
 // ReadTd retrieves a block's total difficulty corresponding to the hash.
 func ReadTd(db DatabaseReader, hash common.Hash, number uint64) *big.Int {
-	data, _ := db.Get(append(append(append(headerPrefix, encodeBlockNumber(number)...), hash[:]...), headerTDSuffix...))
+	data, _ := db.Get(headerTDKey(number, hash))
 	if len(data) == 0 {
 		return nil
 	}
@@ -263,15 +259,14 @@ func WriteTd(db DatabaseWriter, hash common.Hash, number uint64, td *big.Int) {
 	if err != nil {
 		log.Crit("Failed to RLP encode block total difficulty", "err", err)
 	}
-	key := append(append(append(headerPrefix, encodeBlockNumber(number)...), hash.Bytes()...), headerTDSuffix...)
-	if err := db.Put(key, data); err != nil {
+	if err := db.Put(headerTDKey(number, hash), data); err != nil {
 		log.Crit("Failed to store block total difficulty", "err", err)
 	}
 }
 
 // DeleteTd removes all block total difficulty data associated with a hash.
 func DeleteTd(db DatabaseDeleter, hash common.Hash, number uint64) {
-	if err := db.Delete(append(append(append(headerPrefix, encodeBlockNumber(number)...), hash.Bytes()...), headerTDSuffix...)); err != nil {
+	if err := db.Delete(headerTDKey(number, hash)); err != nil {
 		log.Crit("Failed to delete block total difficulty", "err", err)
 	}
 }
@@ -279,7 +274,7 @@ func DeleteTd(db DatabaseDeleter, hash common.Hash, number uint64) {
 // ReadReceipts retrieves all the transaction receipts belonging to a block.
 func ReadReceipts(db DatabaseReader, hash common.Hash, number uint64) types.Receipts {
 	// Retrieve the flattened receipt slice
-	data, _ := db.Get(append(append(blockReceiptsPrefix, encodeBlockNumber(number)...), hash[:]...))
+	data, _ := db.Get(blockReceiptsKey(number, hash))
 	if len(data) == 0 {
 		return nil
 	}
@@ -308,15 +303,14 @@ func WriteReceipts(db DatabaseWriter, hash common.Hash, number uint64, receipts
 		log.Crit("Failed to encode block receipts", "err", err)
 	}
 	// Store the flattened receipt slice
-	key := append(append(blockReceiptsPrefix, encodeBlockNumber(number)...), hash.Bytes()...)
-	if err := db.Put(key, bytes); err != nil {
+	if err := db.Put(blockReceiptsKey(number, hash), bytes); err != nil {
 		log.Crit("Failed to store block receipts", "err", err)
 	}
 }
 
 // DeleteReceipts removes all receipt data associated with a block hash.
 func DeleteReceipts(db DatabaseDeleter, hash common.Hash, number uint64) {
-	if err := db.Delete(append(append(blockReceiptsPrefix, encodeBlockNumber(number)...), hash.Bytes()...)); err != nil {
+	if err := db.Delete(blockReceiptsKey(number, hash)); err != nil {
 		log.Crit("Failed to delete block receipts", "err", err)
 	}
 }
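Since readers and writers now derive keys from the same helpers, a write is guaranteed to be visible to the matching read. A hypothetical round-trip over an in-memory database (assuming this tree's ethdb.NewMemDatabase constructor):

	db := ethdb.NewMemDatabase()
	hash, number := common.HexToHash("0xdead"), uint64(42)

	rawdb.WriteTd(db, hash, number, big.NewInt(131072))
	if td := rawdb.ReadTd(db, hash, number); td == nil || td.Cmp(big.NewInt(131072)) != 0 {
		panic("total difficulty round-trip mismatch")
	}
	rawdb.DeleteTd(db, hash, number)
	if rawdb.ReadTd(db, hash, number) != nil {
		panic("total difficulty should have been deleted")
	}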
@@ -17,8 +17,6 @@
 package rawdb
 
 import (
-	"encoding/binary"
-
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/log"
@@ -28,7 +26,7 @@ import (
 // ReadTxLookupEntry retrieves the positional metadata associated with a transaction
 // hash to allow retrieving the transaction or receipt by hash.
 func ReadTxLookupEntry(db DatabaseReader, hash common.Hash) (common.Hash, uint64, uint64) {
-	data, _ := db.Get(append(txLookupPrefix, hash.Bytes()...))
+	data, _ := db.Get(txLookupKey(hash))
 	if len(data) == 0 {
 		return common.Hash{}, 0, 0
 	}
@@ -53,7 +51,7 @@ func WriteTxLookupEntries(db DatabaseWriter, block *types.Block) {
 		if err != nil {
 			log.Crit("Failed to encode transaction lookup entry", "err", err)
 		}
-		if err := db.Put(append(txLookupPrefix, tx.Hash().Bytes()...), data); err != nil {
+		if err := db.Put(txLookupKey(tx.Hash()), data); err != nil {
 			log.Crit("Failed to store transaction lookup entry", "err", err)
 		}
 	}
@@ -61,7 +59,7 @@ func WriteTxLookupEntries(db DatabaseWriter, block *types.Block) {
 
 // DeleteTxLookupEntry removes all transaction data associated with a hash.
 func DeleteTxLookupEntry(db DatabaseDeleter, hash common.Hash) {
-	db.Delete(append(txLookupPrefix, hash.Bytes()...))
+	db.Delete(txLookupKey(hash))
 }
 
 // ReadTransaction retrieves a specific transaction from the database, along with
@@ -97,23 +95,13 @@ func ReadReceipt(db DatabaseReader, hash common.Hash) (*types.Receipt, common.Ha
 // ReadBloomBits retrieves the compressed bloom bit vector belonging to the given
 // section and bit index from the database.
 func ReadBloomBits(db DatabaseReader, bit uint, section uint64, head common.Hash) ([]byte, error) {
-	key := append(append(bloomBitsPrefix, make([]byte, 10)...), head.Bytes()...)
-
-	binary.BigEndian.PutUint16(key[1:], uint16(bit))
-	binary.BigEndian.PutUint64(key[3:], section)
-
-	return db.Get(key)
+	return db.Get(bloomBitsKey(bit, section, head))
 }
 
 // WriteBloomBits stores the compressed bloom bits vector belonging to the given
 // section and bit index.
 func WriteBloomBits(db DatabaseWriter, bit uint, section uint64, head common.Hash, bits []byte) {
-	key := append(append(bloomBitsPrefix, make([]byte, 10)...), head.Bytes()...)
-
-	binary.BigEndian.PutUint16(key[1:], uint16(bit))
-	binary.BigEndian.PutUint64(key[3:], section)
-
-	if err := db.Put(key, bits); err != nil {
+	if err := db.Put(bloomBitsKey(bit, section, head), bits); err != nil {
 		log.Crit("Failed to store bloom bits", "err", err)
 	}
 }
|
||||
|
||||
// ReadChainConfig retrieves the consensus settings based on the given genesis hash.
|
||||
func ReadChainConfig(db DatabaseReader, hash common.Hash) *params.ChainConfig {
|
||||
data, _ := db.Get(append(configPrefix, hash[:]...))
|
||||
data, _ := db.Get(configKey(hash))
|
||||
if len(data) == 0 {
|
||||
return nil
|
||||
}
|
||||
@@ -66,14 +66,14 @@ func WriteChainConfig(db DatabaseWriter, hash common.Hash, cfg *params.ChainConf
|
||||
if err != nil {
|
||||
log.Crit("Failed to JSON encode chain config", "err", err)
|
||||
}
|
||||
if err := db.Put(append(configPrefix, hash[:]...), data); err != nil {
|
||||
if err := db.Put(configKey(hash), data); err != nil {
|
||||
log.Crit("Failed to store chain config", "err", err)
|
||||
}
|
||||
}
|
||||
|
||||
// ReadPreimage retrieves a single preimage of the provided hash.
|
||||
func ReadPreimage(db DatabaseReader, hash common.Hash) []byte {
|
||||
data, _ := db.Get(append(preimagePrefix, hash.Bytes()...))
|
||||
data, _ := db.Get(preimageKey(hash))
|
||||
return data
|
||||
}
|
||||
|
||||
@@ -81,7 +81,7 @@ func ReadPreimage(db DatabaseReader, hash common.Hash) []byte {
|
||||
// current block number, and is used for debug messages only.
|
||||
func WritePreimages(db DatabaseWriter, number uint64, preimages map[common.Hash][]byte) {
|
||||
for hash, preimage := range preimages {
|
||||
if err := db.Put(append(preimagePrefix, hash.Bytes()...), preimage); err != nil {
|
||||
if err := db.Put(preimageKey(hash), preimage); err != nil {
|
||||
log.Crit("Failed to store trie preimage", "err", err)
|
||||
}
|
||||
}
|
||||
|
@@ -77,3 +77,58 @@ func encodeBlockNumber(number uint64) []byte {
 	binary.BigEndian.PutUint64(enc, number)
 	return enc
 }
+
+// headerKey = headerPrefix + num (uint64 big endian) + hash
+func headerKey(number uint64, hash common.Hash) []byte {
+	return append(append(headerPrefix, encodeBlockNumber(number)...), hash.Bytes()...)
+}
+
+// headerTDKey = headerPrefix + num (uint64 big endian) + hash + headerTDSuffix
+func headerTDKey(number uint64, hash common.Hash) []byte {
+	return append(headerKey(number, hash), headerTDSuffix...)
+}
+
+// headerHashKey = headerPrefix + num (uint64 big endian) + headerHashSuffix
+func headerHashKey(number uint64) []byte {
+	return append(append(headerPrefix, encodeBlockNumber(number)...), headerHashSuffix...)
+}
+
+// headerNumberKey = headerNumberPrefix + hash
+func headerNumberKey(hash common.Hash) []byte {
+	return append(headerNumberPrefix, hash.Bytes()...)
+}
+
+// blockBodyKey = blockBodyPrefix + num (uint64 big endian) + hash
+func blockBodyKey(number uint64, hash common.Hash) []byte {
+	return append(append(blockBodyPrefix, encodeBlockNumber(number)...), hash.Bytes()...)
+}
+
+// blockReceiptsKey = blockReceiptsPrefix + num (uint64 big endian) + hash
+func blockReceiptsKey(number uint64, hash common.Hash) []byte {
+	return append(append(blockReceiptsPrefix, encodeBlockNumber(number)...), hash.Bytes()...)
+}
+
+// txLookupKey = txLookupPrefix + hash
+func txLookupKey(hash common.Hash) []byte {
+	return append(txLookupPrefix, hash.Bytes()...)
+}
+
+// bloomBitsKey = bloomBitsPrefix + bit (uint16 big endian) + section (uint64 big endian) + hash
+func bloomBitsKey(bit uint, section uint64, hash common.Hash) []byte {
+	key := append(append(bloomBitsPrefix, make([]byte, 10)...), hash.Bytes()...)
+
+	binary.BigEndian.PutUint16(key[1:], uint16(bit))
+	binary.BigEndian.PutUint64(key[3:], section)
+
+	return key
+}
+
+// preimageKey = preimagePrefix + hash
+func preimageKey(hash common.Hash) []byte {
+	return append(preimagePrefix, hash.Bytes()...)
+}
+
+// configKey = configPrefix + hash
+func configKey(hash common.Hash) []byte {
+	return append(configPrefix, hash.Bytes()...)
+}
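The bloomBitsKey arithmetic above implies a one-byte prefix followed by fixed-width big-endian fields; the resulting layout (derived from the PutUint16/PutUint64 offsets, so the 43-byte total is an inference, not stated in the diff):

	// key[0]      bloomBitsPrefix (1 byte)
	// key[1:3]    bit index, uint16 big endian
	// key[3:11]   section number, uint64 big endian
	// key[11:43]  head block hash (32 bytes)
	key := bloomBitsKey(5, 7, common.HexToHash("0xabc"))
	fmt.Println(len(key))                         // 43
	fmt.Println(binary.BigEndian.Uint16(key[1:])) // 5
	fmt.Println(binary.BigEndian.Uint64(key[3:])) // 7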
@@ -219,7 +219,7 @@ func (self *stateObject) updateRoot(db Database) {
 	self.data.Root = self.trie.Hash()
 }
 
-// CommitTrie the storage trie of the object to dwb.
+// CommitTrie the storage trie of the object to db.
 // This updates the trie root.
 func (self *stateObject) CommitTrie(db Database) error {
 	self.updateTrie(db)
@@ -99,7 +99,7 @@ func (s *StateSuite) TestNull(c *checker.C) {
 	s.state.SetState(address, common.Hash{}, value)
 	s.state.Commit(false)
 	value = s.state.GetState(address, common.Hash{})
-	if !common.EmptyHash(value) {
+	if value != (common.Hash{}) {
 		c.Errorf("expected empty hash. got %x", value)
 	}
 }
@@ -358,7 +358,7 @@ func (self *StateDB) deleteStateObject(stateObject *stateObject) {
 	self.setError(self.trie.TryDelete(addr[:]))
 }
 
-// Retrieve a state object given my the address. Returns nil if not found.
+// Retrieve a state object given by the address. Returns nil if not found.
 func (self *StateDB) getStateObject(addr common.Address) (stateObject *stateObject) {
 	// Prefer 'live' objects.
 	if obj := self.stateObjects[addr]; obj != nil {
@@ -596,7 +596,7 @@ func (s *StateDB) Commit(deleteEmptyObjects bool) (root common.Hash, err error)
 		case isDirty:
 			// Write any contract code associated with the state object
 			if stateObject.code != nil && stateObject.dirtyCode {
-				s.db.TrieDB().Insert(common.BytesToHash(stateObject.CodeHash()), stateObject.code)
+				s.db.TrieDB().InsertBlob(common.BytesToHash(stateObject.CodeHash()), stateObject.code)
 				stateObject.dirtyCode = false
 			}
 			// Write any storage changes in the state object to its storage trie.
@@ -25,8 +25,8 @@ import (
 )
 
 // NewStateSync create a new state trie download scheduler.
-func NewStateSync(root common.Hash, database trie.DatabaseReader) *trie.TrieSync {
-	var syncer *trie.TrieSync
+func NewStateSync(root common.Hash, database trie.DatabaseReader) *trie.Sync {
+	var syncer *trie.Sync
 	callback := func(leaf []byte, parent common.Hash) error {
 		var obj Account
 		if err := rlp.Decode(bytes.NewReader(leaf), &obj); err != nil {
@@ -36,6 +36,6 @@ func NewStateSync(root common.Hash, database trie.DatabaseReader) *trie.TrieSync
 		syncer.AddRawEntry(common.BytesToHash(obj.CodeHash), 64, parent)
 		return nil
 	}
-	syncer = trie.NewTrieSync(root, database, callback)
+	syncer = trie.NewSync(root, database, callback)
 	return syncer
 }
@@ -85,7 +85,7 @@ func (p *StateProcessor) Process(block *types.Block, statedb *state.StateDB, cfg
 // and uses the input parameters for its environment. It returns the receipt
 // for the transaction, gas used and an error if the transaction failed,
 // indicating the block was invalid.
-func ApplyTransaction(config *params.ChainConfig, bc *BlockChain, author *common.Address, gp *GasPool, statedb *state.StateDB, header *types.Header, tx *types.Transaction, usedGas *uint64, cfg vm.Config) (*types.Receipt, uint64, error) {
+func ApplyTransaction(config *params.ChainConfig, bc ChainContext, author *common.Address, gp *GasPool, statedb *state.StateDB, header *types.Header, tx *types.Transaction, usedGas *uint64, cfg vm.Config) (*types.Receipt, uint64, error) {
 	msg, err := tx.AsMessage(types.MakeSigner(config, header.Number))
 	if err != nil {
 		return nil, 0, err
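Widening bc from *BlockChain to the ChainContext interface means anything that can serve headers for the EVM context, such as a light-client backend or a test stub, can drive transaction application. A hedged caller sketch (every name other than ApplyTransaction itself is assumed to exist in scope):

	gp := new(core.GasPool).AddGas(header.GasLimit)
	usedGas := new(uint64)
	receipt, gas, err := core.ApplyTransaction(chainConfig, bc, &coinbase, gp, statedb, header, tx, usedGas, vm.Config{})
	if err != nil {
		return fmt.Errorf("tx %x invalid: %v", tx.Hash(), err)
	}
	_ = receipt // the receipt and gas used feed into the block's receipt trie
	_ = gas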
core/tx_cacher.go (new file, 105 lines)
@@ -0,0 +1,105 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package core
+
+import (
+	"runtime"
+
+	"github.com/ethereum/go-ethereum/core/types"
+)
+
+// senderCacher is a concurrent transaction sender recoverer and cacher.
+var senderCacher = newTxSenderCacher(runtime.NumCPU())
+
+// txSenderCacherRequest is a request for recovering transaction senders with a
+// specific signature scheme and caching it into the transactions themselves.
+//
+// The inc field defines the number of transactions to skip after each recovery,
+// which is used to feed the same underlying input array to different threads but
+// ensure they process the early transactions fast.
+type txSenderCacherRequest struct {
+	signer types.Signer
+	txs    []*types.Transaction
+	inc    int
+}
+
+// txSenderCacher is a helper structure to concurrently ecrecover transaction
+// senders from digital signatures on background threads.
+type txSenderCacher struct {
+	threads int
+	tasks   chan *txSenderCacherRequest
+}
+
+// newTxSenderCacher creates a new transaction sender background cacher and starts
+// as many processing goroutines as allowed by the GOMAXPROCS on construction.
+func newTxSenderCacher(threads int) *txSenderCacher {
+	cacher := &txSenderCacher{
+		tasks:   make(chan *txSenderCacherRequest, threads),
+		threads: threads,
+	}
+	for i := 0; i < threads; i++ {
+		go cacher.cache()
+	}
+	return cacher
+}
+
+// cache is an infinite loop, caching transaction senders from various forms of
+// data structures.
+func (cacher *txSenderCacher) cache() {
+	for task := range cacher.tasks {
+		for i := 0; i < len(task.txs); i += task.inc {
+			types.Sender(task.signer, task.txs[i])
+		}
+	}
+}
+
+// recover recovers the senders from a batch of transactions and caches them
+// back into the same data structures. There is no validation being done, nor
+// any reaction to invalid signatures. That is up to calling code later.
+func (cacher *txSenderCacher) recover(signer types.Signer, txs []*types.Transaction) {
+	// If there's nothing to recover, abort
+	if len(txs) == 0 {
+		return
+	}
+	// Ensure we have meaningful task sizes and schedule the recoveries
+	tasks := cacher.threads
+	if len(txs) < tasks*4 {
+		tasks = (len(txs) + 3) / 4
+	}
+	for i := 0; i < tasks; i++ {
+		cacher.tasks <- &txSenderCacherRequest{
+			signer: signer,
+			txs:    txs[i:],
+			inc:    tasks,
+		}
+	}
+}
+
+// recoverFromBlocks recovers the senders from a batch of blocks and caches them
+// back into the same data structures. There is no validation being done, nor
+// any reaction to invalid signatures. That is up to calling code later.
+func (cacher *txSenderCacher) recoverFromBlocks(signer types.Signer, blocks []*types.Block) {
+	count := 0
+	for _, block := range blocks {
+		count += len(block.Transactions())
+	}
+	txs := make([]*types.Transaction, 0, count)
+	for _, block := range blocks {
+		txs = append(txs, block.Transactions()...)
+	}
+	cacher.recover(signer, txs)
+}
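The inc stride means task i touches txs[i], txs[i+tasks], txs[i+2*tasks], and so on, so every worker reaches the front of the slice quickly. A standalone illustration of the same scheduling (process is a placeholder, not part of the diff):

	// With 8 transactions and 2 tasks the work interleaves as:
	//   task 0: txs[0], txs[2], txs[4], txs[6]
	//   task 1: txs[1], txs[3], txs[5], txs[7]
	tasks := 2
	for i := 0; i < tasks; i++ {
		go func(start int) {
			for j := start; j < len(txs); j += tasks {
				process(txs[j]) // e.g. types.Sender(signer, txs[j])
			}
		}(i)
	}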
@@ -56,7 +56,7 @@ func newTxJournal(path string) *txJournal {
 
 // load parses a transaction journal dump from disk, loading its contents into
 // the specified pool.
-func (journal *txJournal) load(add func(*types.Transaction) error) error {
+func (journal *txJournal) load(add func([]*types.Transaction) []error) error {
 	// Skip the parsing if the journal file doesn't exist at all
 	if _, err := os.Stat(journal.path); os.IsNotExist(err) {
 		return nil
@@ -76,7 +76,21 @@ func (journal *txJournal) load(add func(*types.Transaction) error) error {
 	stream := rlp.NewStream(input, 0)
 	total, dropped := 0, 0
 
-	var failure error
+	// Create a method to load a limited batch of transactions and bump the
+	// appropriate progress counters. Then use this method to load all the
+	// journalled transactions in small-ish batches.
+	loadBatch := func(txs types.Transactions) {
+		for _, err := range add(txs) {
+			if err != nil {
+				log.Debug("Failed to add journaled transaction", "err", err)
+				dropped++
+			}
+		}
+	}
+	var (
+		failure error
+		batch   types.Transactions
+	)
 	for {
 		// Parse the next transaction and terminate on error
 		tx := new(types.Transaction)
@@ -84,14 +98,17 @@ func (journal *txJournal) load(add func(*types.Transaction) error) error {
 			if err != io.EOF {
 				failure = err
 			}
+			if batch.Len() > 0 {
+				loadBatch(batch)
+			}
 			break
 		}
-		// Import the transaction and bump the appropriate progress counters
+		// New transaction parsed, queue up for later, import if threshold is reached
 		total++
-		if err = add(tx); err != nil {
-			log.Debug("Failed to add journaled transaction", "err", err)
-			dropped++
-			continue
-		}
+		if batch = append(batch, tx); batch.Len() > 1024 {
+			loadBatch(batch)
+			batch = batch[:0]
+		}
 	}
 	log.Info("Loaded local transaction journal", "transactions", total, "dropped", dropped)
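The new callback signature lines up with TxPool.AddLocals, which takes a batch and returns one error per transaction, so the pool can pass the method value straight through (this mirrors the tx_pool.go hunk further down):

	journal := newTxJournal(config.Journal)
	// AddLocals is func([]*types.Transaction) []error, matching load's batched
	// callback; no per-transaction adapter closure is needed any more.
	if err := journal.load(pool.AddLocals); err != nil {
		log.Warn("Failed to load transaction journal", "err", err)
	}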
@@ -397,7 +397,7 @@ func (h *priceHeap) Pop() interface{} {
 // txPricedList is a price-sorted heap to allow operating on transactions pool
 // contents in a price-incrementing way.
 type txPricedList struct {
-	all    *map[common.Hash]*types.Transaction // Pointer to the map of all transactions
-	items  *priceHeap                          // Heap of prices of all the stored transactions
-	stales int                                 // Number of stale price points to (re-heap trigger)
+	all    *txLookup  // Pointer to the map of all transactions
+	items  *priceHeap // Heap of prices of all the stored transactions
+	stales int        // Number of stale price points to (re-heap trigger)
 }
 
 // newTxPricedList creates a new price-sorted transaction heap.
-func newTxPricedList(all *map[common.Hash]*types.Transaction) *txPricedList {
+func newTxPricedList(all *txLookup) *txPricedList {
 	return &txPricedList{
 		all:   all,
 		items: new(priceHeap),
@@ -425,12 +425,13 @@ func (l *txPricedList) Removed() {
 		return
 	}
 	// Seems we've reached a critical number of stale transactions, reheap
-	reheap := make(priceHeap, 0, len(*l.all))
+	reheap := make(priceHeap, 0, l.all.Count())
 
 	l.stales, l.items = 0, &reheap
-	for _, tx := range *l.all {
+	l.all.Range(func(hash common.Hash, tx *types.Transaction) bool {
 		*l.items = append(*l.items, tx)
-	}
+		return true
+	})
 	heap.Init(l.items)
 }
 
@@ -443,7 +444,7 @@ func (l *txPricedList) Cap(threshold *big.Int, local *accountSet) types.Transact
 	for len(*l.items) > 0 {
 		// Discard stale transactions if found during cleanup
 		tx := heap.Pop(l.items).(*types.Transaction)
-		if _, ok := (*l.all)[tx.Hash()]; !ok {
+		if l.all.Get(tx.Hash()) == nil {
 			l.stales--
 			continue
 		}
@@ -475,7 +476,7 @@ func (l *txPricedList) Underpriced(tx *types.Transaction, local *accountSet) boo
 	// Discard stale price points if found at the heap start
 	for len(*l.items) > 0 {
 		head := []*types.Transaction(*l.items)[0]
-		if _, ok := (*l.all)[head.Hash()]; !ok {
+		if l.all.Get(head.Hash()) == nil {
 			l.stales--
 			heap.Pop(l.items)
 			continue
@@ -500,7 +501,7 @@ func (l *txPricedList) Discard(count int, local *accountSet) types.Transactions
 	for len(*l.items) > 0 && count > 0 {
 		// Discard stale transactions if found during cleanup
 		tx := heap.Pop(l.items).(*types.Transaction)
-		if _, ok := (*l.all)[tx.Hash()]; !ok {
+		if l.all.Get(tx.Hash()) == nil {
 			l.stales--
 			continue
 		}
core/tx_pool.go (173 lines changed)
@@ -200,11 +200,11 @@ type TxPool struct {
 	locals  *accountSet // Set of local transaction to exempt from eviction rules
 	journal *txJournal  // Journal of local transaction to back up to disk
 
-	pending map[common.Address]*txList         // All currently processable transactions
-	queue   map[common.Address]*txList         // Queued but non-processable transactions
-	beats   map[common.Address]time.Time       // Last heartbeat from each known account
-	all     map[common.Hash]*types.Transaction // All transactions to allow lookups
-	priced  *txPricedList                      // All transactions sorted by price
+	pending map[common.Address]*txList   // All currently processable transactions
+	queue   map[common.Address]*txList   // Queued but non-processable transactions
+	beats   map[common.Address]time.Time // Last heartbeat from each known account
+	all     *txLookup                    // All transactions to allow lookups
+	priced  *txPricedList                // All transactions sorted by price
 
 	wg sync.WaitGroup // for shutdown sync
 
@@ -222,23 +222,23 @@ func NewTxPool(config TxPoolConfig, chainconfig *params.ChainConfig, chain block
 		config:      config,
 		chainconfig: chainconfig,
 		chain:       chain,
-		signer:      types.NewEIP155Signer(chainconfig.ChainId),
+		signer:      types.NewEIP155Signer(chainconfig.ChainID),
 		pending:     make(map[common.Address]*txList),
 		queue:       make(map[common.Address]*txList),
 		beats:       make(map[common.Address]time.Time),
-		all:         make(map[common.Hash]*types.Transaction),
+		all:         newTxLookup(),
 		chainHeadCh: make(chan ChainHeadEvent, chainHeadChanSize),
 		gasPrice:    new(big.Int).SetUint64(config.PriceLimit),
 	}
 	pool.locals = newAccountSet(pool.signer)
-	pool.priced = newTxPricedList(&pool.all)
+	pool.priced = newTxPricedList(pool.all)
 	pool.reset(nil, chain.CurrentBlock().Header())
 
 	// If local transactions and journaling is enabled, load from disk
 	if !config.NoLocals && config.Journal != "" {
 		pool.journal = newTxJournal(config.Journal)
 
-		if err := pool.journal.load(pool.AddLocal); err != nil {
+		if err := pool.journal.load(pool.AddLocals); err != nil {
 			log.Warn("Failed to load transaction journal", "err", err)
 		}
 		if err := pool.journal.rotate(pool.local()); err != nil {
@@ -411,6 +411,7 @@ func (pool *TxPool) reset(oldHead, newHead *types.Header) {
 
 	// Inject any transactions discarded due to reorgs
 	log.Debug("Reinjecting stale transactions", "count", len(reinject))
+	senderCacher.recover(pool.signer, reinject)
 	pool.addTxsLocked(reinject, false)
 
 	// validate the pool of pending transactions, this will remove
@@ -444,9 +445,9 @@ func (pool *TxPool) Stop() {
 	log.Info("Transaction pool stopped")
 }
 
-// SubscribeTxPreEvent registers a subscription of TxPreEvent and
+// SubscribeNewTxsEvent registers a subscription of NewTxsEvent and
 // starts sending event to the given channel.
-func (pool *TxPool) SubscribeTxPreEvent(ch chan<- TxPreEvent) event.Subscription {
+func (pool *TxPool) SubscribeNewTxsEvent(ch chan<- NewTxsEvent) event.Subscription {
 	return pool.scope.Track(pool.txFeed.Subscribe(ch))
 }
 
@@ -605,7 +606,7 @@ func (pool *TxPool) validateTx(tx *types.Transaction, local bool) error {
 func (pool *TxPool) add(tx *types.Transaction, local bool) (bool, error) {
 	// If the transaction is already known, discard it
 	hash := tx.Hash()
-	if pool.all[hash] != nil {
+	if pool.all.Get(hash) != nil {
 		log.Trace("Discarding already known transaction", "hash", hash)
 		return false, fmt.Errorf("known transaction: %x", hash)
 	}
@@ -616,7 +617,7 @@ func (pool *TxPool) add(tx *types.Transaction, local bool) (bool, error) {
 		return false, err
 	}
 	// If the transaction pool is full, discard underpriced transactions
-	if uint64(len(pool.all)) >= pool.config.GlobalSlots+pool.config.GlobalQueue {
+	if uint64(pool.all.Count()) >= pool.config.GlobalSlots+pool.config.GlobalQueue {
 		// If the new transaction is underpriced, don't accept it
 		if !local && pool.priced.Underpriced(tx, pool.locals) {
 			log.Trace("Discarding underpriced transaction", "hash", hash, "price", tx.GasPrice())
@@ -624,7 +625,7 @@ func (pool *TxPool) add(tx *types.Transaction, local bool) (bool, error) {
 			return false, ErrUnderpriced
 		}
 		// New transaction is better than our worse ones, make room for it
-		drop := pool.priced.Discard(len(pool.all)-int(pool.config.GlobalSlots+pool.config.GlobalQueue-1), pool.locals)
+		drop := pool.priced.Discard(pool.all.Count()-int(pool.config.GlobalSlots+pool.config.GlobalQueue-1), pool.locals)
 		for _, tx := range drop {
 			log.Trace("Discarding freshly underpriced transaction", "hash", tx.Hash(), "price", tx.GasPrice())
 			underpricedTxCounter.Inc(1)
@@ -642,18 +643,18 @@ func (pool *TxPool) add(tx *types.Transaction, local bool) (bool, error) {
 	}
 	// New transaction is better, replace old one
 	if old != nil {
-		delete(pool.all, old.Hash())
+		pool.all.Remove(old.Hash())
 		pool.priced.Removed()
 		pendingReplaceCounter.Inc(1)
 	}
-	pool.all[tx.Hash()] = tx
+	pool.all.Add(tx)
 	pool.priced.Put(tx)
 	pool.journalTx(from, tx)
 
 	log.Trace("Pooled new executable transaction", "hash", hash, "from", from, "to", tx.To())
 
 	// We've directly injected a replacement transaction, notify subsystems
-	go pool.txFeed.Send(TxPreEvent{tx})
+	go pool.txFeed.Send(NewTxsEvent{types.Transactions{tx}})
 
 	return old != nil, nil
 }
@@ -689,12 +690,12 @@ func (pool *TxPool) enqueueTx(hash common.Hash, tx *types.Transaction) (bool, er
 	}
 	// Discard any previous transaction and mark this
 	if old != nil {
-		delete(pool.all, old.Hash())
+		pool.all.Remove(old.Hash())
 		pool.priced.Removed()
 		queuedReplaceCounter.Inc(1)
 	}
-	if pool.all[hash] == nil {
-		pool.all[hash] = tx
+	if pool.all.Get(hash) == nil {
+		pool.all.Add(tx)
 		pool.priced.Put(tx)
 	}
 	return old != nil, nil
@@ -712,10 +713,11 @@ func (pool *TxPool) journalTx(from common.Address, tx *types.Transaction) {
 	}
 }
 
-// promoteTx adds a transaction to the pending (processable) list of transactions.
+// promoteTx adds a transaction to the pending (processable) list of transactions
+// and returns whether it was inserted or an older was better.
 //
 // Note, this method assumes the pool lock is held!
-func (pool *TxPool) promoteTx(addr common.Address, hash common.Hash, tx *types.Transaction) {
+func (pool *TxPool) promoteTx(addr common.Address, hash common.Hash, tx *types.Transaction) bool {
 	// Try to insert the transaction into the pending queue
 	if pool.pending[addr] == nil {
 		pool.pending[addr] = newTxList(true)
@@ -725,29 +727,29 @@ func (pool *TxPool) promoteTx(addr common.Address, hash common.Hash, tx *types.T
 	inserted, old := list.Add(tx, pool.config.PriceBump)
 	if !inserted {
 		// An older transaction was better, discard this
-		delete(pool.all, hash)
+		pool.all.Remove(hash)
 		pool.priced.Removed()
 
 		pendingDiscardCounter.Inc(1)
-		return
+		return false
 	}
 	// Otherwise discard any previous transaction and mark this
 	if old != nil {
-		delete(pool.all, old.Hash())
+		pool.all.Remove(old.Hash())
 		pool.priced.Removed()
 
 		pendingReplaceCounter.Inc(1)
 	}
 	// Failsafe to work around direct pending inserts (tests)
-	if pool.all[hash] == nil {
-		pool.all[hash] = tx
+	if pool.all.Get(hash) == nil {
+		pool.all.Add(tx)
 		pool.priced.Put(tx)
 	}
 	// Set the potentially new pending nonce and notify any subsystems of the new tx
 	pool.beats[addr] = time.Now()
 	pool.pendingState.SetNonce(addr, tx.Nonce()+1)
 
-	go pool.txFeed.Send(TxPreEvent{tx})
+	return true
 }
 
 // AddLocal enqueues a single transaction into the pool if it is valid, marking
@@ -813,11 +815,9 @@ func (pool *TxPool) addTxsLocked(txs []*types.Transaction, local bool) []error {
 
 	for i, tx := range txs {
 		var replace bool
-		if replace, errs[i] = pool.add(tx, local); errs[i] == nil {
-			if !replace {
-				from, _ := types.Sender(pool.signer, tx) // already validated
-				dirty[from] = struct{}{}
-			}
+		if replace, errs[i] = pool.add(tx, local); errs[i] == nil && !replace {
+			from, _ := types.Sender(pool.signer, tx) // already validated
+			dirty[from] = struct{}{}
 		}
 	}
 	// Only reprocess the internal state if something was actually added
@@ -839,7 +839,7 @@ func (pool *TxPool) Status(hashes []common.Hash) []TxStatus {
 
 	status := make([]TxStatus, len(hashes))
 	for i, hash := range hashes {
-		if tx := pool.all[hash]; tx != nil {
+		if tx := pool.all.Get(hash); tx != nil {
 			from, _ := types.Sender(pool.signer, tx) // already validated
 			if pool.pending[from] != nil && pool.pending[from].txs.items[tx.Nonce()] != nil {
 				status[i] = TxStatusPending
@@ -854,24 +854,21 @@ func (pool *TxPool) Status(hashes []common.Hash) []TxStatus {
 // Get returns a transaction if it is contained in the pool
 // and nil otherwise.
 func (pool *TxPool) Get(hash common.Hash) *types.Transaction {
-	pool.mu.RLock()
-	defer pool.mu.RUnlock()
-
-	return pool.all[hash]
+	return pool.all.Get(hash)
 }
 
 // removeTx removes a single transaction from the queue, moving all subsequent
 // transactions back to the future queue.
 func (pool *TxPool) removeTx(hash common.Hash, outofbound bool) {
 	// Fetch the transaction we wish to delete
-	tx, ok := pool.all[hash]
-	if !ok {
+	tx := pool.all.Get(hash)
+	if tx == nil {
 		return
 	}
 	addr, _ := types.Sender(pool.signer, tx) // already validated during insertion
 
 	// Remove it from the list of known transactions
-	delete(pool.all, hash)
+	pool.all.Remove(hash)
 	if outofbound {
 		pool.priced.Removed()
 	}
@@ -907,6 +904,9 @@ func (pool *TxPool) removeTx(hash common.Hash, outofbound bool) {
 // future queue to the set of pending transactions. During this process, all
 // invalidated transactions (low nonce, low balance) are deleted.
 func (pool *TxPool) promoteExecutables(accounts []common.Address) {
+	// Track the promoted transactions to broadcast them at once
+	var promoted []*types.Transaction
+
 	// Gather all the accounts potentially needing updates
 	if accounts == nil {
 		accounts = make([]common.Address, 0, len(pool.queue))
@@ -924,7 +924,7 @@ func (pool *TxPool) promoteExecutables(accounts []common.Address) {
 		for _, tx := range list.Forward(pool.currentState.GetNonce(addr)) {
 			hash := tx.Hash()
 			log.Trace("Removed old queued transaction", "hash", hash)
-			delete(pool.all, hash)
+			pool.all.Remove(hash)
 			pool.priced.Removed()
 		}
 		// Drop all transactions that are too costly (low balance or out of gas)
@@ -932,21 +932,23 @@ func (pool *TxPool) promoteExecutables(accounts []common.Address) {
 		for _, tx := range drops {
 			hash := tx.Hash()
 			log.Trace("Removed unpayable queued transaction", "hash", hash)
-			delete(pool.all, hash)
+			pool.all.Remove(hash)
 			pool.priced.Removed()
 			queuedNofundsCounter.Inc(1)
 		}
 		// Gather all executable transactions and promote them
 		for _, tx := range list.Ready(pool.pendingState.GetNonce(addr)) {
 			hash := tx.Hash()
-			log.Trace("Promoting queued transaction", "hash", hash)
-			pool.promoteTx(addr, hash, tx)
+			if pool.promoteTx(addr, hash, tx) {
+				log.Trace("Promoting queued transaction", "hash", hash)
+				promoted = append(promoted, tx)
+			}
 		}
 		// Drop all transactions over the allowed limit
 		if !pool.locals.contains(addr) {
 			for _, tx := range list.Cap(int(pool.config.AccountQueue)) {
 				hash := tx.Hash()
-				delete(pool.all, hash)
+				pool.all.Remove(hash)
 				pool.priced.Removed()
 				queuedRateLimitCounter.Inc(1)
 				log.Trace("Removed cap-exceeding queued transaction", "hash", hash)
@@ -957,6 +959,10 @@ func (pool *TxPool) promoteExecutables(accounts []common.Address) {
 			delete(pool.queue, addr)
 		}
 	}
+	// Notify subsystem for new promoted transactions.
+	if len(promoted) > 0 {
+		go pool.txFeed.Send(NewTxsEvent{promoted})
+	}
 	// If the pending limit is overflown, start equalizing allowances
 	pending := uint64(0)
 	for _, list := range pool.pending {
@@ -991,7 +997,7 @@ func (pool *TxPool) promoteExecutables(accounts []common.Address) {
 			for _, tx := range list.Cap(list.Len() - 1) {
 				// Drop the transaction from the global pools too
 				hash := tx.Hash()
-				delete(pool.all, hash)
+				pool.all.Remove(hash)
 				pool.priced.Removed()
 
 				// Update the account nonce to the dropped transaction
@@ -1013,7 +1019,7 @@ func (pool *TxPool) promoteExecutables(accounts []common.Address) {
 			for _, tx := range list.Cap(list.Len() - 1) {
 				// Drop the transaction from the global pools too
 				hash := tx.Hash()
-				delete(pool.all, hash)
+				pool.all.Remove(hash)
 				pool.priced.Removed()
 
 				// Update the account nonce to the dropped transaction
@@ -1082,7 +1088,7 @@ func (pool *TxPool) demoteUnexecutables() {
 		for _, tx := range list.Forward(nonce) {
 			hash := tx.Hash()
 			log.Trace("Removed old pending transaction", "hash", hash)
-			delete(pool.all, hash)
+			pool.all.Remove(hash)
 			pool.priced.Removed()
 		}
 		// Drop all transactions that are too costly (low balance or out of gas), and queue any invalids back for later
@@ -1090,7 +1096,7 @@ func (pool *TxPool) demoteUnexecutables() {
 		for _, tx := range drops {
 			hash := tx.Hash()
 			log.Trace("Removed unpayable pending transaction", "hash", hash)
-			delete(pool.all, hash)
+			pool.all.Remove(hash)
 			pool.priced.Removed()
 			pendingNofundsCounter.Inc(1)
 		}
@@ -1099,7 +1105,7 @@ func (pool *TxPool) demoteUnexecutables() {
 			log.Trace("Demoting pending transaction", "hash", hash)
 			pool.enqueueTx(hash, tx)
 		}
-		// If there's a gap in front, warn (should never happen) and postpone all transactions
+		// If there's a gap in front, alert (should never happen) and postpone all transactions
 		if list.Len() > 0 && list.txs.Get(nonce) == nil {
 			for _, tx := range list.Cap(0) {
 				hash := tx.Hash()
@@ -1162,3 +1168,68 @@ func (as *accountSet) containsTx(tx *types.Transaction) bool {
 func (as *accountSet) add(addr common.Address) {
 	as.accounts[addr] = struct{}{}
 }
+
+// txLookup is used internally by TxPool to track transactions while allowing lookup without
+// mutex contention.
+//
+// Note, although this type is properly protected against concurrent access, it
+// is **not** a type that should ever be mutated or even exposed outside of the
+// transaction pool, since its internal state is tightly coupled with the pools
+// internal mechanisms. The sole purpose of the type is to permit out-of-bound
+// peeking into the pool in TxPool.Get without having to acquire the widely scoped
+// TxPool.mu mutex.
+type txLookup struct {
+	all  map[common.Hash]*types.Transaction
+	lock sync.RWMutex
+}
+
+// newTxLookup returns a new txLookup structure.
+func newTxLookup() *txLookup {
+	return &txLookup{
+		all: make(map[common.Hash]*types.Transaction),
+	}
+}
+
+// Range calls f on each key and value present in the map.
+func (t *txLookup) Range(f func(hash common.Hash, tx *types.Transaction) bool) {
+	t.lock.RLock()
+	defer t.lock.RUnlock()
+
+	for key, value := range t.all {
+		if !f(key, value) {
+			break
+		}
+	}
+}
+
+// Get returns a transaction if it exists in the lookup, or nil if not found.
+func (t *txLookup) Get(hash common.Hash) *types.Transaction {
+	t.lock.RLock()
+	defer t.lock.RUnlock()
+
+	return t.all[hash]
+}
+
+// Count returns the current number of items in the lookup.
+func (t *txLookup) Count() int {
+	t.lock.RLock()
+	defer t.lock.RUnlock()
+
+	return len(t.all)
+}
+
+// Add adds a transaction to the lookup.
+func (t *txLookup) Add(tx *types.Transaction) {
+	t.lock.Lock()
+	defer t.lock.Unlock()
+
+	t.all[tx.Hash()] = tx
+}
+
+// Remove removes a transaction from the lookup.
+func (t *txLookup) Remove(hash common.Hash) {
+	t.lock.Lock()
+	defer t.lock.Unlock()
+
	delete(t.all, hash)
+}
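A short illustration of the txLookup contract (sketch only, not part of the diff): reads take the shared lock, so Get, Count and Range are safe from RPC goroutines while the pool keeps mutating the set under its own mutex:

	lookup := newTxLookup()
	lookup.Add(tx) // tx is some previously validated *types.Transaction

	if cached := lookup.Get(tx.Hash()); cached != nil {
		fmt.Println("known transaction, pool size:", lookup.Count())
	}
	seen := 0
	lookup.Range(func(hash common.Hash, tx *types.Transaction) bool {
		seen++
		return true // returning false would stop the iteration early
	})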
@@ -94,7 +94,7 @@ func validateTxPoolInternals(pool *TxPool) error {
|
||||
|
||||
// Ensure the total transaction set is consistent with pending + queued
|
||||
pending, queued := pool.stats()
|
||||
if total := len(pool.all); total != pending+queued {
|
||||
if total := pool.all.Count(); total != pending+queued {
|
||||
return fmt.Errorf("total transaction count %d != %d pending + %d queued", total, pending, queued)
|
||||
}
|
||||
if priced := pool.priced.items.Len() - pool.priced.stales; priced != pending+queued {
|
||||
@@ -118,21 +118,27 @@ func validateTxPoolInternals(pool *TxPool) error {
|
||||
|
||||
// validateEvents checks that the correct number of transaction addition events
|
||||
// were fired on the pool's event feed.
|
||||
func validateEvents(events chan TxPreEvent, count int) error {
|
||||
for i := 0; i < count; i++ {
|
||||
func validateEvents(events chan NewTxsEvent, count int) error {
|
||||
var received []*types.Transaction
|
||||
|
||||
for len(received) < count {
|
||||
select {
|
||||
case <-events:
|
||||
case ev := <-events:
|
||||
received = append(received, ev.Txs...)
|
||||
case <-time.After(time.Second):
|
||||
return fmt.Errorf("event #%d not fired", i)
|
||||
return fmt.Errorf("event #%d not fired", received)
|
||||
}
|
||||
}
|
||||
if len(received) > count {
|
||||
return fmt.Errorf("more than %d events fired: %v", count, received[count:])
|
||||
}
|
||||
select {
|
||||
case tx := <-events:
|
||||
return fmt.Errorf("more than %d events fired: %v", count, tx.Tx)
|
||||
case ev := <-events:
|
||||
return fmt.Errorf("more than %d events fired: %v", count, ev.Txs)
|
||||
|
||||
case <-time.After(50 * time.Millisecond):
|
||||
// This branch should be "default", but it's a data race between goroutines,
|
||||
// reading the event channel and pushng into it, so better wait a bit ensuring
|
||||
// reading the event channel and pushing into it, so better wait a bit ensuring
|
||||
// really nothing gets injected.
|
||||
}
|
||||
return nil
|
||||
@@ -395,8 +401,8 @@ func TestTransactionDoubleNonce(t *testing.T) {
|
||||
t.Errorf("transaction mismatch: have %x, want %x", tx.Hash(), tx2.Hash())
|
||||
}
|
||||
// Ensure the total transaction count is correct
|
||||
if len(pool.all) != 1 {
|
||||
t.Error("expected 1 total transactions, got", len(pool.all))
|
||||
if pool.all.Count() != 1 {
|
||||
t.Error("expected 1 total transactions, got", pool.all.Count())
|
||||
}
|
||||
}
|
||||
|
||||
@@ -418,8 +424,8 @@ func TestTransactionMissingNonce(t *testing.T) {
|
||||
if pool.queue[addr].Len() != 1 {
|
||||
t.Error("expected 1 queued transaction, got", pool.queue[addr].Len())
|
||||
}
|
||||
if len(pool.all) != 1 {
|
||||
t.Error("expected 1 total transactions, got", len(pool.all))
|
||||
if pool.all.Count() != 1 {
|
||||
t.Error("expected 1 total transactions, got", pool.all.Count())
|
||||
}
|
||||
}
|
||||
|
||||
@@ -482,8 +488,8 @@ func TestTransactionDropping(t *testing.T) {
|
||||
if pool.queue[account].Len() != 3 {
|
||||
t.Errorf("queued transaction mismatch: have %d, want %d", pool.queue[account].Len(), 3)
|
||||
}
|
||||
if len(pool.all) != 6 {
|
||||
t.Errorf("total transaction mismatch: have %d, want %d", len(pool.all), 6)
|
||||
if pool.all.Count() != 6 {
|
||||
t.Errorf("total transaction mismatch: have %d, want %d", pool.all.Count(), 6)
|
||||
}
|
||||
pool.lockedReset(nil, nil)
|
||||
if pool.pending[account].Len() != 3 {
|
||||
@@ -492,8 +498,8 @@ func TestTransactionDropping(t *testing.T) {
|
||||
if pool.queue[account].Len() != 3 {
|
||||
t.Errorf("queued transaction mismatch: have %d, want %d", pool.queue[account].Len(), 3)
|
||||
}
|
||||
if len(pool.all) != 6 {
|
||||
t.Errorf("total transaction mismatch: have %d, want %d", len(pool.all), 6)
|
||||
if pool.all.Count() != 6 {
|
||||
t.Errorf("total transaction mismatch: have %d, want %d", pool.all.Count(), 6)
|
||||
}
|
||||
// Reduce the balance of the account, and check that invalidated transactions are dropped
|
||||
pool.currentState.AddBalance(account, big.NewInt(-650))
|
||||
@@ -517,8 +523,8 @@ func TestTransactionDropping(t *testing.T) {
|
||||
if _, ok := pool.queue[account].txs.items[tx12.Nonce()]; ok {
|
||||
t.Errorf("out-of-fund queued transaction present: %v", tx11)
|
||||
}
|
||||
if len(pool.all) != 4 {
|
||||
t.Errorf("total transaction mismatch: have %d, want %d", len(pool.all), 4)
|
||||
if pool.all.Count() != 4 {
|
||||
t.Errorf("total transaction mismatch: have %d, want %d", pool.all.Count(), 4)
|
||||
}
|
||||
 	// Reduce the block gas limit, check that invalidated transactions are dropped
 	pool.chain.(*testBlockChain).gasLimit = 100

@@ -536,8 +542,8 @@ func TestTransactionDropping(t *testing.T) {
 	if _, ok := pool.queue[account].txs.items[tx11.Nonce()]; ok {
 		t.Errorf("over-gased queued transaction present: %v", tx11)
 	}
-	if len(pool.all) != 2 {
-		t.Errorf("total transaction mismatch: have %d, want %d", len(pool.all), 2)
+	if pool.all.Count() != 2 {
+		t.Errorf("total transaction mismatch: have %d, want %d", pool.all.Count(), 2)
 	}
 }

@@ -590,8 +596,8 @@ func TestTransactionPostponing(t *testing.T) {
 	if len(pool.queue) != 0 {
 		t.Errorf("queued accounts mismatch: have %d, want %d", len(pool.queue), 0)
 	}
-	if len(pool.all) != len(txs) {
-		t.Errorf("total transaction mismatch: have %d, want %d", len(pool.all), len(txs))
+	if pool.all.Count() != len(txs) {
+		t.Errorf("total transaction mismatch: have %d, want %d", pool.all.Count(), len(txs))
 	}
 	pool.lockedReset(nil, nil)
 	if pending := pool.pending[accs[0]].Len() + pool.pending[accs[1]].Len(); pending != len(txs) {
@@ -600,8 +606,8 @@ func TestTransactionPostponing(t *testing.T) {
 	if len(pool.queue) != 0 {
 		t.Errorf("queued accounts mismatch: have %d, want %d", len(pool.queue), 0)
 	}
-	if len(pool.all) != len(txs) {
-		t.Errorf("total transaction mismatch: have %d, want %d", len(pool.all), len(txs))
+	if pool.all.Count() != len(txs) {
+		t.Errorf("total transaction mismatch: have %d, want %d", pool.all.Count(), len(txs))
 	}
 	// Reduce the balance of the account, and check that transactions are reorganised
 	for _, addr := range accs {
@@ -650,8 +656,8 @@ func TestTransactionPostponing(t *testing.T) {
 			}
 		}
 	}
-	if len(pool.all) != len(txs)/2 {
-		t.Errorf("total transaction mismatch: have %d, want %d", len(pool.all), len(txs)/2)
+	if pool.all.Count() != len(txs)/2 {
+		t.Errorf("total transaction mismatch: have %d, want %d", pool.all.Count(), len(txs)/2)
 	}
 }

@@ -669,7 +675,7 @@ func TestTransactionGapFilling(t *testing.T) {
 	pool.currentState.AddBalance(account, big.NewInt(1000000))

 	// Keep track of transaction events to ensure all executables get announced
-	events := make(chan TxPreEvent, testTxPoolConfig.AccountQueue+5)
+	events := make(chan NewTxsEvent, testTxPoolConfig.AccountQueue+5)
 	sub := pool.txFeed.Subscribe(events)
 	defer sub.Unsubscribe()

@@ -742,8 +748,8 @@ func TestTransactionQueueAccountLimiting(t *testing.T) {
 			}
 		}
 	}
-	if len(pool.all) != int(testTxPoolConfig.AccountQueue) {
-		t.Errorf("total transaction mismatch: have %d, want %d", len(pool.all), testTxPoolConfig.AccountQueue)
+	if pool.all.Count() != int(testTxPoolConfig.AccountQueue) {
+		t.Errorf("total transaction mismatch: have %d, want %d", pool.all.Count(), testTxPoolConfig.AccountQueue)
 	}
 }

@@ -920,7 +926,7 @@ func TestTransactionPendingLimiting(t *testing.T) {
 	pool.currentState.AddBalance(account, big.NewInt(1000000))

 	// Keep track of transaction events to ensure all executables get announced
-	events := make(chan TxPreEvent, testTxPoolConfig.AccountQueue+5)
+	events := make(chan NewTxsEvent, testTxPoolConfig.AccountQueue+5)
 	sub := pool.txFeed.Subscribe(events)
 	defer sub.Unsubscribe()

@@ -936,8 +942,8 @@ func TestTransactionPendingLimiting(t *testing.T) {
 			t.Errorf("tx %d: queue size mismatch: have %d, want %d", i, pool.queue[account].Len(), 0)
 		}
 	}
-	if len(pool.all) != int(testTxPoolConfig.AccountQueue+5) {
-		t.Errorf("total transaction mismatch: have %d, want %d", len(pool.all), testTxPoolConfig.AccountQueue+5)
+	if pool.all.Count() != int(testTxPoolConfig.AccountQueue+5) {
+		t.Errorf("total transaction mismatch: have %d, want %d", pool.all.Count(), testTxPoolConfig.AccountQueue+5)
 	}
 	if err := validateEvents(events, int(testTxPoolConfig.AccountQueue+5)); err != nil {
 		t.Fatalf("event firing failed: %v", err)
@@ -987,8 +993,8 @@ func testTransactionLimitingEquivalency(t *testing.T, origin uint64) {
 	if len(pool1.queue) != len(pool2.queue) {
 		t.Errorf("queued transaction count mismatch: one-by-one algo: %d, batch algo: %d", len(pool1.queue), len(pool2.queue))
 	}
-	if len(pool1.all) != len(pool2.all) {
-		t.Errorf("total transaction count mismatch: one-by-one algo %d, batch algo %d", len(pool1.all), len(pool2.all))
+	if pool1.all.Count() != pool2.all.Count() {
+		t.Errorf("total transaction count mismatch: one-by-one algo %d, batch algo %d", pool1.all.Count(), pool2.all.Count())
 	}
 	if err := validateTxPoolInternals(pool1); err != nil {
 		t.Errorf("pool 1 internal state corrupted: %v", err)
@@ -1140,7 +1146,7 @@ func TestTransactionPoolRepricing(t *testing.T) {
 	defer pool.Stop()

 	// Keep track of transaction events to ensure all executables get announced
-	events := make(chan TxPreEvent, 32)
+	events := make(chan NewTxsEvent, 32)
 	sub := pool.txFeed.Subscribe(events)
 	defer sub.Unsubscribe()

@@ -1327,7 +1333,7 @@ func TestTransactionPoolUnderpricing(t *testing.T) {
 	defer pool.Stop()

 	// Keep track of transaction events to ensure all executables get announced
-	events := make(chan TxPreEvent, 32)
+	events := make(chan NewTxsEvent, 32)
 	sub := pool.txFeed.Subscribe(events)
 	defer sub.Unsubscribe()

@@ -1433,7 +1439,7 @@ func TestTransactionPoolStableUnderpricing(t *testing.T) {
 	defer pool.Stop()

 	// Keep track of transaction events to ensure all executables get announced
-	events := make(chan TxPreEvent, 32)
+	events := make(chan NewTxsEvent, 32)
 	sub := pool.txFeed.Subscribe(events)
 	defer sub.Unsubscribe()

@@ -1495,7 +1501,7 @@ func TestTransactionReplacement(t *testing.T) {
 	defer pool.Stop()

 	// Keep track of transaction events to ensure all executables get announced
-	events := make(chan TxPreEvent, 32)
+	events := make(chan NewTxsEvent, 32)
 	sub := pool.txFeed.Subscribe(events)
 	defer sub.Unsubscribe()
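Two changes repeat through the transaction-pool test hunks above: reads of the flat pool.all map via len() give way to a Count() accessor, and the per-transaction TxPreEvent channel type is replaced by the batched NewTxsEvent. A minimal sketch of why an accessor beats len() on a raw map, assuming a mutex-guarded lookup wrapper (txLookup here is illustrative, not geth's actual type):

package main

import (
	"fmt"
	"sync"
)

// txLookup guards a hash->tx map so it can be read concurrently
// without holding the pool's main lock.
type txLookup struct {
	lock sync.RWMutex
	all  map[string]string // hypothetical: tx hash -> tx payload
}

func newTxLookup() *txLookup {
	return &txLookup{all: make(map[string]string)}
}

// Add inserts a transaction under its hash.
func (t *txLookup) Add(hash, tx string) {
	t.lock.Lock()
	defer t.lock.Unlock()
	t.all[hash] = tx
}

// Count returns the current number of tracked transactions; callers
// never touch the underlying map, so len(pool.all) stops compiling
// once the field becomes this type, which is what the test edits track.
func (t *txLookup) Count() int {
	t.lock.RLock()
	defer t.lock.RUnlock()
	return len(t.all)
}

func main() {
	pool := newTxLookup()
	pool.Add("0xaa", "tx1")
	pool.Add("0xbb", "tx2")
	fmt.Println(pool.Count()) // 2
}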
@@ -12,10 +12,11 @@ import (

 var _ = (*receiptMarshaling)(nil)

+// MarshalJSON marshals as JSON.
 func (r Receipt) MarshalJSON() ([]byte, error) {
 	type Receipt struct {
 		PostState         hexutil.Bytes  `json:"root"`
-		Status            hexutil.Uint   `json:"status"`
+		Status            hexutil.Uint64 `json:"status"`
 		CumulativeGasUsed hexutil.Uint64 `json:"cumulativeGasUsed" gencodec:"required"`
 		Bloom             Bloom          `json:"logsBloom" gencodec:"required"`
 		Logs              []*Log         `json:"logs" gencodec:"required"`
@@ -25,7 +26,7 @@ func (r Receipt) MarshalJSON() ([]byte, error) {
 	}
 	var enc Receipt
 	enc.PostState = r.PostState
-	enc.Status = hexutil.Uint(r.Status)
+	enc.Status = hexutil.Uint64(r.Status)
 	enc.CumulativeGasUsed = hexutil.Uint64(r.CumulativeGasUsed)
 	enc.Bloom = r.Bloom
 	enc.Logs = r.Logs
@@ -35,10 +36,11 @@ func (r Receipt) MarshalJSON() ([]byte, error) {
 	return json.Marshal(&enc)
 }

+// UnmarshalJSON unmarshals from JSON.
 func (r *Receipt) UnmarshalJSON(input []byte) error {
 	type Receipt struct {
 		PostState         *hexutil.Bytes  `json:"root"`
-		Status            *hexutil.Uint   `json:"status"`
+		Status            *hexutil.Uint64 `json:"status"`
 		CumulativeGasUsed *hexutil.Uint64 `json:"cumulativeGasUsed" gencodec:"required"`
 		Bloom             *Bloom          `json:"logsBloom" gencodec:"required"`
 		Logs              []*Log          `json:"logs" gencodec:"required"`
@@ -54,7 +56,7 @@ func (r *Receipt) UnmarshalJSON(input []byte) error {
 		r.PostState = *dec.PostState
 	}
 	if dec.Status != nil {
-		r.Status = uint(*dec.Status)
+		r.Status = uint64(*dec.Status)
 	}
 	if dec.CumulativeGasUsed == nil {
 		return errors.New("missing required field 'cumulativeGasUsed' for Receipt")
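In the generated receipt JSON codec above, the status field widens from hexutil.Uint to hexutil.Uint64 in both directions of the round trip. A toy sketch of the encoding behaviour being relied on; this stand-in HexUint64 only mimics how a 64-bit quantity marshals as a "0x…" string and is not the hexutil package:

package main

import (
	"encoding/json"
	"fmt"
	"strconv"
)

// HexUint64 marshals as a hex quantity string, e.g. 1 -> "0x1".
type HexUint64 uint64

func (u HexUint64) MarshalJSON() ([]byte, error) {
	return []byte(strconv.Quote("0x" + strconv.FormatUint(uint64(u), 16))), nil
}

type receipt struct {
	Status HexUint64 `json:"status"`
}

func main() {
	out, err := json.Marshal(receipt{Status: 1})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // {"status":"0x1"}
}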
@@ -36,17 +36,17 @@ var (

 const (
 	// ReceiptStatusFailed is the status code of a transaction if execution failed.
-	ReceiptStatusFailed = uint(0)
+	ReceiptStatusFailed = uint64(0)

 	// ReceiptStatusSuccessful is the status code of a transaction if execution succeeded.
-	ReceiptStatusSuccessful = uint(1)
+	ReceiptStatusSuccessful = uint64(1)
 )

 // Receipt represents the results of a transaction.
 type Receipt struct {
 	// Consensus fields
 	PostState         []byte `json:"root"`
-	Status            uint   `json:"status"`
+	Status            uint64 `json:"status"`
 	CumulativeGasUsed uint64 `json:"cumulativeGasUsed" gencodec:"required"`
 	Bloom             Bloom  `json:"logsBloom" gencodec:"required"`
 	Logs              []*Log `json:"logs" gencodec:"required"`
@@ -59,7 +59,7 @@ type Receipt struct {

 type receiptMarshaling struct {
 	PostState         hexutil.Bytes
-	Status            hexutil.Uint
+	Status            hexutil.Uint64
 	CumulativeGasUsed hexutil.Uint64
 	GasUsed           hexutil.Uint64
 }
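The hunks above widen the receipt status constants and field from uint to uint64, keeping the in-memory type in step with the hexutil.Uint64 JSON form. A short illustrative use of the two constants; the helper and main function are invented for the example:

package main

import "fmt"

const (
	// ReceiptStatusFailed is the status code of a transaction if execution failed.
	ReceiptStatusFailed = uint64(0)

	// ReceiptStatusSuccessful is the status code of a transaction if execution succeeded.
	ReceiptStatusSuccessful = uint64(1)
)

// succeeded reports whether a receipt status signals successful execution.
func succeeded(status uint64) bool {
	return status == ReceiptStatusSuccessful
}

func main() {
	fmt.Println(succeeded(ReceiptStatusSuccessful)) // true
	fmt.Println(succeeded(ReceiptStatusFailed))     // false
}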
@@ -35,15 +35,6 @@ var (
 	ErrInvalidSig = errors.New("invalid transaction v, r, s values")
 )

-// deriveSigner makes a *best* guess about which signer to use.
-func deriveSigner(V *big.Int) Signer {
-	if V.Sign() != 0 && isProtectedV(V) {
-		return NewEIP155Signer(deriveChainId(V))
-	} else {
-		return HomesteadSigner{}
-	}
-}
-
 type Transaction struct {
 	data txdata
 	// caches
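The hunk above deletes deriveSigner, which guessed a signer from a transaction's V value alone. For context, a sketch of the heuristic it relied on: under EIP-155 the chain ID is folded into V as V = chainID*2 + 35 or 36, so any V other than 27/28 marks a protected transaction and pins down the chain. The helpers below are illustrative reconstructions, not geth's exported API:

package main

import (
	"fmt"
	"math/big"
)

// isProtectedV reports whether V carries an EIP-155 chain ID.
func isProtectedV(v *big.Int) bool {
	if v.BitLen() <= 8 {
		u := v.Uint64()
		return u != 27 && u != 28
	}
	// anything wider than 8 bits is treated as protected
	return true
}

// deriveChainID recovers the chain ID from a protected V: (V - 35) / 2.
func deriveChainID(v *big.Int) *big.Int {
	x := new(big.Int).Sub(v, big.NewInt(35))
	return x.Div(x, big.NewInt(2))
}

func main() {
	v := big.NewInt(37) // mainnet: 1*2 + 35
	fmt.Println(isProtectedV(v), deriveChainID(v)) // true 1
}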
@@ -43,7 +43,7 @@ func MakeSigner(config *params.ChainConfig, blockNumber *big.Int) Signer {
 	var signer Signer
 	switch {
 	case config.IsEIP155(blockNumber):
-		signer = NewEIP155Signer(config.ChainId)
+		signer = NewEIP155Signer(config.ChainID)
 	case config.IsHomestead(blockNumber):
 		signer = HomesteadSigner{}
 	default:
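The hunk above only renames config.ChainId to config.ChainID, but the surrounding switch is the standard fork-selection pattern: probe activation blocks from newest to oldest and take the first match. A self-contained sketch of that pattern with stand-in types, not the params.ChainConfig API:

package main

import (
	"fmt"
	"math/big"
)

type chainConfig struct {
	HomesteadBlock *big.Int
	EIP155Block    *big.Int
}

// isForked reports whether a fork is active at the given block number.
func isForked(fork, number *big.Int) bool {
	return fork != nil && number != nil && fork.Cmp(number) <= 0
}

// signerName mirrors the newest-first switch in MakeSigner.
func signerName(c *chainConfig, number *big.Int) string {
	switch {
	case isForked(c.EIP155Block, number):
		return "EIP155Signer"
	case isForked(c.HomesteadBlock, number):
		return "HomesteadSigner"
	default:
		return "FrontierSigner"
	}
}

func main() {
	c := &chainConfig{HomesteadBlock: big.NewInt(1150000), EIP155Block: big.NewInt(2675000)}
	fmt.Println(signerName(c, big.NewInt(3000000))) // EIP155Signer
	fmt.Println(signerName(c, big.NewInt(2000000))) // HomesteadSigner
}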
@@ -165,28 +165,13 @@ func TestTransactionPriceNonceSort(t *testing.T) {
 				t.Errorf("invalid nonce ordering: tx #%d (A=%x N=%v) < tx #%d (A=%x N=%v)", i, fromi[:4], txi.Nonce(), i+j, fromj[:4], txj.Nonce())
 			}
 		}
-		// Find the previous and next nonce of this account
-		prev, next := i-1, i+1
-		for j := i - 1; j >= 0; j-- {
-			if fromj, _ := Sender(signer, txs[j]); fromi == fromj {
-				prev = j
-				break
-			}
-		}
-		for j := i + 1; j < len(txs); j++ {
-			if fromj, _ := Sender(signer, txs[j]); fromi == fromj {
-				next = j
-				break
-			}
-		}
-		// Make sure that in between the neighbor nonces, the transaction is correctly positioned price wise
-		for j := prev + 1; j < next; j++ {
-			fromj, _ := Sender(signer, txs[j])
-			if j < i && txs[j].GasPrice().Cmp(txi.GasPrice()) < 0 {
-				t.Errorf("invalid gasprice ordering: tx #%d (A=%x P=%v) < tx #%d (A=%x P=%v)", j, fromj[:4], txs[j].GasPrice(), i, fromi[:4], txi.GasPrice())
-			}
-			if j > i && txs[j].GasPrice().Cmp(txi.GasPrice()) > 0 {
-				t.Errorf("invalid gasprice ordering: tx #%d (A=%x P=%v) > tx #%d (A=%x P=%v)", j, fromj[:4], txs[j].GasPrice(), i, fromi[:4], txi.GasPrice())
+
+		// If the next tx has different from account, the price must be lower than the current one
+		if i+1 < len(txs) {
+			next := txs[i+1]
+			fromNext, _ := Sender(signer, next)
+			if fromi != fromNext && txi.GasPrice().Cmp(next.GasPrice()) < 0 {
+				t.Errorf("invalid gasprice ordering: tx #%d (A=%x P=%v) < tx #%d (A=%x P=%v)", i, fromi[:4], txi.GasPrice(), i+1, fromNext[:4], next.GasPrice())
 			}
 		}
 	}
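The rewritten assertion above relaxes the sort invariant: nonce order still binds within one sender, but price ordering is only required between adjacent transactions from different senders, since a later nonce of a cheap account may legitimately sit between pricier entries. A compact checker expressing exactly that invariant, using toy types rather than the test's real ones:

package main

import "fmt"

type tx struct {
	from  string
	nonce uint64
	price int64
}

// validOrder checks the price-and-nonce invariant on a flattened list:
// per-sender nonces ascend, and across a sender boundary the price
// must not increase.
func validOrder(txs []tx) bool {
	for i := 0; i+1 < len(txs); i++ {
		cur, next := txs[i], txs[i+1]
		if cur.from == next.from && cur.nonce > next.nonce {
			return false // nonce ordering broken within an account
		}
		if cur.from != next.from && cur.price < next.price {
			return false // price ordering broken across accounts
		}
	}
	return true
}

func main() {
	fmt.Println(validOrder([]tx{
		{"A", 0, 100}, {"B", 0, 90}, {"B", 1, 120}, {"C", 0, 80},
	})) // true: B's nonce-1 tx may out-price C's head
}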
@@ -18,6 +18,7 @@ package vm

 import "errors"

+// List execution errors
 var (
 	ErrOutOfGas          = errors.New("out of gas")
 	ErrCodeStoreOutOfGas = errors.New("contract creation code storage out of gas")
@@ -31,8 +31,10 @@ import (
 var emptyCodeHash = crypto.Keccak256Hash(nil)

 type (
+	// CanTransferFunc is the signature of a transfer guard function
 	CanTransferFunc func(StateDB, common.Address, *big.Int) bool
-	TransferFunc    func(StateDB, common.Address, common.Address, *big.Int)
+	// TransferFunc is the signature of a transfer function
+	TransferFunc func(StateDB, common.Address, common.Address, *big.Int)
 	// GetHashFunc returns the nth block hash in the blockchain
 	// and is used by the BLOCKHASH EVM op code.
 	GetHashFunc func(uint64) common.Hash
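The type block above documents CanTransferFunc and TransferFunc, the callbacks through which the EVM delegates balance checks and value moves to the surrounding state layer. A minimal sketch of that injection pattern with simplified stand-in types; stateDB below is a toy, not vm.StateDB:

package main

import (
	"fmt"
	"math/big"
)

type stateDB map[string]*big.Int

type (
	// canTransferFunc is the signature of a transfer guard function.
	canTransferFunc func(db stateDB, from string, amount *big.Int) bool
	// transferFunc is the signature of a transfer function.
	transferFunc func(db stateDB, from, to string, amount *big.Int)
)

// context carries the injected behaviour, analogous to the EVM's Context.
type context struct {
	CanTransfer canTransferFunc
	Transfer    transferFunc
}

func main() {
	db := stateDB{"alice": big.NewInt(10), "bob": big.NewInt(0)}
	ctx := context{
		CanTransfer: func(db stateDB, from string, amount *big.Int) bool {
			return db[from] != nil && db[from].Cmp(amount) >= 0
		},
		Transfer: func(db stateDB, from, to string, amount *big.Int) {
			db[from].Sub(db[from], amount)
			db[to].Add(db[to], amount)
		},
	}
	amount := big.NewInt(7)
	if ctx.CanTransfer(db, "alice", amount) {
		ctx.Transfer(db, "alice", "bob", amount)
	}
	fmt.Println(db["alice"], db["bob"]) // 3 7
}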
Some files were not shown because too many files have changed in this diff.