Compare commits: v1.6.2 ... release/1.

126 commits (SHA1):
ab5646c532, 3b25012481, bd381be9c8, 225de7ca0a, bd01cd7183, 0958770625, 4f7a38001f, 65f0e905dd, 4b8860a7b4, 34ec9913f6,
f25486c3fb, 88b4fe7d21, 5e38f7a664, 4c1d0b164b, 48ee7f9de7, fe13949d9d, 138f26c93a, 8f12d76a47, be8f8409bc, a633a2d7ea,
67aff49822, a0aa071ca6, c7041fe145, 41318f3776, 65b96dc4b9, 8bbd598ef4, ae11545bc5, 0550957989, dfd076244d, 6dc32e897a,
e4301564c2, bae7565231, 9e5f03b6c4, bb366271fe, 4a741df757, 5421a08d2f, cf611c50b9, c008176f9f, f3359d5e58, feb2932706,
ea1d1825a8, f321ed23fb, 413dc1d265, fdf2184b1e, 3c7338d6c8, ef8d4711d5, caa00b7e6d, 5603eb9116, 78c04c920d, 10a45cb59b,
cd88f69715, 46d0d04f97, 514659a023, 01c9cf1cb5, b751cf3901, b6e99deee9, d40179f882, beb708e6d7, f2c5b2cc1c, a4277450b2,
b664bedcf2, eebde1a2e2, b0b3cf2eeb, c98d9b49bf, 0042f13d47, d432688886, 58a1e13e6d, a1f3878ec5, a20a02ce0b, 9a44e1035e,
c62d5422bb, 9012863ad7, a5d08c893d, a4e4c76cb3, 7a11e86442, 4a1d516d78, b6b0e00198, 3d66ba56ef, 767dc6c73d, 36e7963467,
98e101ef8e, 50c18e6eb8, 60e27b51bc, 693d9ccbfb, 5c53a5be66, 431cf2a1e4, 4f77857f74, fade09a7ff, b58a501673, db6e695002,
335abdceb1, 732273094c, b8793edd83, eb92522278, 061889d4ea, e3dfd55820, 2fefe4baa0, ac9865791a, 80f7c6c299, bc24b7a912,
1496b3aff6, 1e9f86b49e, 65ea913e29, 9a0e433b13, 04d2de9119, 3285a0fda3, 6171d01b11, cf87713dd4, ac92d7c411, d5a79934dc,
0424192e61, 9c2882b2e5, 1a0eb903f1, 0036e2a747, 727eadacca, 99cba96f26, f272879e5a, 72dd51e25a, 799a469000, f4d81178d8,
310d2e7ef4, 3ecde4e2aa, a355b401db, cba33029a8, 9702badd83, 067dc2cbf5
@@ -1,3 +1,6 @@
.git
**/.git
**/*_test.go

build/_workspace
build/_bin
tests/testdata
.gitmodules (new file, 3 lines, vendored)

@@ -0,0 +1,3 @@
[submodule "tests"]
path = tests/testdata
url = https://github.com/ethereum/tests
@@ -53,6 +53,7 @@ matrix:
- debhelper
- dput
- gcc-multilib
- fakeroot
script:
# Build for the primary platforms that Trusty can manage
- go run build/ci.go debsrc -signer "Go Ethereum Linux Builder <geth-ci@ethereum.org>" -upload ppa:ethereum/ethereum
Makefile (7 changed lines)

@@ -2,7 +2,7 @@
# with Go source code. If you know what GOPATH is then you probably
# don't need to bother with make.

.PHONY: geth android ios geth-cross evm all test clean
.PHONY: geth android ios geth-cross swarm evm all test clean
.PHONY: geth-linux geth-linux-386 geth-linux-amd64 geth-linux-mips64 geth-linux-mips64le
.PHONY: geth-linux-arm geth-linux-arm-5 geth-linux-arm-6 geth-linux-arm-7 geth-linux-arm64
.PHONY: geth-darwin geth-darwin-386 geth-darwin-amd64
@@ -16,6 +16,11 @@ geth:
@echo "Done building."
@echo "Run \"$(GOBIN)/geth\" to launch geth."

swarm:
build/env.sh go run build/ci.go install ./cmd/swarm
@echo "Done building."
@echo "Run \"$(GOBIN)/swarm\" to launch swarm."

evm:
build/env.sh go run build/ci.go install ./cmd/evm
@echo "Done building."
README.md (20 changed lines)

@@ -1,4 +1,4 @@
## Ethereum Go
## Go Ethereum

Official golang implementation of the Ethereum protocol.

@@ -32,7 +32,7 @@ The go-ethereum project comes with several wrappers/executables found in the `cm

| Command | Description |
|:----------:|-------------|
| **`geth`** | Our main Ethereum CLI client. It is the entry point into the Ethereum network (main-, test- or private net), capable of running as a full node (default) archive node (retaining all historical state) or a light node (retrieving data live). It can be used by other processes as a gateway into the Ethereum network via JSON RPC endpoints exposed on top of HTTP, WebSocket and/or IPC transports. `geth --help` and the [CLI Wiki page](https://github.com/ethereum/go-ethereum/wiki/Command-Line-Options) for command line options |
| **`geth`** | Our main Ethereum CLI client. It is the entry point into the Ethereum network (main-, test- or private net), capable of running as a full node (default) archive node (retaining all historical state) or a light node (retrieving data live). It can be used by other processes as a gateway into the Ethereum network via JSON RPC endpoints exposed on top of HTTP, WebSocket and/or IPC transports. `geth --help` and the [CLI Wiki page](https://github.com/ethereum/go-ethereum/wiki/Command-Line-Options) for command line options. |
| `abigen` | Source code generator to convert Ethereum contract definitions into easy to use, compile-time type-safe Go packages. It operates on plain [Ethereum contract ABIs](https://github.com/ethereum/wiki/wiki/Ethereum-Contract-ABI) with expanded functionality if the contract bytecode is also available. However it also accepts Solidity source files, making development much more streamlined. Please see our [Native DApps](https://github.com/ethereum/go-ethereum/wiki/Native-DApps:-Go-bindings-to-Ethereum-contracts) wiki page for details. |
| `bootnode` | Stripped down version of our Ethereum client implementation that only takes part in the network node discovery protocol, but does not run any of the higher level application protocols. It can be used as a lightweight bootstrap node to aid in finding peers in private networks. |
| `disasm` | Bytecode disassembler to convert EVM (Ethereum Virtual Machine) bytecode into more user friendly assembly-like opcodes (e.g. `echo "6001" | disasm`). For details on the individual opcodes, please see pages 22-30 of the [Ethereum Yellow Paper](http://gavwood.com/paper.pdf). |
@@ -102,6 +102,22 @@ over between the main network and test network, you should make sure to always u
for play-money and real-money. Unless you manually move accounts, Geth will by default correctly
separate the two networks and will not make any accounts available between them.*

### Configuration

As an alternative to passing the numerous flags to the `geth` binary, you can also pass a configuration file via:

```
$ geth --config /path/to/your_config.toml
```

To get an idea how the file should look like you can use the `dumpconfig` subcommand to export your existing configuration:

```
$ geth --your-favourite-flags dumpconfig
```

*Note: This works only with geth v1.6.0 and above*

#### Docker quick start

One of the quickest ways to get Ethereum up and running on your machine is by using Docker:

@@ -17,11 +17,9 @@
package abi

import (
"encoding/binary"
"encoding/json"
"fmt"
"io"
"math/big"
"reflect"
"strings"

@@ -67,7 +65,7 @@ func (abi ABI) Pack(name string, args ...interface{}) ([]byte, error) {
}
method = m
}
arguments, err := method.pack(method, args...)
arguments, err := method.pack(args...)
if err != nil {
return nil, err
}
@@ -78,199 +76,6 @@ func (abi ABI) Pack(name string, args ...interface{}) ([]byte, error) {
return append(method.Id(), arguments...), nil
}

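The `Pack` change above only drops the redundant `method` argument from the internal `method.pack` call; the public API is unchanged. As a reminder of the caller side, here is a minimal sketch of driving `ABI.Pack` from outside the package. The ABI definition, method name, and argument values are illustrative only, and the import paths assume this repository is available on the GOPATH.

```go
package main

import (
	"fmt"
	"math/big"
	"strings"

	"github.com/ethereum/go-ethereum/accounts/abi"
	"github.com/ethereum/go-ethereum/common"
)

func main() {
	// Hypothetical one-method ABI definition, just to exercise Pack.
	const def = `[{"type":"function","name":"transfer","inputs":[
		{"name":"to","type":"address"},{"name":"amount","type":"uint256"}]}]`

	parsed, err := abi.JSON(strings.NewReader(def))
	if err != nil {
		panic(err)
	}

	// Pack looks the method up by name and now calls method.pack(args...),
	// returning the 4-byte method ID followed by the ABI-encoded arguments.
	calldata, err := parsed.Pack("transfer", common.Address{1}, big.NewInt(1))
	if err != nil {
		panic(err)
	}
	fmt.Printf("%x\n", calldata)
}
```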
// toGoSliceType parses the input and casts it to the proper slice defined by the ABI
|
||||
// argument in T.
|
||||
func toGoSlice(i int, t Argument, output []byte) (interface{}, error) {
|
||||
index := i * 32
|
||||
// The slice must, at very least be large enough for the index+32 which is exactly the size required
|
||||
// for the [offset in output, size of offset].
|
||||
if index+32 > len(output) {
|
||||
return nil, fmt.Errorf("abi: cannot marshal in to go slice: insufficient size output %d require %d", len(output), index+32)
|
||||
}
|
||||
elem := t.Type.Elem
|
||||
|
||||
// first we need to create a slice of the type
|
||||
var refSlice reflect.Value
|
||||
switch elem.T {
|
||||
case IntTy, UintTy, BoolTy:
|
||||
// create a new reference slice matching the element type
|
||||
switch t.Type.Kind {
|
||||
case reflect.Bool:
|
||||
refSlice = reflect.ValueOf([]bool(nil))
|
||||
case reflect.Uint8:
|
||||
refSlice = reflect.ValueOf([]uint8(nil))
|
||||
case reflect.Uint16:
|
||||
refSlice = reflect.ValueOf([]uint16(nil))
|
||||
case reflect.Uint32:
|
||||
refSlice = reflect.ValueOf([]uint32(nil))
|
||||
case reflect.Uint64:
|
||||
refSlice = reflect.ValueOf([]uint64(nil))
|
||||
case reflect.Int8:
|
||||
refSlice = reflect.ValueOf([]int8(nil))
|
||||
case reflect.Int16:
|
||||
refSlice = reflect.ValueOf([]int16(nil))
|
||||
case reflect.Int32:
|
||||
refSlice = reflect.ValueOf([]int32(nil))
|
||||
case reflect.Int64:
|
||||
refSlice = reflect.ValueOf([]int64(nil))
|
||||
default:
|
||||
refSlice = reflect.ValueOf([]*big.Int(nil))
|
||||
}
|
||||
case AddressTy: // address must be of slice Address
|
||||
refSlice = reflect.ValueOf([]common.Address(nil))
|
||||
case HashTy: // hash must be of slice hash
|
||||
refSlice = reflect.ValueOf([]common.Hash(nil))
|
||||
case FixedBytesTy:
|
||||
refSlice = reflect.ValueOf([][]byte(nil))
|
||||
default: // no other types are supported
|
||||
return nil, fmt.Errorf("abi: unsupported slice type %v", elem.T)
|
||||
}
|
||||
|
||||
var slice []byte
|
||||
var size int
|
||||
var offset int
|
||||
if t.Type.IsSlice {
|
||||
// get the offset which determines the start of this array ...
|
||||
offset = int(binary.BigEndian.Uint64(output[index+24 : index+32]))
|
||||
if offset+32 > len(output) {
|
||||
return nil, fmt.Errorf("abi: cannot marshal in to go slice: offset %d would go over slice boundary (len=%d)", len(output), offset+32)
|
||||
}
|
||||
|
||||
slice = output[offset:]
|
||||
// ... starting with the size of the array in elements ...
|
||||
size = int(binary.BigEndian.Uint64(slice[24:32]))
|
||||
slice = slice[32:]
|
||||
// ... and make sure that we've at the very least the amount of bytes
|
||||
// available in the buffer.
|
||||
if size*32 > len(slice) {
|
||||
return nil, fmt.Errorf("abi: cannot marshal in to go slice: insufficient size output %d require %d", len(output), offset+32+size*32)
|
||||
}
|
||||
|
||||
// reslice to match the required size
|
||||
slice = slice[:size*32]
|
||||
} else if t.Type.IsArray {
|
||||
//get the number of elements in the array
|
||||
size = t.Type.SliceSize
|
||||
|
||||
//check to make sure array size matches up
|
||||
if index+32*size > len(output) {
|
||||
return nil, fmt.Errorf("abi: cannot marshal in to go array: offset %d would go over slice boundary (len=%d)", len(output), index+32*size)
|
||||
}
|
||||
//slice is there for a fixed amount of times
|
||||
slice = output[index : index+size*32]
|
||||
}
|
||||
|
||||
for i := 0; i < size; i++ {
|
||||
var (
|
||||
inter interface{} // interface type
|
||||
returnOutput = slice[i*32 : i*32+32] // the return output
|
||||
)
|
||||
// set inter to the correct type (cast)
|
||||
switch elem.T {
|
||||
case IntTy, UintTy:
|
||||
inter = readInteger(t.Type.Kind, returnOutput)
|
||||
case BoolTy:
|
||||
inter = !allZero(returnOutput)
|
||||
case AddressTy:
|
||||
inter = common.BytesToAddress(returnOutput)
|
||||
case HashTy:
|
||||
inter = common.BytesToHash(returnOutput)
|
||||
case FixedBytesTy:
|
||||
inter = returnOutput
|
||||
}
|
||||
// append the item to our reflect slice
|
||||
refSlice = reflect.Append(refSlice, reflect.ValueOf(inter))
|
||||
}
|
||||
|
||||
// return the interface
|
||||
return refSlice.Interface(), nil
|
||||
}
|
||||
|
||||
func readInteger(kind reflect.Kind, b []byte) interface{} {
|
||||
switch kind {
|
||||
case reflect.Uint8:
|
||||
return uint8(b[len(b)-1])
|
||||
case reflect.Uint16:
|
||||
return binary.BigEndian.Uint16(b[len(b)-2:])
|
||||
case reflect.Uint32:
|
||||
return binary.BigEndian.Uint32(b[len(b)-4:])
|
||||
case reflect.Uint64:
|
||||
return binary.BigEndian.Uint64(b[len(b)-8:])
|
||||
case reflect.Int8:
|
||||
return int8(b[len(b)-1])
|
||||
case reflect.Int16:
|
||||
return int16(binary.BigEndian.Uint16(b[len(b)-2:]))
|
||||
case reflect.Int32:
|
||||
return int32(binary.BigEndian.Uint32(b[len(b)-4:]))
|
||||
case reflect.Int64:
|
||||
return int64(binary.BigEndian.Uint64(b[len(b)-8:]))
|
||||
default:
|
||||
return new(big.Int).SetBytes(b)
|
||||
}
|
||||
}
|
||||
|
||||
func allZero(b []byte) bool {
|
||||
for _, byte := range b {
|
||||
if byte != 0 {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// toGoType parses the input and casts it to the proper type defined by the ABI
|
||||
// argument in T.
|
||||
func toGoType(i int, t Argument, output []byte) (interface{}, error) {
|
||||
// we need to treat slices differently
|
||||
if (t.Type.IsSlice || t.Type.IsArray) && t.Type.T != BytesTy && t.Type.T != StringTy && t.Type.T != FixedBytesTy && t.Type.T != FunctionTy {
|
||||
return toGoSlice(i, t, output)
|
||||
}
|
||||
|
||||
index := i * 32
|
||||
if index+32 > len(output) {
|
||||
return nil, fmt.Errorf("abi: cannot marshal in to go type: length insufficient %d require %d", len(output), index+32)
|
||||
}
|
||||
|
||||
// Parse the given index output and check whether we need to read
|
||||
// a different offset and length based on the type (i.e. string, bytes)
|
||||
var returnOutput []byte
|
||||
switch t.Type.T {
|
||||
case StringTy, BytesTy: // variable arrays are written at the end of the return bytes
|
||||
// parse offset from which we should start reading
|
||||
offset := int(binary.BigEndian.Uint64(output[index+24 : index+32]))
|
||||
if offset+32 > len(output) {
|
||||
return nil, fmt.Errorf("abi: cannot marshal in to go type: length insufficient %d require %d", len(output), offset+32)
|
||||
}
|
||||
// parse the size up until we should be reading
|
||||
size := int(binary.BigEndian.Uint64(output[offset+24 : offset+32]))
|
||||
if offset+32+size > len(output) {
|
||||
return nil, fmt.Errorf("abi: cannot marshal in to go type: length insufficient %d require %d", len(output), offset+32+size)
|
||||
}
|
||||
|
||||
// get the bytes for this return value
|
||||
returnOutput = output[offset+32 : offset+32+size]
|
||||
default:
|
||||
returnOutput = output[index : index+32]
|
||||
}
|
||||
|
||||
// convert the bytes to whatever is specified by the ABI.
|
||||
switch t.Type.T {
|
||||
case IntTy, UintTy:
|
||||
return readInteger(t.Type.Kind, returnOutput), nil
|
||||
case BoolTy:
|
||||
return !allZero(returnOutput), nil
|
||||
case AddressTy:
|
||||
return common.BytesToAddress(returnOutput), nil
|
||||
case HashTy:
|
||||
return common.BytesToHash(returnOutput), nil
|
||||
case BytesTy, FixedBytesTy, FunctionTy:
|
||||
return returnOutput, nil
|
||||
case StringTy:
|
||||
return string(returnOutput), nil
|
||||
}
|
||||
return nil, fmt.Errorf("abi: unknown type %v", t.Type.T)
|
||||
}
|
||||
|
||||
// these variable are used to determine certain types during type assertion for
|
||||
// assignment.
|
||||
var (
|
||||
|
@ -48,412 +48,6 @@ func pad(input []byte, size int, left bool) []byte {
|
||||
return common.RightPadBytes(input, size)
|
||||
}
|
||||
|
||||
func TestTypeCheck(t *testing.T) {
|
||||
for i, test := range []struct {
|
||||
typ string
|
||||
input interface{}
|
||||
err string
|
||||
}{
|
||||
{"uint", big.NewInt(1), ""},
|
||||
{"int", big.NewInt(1), ""},
|
||||
{"uint30", big.NewInt(1), ""},
|
||||
{"uint30", uint8(1), "abi: cannot use uint8 as type ptr as argument"},
|
||||
{"uint16", uint16(1), ""},
|
||||
{"uint16", uint8(1), "abi: cannot use uint8 as type uint16 as argument"},
|
||||
{"uint16[]", []uint16{1, 2, 3}, ""},
|
||||
{"uint16[]", [3]uint16{1, 2, 3}, ""},
|
||||
{"uint16[]", []uint32{1, 2, 3}, "abi: cannot use []uint32 as type []uint16 as argument"},
|
||||
{"uint16[3]", [3]uint32{1, 2, 3}, "abi: cannot use [3]uint32 as type [3]uint16 as argument"},
|
||||
{"uint16[3]", [4]uint16{1, 2, 3}, "abi: cannot use [4]uint16 as type [3]uint16 as argument"},
|
||||
{"uint16[3]", []uint16{1, 2, 3}, ""},
|
||||
{"uint16[3]", []uint16{1, 2, 3, 4}, "abi: cannot use [4]uint16 as type [3]uint16 as argument"},
|
||||
{"address[]", []common.Address{{1}}, ""},
|
||||
{"address[1]", []common.Address{{1}}, ""},
|
||||
{"address[1]", [1]common.Address{{1}}, ""},
|
||||
{"address[2]", [1]common.Address{{1}}, "abi: cannot use [1]array as type [2]array as argument"},
|
||||
{"bytes32", [32]byte{}, ""},
|
||||
{"bytes32", [33]byte{}, "abi: cannot use [33]uint8 as type [32]uint8 as argument"},
|
||||
{"bytes32", common.Hash{1}, ""},
|
||||
{"bytes31", [31]byte{}, ""},
|
||||
{"bytes31", [32]byte{}, "abi: cannot use [32]uint8 as type [31]uint8 as argument"},
|
||||
{"bytes", []byte{0, 1}, ""},
|
||||
{"bytes", [2]byte{0, 1}, ""},
|
||||
{"bytes", common.Hash{1}, ""},
|
||||
{"string", "hello world", ""},
|
||||
{"bytes32[]", [][32]byte{{}}, ""},
|
||||
{"function", [24]byte{}, ""},
|
||||
} {
|
||||
typ, err := NewType(test.typ)
|
||||
if err != nil {
|
||||
t.Fatal("unexpected parse error:", err)
|
||||
}
|
||||
|
||||
err = typeCheck(typ, reflect.ValueOf(test.input))
|
||||
if err != nil && len(test.err) == 0 {
|
||||
t.Errorf("%d failed. Expected no err but got: %v", i, err)
|
||||
continue
|
||||
}
|
||||
if err == nil && len(test.err) != 0 {
|
||||
t.Errorf("%d failed. Expected err: %v but got none", i, test.err)
|
||||
continue
|
||||
}
|
||||
|
||||
if err != nil && len(test.err) != 0 && err.Error() != test.err {
|
||||
t.Errorf("%d failed. Expected err: '%v' got err: '%v'", i, test.err, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestSimpleMethodUnpack(t *testing.T) {
|
||||
for i, test := range []struct {
|
||||
def string // definition of the **output** ABI params
|
||||
marshalledOutput []byte // evm return data
|
||||
expectedOut interface{} // the expected output
|
||||
outVar string // the output variable (e.g. uint32, *big.Int, etc)
|
||||
err string // empty or error if expected
|
||||
}{
|
||||
{
|
||||
`[ { "type": "uint32" } ]`,
|
||||
pad([]byte{1}, 32, true),
|
||||
uint32(1),
|
||||
"uint32",
|
||||
"",
|
||||
},
|
||||
{
|
||||
`[ { "type": "uint32" } ]`,
|
||||
pad([]byte{1}, 32, true),
|
||||
nil,
|
||||
"uint16",
|
||||
"abi: cannot unmarshal uint32 in to uint16",
|
||||
},
|
||||
{
|
||||
`[ { "type": "uint17" } ]`,
|
||||
pad([]byte{1}, 32, true),
|
||||
nil,
|
||||
"uint16",
|
||||
"abi: cannot unmarshal *big.Int in to uint16",
|
||||
},
|
||||
{
|
||||
`[ { "type": "uint17" } ]`,
|
||||
pad([]byte{1}, 32, true),
|
||||
big.NewInt(1),
|
||||
"*big.Int",
|
||||
"",
|
||||
},
|
||||
|
||||
{
|
||||
`[ { "type": "int32" } ]`,
|
||||
pad([]byte{1}, 32, true),
|
||||
int32(1),
|
||||
"int32",
|
||||
"",
|
||||
},
|
||||
{
|
||||
`[ { "type": "int32" } ]`,
|
||||
pad([]byte{1}, 32, true),
|
||||
nil,
|
||||
"int16",
|
||||
"abi: cannot unmarshal int32 in to int16",
|
||||
},
|
||||
{
|
||||
`[ { "type": "int17" } ]`,
|
||||
pad([]byte{1}, 32, true),
|
||||
nil,
|
||||
"int16",
|
||||
"abi: cannot unmarshal *big.Int in to int16",
|
||||
},
|
||||
{
|
||||
`[ { "type": "int17" } ]`,
|
||||
pad([]byte{1}, 32, true),
|
||||
big.NewInt(1),
|
||||
"*big.Int",
|
||||
"",
|
||||
},
|
||||
|
||||
{
|
||||
`[ { "type": "address" } ]`,
|
||||
pad(pad([]byte{1}, 20, false), 32, true),
|
||||
common.Address{1},
|
||||
"address",
|
||||
"",
|
||||
},
|
||||
{
|
||||
`[ { "type": "bytes32" } ]`,
|
||||
pad([]byte{1}, 32, false),
|
||||
pad([]byte{1}, 32, false),
|
||||
"bytes",
|
||||
"",
|
||||
},
|
||||
{
|
||||
`[ { "type": "bytes32" } ]`,
|
||||
pad([]byte{1}, 32, false),
|
||||
pad([]byte{1}, 32, false),
|
||||
"hash",
|
||||
"",
|
||||
},
|
||||
{
|
||||
`[ { "type": "bytes32" } ]`,
|
||||
pad([]byte{1}, 32, false),
|
||||
pad([]byte{1}, 32, false),
|
||||
"interface",
|
||||
"",
|
||||
},
|
||||
{
|
||||
`[ { "type": "function" } ]`,
|
||||
pad([]byte{1}, 32, false),
|
||||
[24]byte{1},
|
||||
"function",
|
||||
"",
|
||||
},
|
||||
} {
|
||||
abiDefinition := fmt.Sprintf(`[{ "name" : "method", "outputs": %s}]`, test.def)
|
||||
abi, err := JSON(strings.NewReader(abiDefinition))
|
||||
if err != nil {
|
||||
t.Errorf("%d failed. %v", i, err)
|
||||
continue
|
||||
}
|
||||
|
||||
var outvar interface{}
|
||||
switch test.outVar {
|
||||
case "uint8":
|
||||
var v uint8
|
||||
err = abi.Unpack(&v, "method", test.marshalledOutput)
|
||||
outvar = v
|
||||
case "uint16":
|
||||
var v uint16
|
||||
err = abi.Unpack(&v, "method", test.marshalledOutput)
|
||||
outvar = v
|
||||
case "uint32":
|
||||
var v uint32
|
||||
err = abi.Unpack(&v, "method", test.marshalledOutput)
|
||||
outvar = v
|
||||
case "uint64":
|
||||
var v uint64
|
||||
err = abi.Unpack(&v, "method", test.marshalledOutput)
|
||||
outvar = v
|
||||
case "int8":
|
||||
var v int8
|
||||
err = abi.Unpack(&v, "method", test.marshalledOutput)
|
||||
outvar = v
|
||||
case "int16":
|
||||
var v int16
|
||||
err = abi.Unpack(&v, "method", test.marshalledOutput)
|
||||
outvar = v
|
||||
case "int32":
|
||||
var v int32
|
||||
err = abi.Unpack(&v, "method", test.marshalledOutput)
|
||||
outvar = v
|
||||
case "int64":
|
||||
var v int64
|
||||
err = abi.Unpack(&v, "method", test.marshalledOutput)
|
||||
outvar = v
|
||||
case "*big.Int":
|
||||
var v *big.Int
|
||||
err = abi.Unpack(&v, "method", test.marshalledOutput)
|
||||
outvar = v
|
||||
case "address":
|
||||
var v common.Address
|
||||
err = abi.Unpack(&v, "method", test.marshalledOutput)
|
||||
outvar = v
|
||||
case "bytes":
|
||||
var v []byte
|
||||
err = abi.Unpack(&v, "method", test.marshalledOutput)
|
||||
outvar = v
|
||||
case "hash":
|
||||
var v common.Hash
|
||||
err = abi.Unpack(&v, "method", test.marshalledOutput)
|
||||
outvar = v
|
||||
case "function":
|
||||
var v [24]byte
|
||||
err = abi.Unpack(&v, "method", test.marshalledOutput)
|
||||
outvar = v
|
||||
case "interface":
|
||||
err = abi.Unpack(&outvar, "method", test.marshalledOutput)
|
||||
default:
|
||||
t.Errorf("unsupported type '%v' please add it to the switch statement in this test", test.outVar)
|
||||
continue
|
||||
}
|
||||
|
||||
if err != nil && len(test.err) == 0 {
|
||||
t.Errorf("%d failed. Expected no err but got: %v", i, err)
|
||||
continue
|
||||
}
|
||||
if err == nil && len(test.err) != 0 {
|
||||
t.Errorf("%d failed. Expected err: %v but got none", i, test.err)
|
||||
continue
|
||||
}
|
||||
if err != nil && len(test.err) != 0 && err.Error() != test.err {
|
||||
t.Errorf("%d failed. Expected err: '%v' got err: '%v'", i, test.err, err)
|
||||
continue
|
||||
}
|
||||
|
||||
if err == nil {
|
||||
// bit of an ugly hack for hash type but I don't feel like finding a proper solution
|
||||
if test.outVar == "hash" {
|
||||
tmp := outvar.(common.Hash) // without assignment it's unaddressable
|
||||
outvar = tmp[:]
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(test.expectedOut, outvar) {
|
||||
t.Errorf("%d failed. Output error: expected %v, got %v", i, test.expectedOut, outvar)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestUnpackSetInterfaceSlice(t *testing.T) {
|
||||
var (
|
||||
var1 = new(uint8)
|
||||
var2 = new(uint8)
|
||||
)
|
||||
out := []interface{}{var1, var2}
|
||||
abi, err := JSON(strings.NewReader(`[{"type":"function", "name":"ints", "outputs":[{"type":"uint8"}, {"type":"uint8"}]}]`))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
marshalledReturn := append(pad([]byte{1}, 32, true), pad([]byte{2}, 32, true)...)
|
||||
err = abi.Unpack(&out, "ints", marshalledReturn)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if *var1 != 1 {
|
||||
t.Error("expected var1 to be 1, got", *var1)
|
||||
}
|
||||
if *var2 != 2 {
|
||||
t.Error("expected var2 to be 2, got", *var2)
|
||||
}
|
||||
|
||||
out = []interface{}{var1}
|
||||
err = abi.Unpack(&out, "ints", marshalledReturn)
|
||||
|
||||
expErr := "abi: cannot marshal in to slices of unequal size (require: 2, got: 1)"
|
||||
if err == nil || err.Error() != expErr {
|
||||
t.Error("expected err:", expErr, "Got:", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestUnpackSetInterfaceArrayOutput(t *testing.T) {
|
||||
var (
|
||||
var1 = new([1]uint32)
|
||||
var2 = new([1]uint32)
|
||||
)
|
||||
out := []interface{}{var1, var2}
|
||||
abi, err := JSON(strings.NewReader(`[{"type":"function", "name":"ints", "outputs":[{"type":"uint32[1]"}, {"type":"uint32[1]"}]}]`))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
marshalledReturn := append(pad([]byte{1}, 32, true), pad([]byte{2}, 32, true)...)
|
||||
err = abi.Unpack(&out, "ints", marshalledReturn)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if *var1 != [1]uint32{1} {
|
||||
t.Error("expected var1 to be [1], got", *var1)
|
||||
}
|
||||
if *var2 != [1]uint32{2} {
|
||||
t.Error("expected var2 to be [2], got", *var2)
|
||||
}
|
||||
}
|
||||
|
||||
func TestPack(t *testing.T) {
|
||||
for i, test := range []struct {
|
||||
typ string
|
||||
|
||||
input interface{}
|
||||
output []byte
|
||||
}{
|
||||
{"uint16", uint16(2), pad([]byte{2}, 32, true)},
|
||||
{"uint16[]", []uint16{1, 2}, formatSliceOutput([]byte{1}, []byte{2})},
|
||||
{"bytes20", [20]byte{1}, pad([]byte{1}, 32, false)},
|
||||
{"uint256[]", []*big.Int{big.NewInt(1), big.NewInt(2)}, formatSliceOutput([]byte{1}, []byte{2})},
|
||||
{"address[]", []common.Address{{1}, {2}}, formatSliceOutput(pad([]byte{1}, 20, false), pad([]byte{2}, 20, false))},
|
||||
{"bytes32[]", []common.Hash{{1}, {2}}, formatSliceOutput(pad([]byte{1}, 32, false), pad([]byte{2}, 32, false))},
|
||||
{"function", [24]byte{1}, pad([]byte{1}, 32, false)},
|
||||
} {
|
||||
typ, err := NewType(test.typ)
|
||||
if err != nil {
|
||||
t.Fatal("unexpected parse error:", err)
|
||||
}
|
||||
|
||||
output, err := typ.pack(reflect.ValueOf(test.input))
|
||||
if err != nil {
|
||||
t.Fatal("unexpected pack error:", err)
|
||||
}
|
||||
|
||||
if !bytes.Equal(output, test.output) {
|
||||
t.Errorf("%d failed. Expected bytes: '%x' Got: '%x'", i, test.output, output)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestMethodPack(t *testing.T) {
|
||||
abi, err := JSON(strings.NewReader(jsondata2))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
sig := abi.Methods["slice"].Id()
|
||||
sig = append(sig, common.LeftPadBytes([]byte{1}, 32)...)
|
||||
sig = append(sig, common.LeftPadBytes([]byte{2}, 32)...)
|
||||
|
||||
packed, err := abi.Pack("slice", []uint32{1, 2})
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if !bytes.Equal(packed, sig) {
|
||||
t.Errorf("expected %x got %x", sig, packed)
|
||||
}
|
||||
|
||||
var addrA, addrB = common.Address{1}, common.Address{2}
|
||||
sig = abi.Methods["sliceAddress"].Id()
|
||||
sig = append(sig, common.LeftPadBytes([]byte{32}, 32)...)
|
||||
sig = append(sig, common.LeftPadBytes([]byte{2}, 32)...)
|
||||
sig = append(sig, common.LeftPadBytes(addrA[:], 32)...)
|
||||
sig = append(sig, common.LeftPadBytes(addrB[:], 32)...)
|
||||
|
||||
packed, err = abi.Pack("sliceAddress", []common.Address{addrA, addrB})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !bytes.Equal(packed, sig) {
|
||||
t.Errorf("expected %x got %x", sig, packed)
|
||||
}
|
||||
|
||||
var addrC, addrD = common.Address{3}, common.Address{4}
|
||||
sig = abi.Methods["sliceMultiAddress"].Id()
|
||||
sig = append(sig, common.LeftPadBytes([]byte{64}, 32)...)
|
||||
sig = append(sig, common.LeftPadBytes([]byte{160}, 32)...)
|
||||
sig = append(sig, common.LeftPadBytes([]byte{2}, 32)...)
|
||||
sig = append(sig, common.LeftPadBytes(addrA[:], 32)...)
|
||||
sig = append(sig, common.LeftPadBytes(addrB[:], 32)...)
|
||||
sig = append(sig, common.LeftPadBytes([]byte{2}, 32)...)
|
||||
sig = append(sig, common.LeftPadBytes(addrC[:], 32)...)
|
||||
sig = append(sig, common.LeftPadBytes(addrD[:], 32)...)
|
||||
|
||||
packed, err = abi.Pack("sliceMultiAddress", []common.Address{addrA, addrB}, []common.Address{addrC, addrD})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !bytes.Equal(packed, sig) {
|
||||
t.Errorf("expected %x got %x", sig, packed)
|
||||
}
|
||||
|
||||
sig = abi.Methods["slice256"].Id()
|
||||
sig = append(sig, common.LeftPadBytes([]byte{1}, 32)...)
|
||||
sig = append(sig, common.LeftPadBytes([]byte{2}, 32)...)
|
||||
|
||||
packed, err = abi.Pack("slice256", []*big.Int{big.NewInt(1), big.NewInt(2)})
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if !bytes.Equal(packed, sig) {
|
||||
t.Errorf("expected %x got %x", sig, packed)
|
||||
}
|
||||
}
|
||||
|
||||
const jsondata = `
|
||||
[
|
||||
{ "type" : "function", "name" : "balance", "constant" : true },
|
||||
@ -843,399 +437,3 @@ func TestBareEvents(t *testing.T) {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestMultiReturnWithStruct(t *testing.T) {
|
||||
const definition = `[
|
||||
{ "name" : "multi", "constant" : false, "outputs": [ { "name": "Int", "type": "uint256" }, { "name": "String", "type": "string" } ] }]`
|
||||
|
||||
abi, err := JSON(strings.NewReader(definition))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// using buff to make the code readable
|
||||
buff := new(bytes.Buffer)
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001"))
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000040"))
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000005"))
|
||||
stringOut := "hello"
|
||||
buff.Write(common.RightPadBytes([]byte(stringOut), 32))
|
||||
|
||||
var inter struct {
|
||||
Int *big.Int
|
||||
String string
|
||||
}
|
||||
err = abi.Unpack(&inter, "multi", buff.Bytes())
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if inter.Int == nil || inter.Int.Cmp(big.NewInt(1)) != 0 {
|
||||
t.Error("expected Int to be 1 got", inter.Int)
|
||||
}
|
||||
|
||||
if inter.String != stringOut {
|
||||
t.Error("expected String to be", stringOut, "got", inter.String)
|
||||
}
|
||||
|
||||
var reversed struct {
|
||||
String string
|
||||
Int *big.Int
|
||||
}
|
||||
|
||||
err = abi.Unpack(&reversed, "multi", buff.Bytes())
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if reversed.Int == nil || reversed.Int.Cmp(big.NewInt(1)) != 0 {
|
||||
t.Error("expected Int to be 1 got", reversed.Int)
|
||||
}
|
||||
|
||||
if reversed.String != stringOut {
|
||||
t.Error("expected String to be", stringOut, "got", reversed.String)
|
||||
}
|
||||
}
|
||||
|
||||
func TestMultiReturnWithSlice(t *testing.T) {
|
||||
const definition = `[
|
||||
{ "name" : "multi", "constant" : false, "outputs": [ { "name": "Int", "type": "uint256" }, { "name": "String", "type": "string" } ] }]`
|
||||
|
||||
abi, err := JSON(strings.NewReader(definition))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// using buff to make the code readable
|
||||
buff := new(bytes.Buffer)
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001"))
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000040"))
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000005"))
|
||||
stringOut := "hello"
|
||||
buff.Write(common.RightPadBytes([]byte(stringOut), 32))
|
||||
|
||||
var inter []interface{}
|
||||
err = abi.Unpack(&inter, "multi", buff.Bytes())
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if len(inter) != 2 {
|
||||
t.Fatal("expected 2 results got", len(inter))
|
||||
}
|
||||
|
||||
if num, ok := inter[0].(*big.Int); !ok || num.Cmp(big.NewInt(1)) != 0 {
|
||||
t.Error("expected index 0 to be 1 got", num)
|
||||
}
|
||||
|
||||
if str, ok := inter[1].(string); !ok || str != stringOut {
|
||||
t.Error("expected index 1 to be", stringOut, "got", str)
|
||||
}
|
||||
}
|
||||
|
||||
func TestMarshalArrays(t *testing.T) {
|
||||
const definition = `[
|
||||
{ "name" : "bytes32", "constant" : false, "outputs": [ { "type": "bytes32" } ] },
|
||||
{ "name" : "bytes10", "constant" : false, "outputs": [ { "type": "bytes10" } ] }
|
||||
]`
|
||||
|
||||
abi, err := JSON(strings.NewReader(definition))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
output := common.LeftPadBytes([]byte{1}, 32)
|
||||
|
||||
var bytes10 [10]byte
|
||||
err = abi.Unpack(&bytes10, "bytes32", output)
|
||||
if err == nil || err.Error() != "abi: cannot unmarshal src (len=32) in to dst (len=10)" {
|
||||
t.Error("expected error or bytes32 not be assignable to bytes10:", err)
|
||||
}
|
||||
|
||||
var bytes32 [32]byte
|
||||
err = abi.Unpack(&bytes32, "bytes32", output)
|
||||
if err != nil {
|
||||
t.Error("didn't expect error:", err)
|
||||
}
|
||||
if !bytes.Equal(bytes32[:], output) {
|
||||
t.Error("expected bytes32[31] to be 1 got", bytes32[31])
|
||||
}
|
||||
|
||||
type (
|
||||
B10 [10]byte
|
||||
B32 [32]byte
|
||||
)
|
||||
|
||||
var b10 B10
|
||||
err = abi.Unpack(&b10, "bytes32", output)
|
||||
if err == nil || err.Error() != "abi: cannot unmarshal src (len=32) in to dst (len=10)" {
|
||||
t.Error("expected error or bytes32 not be assignable to bytes10:", err)
|
||||
}
|
||||
|
||||
var b32 B32
|
||||
err = abi.Unpack(&b32, "bytes32", output)
|
||||
if err != nil {
|
||||
t.Error("didn't expect error:", err)
|
||||
}
|
||||
if !bytes.Equal(b32[:], output) {
|
||||
t.Error("expected bytes32[31] to be 1 got", bytes32[31])
|
||||
}
|
||||
|
||||
output[10] = 1
|
||||
var shortAssignLong [32]byte
|
||||
err = abi.Unpack(&shortAssignLong, "bytes10", output)
|
||||
if err != nil {
|
||||
t.Error("didn't expect error:", err)
|
||||
}
|
||||
if !bytes.Equal(output, shortAssignLong[:]) {
|
||||
t.Errorf("expected %x to be %x", shortAssignLong, output)
|
||||
}
|
||||
}
|
||||
|
||||
func TestUnmarshal(t *testing.T) {
|
||||
const definition = `[
|
||||
{ "name" : "int", "constant" : false, "outputs": [ { "type": "uint256" } ] },
|
||||
{ "name" : "bool", "constant" : false, "outputs": [ { "type": "bool" } ] },
|
||||
{ "name" : "bytes", "constant" : false, "outputs": [ { "type": "bytes" } ] },
|
||||
{ "name" : "fixed", "constant" : false, "outputs": [ { "type": "bytes32" } ] },
|
||||
{ "name" : "multi", "constant" : false, "outputs": [ { "type": "bytes" }, { "type": "bytes" } ] },
|
||||
{ "name" : "intArraySingle", "constant" : false, "outputs": [ { "type": "uint256[3]" } ] },
|
||||
{ "name" : "addressSliceSingle", "constant" : false, "outputs": [ { "type": "address[]" } ] },
|
||||
{ "name" : "addressSliceDouble", "constant" : false, "outputs": [ { "name": "a", "type": "address[]" }, { "name": "b", "type": "address[]" } ] },
|
||||
{ "name" : "mixedBytes", "constant" : true, "outputs": [ { "name": "a", "type": "bytes" }, { "name": "b", "type": "bytes32" } ] }]`
|
||||
|
||||
abi, err := JSON(strings.NewReader(definition))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
buff := new(bytes.Buffer)
|
||||
|
||||
// marshal int
|
||||
var Int *big.Int
|
||||
err = abi.Unpack(&Int, "int", common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001"))
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if Int == nil || Int.Cmp(big.NewInt(1)) != 0 {
|
||||
t.Error("expected Int to be 1 got", Int)
|
||||
}
|
||||
|
||||
// marshal bool
|
||||
var Bool bool
|
||||
err = abi.Unpack(&Bool, "bool", common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001"))
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if !Bool {
|
||||
t.Error("expected Bool to be true")
|
||||
}
|
||||
|
||||
// marshal dynamic bytes max length 32
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000020"))
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000020"))
|
||||
bytesOut := common.RightPadBytes([]byte("hello"), 32)
|
||||
buff.Write(bytesOut)
|
||||
|
||||
var Bytes []byte
|
||||
err = abi.Unpack(&Bytes, "bytes", buff.Bytes())
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if !bytes.Equal(Bytes, bytesOut) {
|
||||
t.Errorf("expected %x got %x", bytesOut, Bytes)
|
||||
}
|
||||
|
||||
// marshall dynamic bytes max length 64
|
||||
buff.Reset()
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000020"))
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000040"))
|
||||
bytesOut = common.RightPadBytes([]byte("hello"), 64)
|
||||
buff.Write(bytesOut)
|
||||
|
||||
err = abi.Unpack(&Bytes, "bytes", buff.Bytes())
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if !bytes.Equal(Bytes, bytesOut) {
|
||||
t.Errorf("expected %x got %x", bytesOut, Bytes)
|
||||
}
|
||||
|
||||
// marshall dynamic bytes max length 63
|
||||
buff.Reset()
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000020"))
|
||||
buff.Write(common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000003f"))
|
||||
bytesOut = common.RightPadBytes([]byte("hello"), 63)
|
||||
buff.Write(bytesOut)
|
||||
|
||||
err = abi.Unpack(&Bytes, "bytes", buff.Bytes())
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if !bytes.Equal(Bytes, bytesOut) {
|
||||
t.Errorf("expected %x got %x", bytesOut, Bytes)
|
||||
}
|
||||
|
||||
// marshal dynamic bytes output empty
|
||||
err = abi.Unpack(&Bytes, "bytes", nil)
|
||||
if err == nil {
|
||||
t.Error("expected error")
|
||||
}
|
||||
|
||||
// marshal dynamic bytes length 5
|
||||
buff.Reset()
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000020"))
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000005"))
|
||||
buff.Write(common.RightPadBytes([]byte("hello"), 32))
|
||||
|
||||
err = abi.Unpack(&Bytes, "bytes", buff.Bytes())
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if !bytes.Equal(Bytes, []byte("hello")) {
|
||||
t.Errorf("expected %x got %x", bytesOut, Bytes)
|
||||
}
|
||||
|
||||
// marshal dynamic bytes length 5
|
||||
buff.Reset()
|
||||
buff.Write(common.RightPadBytes([]byte("hello"), 32))
|
||||
|
||||
var hash common.Hash
|
||||
err = abi.Unpack(&hash, "fixed", buff.Bytes())
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
helloHash := common.BytesToHash(common.RightPadBytes([]byte("hello"), 32))
|
||||
if hash != helloHash {
|
||||
t.Errorf("Expected %x to equal %x", hash, helloHash)
|
||||
}
|
||||
|
||||
// marshal error
|
||||
buff.Reset()
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000020"))
|
||||
err = abi.Unpack(&Bytes, "bytes", buff.Bytes())
|
||||
if err == nil {
|
||||
t.Error("expected error")
|
||||
}
|
||||
|
||||
err = abi.Unpack(&Bytes, "multi", make([]byte, 64))
|
||||
if err == nil {
|
||||
t.Error("expected error")
|
||||
}
|
||||
|
||||
// marshal mixed bytes
|
||||
buff.Reset()
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000040"))
|
||||
fixed := common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001")
|
||||
buff.Write(fixed)
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000020"))
|
||||
bytesOut = common.RightPadBytes([]byte("hello"), 32)
|
||||
buff.Write(bytesOut)
|
||||
|
||||
var out []interface{}
|
||||
err = abi.Unpack(&out, "mixedBytes", buff.Bytes())
|
||||
if err != nil {
|
||||
t.Fatal("didn't expect error:", err)
|
||||
}
|
||||
|
||||
if !bytes.Equal(bytesOut, out[0].([]byte)) {
|
||||
t.Errorf("expected %x, got %x", bytesOut, out[0])
|
||||
}
|
||||
|
||||
if !bytes.Equal(fixed, out[1].([]byte)) {
|
||||
t.Errorf("expected %x, got %x", fixed, out[1])
|
||||
}
|
||||
|
||||
buff.Reset()
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001"))
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002"))
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000003"))
|
||||
// marshal int array
|
||||
var intArray [3]*big.Int
|
||||
err = abi.Unpack(&intArray, "intArraySingle", buff.Bytes())
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
var testAgainstIntArray [3]*big.Int
|
||||
testAgainstIntArray[0] = big.NewInt(1)
|
||||
testAgainstIntArray[1] = big.NewInt(2)
|
||||
testAgainstIntArray[2] = big.NewInt(3)
|
||||
|
||||
for i, Int := range intArray {
|
||||
if Int.Cmp(testAgainstIntArray[i]) != 0 {
|
||||
t.Errorf("expected %v, got %v", testAgainstIntArray[i], Int)
|
||||
}
|
||||
}
|
||||
// marshal address slice
|
||||
buff.Reset()
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000020")) // offset
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001")) // size
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000100000000000000000000000000000000000000"))
|
||||
|
||||
var outAddr []common.Address
|
||||
err = abi.Unpack(&outAddr, "addressSliceSingle", buff.Bytes())
|
||||
if err != nil {
|
||||
t.Fatal("didn't expect error:", err)
|
||||
}
|
||||
|
||||
if len(outAddr) != 1 {
|
||||
t.Fatal("expected 1 item, got", len(outAddr))
|
||||
}
|
||||
|
||||
if outAddr[0] != (common.Address{1}) {
|
||||
t.Errorf("expected %x, got %x", common.Address{1}, outAddr[0])
|
||||
}
|
||||
|
||||
// marshal multiple address slice
|
||||
buff.Reset()
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000040")) // offset
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000080")) // offset
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001")) // size
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000100000000000000000000000000000000000000"))
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002")) // size
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000200000000000000000000000000000000000000"))
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000300000000000000000000000000000000000000"))
|
||||
|
||||
var outAddrStruct struct {
|
||||
A []common.Address
|
||||
B []common.Address
|
||||
}
|
||||
err = abi.Unpack(&outAddrStruct, "addressSliceDouble", buff.Bytes())
|
||||
if err != nil {
|
||||
t.Fatal("didn't expect error:", err)
|
||||
}
|
||||
|
||||
if len(outAddrStruct.A) != 1 {
|
||||
t.Fatal("expected 1 item, got", len(outAddrStruct.A))
|
||||
}
|
||||
|
||||
if outAddrStruct.A[0] != (common.Address{1}) {
|
||||
t.Errorf("expected %x, got %x", common.Address{1}, outAddrStruct.A[0])
|
||||
}
|
||||
|
||||
if len(outAddrStruct.B) != 2 {
|
||||
t.Fatal("expected 1 item, got", len(outAddrStruct.B))
|
||||
}
|
||||
|
||||
if outAddrStruct.B[0] != (common.Address{2}) {
|
||||
t.Errorf("expected %x, got %x", common.Address{2}, outAddrStruct.B[0])
|
||||
}
|
||||
if outAddrStruct.B[1] != (common.Address{3}) {
|
||||
t.Errorf("expected %x, got %x", common.Address{3}, outAddrStruct.B[1])
|
||||
}
|
||||
|
||||
// marshal invalid address slice
|
||||
buff.Reset()
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000100"))
|
||||
|
||||
err = abi.Unpack(&outAddr, "addressSliceSingle", buff.Bytes())
|
||||
if err == nil {
|
||||
t.Fatal("expected error:", err)
|
||||
}
|
||||
}
|
||||
|
@@ -90,7 +90,7 @@ func (b *SimulatedBackend) Rollback() {
func (b *SimulatedBackend) rollback() {
blocks, _ := core.GenerateChain(b.config, b.blockchain.CurrentBlock(), b.database, 1, func(int, *core.BlockGen) {})
b.pendingBlock = blocks[0]
b.pendingState, _ = state.New(b.pendingBlock.Root(), b.database)
b.pendingState, _ = state.New(b.pendingBlock.Root(), state.NewDatabase(b.database))
}

// CodeAt returns the code associated with a certain account in the blockchain.
@@ -279,7 +279,7 @@ func (b *SimulatedBackend) SendTransaction(ctx context.Context, tx *types.Transa
block.AddTx(tx)
})
b.pendingBlock = blocks[0]
b.pendingState, _ = state.New(b.pendingBlock.Root(), b.database)
b.pendingState, _ = state.New(b.pendingBlock.Root(), state.NewDatabase(b.database))
return nil
}

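Both hunks above switch the second argument of `state.New` from the raw backing database to a `state.Database` wrapper built with `state.NewDatabase`. A minimal sketch of the new call shape, assuming `db` is an already-open `ethdb.Database` and `root` is a known state root (the helper name `openState` is made up):

```go
package example

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/state"
	"github.com/ethereum/go-ethereum/ethdb"
)

// openState opens a StateDB at the given root.
// Old call shape: state.New(root, db)
// New call shape: wrap the key-value store in a caching state.Database first.
func openState(db ethdb.Database, root common.Hash) (*state.StateDB, error) {
	return state.New(root, state.NewDatabase(db))
}
```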
@@ -17,10 +17,15 @@
package abi

import (
"errors"
"fmt"
"reflect"
)

var (
errBadBool = errors.New("abi: improperly encoded boolean value")
)

// formatSliceString formats the reflection kind with the given slice size
// and returns a formatted string representation.
func formatSliceString(kind reflect.Kind, sliceSize int) string {
@@ -39,7 +39,7 @@ type Method struct {
Outputs []Argument
}

func (m Method) pack(method Method, args ...interface{}) ([]byte, error) {
func (method Method) pack(args ...interface{}) ([]byte, error) {
// Make sure arguments match up and pack them
if len(args) != len(method.Inputs) {
return nil, fmt.Errorf("argument count mismatch: %d for %d", len(args), len(method.Inputs))
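The signature change above removes a parameter that merely duplicated the receiver: the old code received a `Method` and was handed the same `Method` again, which is why the caller in `abi.go` previously wrote `method.pack(method, args...)`. A small self-contained sketch of the same refactor, with hypothetical names, to show why the extra argument is redundant:

```go
package main

import "fmt"

type method struct{ name string }

// Before (sketch): the receiver is ignored and the same value is passed again.
func (m method) packOld(self method, args ...string) string {
	return fmt.Sprintf("%s(%v)", self.name, args)
}

// After (sketch): use the receiver directly; callers drop the duplicate argument.
func (m method) packNew(args ...string) string {
	return fmt.Sprintf("%s(%v)", m.name, args)
}

func main() {
	m := method{name: "transfer"}
	fmt.Println(m.packOld(m, "a")) // old call shape: m passed twice
	fmt.Println(m.packNew("a"))    // new call shape
}
```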
@@ -62,19 +62,6 @@ func U256(n *big.Int) []byte {
return math.PaddedBigBytes(math.U256(n), 32)
}

// packNum packs the given number (using the reflect value) and will cast it to appropriate number representation
func packNum(value reflect.Value) []byte {
switch kind := value.Kind(); kind {
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
return U256(new(big.Int).SetUint64(value.Uint()))
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return U256(big.NewInt(value.Int()))
case reflect.Ptr:
return U256(value.Interface().(*big.Int))
}
return nil
}

// checks whether the given reflect value is signed. This also works for slices with a number type
func isSigned(v reflect.Value) bool {
switch v.Type() {
@ -18,7 +18,6 @@ package abi
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"math"
|
||||
"math/big"
|
||||
"reflect"
|
||||
"testing"
|
||||
@ -34,43 +33,6 @@ func TestNumberTypes(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestPackNumber(t *testing.T) {
|
||||
tests := []struct {
|
||||
value reflect.Value
|
||||
packed []byte
|
||||
}{
|
||||
// Protocol limits
|
||||
{reflect.ValueOf(0), []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}},
|
||||
{reflect.ValueOf(1), []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1}},
|
||||
{reflect.ValueOf(-1), []byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}},
|
||||
|
||||
// Type corner cases
|
||||
{reflect.ValueOf(uint8(math.MaxUint8)), []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 255}},
|
||||
{reflect.ValueOf(uint16(math.MaxUint16)), []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 255, 255}},
|
||||
{reflect.ValueOf(uint32(math.MaxUint32)), []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 255, 255, 255, 255}},
|
||||
{reflect.ValueOf(uint64(math.MaxUint64)), []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 255, 255, 255, 255, 255, 255, 255, 255}},
|
||||
|
||||
{reflect.ValueOf(int8(math.MaxInt8)), []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 127}},
|
||||
{reflect.ValueOf(int16(math.MaxInt16)), []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 127, 255}},
|
||||
{reflect.ValueOf(int32(math.MaxInt32)), []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 127, 255, 255, 255}},
|
||||
{reflect.ValueOf(int64(math.MaxInt64)), []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 127, 255, 255, 255, 255, 255, 255, 255}},
|
||||
|
||||
{reflect.ValueOf(int8(math.MinInt8)), []byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 128}},
|
||||
{reflect.ValueOf(int16(math.MinInt16)), []byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 128, 0}},
|
||||
{reflect.ValueOf(int32(math.MinInt32)), []byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 128, 0, 0, 0}},
|
||||
{reflect.ValueOf(int64(math.MinInt64)), []byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 128, 0, 0, 0, 0, 0, 0, 0}},
|
||||
}
|
||||
for i, tt := range tests {
|
||||
packed := packNum(tt.value)
|
||||
if !bytes.Equal(packed, tt.packed) {
|
||||
t.Errorf("test %d: pack mismatch: have %x, want %x", i, packed, tt.packed)
|
||||
}
|
||||
}
|
||||
if packed := packNum(reflect.ValueOf("string")); packed != nil {
|
||||
t.Errorf("expected 'string' to pack to nil. got %x instead", packed)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSigned(t *testing.T) {
|
||||
if isSigned(reflect.ValueOf(uint(10))) {
|
||||
t.Error("signed")
|
||||
|
@@ -17,6 +17,7 @@
package abi

import (
"math/big"
"reflect"

"github.com/ethereum/go-ethereum/common"
@@ -59,8 +60,20 @@ func packElement(t Type, reflectValue reflect.Value) []byte {
if reflectValue.Kind() == reflect.Array {
reflectValue = mustArrayToByteSlice(reflectValue)
}

return common.RightPadBytes(reflectValue.Bytes(), 32)
}
panic("abi: fatal error")
}

// packNum packs the given number (using the reflect value) and will cast it to appropriate number representation
func packNum(value reflect.Value) []byte {
switch kind := value.Kind(); kind {
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
return U256(new(big.Int).SetUint64(value.Uint()))
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return U256(big.NewInt(value.Int()))
case reflect.Ptr:
return U256(value.Interface().(*big.Int))
}
return nil
}
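`packNum` (now living next to `packElement`) funnels every supported Go integer into `U256`, which serializes the value as a 32-byte big-endian word, with negative values wrapped to their two's-complement image; the `TestPackNumber` vectors removed from the numbers test file above show exactly that (for example, -1 becomes 32 bytes of 255). A standalone sketch that reproduces the same encoding without importing the library (the helper name `encodeWord` is made up):

```go
package main

import (
	"fmt"
	"math/big"
)

// encodeWord mimics packNum/U256: reduce the value modulo 2^256 and
// left-pad the big-endian bytes to a 32-byte word.
func encodeWord(n *big.Int) []byte {
	mod := new(big.Int).Lsh(big.NewInt(1), 256)
	v := new(big.Int).Mod(n, mod) // Go's Mod is Euclidean, so negatives wrap to 2^256 + n
	out := make([]byte, 32)
	b := v.Bytes()
	copy(out[32-len(b):], b)
	return out
}

func main() {
	fmt.Printf("%x\n", encodeWord(big.NewInt(2)))  // 0x00...02
	fmt.Printf("%x\n", encodeWord(big.NewInt(-1))) // 0xff...ff
}
```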
accounts/abi/pack_test.go (new file, 441 lines)

@@ -0,0 +1,441 @@
|
||||
// Copyright 2015 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package abi
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"math"
|
||||
"math/big"
|
||||
"reflect"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
)
|
||||
|
||||
func TestPack(t *testing.T) {
|
||||
for i, test := range []struct {
|
||||
typ string
|
||||
|
||||
input interface{}
|
||||
output []byte
|
||||
}{
|
||||
{
|
||||
"uint8",
|
||||
uint8(2),
|
||||
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002"),
|
||||
},
|
||||
{
|
||||
"uint8[]",
|
||||
[]uint8{1, 2},
|
||||
common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002"),
|
||||
},
|
||||
{
|
||||
"uint16",
|
||||
uint16(2),
|
||||
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002"),
|
||||
},
|
||||
{
|
||||
"uint16[]",
|
||||
[]uint16{1, 2},
|
||||
common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002"),
|
||||
},
|
||||
{
|
||||
"uint32",
|
||||
uint32(2),
|
||||
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002"),
|
||||
},
|
||||
{
|
||||
"uint32[]",
|
||||
[]uint32{1, 2},
|
||||
common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002"),
|
||||
},
|
||||
{
|
||||
"uint64",
|
||||
uint64(2),
|
||||
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002"),
|
||||
},
|
||||
{
|
||||
"uint64[]",
|
||||
[]uint64{1, 2},
|
||||
common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002"),
|
||||
},
|
||||
{
|
||||
"uint256",
|
||||
big.NewInt(2),
|
||||
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002"),
|
||||
},
|
||||
{
|
||||
"uint256[]",
|
||||
[]*big.Int{big.NewInt(1), big.NewInt(2)},
|
||||
common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002"),
|
||||
},
|
||||
{
|
||||
"int8",
|
||||
int8(2),
|
||||
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002"),
|
||||
},
|
||||
{
|
||||
"int8[]",
|
||||
[]int8{1, 2},
|
||||
common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002"),
|
||||
},
|
||||
{
|
||||
"int16",
|
||||
int16(2),
|
||||
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002"),
|
||||
},
|
||||
{
|
||||
"int16[]",
|
||||
[]int16{1, 2},
|
||||
common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002"),
|
||||
},
|
||||
{
|
||||
"int32",
|
||||
int32(2),
|
||||
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002"),
|
||||
},
|
||||
{
|
||||
"int32[]",
|
||||
[]int32{1, 2},
|
||||
common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002"),
|
||||
},
|
||||
{
|
||||
"int64",
|
||||
int64(2),
|
||||
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002"),
|
||||
},
|
||||
{
|
||||
"int64[]",
|
||||
[]int64{1, 2},
|
||||
common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002"),
|
||||
},
|
||||
{
|
||||
"int256",
|
||||
big.NewInt(2),
|
||||
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002"),
|
||||
},
|
||||
{
|
||||
"int256[]",
|
||||
[]*big.Int{big.NewInt(1), big.NewInt(2)},
|
||||
common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002"),
|
||||
},
|
||||
{
|
||||
"bytes1",
|
||||
[1]byte{1},
|
||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
||||
},
|
||||
{
|
||||
"bytes2",
|
||||
[2]byte{1},
|
||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
||||
},
|
||||
{
|
||||
"bytes3",
|
||||
[3]byte{1},
|
||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
||||
},
|
||||
{
|
||||
"bytes4",
|
||||
[4]byte{1},
|
||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
||||
},
|
||||
{
|
||||
"bytes5",
|
||||
[5]byte{1},
|
||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
||||
},
|
||||
{
|
||||
"bytes6",
|
||||
[6]byte{1},
|
||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
||||
},
|
||||
{
|
||||
"bytes7",
|
||||
[7]byte{1},
|
||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
||||
},
|
||||
{
|
||||
"bytes8",
|
||||
[8]byte{1},
|
||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
||||
},
|
||||
{
|
||||
"bytes9",
|
||||
[9]byte{1},
|
||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
||||
},
|
||||
{
|
||||
"bytes10",
|
||||
[10]byte{1},
|
||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
||||
},
|
||||
{
|
||||
"bytes11",
|
||||
[11]byte{1},
|
||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
||||
},
|
||||
{
|
||||
"bytes12",
|
||||
[12]byte{1},
|
||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
||||
},
|
||||
{
|
||||
"bytes13",
|
||||
[13]byte{1},
|
||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
||||
},
|
||||
{
|
||||
"bytes14",
|
||||
[14]byte{1},
|
||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
||||
},
|
||||
{
|
||||
"bytes15",
|
||||
[15]byte{1},
|
||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
||||
},
|
||||
{
|
||||
"bytes16",
|
||||
[16]byte{1},
|
||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
||||
},
|
||||
{
|
||||
"bytes17",
|
||||
[17]byte{1},
|
||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
||||
},
|
||||
{
|
||||
"bytes18",
|
||||
[18]byte{1},
|
||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
||||
},
|
||||
{
|
||||
"bytes19",
|
||||
[19]byte{1},
|
||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
||||
},
|
||||
{
|
||||
"bytes20",
|
||||
[20]byte{1},
|
||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
||||
},
|
||||
{
|
||||
"bytes21",
|
||||
[21]byte{1},
|
||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
||||
},
|
||||
{
|
||||
"bytes22",
|
||||
[22]byte{1},
|
||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
||||
},
|
||||
{
|
||||
"bytes23",
|
||||
[23]byte{1},
|
||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
||||
},
|
||||
{
|
||||
"bytes24",
|
||||
[24]byte{1},
|
||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
||||
},
|
||||
{
|
||||
"bytes24",
|
||||
[24]byte{1},
|
||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
||||
},
|
||||
{
|
||||
"bytes25",
|
||||
[25]byte{1},
|
||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
||||
},
|
||||
{
|
||||
"bytes26",
|
||||
[26]byte{1},
|
||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
||||
},
|
||||
{
|
||||
"bytes27",
|
||||
[27]byte{1},
|
||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
||||
},
|
||||
{
|
||||
"bytes28",
|
||||
[28]byte{1},
|
||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
||||
},
|
||||
{
|
||||
"bytes29",
|
||||
[29]byte{1},
|
||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
||||
},
|
||||
{
|
||||
"bytes30",
|
||||
[30]byte{1},
|
||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
||||
},
|
||||
{
|
||||
"bytes31",
|
||||
[31]byte{1},
|
||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
||||
},
|
||||
{
|
||||
"bytes32",
|
||||
[32]byte{1},
|
||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
||||
},
|
||||
{
|
||||
"address[]",
|
||||
[]common.Address{{1}, {2}},
|
||||
common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000200000000000000000000000001000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000"),
|
||||
},
|
||||
{
|
||||
"bytes32[]",
|
||||
[]common.Hash{{1}, {2}},
|
||||
common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000201000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000"),
|
||||
},
|
||||
{
|
||||
"function",
|
||||
[24]byte{1},
|
||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
||||
},
|
||||
{
|
||||
"string",
|
||||
"foobar",
|
||||
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000006666f6f6261720000000000000000000000000000000000000000000000000000"),
|
||||
},
|
||||
	} {
		typ, err := NewType(test.typ)
		if err != nil {
			t.Fatal("unexpected parse error:", err)
		}

		output, err := typ.pack(reflect.ValueOf(test.input))
		if err != nil {
			t.Fatal("unexpected pack error:", err)
		}

		if !bytes.Equal(output, test.output) {
			t.Errorf("%d failed. Expected bytes: '%x' Got: '%x'", i, test.output, output)
		}
	}
}

func TestMethodPack(t *testing.T) {
|
||||
abi, err := JSON(strings.NewReader(jsondata2))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
sig := abi.Methods["slice"].Id()
|
||||
sig = append(sig, common.LeftPadBytes([]byte{1}, 32)...)
|
||||
sig = append(sig, common.LeftPadBytes([]byte{2}, 32)...)
|
||||
|
||||
packed, err := abi.Pack("slice", []uint32{1, 2})
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if !bytes.Equal(packed, sig) {
|
||||
t.Errorf("expected %x got %x", sig, packed)
|
||||
}
|
||||
|
||||
var addrA, addrB = common.Address{1}, common.Address{2}
|
||||
sig = abi.Methods["sliceAddress"].Id()
|
||||
sig = append(sig, common.LeftPadBytes([]byte{32}, 32)...)
|
||||
sig = append(sig, common.LeftPadBytes([]byte{2}, 32)...)
|
||||
sig = append(sig, common.LeftPadBytes(addrA[:], 32)...)
|
||||
sig = append(sig, common.LeftPadBytes(addrB[:], 32)...)
|
||||
|
||||
packed, err = abi.Pack("sliceAddress", []common.Address{addrA, addrB})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !bytes.Equal(packed, sig) {
|
||||
t.Errorf("expected %x got %x", sig, packed)
|
||||
}
|
||||
|
||||
var addrC, addrD = common.Address{3}, common.Address{4}
|
||||
sig = abi.Methods["sliceMultiAddress"].Id()
|
||||
sig = append(sig, common.LeftPadBytes([]byte{64}, 32)...)
|
||||
sig = append(sig, common.LeftPadBytes([]byte{160}, 32)...)
|
||||
sig = append(sig, common.LeftPadBytes([]byte{2}, 32)...)
|
||||
sig = append(sig, common.LeftPadBytes(addrA[:], 32)...)
|
||||
sig = append(sig, common.LeftPadBytes(addrB[:], 32)...)
|
||||
sig = append(sig, common.LeftPadBytes([]byte{2}, 32)...)
|
||||
sig = append(sig, common.LeftPadBytes(addrC[:], 32)...)
|
||||
sig = append(sig, common.LeftPadBytes(addrD[:], 32)...)
|
||||
|
||||
packed, err = abi.Pack("sliceMultiAddress", []common.Address{addrA, addrB}, []common.Address{addrC, addrD})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !bytes.Equal(packed, sig) {
|
||||
t.Errorf("expected %x got %x", sig, packed)
|
||||
}
|
||||
|
||||
sig = abi.Methods["slice256"].Id()
|
||||
sig = append(sig, common.LeftPadBytes([]byte{1}, 32)...)
|
||||
sig = append(sig, common.LeftPadBytes([]byte{2}, 32)...)
|
||||
|
||||
packed, err = abi.Pack("slice256", []*big.Int{big.NewInt(1), big.NewInt(2)})
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if !bytes.Equal(packed, sig) {
|
||||
t.Errorf("expected %x got %x", sig, packed)
|
||||
}
|
||||
}
|
||||
|
||||
func TestPackNumber(t *testing.T) {
|
||||
tests := []struct {
|
||||
value reflect.Value
|
||||
packed []byte
|
||||
}{
|
||||
// Protocol limits
|
||||
{reflect.ValueOf(0), common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000000")},
|
||||
{reflect.ValueOf(1), common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001")},
|
||||
{reflect.ValueOf(-1), common.Hex2Bytes("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")},
|
||||
|
||||
// Type corner cases
|
||||
{reflect.ValueOf(uint8(math.MaxUint8)), common.Hex2Bytes("00000000000000000000000000000000000000000000000000000000000000ff")},
|
||||
{reflect.ValueOf(uint16(math.MaxUint16)), common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000ffff")},
|
||||
{reflect.ValueOf(uint32(math.MaxUint32)), common.Hex2Bytes("00000000000000000000000000000000000000000000000000000000ffffffff")},
|
||||
{reflect.ValueOf(uint64(math.MaxUint64)), common.Hex2Bytes("000000000000000000000000000000000000000000000000ffffffffffffffff")},
|
||||
|
||||
{reflect.ValueOf(int8(math.MaxInt8)), common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000007f")},
|
||||
{reflect.ValueOf(int16(math.MaxInt16)), common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000007fff")},
|
||||
{reflect.ValueOf(int32(math.MaxInt32)), common.Hex2Bytes("000000000000000000000000000000000000000000000000000000007fffffff")},
|
||||
{reflect.ValueOf(int64(math.MaxInt64)), common.Hex2Bytes("0000000000000000000000000000000000000000000000007fffffffffffffff")},
|
||||
|
||||
{reflect.ValueOf(int8(math.MinInt8)), common.Hex2Bytes("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff80")},
|
||||
{reflect.ValueOf(int16(math.MinInt16)), common.Hex2Bytes("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff8000")},
|
||||
{reflect.ValueOf(int32(math.MinInt32)), common.Hex2Bytes("ffffffffffffffffffffffffffffffffffffffffffffffffffffffff80000000")},
|
||||
{reflect.ValueOf(int64(math.MinInt64)), common.Hex2Bytes("ffffffffffffffffffffffffffffffffffffffffffffffff8000000000000000")},
|
||||
}
|
||||
for i, tt := range tests {
|
||||
packed := packNum(tt.value)
|
||||
if !bytes.Equal(packed, tt.packed) {
|
||||
t.Errorf("test %d: pack mismatch: have %x, want %x", i, packed, tt.packed)
|
||||
}
|
||||
}
|
||||
if packed := packNum(reflect.ValueOf("string")); packed != nil {
|
||||
t.Errorf("expected 'string' to pack to nil. got %x instead", packed)
|
||||
}
|
||||
}
|
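Note: the corner cases above all follow one rule — every number is packed into a single 32-byte, big-endian word, and negative values are sign-extended in two's complement. A minimal standalone sketch of that rule (the padWord helper is hypothetical and not part of this diff; it only reproduces the fixtures shown above):

package main

import (
	"fmt"
	"math/big"
)

// padWord reduces v modulo 2^256 and left-pads it into a 32-byte word,
// matching the expected byte fixtures in TestPackNumber above.
func padWord(v *big.Int) []byte {
	mod := new(big.Int).Lsh(big.NewInt(1), 256)
	u := new(big.Int).Mod(v, mod) // negatives wrap to their two's-complement form

	word := make([]byte, 32)
	u.FillBytes(word) // left-pad with zeros into the 32-byte word
	return word
}

func main() {
	fmt.Printf("%x\n", padWord(big.NewInt(2)))  // 0000...0002
	fmt.Printf("%x\n", padWord(big.NewInt(-1))) // ffff...ffff
}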
@ -32,30 +32,30 @@ func indirect(v reflect.Value) reflect.Value {

// reflectIntKindAndType returns the reflect.Kind and reflect.Type to use for an
// integer of the given size and unsignedness.
func reflectIntKind(unsigned bool, size int) reflect.Kind {
func reflectIntKindAndType(unsigned bool, size int) (reflect.Kind, reflect.Type) {
	switch size {
	case 8:
		if unsigned {
			return reflect.Uint8
			return reflect.Uint8, uint8_t
		}
		return reflect.Int8
		return reflect.Int8, int8_t
	case 16:
		if unsigned {
			return reflect.Uint16
			return reflect.Uint16, uint16_t
		}
		return reflect.Int16
		return reflect.Int16, int16_t
	case 32:
		if unsigned {
			return reflect.Uint32
			return reflect.Uint32, uint32_t
		}
		return reflect.Int32
		return reflect.Int32, int32_t
	case 64:
		if unsigned {
			return reflect.Uint64
			return reflect.Uint64, uint64_t
		}
		return reflect.Int64
		return reflect.Int64, int64_t
	}
	return reflect.Ptr
	return reflect.Ptr, big_t
}

// mustArrayToBytesSlice creates a new byte slice with the exact same size as value

@ -33,7 +33,7 @@ const (
	FixedBytesTy
	BytesTy
	HashTy
	FixedpointTy
	FixedPointTy
	FunctionTy
)

@ -126,13 +126,11 @@ func NewType(t string) (typ Type, err error) {

	switch varType {
	case "int":
		typ.Kind = reflectIntKind(false, varSize)
		typ.Type = big_t
		typ.Kind, typ.Type = reflectIntKindAndType(false, varSize)
		typ.Size = varSize
		typ.T = IntTy
	case "uint":
		typ.Kind = reflectIntKind(true, varSize)
		typ.Type = ubig_t
		typ.Kind, typ.Type = reflectIntKindAndType(true, varSize)
		typ.Size = varSize
		typ.T = UintTy
	case "bool":

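In practice, the hunk above means sized integers now resolve to their native Go kinds, while 256-bit integers keep using *big.Int. A small usage sketch against the exported NewType shown in this diff (illustrative only, not part of the change set):

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/accounts/abi"
)

func main() {
	t32, _ := abi.NewType("uint32")
	fmt.Println(t32.Kind, t32.Size) // uint32 32 — a concrete Go kind after this change

	t256, _ := abi.NewType("uint256")
	fmt.Println(t256.Kind, t256.Size) // ptr 256 — still backed by *big.Int
}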
@ -17,8 +17,11 @@
package abi

import (
	"math/big"
	"reflect"
	"testing"

	"github.com/ethereum/go-ethereum/common"
)

// typeWithoutStringer is an alias for the Type type which simply doesn't implement
@ -31,26 +34,44 @@ func TestTypeRegexp(t *testing.T) {
blob string
|
||||
kind Type
|
||||
}{
|
||||
{"int", Type{Kind: reflect.Ptr, Type: big_t, Size: 256, T: IntTy, stringKind: "int256"}},
|
||||
{"int8", Type{Kind: reflect.Int8, Type: big_t, Size: 8, T: IntTy, stringKind: "int8"}},
|
||||
{"bool", Type{Kind: reflect.Bool, T: BoolTy, stringKind: "bool"}},
|
||||
{"bool[]", Type{IsSlice: true, SliceSize: -1, Kind: reflect.Bool, T: BoolTy, Elem: &Type{Kind: reflect.Bool, T: BoolTy, stringKind: "bool"}, stringKind: "bool[]"}},
|
||||
{"bool[2]", Type{IsArray: true, SliceSize: 2, Kind: reflect.Bool, T: BoolTy, Elem: &Type{Kind: reflect.Bool, T: BoolTy, stringKind: "bool"}, stringKind: "bool[2]"}},
|
||||
{"int8", Type{Kind: reflect.Int8, Type: int8_t, Size: 8, T: IntTy, stringKind: "int8"}},
|
||||
{"int16", Type{Kind: reflect.Int16, Type: int16_t, Size: 16, T: IntTy, stringKind: "int16"}},
|
||||
{"int32", Type{Kind: reflect.Int32, Type: int32_t, Size: 32, T: IntTy, stringKind: "int32"}},
|
||||
{"int64", Type{Kind: reflect.Int64, Type: int64_t, Size: 64, T: IntTy, stringKind: "int64"}},
|
||||
{"int256", Type{Kind: reflect.Ptr, Type: big_t, Size: 256, T: IntTy, stringKind: "int256"}},
|
||||
{"int[]", Type{IsSlice: true, SliceSize: -1, Kind: reflect.Ptr, Type: big_t, Size: 256, T: IntTy, Elem: &Type{Kind: reflect.Ptr, Type: big_t, Size: 256, T: IntTy, stringKind: "int256"}, stringKind: "int256[]"}},
|
||||
{"int[2]", Type{IsArray: true, SliceSize: 2, Kind: reflect.Ptr, Type: big_t, Size: 256, T: IntTy, Elem: &Type{Kind: reflect.Ptr, Type: big_t, Size: 256, T: IntTy, stringKind: "int256"}, stringKind: "int256[2]"}},
|
||||
{"int32[]", Type{IsSlice: true, SliceSize: -1, Kind: reflect.Int32, Type: big_t, Size: 32, T: IntTy, Elem: &Type{Kind: reflect.Int32, Type: big_t, Size: 32, T: IntTy, stringKind: "int32"}, stringKind: "int32[]"}},
|
||||
{"int32[2]", Type{IsArray: true, SliceSize: 2, Kind: reflect.Int32, Type: big_t, Size: 32, T: IntTy, Elem: &Type{Kind: reflect.Int32, Type: big_t, Size: 32, T: IntTy, stringKind: "int32"}, stringKind: "int32[2]"}},
|
||||
{"uint", Type{Kind: reflect.Ptr, Type: ubig_t, Size: 256, T: UintTy, stringKind: "uint256"}},
|
||||
{"uint8", Type{Kind: reflect.Uint8, Type: ubig_t, Size: 8, T: UintTy, stringKind: "uint8"}},
|
||||
{"uint256", Type{Kind: reflect.Ptr, Type: ubig_t, Size: 256, T: UintTy, stringKind: "uint256"}},
|
||||
{"uint[]", Type{IsSlice: true, SliceSize: -1, Kind: reflect.Ptr, Type: ubig_t, Size: 256, T: UintTy, Elem: &Type{Kind: reflect.Ptr, Type: ubig_t, Size: 256, T: UintTy, stringKind: "uint256"}, stringKind: "uint256[]"}},
|
||||
{"uint[2]", Type{IsArray: true, SliceSize: 2, Kind: reflect.Ptr, Type: ubig_t, Size: 256, T: UintTy, Elem: &Type{Kind: reflect.Ptr, Type: ubig_t, Size: 256, T: UintTy, stringKind: "uint256"}, stringKind: "uint256[2]"}},
|
||||
{"uint32[]", Type{IsSlice: true, SliceSize: -1, Kind: reflect.Uint32, Type: ubig_t, Size: 32, T: UintTy, Elem: &Type{Kind: reflect.Uint32, Type: big_t, Size: 32, T: UintTy, stringKind: "uint32"}, stringKind: "uint32[]"}},
|
||||
{"uint32[2]", Type{IsArray: true, SliceSize: 2, Kind: reflect.Uint32, Type: ubig_t, Size: 32, T: UintTy, Elem: &Type{Kind: reflect.Uint32, Type: big_t, Size: 32, T: UintTy, stringKind: "uint32"}, stringKind: "uint32[2]"}},
|
||||
{"bytes", Type{IsSlice: true, SliceSize: -1, Elem: &Type{Kind: reflect.Uint8, Type: ubig_t, Size: 8, T: UintTy, stringKind: "uint8"}, T: BytesTy, stringKind: "bytes"}},
|
||||
{"bytes32", Type{IsArray: true, SliceSize: 32, Elem: &Type{Kind: reflect.Uint8, Type: ubig_t, Size: 8, T: UintTy, stringKind: "uint8"}, T: FixedBytesTy, stringKind: "bytes32"}},
|
||||
{"bytes[]", Type{IsSlice: true, SliceSize: -1, Elem: &Type{IsSlice: true, SliceSize: -1, Elem: &Type{Kind: reflect.Uint8, Type: ubig_t, Size: 8, T: UintTy, stringKind: "uint8"}, T: BytesTy, stringKind: "bytes"}, stringKind: "bytes[]"}},
|
||||
{"bytes[2]", Type{IsArray: true, SliceSize: 2, Elem: &Type{IsSlice: true, SliceSize: -1, Elem: &Type{Kind: reflect.Uint8, Type: ubig_t, Size: 8, T: UintTy, stringKind: "uint8"}, T: BytesTy, stringKind: "bytes"}, stringKind: "bytes[2]"}},
|
||||
{"bytes32[]", Type{IsSlice: true, SliceSize: -1, Elem: &Type{IsArray: true, SliceSize: 32, Elem: &Type{Kind: reflect.Uint8, Type: ubig_t, Size: 8, T: UintTy, stringKind: "uint8"}, T: FixedBytesTy, stringKind: "bytes32"}, stringKind: "bytes32[]"}},
|
||||
{"bytes32[2]", Type{IsArray: true, SliceSize: 2, Elem: &Type{IsArray: true, SliceSize: 32, Elem: &Type{Kind: reflect.Uint8, Type: ubig_t, Size: 8, T: UintTy, stringKind: "uint8"}, T: FixedBytesTy, stringKind: "bytes32"}, stringKind: "bytes32[2]"}},
|
||||
{"int8[]", Type{IsSlice: true, SliceSize: -1, Kind: reflect.Int8, Type: int8_t, Size: 8, T: IntTy, Elem: &Type{Kind: reflect.Int8, Type: int8_t, Size: 8, T: IntTy, stringKind: "int8"}, stringKind: "int8[]"}},
|
||||
{"int8[2]", Type{IsArray: true, SliceSize: 2, Kind: reflect.Int8, Type: int8_t, Size: 8, T: IntTy, Elem: &Type{Kind: reflect.Int8, Type: int8_t, Size: 8, T: IntTy, stringKind: "int8"}, stringKind: "int8[2]"}},
|
||||
{"int16[]", Type{IsSlice: true, SliceSize: -1, Kind: reflect.Int16, Type: int16_t, Size: 16, T: IntTy, Elem: &Type{Kind: reflect.Int16, Type: int16_t, Size: 16, T: IntTy, stringKind: "int16"}, stringKind: "int16[]"}},
|
||||
{"int16[2]", Type{IsArray: true, SliceSize: 2, Kind: reflect.Int16, Type: int16_t, Size: 16, T: IntTy, Elem: &Type{Kind: reflect.Int16, Type: int16_t, Size: 16, T: IntTy, stringKind: "int16"}, stringKind: "int16[2]"}},
|
||||
{"int32[]", Type{IsSlice: true, SliceSize: -1, Kind: reflect.Int32, Type: int32_t, Size: 32, T: IntTy, Elem: &Type{Kind: reflect.Int32, Type: int32_t, Size: 32, T: IntTy, stringKind: "int32"}, stringKind: "int32[]"}},
|
||||
{"int32[2]", Type{IsArray: true, SliceSize: 2, Kind: reflect.Int32, Type: int32_t, Size: 32, T: IntTy, Elem: &Type{Kind: reflect.Int32, Type: int32_t, Size: 32, T: IntTy, stringKind: "int32"}, stringKind: "int32[2]"}},
|
||||
{"int64[]", Type{IsSlice: true, SliceSize: -1, Kind: reflect.Int64, Type: int64_t, Size: 64, T: IntTy, Elem: &Type{Kind: reflect.Int64, Type: int64_t, Size: 64, T: IntTy, stringKind: "int64"}, stringKind: "int64[]"}},
|
||||
{"int64[2]", Type{IsArray: true, SliceSize: 2, Kind: reflect.Int64, Type: int64_t, Size: 64, T: IntTy, Elem: &Type{Kind: reflect.Int64, Type: int64_t, Size: 64, T: IntTy, stringKind: "int64"}, stringKind: "int64[2]"}},
|
||||
{"int256[]", Type{IsSlice: true, SliceSize: -1, Kind: reflect.Ptr, Type: big_t, Size: 256, T: IntTy, Elem: &Type{Kind: reflect.Ptr, Type: big_t, Size: 256, T: IntTy, stringKind: "int256"}, stringKind: "int256[]"}},
|
||||
{"int256[2]", Type{IsArray: true, SliceSize: 2, Kind: reflect.Ptr, Type: big_t, Size: 256, T: IntTy, Elem: &Type{Kind: reflect.Ptr, Type: big_t, Size: 256, T: IntTy, stringKind: "int256"}, stringKind: "int256[2]"}},
|
||||
{"uint8", Type{Kind: reflect.Uint8, Type: uint8_t, Size: 8, T: UintTy, stringKind: "uint8"}},
|
||||
{"uint16", Type{Kind: reflect.Uint16, Type: uint16_t, Size: 16, T: UintTy, stringKind: "uint16"}},
|
||||
{"uint32", Type{Kind: reflect.Uint32, Type: uint32_t, Size: 32, T: UintTy, stringKind: "uint32"}},
|
||||
{"uint64", Type{Kind: reflect.Uint64, Type: uint64_t, Size: 64, T: UintTy, stringKind: "uint64"}},
|
||||
{"uint256", Type{Kind: reflect.Ptr, Type: big_t, Size: 256, T: UintTy, stringKind: "uint256"}},
|
||||
{"uint8[]", Type{IsSlice: true, SliceSize: -1, Kind: reflect.Uint8, Type: uint8_t, Size: 8, T: UintTy, Elem: &Type{Kind: reflect.Uint8, Type: uint8_t, Size: 8, T: UintTy, stringKind: "uint8"}, stringKind: "uint8[]"}},
|
||||
{"uint8[2]", Type{IsArray: true, SliceSize: 2, Kind: reflect.Uint8, Type: uint8_t, Size: 8, T: UintTy, Elem: &Type{Kind: reflect.Uint8, Type: uint8_t, Size: 8, T: UintTy, stringKind: "uint8"}, stringKind: "uint8[2]"}},
|
||||
{"uint16[]", Type{IsSlice: true, SliceSize: -1, Kind: reflect.Uint16, Type: uint16_t, Size: 16, T: UintTy, Elem: &Type{Kind: reflect.Uint16, Type: uint16_t, Size: 16, T: UintTy, stringKind: "uint16"}, stringKind: "uint16[]"}},
|
||||
{"uint16[2]", Type{IsArray: true, SliceSize: 2, Kind: reflect.Uint16, Type: uint16_t, Size: 16, T: UintTy, Elem: &Type{Kind: reflect.Uint16, Type: uint16_t, Size: 16, T: UintTy, stringKind: "uint16"}, stringKind: "uint16[2]"}},
|
||||
{"uint32[]", Type{IsSlice: true, SliceSize: -1, Kind: reflect.Uint32, Type: uint32_t, Size: 32, T: UintTy, Elem: &Type{Kind: reflect.Uint32, Type: uint32_t, Size: 32, T: UintTy, stringKind: "uint32"}, stringKind: "uint32[]"}},
|
||||
{"uint32[2]", Type{IsArray: true, SliceSize: 2, Kind: reflect.Uint32, Type: uint32_t, Size: 32, T: UintTy, Elem: &Type{Kind: reflect.Uint32, Type: uint32_t, Size: 32, T: UintTy, stringKind: "uint32"}, stringKind: "uint32[2]"}},
|
||||
{"uint64[]", Type{IsSlice: true, SliceSize: -1, Kind: reflect.Uint64, Type: uint64_t, Size: 64, T: UintTy, Elem: &Type{Kind: reflect.Uint64, Type: uint64_t, Size: 64, T: UintTy, stringKind: "uint64"}, stringKind: "uint64[]"}},
|
||||
{"uint64[2]", Type{IsArray: true, SliceSize: 2, Kind: reflect.Uint64, Type: uint64_t, Size: 64, T: UintTy, Elem: &Type{Kind: reflect.Uint64, Type: uint64_t, Size: 64, T: UintTy, stringKind: "uint64"}, stringKind: "uint64[2]"}},
|
||||
{"uint256[]", Type{IsSlice: true, SliceSize: -1, Kind: reflect.Ptr, Type: big_t, Size: 256, T: UintTy, Elem: &Type{Kind: reflect.Ptr, Type: big_t, Size: 256, T: UintTy, stringKind: "uint256"}, stringKind: "uint256[]"}},
|
||||
{"uint256[2]", Type{IsArray: true, SliceSize: 2, Kind: reflect.Ptr, Type: big_t, Size: 256, T: UintTy, Elem: &Type{Kind: reflect.Ptr, Type: big_t, Size: 256, T: UintTy, stringKind: "uint256"}, stringKind: "uint256[2]"}},
|
||||
{"bytes32", Type{IsArray: true, SliceSize: 32, Elem: &Type{Kind: reflect.Uint8, Type: uint8_t, Size: 8, T: UintTy, stringKind: "uint8"}, T: FixedBytesTy, stringKind: "bytes32"}},
|
||||
{"bytes[]", Type{IsSlice: true, SliceSize: -1, Elem: &Type{IsSlice: true, SliceSize: -1, Elem: &Type{Kind: reflect.Uint8, Type: uint8_t, Size: 8, T: UintTy, stringKind: "uint8"}, T: BytesTy, stringKind: "bytes"}, stringKind: "bytes[]"}},
|
||||
{"bytes[2]", Type{IsArray: true, SliceSize: 2, Elem: &Type{IsSlice: true, SliceSize: -1, Elem: &Type{Kind: reflect.Uint8, Type: uint8_t, Size: 8, T: UintTy, stringKind: "uint8"}, T: BytesTy, stringKind: "bytes"}, stringKind: "bytes[2]"}},
|
||||
{"bytes32[]", Type{IsSlice: true, SliceSize: -1, Elem: &Type{IsArray: true, SliceSize: 32, Elem: &Type{Kind: reflect.Uint8, Type: uint8_t, Size: 8, T: UintTy, stringKind: "uint8"}, T: FixedBytesTy, stringKind: "bytes32"}, stringKind: "bytes32[]"}},
|
||||
{"bytes32[2]", Type{IsArray: true, SliceSize: 2, Elem: &Type{IsArray: true, SliceSize: 32, Elem: &Type{Kind: reflect.Uint8, Type: uint8_t, Size: 8, T: UintTy, stringKind: "uint8"}, T: FixedBytesTy, stringKind: "bytes32"}, stringKind: "bytes32[2]"}},
|
||||
{"string", Type{Kind: reflect.String, Size: -1, T: StringTy, stringKind: "string"}},
|
||||
{"string[]", Type{IsSlice: true, SliceSize: -1, Kind: reflect.String, T: StringTy, Size: -1, Elem: &Type{Kind: reflect.String, T: StringTy, Size: -1, stringKind: "string"}, stringKind: "string[]"}},
|
||||
{"string[2]", Type{IsArray: true, SliceSize: 2, Kind: reflect.String, T: StringTy, Size: -1, Elem: &Type{Kind: reflect.String, T: StringTy, Size: -1, stringKind: "string"}, stringKind: "string[2]"}},
|
||||
@ -76,3 +97,59 @@ func TestTypeRegexp(t *testing.T) {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestTypeCheck(t *testing.T) {
|
||||
for i, test := range []struct {
|
||||
typ string
|
||||
input interface{}
|
||||
err string
|
||||
}{
|
||||
{"uint", big.NewInt(1), ""},
|
||||
{"int", big.NewInt(1), ""},
|
||||
{"uint30", big.NewInt(1), ""},
|
||||
{"uint30", uint8(1), "abi: cannot use uint8 as type ptr as argument"},
|
||||
{"uint16", uint16(1), ""},
|
||||
{"uint16", uint8(1), "abi: cannot use uint8 as type uint16 as argument"},
|
||||
{"uint16[]", []uint16{1, 2, 3}, ""},
|
||||
{"uint16[]", [3]uint16{1, 2, 3}, ""},
|
||||
{"uint16[]", []uint32{1, 2, 3}, "abi: cannot use []uint32 as type []uint16 as argument"},
|
||||
{"uint16[3]", [3]uint32{1, 2, 3}, "abi: cannot use [3]uint32 as type [3]uint16 as argument"},
|
||||
{"uint16[3]", [4]uint16{1, 2, 3}, "abi: cannot use [4]uint16 as type [3]uint16 as argument"},
|
||||
{"uint16[3]", []uint16{1, 2, 3}, ""},
|
||||
{"uint16[3]", []uint16{1, 2, 3, 4}, "abi: cannot use [4]uint16 as type [3]uint16 as argument"},
|
||||
{"address[]", []common.Address{{1}}, ""},
|
||||
{"address[1]", []common.Address{{1}}, ""},
|
||||
{"address[1]", [1]common.Address{{1}}, ""},
|
||||
{"address[2]", [1]common.Address{{1}}, "abi: cannot use [1]array as type [2]array as argument"},
|
||||
{"bytes32", [32]byte{}, ""},
|
||||
{"bytes32", [33]byte{}, "abi: cannot use [33]uint8 as type [32]uint8 as argument"},
|
||||
{"bytes32", common.Hash{1}, ""},
|
||||
{"bytes31", [31]byte{}, ""},
|
||||
{"bytes31", [32]byte{}, "abi: cannot use [32]uint8 as type [31]uint8 as argument"},
|
||||
{"bytes", []byte{0, 1}, ""},
|
||||
{"bytes", [2]byte{0, 1}, ""},
|
||||
{"bytes", common.Hash{1}, ""},
|
||||
{"string", "hello world", ""},
|
||||
{"bytes32[]", [][32]byte{{}}, ""},
|
||||
{"function", [24]byte{}, ""},
|
||||
} {
|
||||
typ, err := NewType(test.typ)
|
||||
if err != nil {
|
||||
t.Fatal("unexpected parse error:", err)
|
||||
}
|
||||
|
||||
err = typeCheck(typ, reflect.ValueOf(test.input))
|
||||
if err != nil && len(test.err) == 0 {
|
||||
t.Errorf("%d failed. Expected no err but got: %v", i, err)
|
||||
continue
|
||||
}
|
||||
if err == nil && len(test.err) != 0 {
|
||||
t.Errorf("%d failed. Expected err: %v but got none", i, test.err)
|
||||
continue
|
||||
}
|
||||
|
||||
if err != nil && len(test.err) != 0 && err.Error() != test.err {
|
||||
t.Errorf("%d failed. Expected err: '%v' got err: '%v'", i, test.err, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
235
accounts/abi/unpack.go
Normal file
@ -0,0 +1,235 @@
|
||||
// Copyright 2015 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package abi
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"math/big"
|
||||
"reflect"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
)
|
||||
|
||||
// toGoSlice parses the input and casts it to the proper slice defined by the ABI
// argument in T.
func toGoSlice(i int, t Argument, output []byte) (interface{}, error) {
	index := i * 32
	// The slice must, at the very least, be large enough for index+32, which is exactly
	// the size required for the [offset in output, size of offset] header.
	if index+32 > len(output) {
		return nil, fmt.Errorf("abi: cannot marshal in to go slice: insufficient size output %d require %d", len(output), index+32)
	}
elem := t.Type.Elem
|
||||
|
||||
// first we need to create a slice of the type
|
||||
var refSlice reflect.Value
|
||||
switch elem.T {
|
||||
case IntTy, UintTy, BoolTy:
|
||||
// create a new reference slice matching the element type
|
||||
switch t.Type.Kind {
|
||||
case reflect.Bool:
|
||||
refSlice = reflect.ValueOf([]bool(nil))
|
||||
case reflect.Uint8:
|
||||
refSlice = reflect.ValueOf([]uint8(nil))
|
||||
case reflect.Uint16:
|
||||
refSlice = reflect.ValueOf([]uint16(nil))
|
||||
case reflect.Uint32:
|
||||
refSlice = reflect.ValueOf([]uint32(nil))
|
||||
case reflect.Uint64:
|
||||
refSlice = reflect.ValueOf([]uint64(nil))
|
||||
case reflect.Int8:
|
||||
refSlice = reflect.ValueOf([]int8(nil))
|
||||
case reflect.Int16:
|
||||
refSlice = reflect.ValueOf([]int16(nil))
|
||||
case reflect.Int32:
|
||||
refSlice = reflect.ValueOf([]int32(nil))
|
||||
case reflect.Int64:
|
||||
refSlice = reflect.ValueOf([]int64(nil))
|
||||
default:
|
||||
refSlice = reflect.ValueOf([]*big.Int(nil))
|
||||
}
|
||||
case AddressTy: // address must be of slice Address
|
||||
refSlice = reflect.ValueOf([]common.Address(nil))
|
||||
case HashTy: // hash must be of slice hash
|
||||
refSlice = reflect.ValueOf([]common.Hash(nil))
|
||||
case FixedBytesTy:
|
||||
refSlice = reflect.ValueOf([][]byte(nil))
|
||||
default: // no other types are supported
|
||||
return nil, fmt.Errorf("abi: unsupported slice type %v", elem.T)
|
||||
}
|
||||
|
||||
var slice []byte
|
||||
var size int
|
||||
var offset int
|
||||
	if t.Type.IsSlice {
		// get the offset which determines the start of this array ...
		offset = int(binary.BigEndian.Uint64(output[index+24 : index+32]))
		if offset+32 > len(output) {
			return nil, fmt.Errorf("abi: cannot marshal in to go slice: offset %d would go over slice boundary (len=%d)", len(output), offset+32)
		}

		slice = output[offset:]
		// ... starting with the size of the array in elements ...
		size = int(binary.BigEndian.Uint64(slice[24:32]))
		slice = slice[32:]
		// ... and make sure that at least that many bytes are
		// available in the buffer.
		if size*32 > len(slice) {
			return nil, fmt.Errorf("abi: cannot marshal in to go slice: insufficient size output %d require %d", len(output), offset+32+size*32)
		}

		// reslice to match the required size
		slice = slice[:size*32]
	} else if t.Type.IsArray {
		// get the number of elements in the array
		size = t.Type.SliceSize

		// check to make sure the array size matches up
		if index+32*size > len(output) {
			return nil, fmt.Errorf("abi: cannot marshal in to go array: offset %d would go over slice boundary (len=%d)", len(output), index+32*size)
		}
		// the elements of a fixed array sit inline at the index
		slice = output[index : index+size*32]
	}
|
||||
for i := 0; i < size; i++ {
|
||||
var (
|
||||
inter interface{} // interface type
|
||||
returnOutput = slice[i*32 : i*32+32] // the return output
|
||||
err error
|
||||
)
|
||||
// set inter to the correct type (cast)
|
||||
switch elem.T {
|
||||
case IntTy, UintTy:
|
||||
inter = readInteger(t.Type.Kind, returnOutput)
|
||||
case BoolTy:
|
||||
inter, err = readBool(returnOutput)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
case AddressTy:
|
||||
inter = common.BytesToAddress(returnOutput)
|
||||
case HashTy:
|
||||
inter = common.BytesToHash(returnOutput)
|
||||
case FixedBytesTy:
|
||||
inter = returnOutput
|
||||
}
|
||||
// append the item to our reflect slice
|
||||
refSlice = reflect.Append(refSlice, reflect.ValueOf(inter))
|
||||
}
|
||||
|
||||
// return the interface
|
||||
return refSlice.Interface(), nil
|
||||
}
|
||||
|
||||
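For reference, the head/tail layout that toGoSlice walks — a 32-byte offset word pointing at a 32-byte element count, followed by the packed elements — can be seen on a concrete buffer. A standalone sketch (not part of the diff), decoding a two-element dynamic array of small integers with nothing but the standard library and common.Hex2Bytes:

package main

import (
	"encoding/binary"
	"fmt"

	"github.com/ethereum/go-ethereum/common"
)

func main() {
	buf := common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000020")             // offset: 32
	buf = append(buf, common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002")...) // length: 2
	buf = append(buf, common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001")...) // element 0
	buf = append(buf, common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002")...) // element 1

	offset := binary.BigEndian.Uint64(buf[24:32])               // where the tail starts
	size := binary.BigEndian.Uint64(buf[offset+24 : offset+32]) // number of elements
	for i := uint64(0); i < size; i++ {
		start := offset + 32 + i*32
		fmt.Println(binary.BigEndian.Uint64(buf[start+24 : start+32])) // prints 1, then 2
	}
}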
func readInteger(kind reflect.Kind, b []byte) interface{} {
	switch kind {
	case reflect.Uint8:
		return uint8(b[len(b)-1])
	case reflect.Uint16:
		return binary.BigEndian.Uint16(b[len(b)-2:])
	case reflect.Uint32:
		return binary.BigEndian.Uint32(b[len(b)-4:])
	case reflect.Uint64:
		return binary.BigEndian.Uint64(b[len(b)-8:])
	case reflect.Int8:
		return int8(b[len(b)-1])
	case reflect.Int16:
		return int16(binary.BigEndian.Uint16(b[len(b)-2:]))
	case reflect.Int32:
		return int32(binary.BigEndian.Uint32(b[len(b)-4:]))
	case reflect.Int64:
		return int64(binary.BigEndian.Uint64(b[len(b)-8:]))
	default:
		return new(big.Int).SetBytes(b)
	}
}

func readBool(word []byte) (bool, error) {
	if len(word) != 32 {
		return false, fmt.Errorf("abi: fatal error: incorrect word length")
	}

	for i, b := range word {
		if b != 0 && i != 31 {
			return false, errBadBool
		}
	}
	switch word[31] {
	case 0:
		return false, nil
	case 1:
		return true, nil
	default:
		return false, errBadBool
	}

}

// toGoType parses the input and casts it to the proper type defined by the ABI
// argument in T.
func toGoType(i int, t Argument, output []byte) (interface{}, error) {
	// we need to treat slices differently
	if (t.Type.IsSlice || t.Type.IsArray) && t.Type.T != BytesTy && t.Type.T != StringTy && t.Type.T != FixedBytesTy && t.Type.T != FunctionTy {
		return toGoSlice(i, t, output)
	}

	index := i * 32
	if index+32 > len(output) {
		return nil, fmt.Errorf("abi: cannot marshal in to go type: length insufficient %d require %d", len(output), index+32)
	}

	// Parse the given index output and check whether we need to read
	// a different offset and length based on the type (i.e. string, bytes)
	var returnOutput []byte
	switch t.Type.T {
	case StringTy, BytesTy: // variable arrays are written at the end of the return bytes
		// parse offset from which we should start reading
		offset := int(binary.BigEndian.Uint64(output[index+24 : index+32]))
		if offset+32 > len(output) {
			return nil, fmt.Errorf("abi: cannot marshal in to go type: length insufficient %d require %d", len(output), offset+32)
		}
		// parse the size up until we should be reading
		size := int(binary.BigEndian.Uint64(output[offset+24 : offset+32]))
		if offset+32+size > len(output) {
			return nil, fmt.Errorf("abi: cannot marshal in to go type: length insufficient %d require %d", len(output), offset+32+size)
		}

		// get the bytes for this return value
		returnOutput = output[offset+32 : offset+32+size]
	default:
		returnOutput = output[index : index+32]
	}

	// convert the bytes to whatever is specified by the ABI.
	switch t.Type.T {
	case IntTy, UintTy:
		return readInteger(t.Type.Kind, returnOutput), nil
	case BoolTy:
		return readBool(returnOutput)
	case AddressTy:
		return common.BytesToAddress(returnOutput), nil
	case HashTy:
		return common.BytesToHash(returnOutput), nil
	case BytesTy, FixedBytesTy, FunctionTy:
		return returnOutput, nil
	case StringTy:
		return string(returnOutput), nil
	}
	return nil, fmt.Errorf("abi: unknown type %v", t.Type.T)
}
681
accounts/abi/unpack_test.go
Normal file
@ -0,0 +1,681 @@
|
||||
// Copyright 2015 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package abi
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"math/big"
|
||||
"reflect"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
)
|
||||
|
||||
func TestSimpleMethodUnpack(t *testing.T) {
|
||||
for i, test := range []struct {
|
||||
def string // definition of the **output** ABI params
|
||||
marshalledOutput []byte // evm return data
|
||||
expectedOut interface{} // the expected output
|
||||
outVar string // the output variable (e.g. uint32, *big.Int, etc)
|
||||
err string // empty or error if expected
|
||||
}{
|
||||
{
|
||||
`[ { "type": "bool" } ]`,
|
||||
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001"),
|
||||
bool(true),
|
||||
"bool",
|
||||
"",
|
||||
},
|
||||
{
|
||||
`[ { "type": "uint32" } ]`,
|
||||
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001"),
|
||||
uint32(1),
|
||||
"uint32",
|
||||
"",
|
||||
},
|
||||
{
|
||||
`[ { "type": "uint32" } ]`,
|
||||
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001"),
|
||||
nil,
|
||||
"uint16",
|
||||
"abi: cannot unmarshal uint32 in to uint16",
|
||||
},
|
||||
{
|
||||
`[ { "type": "uint17" } ]`,
|
||||
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001"),
|
||||
nil,
|
||||
"uint16",
|
||||
"abi: cannot unmarshal *big.Int in to uint16",
|
||||
},
|
||||
{
|
||||
`[ { "type": "uint17" } ]`,
|
||||
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001"),
|
||||
big.NewInt(1),
|
||||
"*big.Int",
|
||||
"",
|
||||
},
|
||||
|
||||
{
|
||||
`[ { "type": "int32" } ]`,
|
||||
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001"),
|
||||
int32(1),
|
||||
"int32",
|
||||
"",
|
||||
},
|
||||
{
|
||||
`[ { "type": "int32" } ]`,
|
||||
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001"),
|
||||
nil,
|
||||
"int16",
|
||||
"abi: cannot unmarshal int32 in to int16",
|
||||
},
|
||||
{
|
||||
`[ { "type": "int17" } ]`,
|
||||
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001"),
|
||||
nil,
|
||||
"int16",
|
||||
"abi: cannot unmarshal *big.Int in to int16",
|
||||
},
|
||||
{
|
||||
`[ { "type": "int17" } ]`,
|
||||
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001"),
|
||||
big.NewInt(1),
|
||||
"*big.Int",
|
||||
"",
|
||||
},
|
||||
|
||||
{
|
||||
`[ { "type": "address" } ]`,
|
||||
common.Hex2Bytes("0000000000000000000000000100000000000000000000000000000000000000"),
|
||||
common.Address{1},
|
||||
"address",
|
||||
"",
|
||||
},
|
||||
{
|
||||
`[ { "type": "bytes32" } ]`,
|
||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
||||
"bytes",
|
||||
"",
|
||||
},
|
||||
{
|
||||
`[ { "type": "bytes32" } ]`,
|
||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
||||
"hash",
|
||||
"",
|
||||
},
|
||||
{
|
||||
`[ { "type": "bytes32" } ]`,
|
||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
||||
"interface",
|
||||
"",
|
||||
},
|
||||
{
|
||||
`[ { "type": "function" } ]`,
|
||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
||||
[24]byte{1},
|
||||
"function",
|
||||
"",
|
||||
},
|
||||
} {
|
||||
abiDefinition := fmt.Sprintf(`[{ "name" : "method", "outputs": %s}]`, test.def)
|
||||
abi, err := JSON(strings.NewReader(abiDefinition))
|
||||
if err != nil {
|
||||
t.Errorf("%d failed. %v", i, err)
|
||||
continue
|
||||
}
|
||||
|
||||
var outvar interface{}
|
||||
switch test.outVar {
|
||||
case "bool":
|
||||
var v bool
|
||||
err = abi.Unpack(&v, "method", test.marshalledOutput)
|
||||
outvar = v
|
||||
case "uint8":
|
||||
var v uint8
|
||||
err = abi.Unpack(&v, "method", test.marshalledOutput)
|
||||
outvar = v
|
||||
case "uint16":
|
||||
var v uint16
|
||||
err = abi.Unpack(&v, "method", test.marshalledOutput)
|
||||
outvar = v
|
||||
case "uint32":
|
||||
var v uint32
|
||||
err = abi.Unpack(&v, "method", test.marshalledOutput)
|
||||
outvar = v
|
||||
case "uint64":
|
||||
var v uint64
|
||||
err = abi.Unpack(&v, "method", test.marshalledOutput)
|
||||
outvar = v
|
||||
case "int8":
|
||||
var v int8
|
||||
err = abi.Unpack(&v, "method", test.marshalledOutput)
|
||||
outvar = v
|
||||
case "int16":
|
||||
var v int16
|
||||
err = abi.Unpack(&v, "method", test.marshalledOutput)
|
||||
outvar = v
|
||||
case "int32":
|
||||
var v int32
|
||||
err = abi.Unpack(&v, "method", test.marshalledOutput)
|
||||
outvar = v
|
||||
case "int64":
|
||||
var v int64
|
||||
err = abi.Unpack(&v, "method", test.marshalledOutput)
|
||||
outvar = v
|
||||
case "*big.Int":
|
||||
var v *big.Int
|
||||
err = abi.Unpack(&v, "method", test.marshalledOutput)
|
||||
outvar = v
|
||||
case "address":
|
||||
var v common.Address
|
||||
err = abi.Unpack(&v, "method", test.marshalledOutput)
|
||||
outvar = v
|
||||
case "bytes":
|
||||
var v []byte
|
||||
err = abi.Unpack(&v, "method", test.marshalledOutput)
|
||||
outvar = v
|
||||
case "hash":
|
||||
var v common.Hash
|
||||
err = abi.Unpack(&v, "method", test.marshalledOutput)
|
||||
outvar = v.Bytes()[:]
|
||||
case "function":
|
||||
var v [24]byte
|
||||
err = abi.Unpack(&v, "method", test.marshalledOutput)
|
||||
outvar = v
|
||||
case "interface":
|
||||
err = abi.Unpack(&outvar, "method", test.marshalledOutput)
|
||||
default:
|
||||
t.Errorf("unsupported type '%v' please add it to the switch statement in this test", test.outVar)
|
||||
continue
|
||||
}
|
||||
|
||||
if err != nil && len(test.err) == 0 {
|
||||
t.Errorf("%d failed. Expected no err but got: %v", i, err)
|
||||
continue
|
||||
}
|
||||
if err == nil && len(test.err) != 0 {
|
||||
t.Errorf("%d failed. Expected err: %v but got none", i, test.err)
|
||||
continue
|
||||
}
|
||||
if err != nil && len(test.err) != 0 && err.Error() != test.err {
|
||||
t.Errorf("%d failed. Expected err: '%v' got err: '%v'", i, test.err, err)
|
||||
continue
|
||||
}
|
||||
|
||||
if err == nil {
|
||||
if !reflect.DeepEqual(test.expectedOut, outvar) {
|
||||
t.Errorf("%d failed. Output error: expected %v, got %v", i, test.expectedOut, outvar)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestUnpackSetInterfaceSlice(t *testing.T) {
|
||||
var (
|
||||
var1 = new(uint8)
|
||||
var2 = new(uint8)
|
||||
)
|
||||
out := []interface{}{var1, var2}
|
||||
abi, err := JSON(strings.NewReader(`[{"type":"function", "name":"ints", "outputs":[{"type":"uint8"}, {"type":"uint8"}]}]`))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
marshalledReturn := append(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001"), common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002")...)
|
||||
err = abi.Unpack(&out, "ints", marshalledReturn)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if *var1 != 1 {
|
||||
t.Error("expected var1 to be 1, got", *var1)
|
||||
}
|
||||
if *var2 != 2 {
|
||||
t.Error("expected var2 to be 2, got", *var2)
|
||||
}
|
||||
|
||||
out = []interface{}{var1}
|
||||
err = abi.Unpack(&out, "ints", marshalledReturn)
|
||||
|
||||
expErr := "abi: cannot marshal in to slices of unequal size (require: 2, got: 1)"
|
||||
if err == nil || err.Error() != expErr {
|
||||
t.Error("expected err:", expErr, "Got:", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestUnpackSetInterfaceArrayOutput(t *testing.T) {
|
||||
var (
|
||||
var1 = new([1]uint32)
|
||||
var2 = new([1]uint32)
|
||||
)
|
||||
out := []interface{}{var1, var2}
|
||||
abi, err := JSON(strings.NewReader(`[{"type":"function", "name":"ints", "outputs":[{"type":"uint32[1]"}, {"type":"uint32[1]"}]}]`))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
marshalledReturn := append(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001"), common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002")...)
|
||||
err = abi.Unpack(&out, "ints", marshalledReturn)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if *var1 != [1]uint32{1} {
|
||||
t.Error("expected var1 to be [1], got", *var1)
|
||||
}
|
||||
if *var2 != [1]uint32{2} {
|
||||
t.Error("expected var2 to be [2], got", *var2)
|
||||
}
|
||||
}
|
||||
|
||||
func TestMultiReturnWithStruct(t *testing.T) {
|
||||
const definition = `[
|
||||
{ "name" : "multi", "constant" : false, "outputs": [ { "name": "Int", "type": "uint256" }, { "name": "String", "type": "string" } ] }]`
|
||||
|
||||
abi, err := JSON(strings.NewReader(definition))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// using buff to make the code readable
|
||||
buff := new(bytes.Buffer)
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001"))
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000040"))
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000005"))
|
||||
stringOut := "hello"
|
||||
buff.Write(common.RightPadBytes([]byte(stringOut), 32))
|
||||
|
||||
var inter struct {
|
||||
Int *big.Int
|
||||
String string
|
||||
}
|
||||
err = abi.Unpack(&inter, "multi", buff.Bytes())
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if inter.Int == nil || inter.Int.Cmp(big.NewInt(1)) != 0 {
|
||||
t.Error("expected Int to be 1 got", inter.Int)
|
||||
}
|
||||
|
||||
if inter.String != stringOut {
|
||||
t.Error("expected String to be", stringOut, "got", inter.String)
|
||||
}
|
||||
|
||||
var reversed struct {
|
||||
String string
|
||||
Int *big.Int
|
||||
}
|
||||
|
||||
err = abi.Unpack(&reversed, "multi", buff.Bytes())
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if reversed.Int == nil || reversed.Int.Cmp(big.NewInt(1)) != 0 {
|
||||
t.Error("expected Int to be 1 got", reversed.Int)
|
||||
}
|
||||
|
||||
if reversed.String != stringOut {
|
||||
t.Error("expected String to be", stringOut, "got", reversed.String)
|
||||
}
|
||||
}
|
||||
|
||||
func TestMultiReturnWithSlice(t *testing.T) {
|
||||
const definition = `[
|
||||
{ "name" : "multi", "constant" : false, "outputs": [ { "name": "Int", "type": "uint256" }, { "name": "String", "type": "string" } ] }]`
|
||||
|
||||
abi, err := JSON(strings.NewReader(definition))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// using buff to make the code readable
|
||||
buff := new(bytes.Buffer)
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001"))
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000040"))
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000005"))
|
||||
stringOut := "hello"
|
||||
buff.Write(common.RightPadBytes([]byte(stringOut), 32))
|
||||
|
||||
var inter []interface{}
|
||||
err = abi.Unpack(&inter, "multi", buff.Bytes())
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if len(inter) != 2 {
|
||||
t.Fatal("expected 2 results got", len(inter))
|
||||
}
|
||||
|
||||
if num, ok := inter[0].(*big.Int); !ok || num.Cmp(big.NewInt(1)) != 0 {
|
||||
t.Error("expected index 0 to be 1 got", num)
|
||||
}
|
||||
|
||||
if str, ok := inter[1].(string); !ok || str != stringOut {
|
||||
t.Error("expected index 1 to be", stringOut, "got", str)
|
||||
}
|
||||
}
|
||||
|
||||
func TestMarshalArrays(t *testing.T) {
|
||||
const definition = `[
|
||||
{ "name" : "bytes32", "constant" : false, "outputs": [ { "type": "bytes32" } ] },
|
||||
{ "name" : "bytes10", "constant" : false, "outputs": [ { "type": "bytes10" } ] }
|
||||
]`
|
||||
|
||||
abi, err := JSON(strings.NewReader(definition))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
output := common.LeftPadBytes([]byte{1}, 32)
|
||||
|
||||
var bytes10 [10]byte
|
||||
err = abi.Unpack(&bytes10, "bytes32", output)
|
||||
if err == nil || err.Error() != "abi: cannot unmarshal src (len=32) in to dst (len=10)" {
|
||||
t.Error("expected error or bytes32 not be assignable to bytes10:", err)
|
||||
}
|
||||
|
||||
var bytes32 [32]byte
|
||||
err = abi.Unpack(&bytes32, "bytes32", output)
|
||||
if err != nil {
|
||||
t.Error("didn't expect error:", err)
|
||||
}
|
||||
if !bytes.Equal(bytes32[:], output) {
|
||||
t.Error("expected bytes32[31] to be 1 got", bytes32[31])
|
||||
}
|
||||
|
||||
type (
|
||||
B10 [10]byte
|
||||
B32 [32]byte
|
||||
)
|
||||
|
||||
var b10 B10
|
||||
err = abi.Unpack(&b10, "bytes32", output)
|
||||
if err == nil || err.Error() != "abi: cannot unmarshal src (len=32) in to dst (len=10)" {
|
||||
t.Error("expected error or bytes32 not be assignable to bytes10:", err)
|
||||
}
|
||||
|
||||
var b32 B32
|
||||
err = abi.Unpack(&b32, "bytes32", output)
|
||||
if err != nil {
|
||||
t.Error("didn't expect error:", err)
|
||||
}
|
||||
if !bytes.Equal(b32[:], output) {
|
||||
t.Error("expected bytes32[31] to be 1 got", bytes32[31])
|
||||
}
|
||||
|
||||
output[10] = 1
|
||||
var shortAssignLong [32]byte
|
||||
err = abi.Unpack(&shortAssignLong, "bytes10", output)
|
||||
if err != nil {
|
||||
t.Error("didn't expect error:", err)
|
||||
}
|
||||
if !bytes.Equal(output, shortAssignLong[:]) {
|
||||
t.Errorf("expected %x to be %x", shortAssignLong, output)
|
||||
}
|
||||
}
|
||||
|
||||
func TestUnmarshal(t *testing.T) {
|
||||
const definition = `[
|
||||
{ "name" : "int", "constant" : false, "outputs": [ { "type": "uint256" } ] },
|
||||
{ "name" : "bool", "constant" : false, "outputs": [ { "type": "bool" } ] },
|
||||
{ "name" : "bytes", "constant" : false, "outputs": [ { "type": "bytes" } ] },
|
||||
{ "name" : "fixed", "constant" : false, "outputs": [ { "type": "bytes32" } ] },
|
||||
{ "name" : "multi", "constant" : false, "outputs": [ { "type": "bytes" }, { "type": "bytes" } ] },
|
||||
{ "name" : "intArraySingle", "constant" : false, "outputs": [ { "type": "uint256[3]" } ] },
|
||||
{ "name" : "addressSliceSingle", "constant" : false, "outputs": [ { "type": "address[]" } ] },
|
||||
{ "name" : "addressSliceDouble", "constant" : false, "outputs": [ { "name": "a", "type": "address[]" }, { "name": "b", "type": "address[]" } ] },
|
||||
{ "name" : "mixedBytes", "constant" : true, "outputs": [ { "name": "a", "type": "bytes" }, { "name": "b", "type": "bytes32" } ] }]`
|
||||
|
||||
abi, err := JSON(strings.NewReader(definition))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
buff := new(bytes.Buffer)
|
||||
|
||||
// marshal int
|
||||
var Int *big.Int
|
||||
err = abi.Unpack(&Int, "int", common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001"))
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if Int == nil || Int.Cmp(big.NewInt(1)) != 0 {
|
||||
t.Error("expected Int to be 1 got", Int)
|
||||
}
|
||||
|
||||
// marshal bool
|
||||
var Bool bool
|
||||
err = abi.Unpack(&Bool, "bool", common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001"))
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if !Bool {
|
||||
t.Error("expected Bool to be true")
|
||||
}
|
||||
|
||||
// marshal dynamic bytes max length 32
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000020"))
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000020"))
|
||||
bytesOut := common.RightPadBytes([]byte("hello"), 32)
|
||||
buff.Write(bytesOut)
|
||||
|
||||
var Bytes []byte
|
||||
err = abi.Unpack(&Bytes, "bytes", buff.Bytes())
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if !bytes.Equal(Bytes, bytesOut) {
|
||||
t.Errorf("expected %x got %x", bytesOut, Bytes)
|
||||
}
|
||||
|
||||
// marshal dynamic bytes max length 64
|
||||
buff.Reset()
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000020"))
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000040"))
|
||||
bytesOut = common.RightPadBytes([]byte("hello"), 64)
|
||||
buff.Write(bytesOut)
|
||||
|
||||
err = abi.Unpack(&Bytes, "bytes", buff.Bytes())
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if !bytes.Equal(Bytes, bytesOut) {
|
||||
t.Errorf("expected %x got %x", bytesOut, Bytes)
|
||||
}
|
||||
|
||||
// marshal dynamic bytes max length 63
|
||||
buff.Reset()
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000020"))
|
||||
buff.Write(common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000003f"))
|
||||
bytesOut = common.RightPadBytes([]byte("hello"), 63)
|
||||
buff.Write(bytesOut)
|
||||
|
||||
err = abi.Unpack(&Bytes, "bytes", buff.Bytes())
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if !bytes.Equal(Bytes, bytesOut) {
|
||||
t.Errorf("expected %x got %x", bytesOut, Bytes)
|
||||
}
|
||||
|
||||
// marshal dynamic bytes output empty
|
||||
err = abi.Unpack(&Bytes, "bytes", nil)
|
||||
if err == nil {
|
||||
t.Error("expected error")
|
||||
}
|
||||
|
||||
// marshal dynamic bytes length 5
|
||||
buff.Reset()
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000020"))
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000005"))
|
||||
buff.Write(common.RightPadBytes([]byte("hello"), 32))
|
||||
|
||||
err = abi.Unpack(&Bytes, "bytes", buff.Bytes())
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if !bytes.Equal(Bytes, []byte("hello")) {
|
||||
t.Errorf("expected %x got %x", bytesOut, Bytes)
|
||||
}
|
||||
|
||||
// marshal fixed bytes32 into a hash
|
||||
buff.Reset()
|
||||
buff.Write(common.RightPadBytes([]byte("hello"), 32))
|
||||
|
||||
var hash common.Hash
|
||||
err = abi.Unpack(&hash, "fixed", buff.Bytes())
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
helloHash := common.BytesToHash(common.RightPadBytes([]byte("hello"), 32))
|
||||
if hash != helloHash {
|
||||
t.Errorf("Expected %x to equal %x", hash, helloHash)
|
||||
}
|
||||
|
||||
// marshal error
|
||||
buff.Reset()
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000020"))
|
||||
err = abi.Unpack(&Bytes, "bytes", buff.Bytes())
|
||||
if err == nil {
|
||||
t.Error("expected error")
|
||||
}
|
||||
|
||||
err = abi.Unpack(&Bytes, "multi", make([]byte, 64))
|
||||
if err == nil {
|
||||
t.Error("expected error")
|
||||
}
|
||||
|
||||
// marshal mixed bytes
|
||||
buff.Reset()
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000040"))
|
||||
fixed := common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001")
|
||||
buff.Write(fixed)
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000020"))
|
||||
bytesOut = common.RightPadBytes([]byte("hello"), 32)
|
||||
buff.Write(bytesOut)
|
||||
|
||||
var out []interface{}
|
||||
err = abi.Unpack(&out, "mixedBytes", buff.Bytes())
|
||||
if err != nil {
|
||||
t.Fatal("didn't expect error:", err)
|
||||
}
|
||||
|
||||
if !bytes.Equal(bytesOut, out[0].([]byte)) {
|
||||
t.Errorf("expected %x, got %x", bytesOut, out[0])
|
||||
}
|
||||
|
||||
if !bytes.Equal(fixed, out[1].([]byte)) {
|
||||
t.Errorf("expected %x, got %x", fixed, out[1])
|
||||
}
|
||||
|
||||
buff.Reset()
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001"))
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002"))
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000003"))
|
||||
// marshal int array
|
||||
var intArray [3]*big.Int
|
||||
err = abi.Unpack(&intArray, "intArraySingle", buff.Bytes())
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
var testAgainstIntArray [3]*big.Int
|
||||
testAgainstIntArray[0] = big.NewInt(1)
|
||||
testAgainstIntArray[1] = big.NewInt(2)
|
||||
testAgainstIntArray[2] = big.NewInt(3)
|
||||
|
||||
for i, Int := range intArray {
|
||||
if Int.Cmp(testAgainstIntArray[i]) != 0 {
|
||||
t.Errorf("expected %v, got %v", testAgainstIntArray[i], Int)
|
||||
}
|
||||
}
|
||||
// marshal address slice
|
||||
buff.Reset()
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000020")) // offset
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001")) // size
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000100000000000000000000000000000000000000"))
|
||||
|
||||
var outAddr []common.Address
|
||||
err = abi.Unpack(&outAddr, "addressSliceSingle", buff.Bytes())
|
||||
if err != nil {
|
||||
t.Fatal("didn't expect error:", err)
|
||||
}
|
||||
|
||||
if len(outAddr) != 1 {
|
||||
t.Fatal("expected 1 item, got", len(outAddr))
|
||||
}
|
||||
|
||||
if outAddr[0] != (common.Address{1}) {
|
||||
t.Errorf("expected %x, got %x", common.Address{1}, outAddr[0])
|
||||
}
|
||||
|
||||
// marshal multiple address slice
|
||||
buff.Reset()
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000040")) // offset
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000080")) // offset
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001")) // size
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000100000000000000000000000000000000000000"))
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002")) // size
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000200000000000000000000000000000000000000"))
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000300000000000000000000000000000000000000"))
|
||||
|
||||
var outAddrStruct struct {
|
||||
A []common.Address
|
||||
B []common.Address
|
||||
}
|
||||
err = abi.Unpack(&outAddrStruct, "addressSliceDouble", buff.Bytes())
|
||||
if err != nil {
|
||||
t.Fatal("didn't expect error:", err)
|
||||
}
|
||||
|
||||
if len(outAddrStruct.A) != 1 {
|
||||
t.Fatal("expected 1 item, got", len(outAddrStruct.A))
|
||||
}
|
||||
|
||||
if outAddrStruct.A[0] != (common.Address{1}) {
|
||||
t.Errorf("expected %x, got %x", common.Address{1}, outAddrStruct.A[0])
|
||||
}
|
||||
|
||||
if len(outAddrStruct.B) != 2 {
|
||||
t.Fatal("expected 1 item, got", len(outAddrStruct.B))
|
||||
}
|
||||
|
||||
if outAddrStruct.B[0] != (common.Address{2}) {
|
||||
t.Errorf("expected %x, got %x", common.Address{2}, outAddrStruct.B[0])
|
||||
}
|
||||
if outAddrStruct.B[1] != (common.Address{3}) {
|
||||
t.Errorf("expected %x, got %x", common.Address{3}, outAddrStruct.B[1])
|
||||
}
|
||||
|
||||
	// marshal invalid address slice
	buff.Reset()
	buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000100"))

	err = abi.Unpack(&outAddr, "addressSliceSingle", buff.Bytes())
	if err == nil {
		t.Fatal("expected error:", err)
	}
}
|
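// Standalone sketch (not part of this changeset) of the abi.Unpack pattern the
// tests above exercise: decode a single dynamic "bytes" return value. The ABI
// definition and method name below are illustrative assumptions.
package main

import (
	"bytes"
	"fmt"
	"strings"

	"github.com/ethereum/go-ethereum/accounts/abi"
	"github.com/ethereum/go-ethereum/common"
)

func main() {
	const def = `[{"name":"bytes","constant":false,"outputs":[{"type":"bytes"}]}]`
	parsed, err := abi.JSON(strings.NewReader(def))
	if err != nil {
		panic(err)
	}
	// ABI encoding of dynamic bytes: offset word, length word, right-padded payload.
	var buff bytes.Buffer
	buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000020")) // offset
	buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000005")) // length
	buff.Write(common.RightPadBytes([]byte("hello"), 32))                                              // payload

	var out []byte
	if err := parsed.Unpack(&out, "bytes", buff.Bytes()); err != nil {
		panic(err)
	}
	fmt.Printf("%s\n", out) // hello
}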
@ -38,7 +38,7 @@ var ErrNotSupported = errors.New("not supported")
|
||||
var ErrInvalidPassphrase = errors.New("invalid passphrase")
|
||||
|
||||
// ErrWalletAlreadyOpen is returned if a wallet is attempted to be opened the
|
||||
// secodn time.
|
||||
// second time.
|
||||
var ErrWalletAlreadyOpen = errors.New("wallet already open")
|
||||
|
||||
// ErrWalletClosed is returned if a wallet is attempted to be opened the
|
||||
|
@@ -182,10 +182,8 @@ func DecryptKey(keyjson []byte, auth string) (*Key, error) {
	if err != nil {
		return nil, err
	}
	key, err := crypto.ToECDSA(keyBytes)
	if err != nil {
		return nil, err
	}
	key := crypto.ToECDSAUnsafe(keyBytes)

	return &Key{
		Id:      uuid.UUID(keyId),
		Address: crypto.PubkeyToAddress(key.PublicKey),
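// Context for the change above (sketch, not part of the diff): crypto.ToECDSA
// validates the scalar and returns an error, while crypto.ToECDSAUnsafe blindly
// interprets the bytes as a private key. The unchecked variant is acceptable here
// because keyBytes was just produced by decrypting a well-formed keystore blob.
//
//	key, err := crypto.ToECDSA(keyBytes)  // checked: returns an error on bad input
//	key := crypto.ToECDSAUnsafe(keyBytes) // unchecked: caller guarantees validity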
@ -22,6 +22,7 @@ import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"strings"
|
||||
"testing"
|
||||
@ -140,21 +141,32 @@ func TestV3_PBKDF2_1(t *testing.T) {
|
||||
testDecryptV3(tests["wikipage_test_vector_pbkdf2"], t)
|
||||
}
|
||||
|
||||
var testsSubmodule = filepath.Join("..", "..", "tests", "testdata", "KeyStoreTests")

func skipIfSubmoduleMissing(t *testing.T) {
	if !common.FileExist(testsSubmodule) {
		t.Skipf("can't find JSON tests from submodule at %s", testsSubmodule)
	}
}
|
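// Note (not part of the code): the key-store vectors now live in the tests/testdata
// git submodule rather than tests/files, so a fresh checkout needs the submodule
// fetched before these tests will run, using the same command the CI config in this
// changeset runs:
//
//	git submodule update --init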
||||
|
||||
func TestV3_PBKDF2_2(t *testing.T) {
|
||||
skipIfSubmoduleMissing(t)
|
||||
t.Parallel()
|
||||
tests := loadKeyStoreTestV3("../../tests/files/KeyStoreTests/basic_tests.json", t)
|
||||
tests := loadKeyStoreTestV3(filepath.Join(testsSubmodule, "basic_tests.json"), t)
|
||||
testDecryptV3(tests["test1"], t)
|
||||
}
|
||||
|
||||
func TestV3_PBKDF2_3(t *testing.T) {
|
||||
skipIfSubmoduleMissing(t)
|
||||
t.Parallel()
|
||||
tests := loadKeyStoreTestV3("../../tests/files/KeyStoreTests/basic_tests.json", t)
|
||||
tests := loadKeyStoreTestV3(filepath.Join(testsSubmodule, "basic_tests.json"), t)
|
||||
testDecryptV3(tests["python_generated_test_with_odd_iv"], t)
|
||||
}
|
||||
|
||||
func TestV3_PBKDF2_4(t *testing.T) {
|
||||
skipIfSubmoduleMissing(t)
|
||||
t.Parallel()
|
||||
tests := loadKeyStoreTestV3("../../tests/files/KeyStoreTests/basic_tests.json", t)
|
||||
tests := loadKeyStoreTestV3(filepath.Join(testsSubmodule, "basic_tests.json"), t)
|
||||
testDecryptV3(tests["evilnonce"], t)
|
||||
}
|
||||
|
||||
@ -165,8 +177,9 @@ func TestV3_Scrypt_1(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestV3_Scrypt_2(t *testing.T) {
|
||||
skipIfSubmoduleMissing(t)
|
||||
t.Parallel()
|
||||
tests := loadKeyStoreTestV3("../../tests/files/KeyStoreTests/basic_tests.json", t)
|
||||
tests := loadKeyStoreTestV3(filepath.Join(testsSubmodule, "basic_tests.json"), t)
|
||||
testDecryptV3(tests["test2"], t)
|
||||
}
|
||||
|
||||
|
@ -74,10 +74,8 @@ func decryptPreSaleKey(fileContent []byte, password string) (key *Key, err error
|
||||
return nil, err
|
||||
}
|
||||
ethPriv := crypto.Keccak256(plainText)
|
||||
ecKey, err := crypto.ToECDSA(ethPriv)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ecKey := crypto.ToECDSAUnsafe(ethPriv)
|
||||
|
||||
key = &Key{
|
||||
Id: nil,
|
||||
Address: crypto.PubkeyToAddress(ecKey.PublicKey),
|
||||
|
@ -21,6 +21,7 @@ environment:
|
||||
PATH: C:\msys64\mingw32\bin\;C:\Program Files (x86)\NSIS\;%PATH%
|
||||
|
||||
install:
|
||||
- git submodule update --init
|
||||
- rmdir C:\go /s /q
|
||||
- appveyor DownloadFile https://storage.googleapis.com/golang/go1.8.3.windows-%GETH_ARCH%.zip
|
||||
- 7z x go1.8.3.windows-%GETH_ARCH%.zip -y -oC:\ > NUL
|
||||
|
@ -175,7 +175,7 @@ func doInstall(cmdline []string) {
|
||||
|
||||
// Check Go version. People regularly open issues about compilation
|
||||
// failure with outdated Go. This should save them the trouble.
|
||||
if runtime.Version() < "go1.7" && !strings.HasPrefix(runtime.Version(), "devel") {
|
||||
if runtime.Version() < "go1.7" && !strings.Contains(runtime.Version(), "devel") {
|
||||
log.Println("You have Go version", runtime.Version())
|
||||
log.Println("go-ethereum requires at least Go version 1.7 and cannot")
|
||||
log.Println("be compiled with an earlier version. Please upgrade your Go installation.")
|
||||
|
@ -45,7 +45,7 @@ var (
|
||||
// paths with any of these prefixes will be skipped
|
||||
skipPrefixes = []string{
|
||||
// boring stuff
|
||||
"vendor/", "tests/files/", "build/",
|
||||
"vendor/", "tests/testdata/", "build/",
|
||||
// don't relicense vendored sources
|
||||
"cmd/internal/browser",
|
||||
"consensus/ethash/xor.go",
|
||||
|
cmd/evm/json_logger.go (new file, 67 lines)
@ -0,0 +1,67 @@
@@ -0,0 +1,67 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package main

import (
	"encoding/json"
	"io"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/common/math"
	"github.com/ethereum/go-ethereum/core/vm"
)

type JSONLogger struct {
	encoder *json.Encoder
	cfg     *vm.LogConfig
}

func NewJSONLogger(cfg *vm.LogConfig, writer io.Writer) *JSONLogger {
	return &JSONLogger{json.NewEncoder(writer), cfg}
}

// CaptureState outputs state information on the logger.
func (l *JSONLogger) CaptureState(env *vm.EVM, pc uint64, op vm.OpCode, gas, cost uint64, memory *vm.Memory, stack *vm.Stack, contract *vm.Contract, depth int, err error) error {
	log := vm.StructLog{
		Pc:         pc,
		Op:         op,
		Gas:        gas + cost,
		GasCost:    cost,
		MemorySize: memory.Len(),
		Storage:    nil,
		Depth:      depth,
		Err:        err,
	}
	if !l.cfg.DisableMemory {
		log.Memory = memory.Data()
	}
	if !l.cfg.DisableStack {
		log.Stack = stack.Data()
	}
	return l.encoder.Encode(log)
}

// CaptureEnd is triggered at end of execution.
func (l *JSONLogger) CaptureEnd(output []byte, gasUsed uint64, t time.Duration) error {
	type endLog struct {
		Output  string              `json:"output"`
		GasUsed math.HexOrDecimal64 `json:"gasUsed"`
		Time    time.Duration       `json:"time"`
	}
	return l.encoder.Encode(endLog{common.Bytes2Hex(output), math.HexOrDecimal64(gasUsed), t})
}
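// Minimal sketch (not part of this changeset) of how the JSONLogger above can be
// wired into the EVM runtime as a tracer. The bytecode and gas values are made up;
// the Tracer/Debug fields and the four-value runtime.Create return come from this
// diff. Placed alongside json_logger.go in package main for illustration only.
package main

import (
	"fmt"
	"os"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/vm"
	"github.com/ethereum/go-ethereum/core/vm/runtime"
)

func exampleJSONTrace() {
	logconfig := &vm.LogConfig{DisableMemory: true} // trim the per-opcode memory dump
	tracer := NewJSONLogger(logconfig, os.Stdout)   // one JSON object per executed opcode

	cfg := runtime.Config{
		GasLimit: 10000000,
		EVMConfig: vm.Config{
			Tracer: tracer,
			Debug:  true, // tracers only run with Debug enabled
		},
	}
	code := common.Hex2Bytes("6001600101") // PUSH1 1, PUSH1 1, ADD
	ret, _, leftOverGas, err := runtime.Create(code, &cfg)
	fmt.Printf("ret=%x gasUsed=%d err=%v\n", ret, cfg.GasLimit-leftOverGas, err)
}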
@ -90,6 +90,26 @@ var (
|
||||
Name: "nogasmetering",
|
||||
Usage: "disable gas metering",
|
||||
}
|
||||
GenesisFlag = cli.StringFlag{
|
||||
Name: "prestate",
|
||||
Usage: "JSON file with prestate (genesis) config",
|
||||
}
|
||||
MachineFlag = cli.BoolFlag{
|
||||
Name: "json",
|
||||
Usage: "output trace logs in machine readable format (json)",
|
||||
}
|
||||
SenderFlag = cli.StringFlag{
|
||||
Name: "sender",
|
||||
Usage: "The transaction origin",
|
||||
}
|
||||
DisableMemoryFlag = cli.BoolFlag{
|
||||
Name: "nomemory",
|
||||
Usage: "disable memory output",
|
||||
}
|
||||
DisableStackFlag = cli.BoolFlag{
|
||||
Name: "nostack",
|
||||
Usage: "disable stack output",
|
||||
}
|
||||
)
|
||||
|
||||
func init() {
|
||||
@ -108,6 +128,11 @@ func init() {
|
||||
MemProfileFlag,
|
||||
CPUProfileFlag,
|
||||
StatDumpFlag,
|
||||
GenesisFlag,
|
||||
MachineFlag,
|
||||
SenderFlag,
|
||||
DisableMemoryFlag,
|
||||
DisableStackFlag,
|
||||
}
|
||||
app.Commands = []cli.Command{
|
||||
compileCommand,
|
||||
|
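// Example invocation (illustrative; bytecode and addresses are made up, and the
// --code flag predates this change) combining the flags registered above:
//
//	evm --json --nomemory --prestate genesis.json \
//	    --sender 0x0000000000000000000000000000000000001337 \
//	    --code 6001600101 run
//
// --json switches the trace to the machine-readable JSONLogger output, --nomemory
// and --nostack trim the per-opcode dump, --prestate seeds the state from a genesis
// file, and --sender overrides the transaction origin.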
@ -18,6 +18,7 @@ package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
@ -29,11 +30,13 @@ import (
|
||||
"github.com/ethereum/go-ethereum/cmd/evm/internal/compiler"
|
||||
"github.com/ethereum/go-ethereum/cmd/utils"
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/core"
|
||||
"github.com/ethereum/go-ethereum/core/state"
|
||||
"github.com/ethereum/go-ethereum/core/vm"
|
||||
"github.com/ethereum/go-ethereum/core/vm/runtime"
|
||||
"github.com/ethereum/go-ethereum/ethdb"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"github.com/ethereum/go-ethereum/params"
|
||||
cli "gopkg.in/urfave/cli.v1"
|
||||
)
|
||||
|
||||
@ -45,17 +48,63 @@ var runCommand = cli.Command{
|
||||
Description: `The run command runs arbitrary EVM code.`,
|
||||
}
|
||||
|
||||
// readGenesis will read the given JSON format genesis file and return
// the initialized Genesis structure
func readGenesis(genesisPath string) *core.Genesis {
	// Make sure we have a valid genesis JSON
	//genesisPath := ctx.Args().First()
	if len(genesisPath) == 0 {
		utils.Fatalf("Must supply path to genesis JSON file")
	}
	file, err := os.Open(genesisPath)
	if err != nil {
		utils.Fatalf("Failed to read genesis file: %v", err)
	}
	defer file.Close()

	genesis := new(core.Genesis)
	if err := json.NewDecoder(file).Decode(genesis); err != nil {
		utils.Fatalf("invalid genesis file: %v", err)
	}
	return genesis
}
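// For reference (not part of the diff): a minimal prestate file that readGenesis
// above can decode. Field names follow the core.Genesis JSON encoding; the values
// and the allocated address are made up.
//
//	{
//	  "config":     { "homesteadBlock": 0 },
//	  "difficulty": "0x20000",
//	  "gasLimit":   "0x2fefd8",
//	  "alloc": {
//	    "0x0000000000000000000000000000000000000001": { "balance": "0x100000" }
//	  }
//	}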
||||
|
||||
func runCmd(ctx *cli.Context) error {
|
||||
glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, log.TerminalFormat(false)))
|
||||
glogger.Verbosity(log.Lvl(ctx.GlobalInt(VerbosityFlag.Name)))
|
||||
log.Root().SetHandler(glogger)
|
||||
logconfig := &vm.LogConfig{
|
||||
DisableMemory: ctx.GlobalBool(DisableMemoryFlag.Name),
|
||||
DisableStack: ctx.GlobalBool(DisableStackFlag.Name),
|
||||
}
|
||||
|
||||
var (
|
||||
db, _ = ethdb.NewMemDatabase()
|
||||
statedb, _ = state.New(common.Hash{}, db)
|
||||
sender = common.StringToAddress("sender")
|
||||
logger = vm.NewStructLogger(nil)
|
||||
tracer vm.Tracer
|
||||
debugLogger *vm.StructLogger
|
||||
statedb *state.StateDB
|
||||
chainConfig *params.ChainConfig
|
||||
sender = common.StringToAddress("sender")
|
||||
)
|
||||
if ctx.GlobalBool(MachineFlag.Name) {
|
||||
tracer = NewJSONLogger(logconfig, os.Stdout)
|
||||
} else if ctx.GlobalBool(DebugFlag.Name) {
|
||||
debugLogger = vm.NewStructLogger(logconfig)
|
||||
tracer = debugLogger
|
||||
} else {
|
||||
debugLogger = vm.NewStructLogger(logconfig)
|
||||
}
|
||||
if ctx.GlobalString(GenesisFlag.Name) != "" {
|
||||
gen := readGenesis(ctx.GlobalString(GenesisFlag.Name))
|
||||
_, statedb = gen.ToBlock()
|
||||
chainConfig = gen.Config
|
||||
} else {
|
||||
db, _ := ethdb.NewMemDatabase()
|
||||
statedb, _ = state.New(common.Hash{}, state.NewDatabase(db))
|
||||
}
|
||||
if ctx.GlobalString(SenderFlag.Name) != "" {
|
||||
sender = common.HexToAddress(ctx.GlobalString(SenderFlag.Name))
|
||||
}
|
||||
|
||||
statedb.CreateAccount(sender)
|
||||
|
||||
var (
|
||||
@ -95,16 +144,16 @@ func runCmd(ctx *cli.Context) error {
|
||||
}
|
||||
code = common.Hex2Bytes(string(bytes.TrimRight(hexcode, "\n")))
|
||||
}
|
||||
|
||||
initialGas := ctx.GlobalUint64(GasFlag.Name)
|
||||
runtimeConfig := runtime.Config{
|
||||
Origin: sender,
|
||||
State: statedb,
|
||||
GasLimit: ctx.GlobalUint64(GasFlag.Name),
|
||||
GasLimit: initialGas,
|
||||
GasPrice: utils.GlobalBig(ctx, PriceFlag.Name),
|
||||
Value: utils.GlobalBig(ctx, ValueFlag.Name),
|
||||
EVMConfig: vm.Config{
|
||||
Tracer: logger,
|
||||
Debug: ctx.GlobalBool(DebugFlag.Name),
|
||||
Tracer: tracer,
|
||||
Debug: ctx.GlobalBool(DebugFlag.Name) || ctx.GlobalBool(MachineFlag.Name),
|
||||
DisableGasMetering: ctx.GlobalBool(DisableGasMeteringFlag.Name),
|
||||
},
|
||||
}
|
||||
@ -122,20 +171,24 @@ func runCmd(ctx *cli.Context) error {
|
||||
defer pprof.StopCPUProfile()
|
||||
}
|
||||
|
||||
if chainConfig != nil {
|
||||
runtimeConfig.ChainConfig = chainConfig
|
||||
}
|
||||
tstart := time.Now()
|
||||
var leftOverGas uint64
|
||||
if ctx.GlobalBool(CreateFlag.Name) {
|
||||
input := append(code, common.Hex2Bytes(ctx.GlobalString(InputFlag.Name))...)
|
||||
ret, _, err = runtime.Create(input, &runtimeConfig)
|
||||
ret, _, leftOverGas, err = runtime.Create(input, &runtimeConfig)
|
||||
} else {
|
||||
receiver := common.StringToAddress("receiver")
|
||||
statedb.SetCode(receiver, code)
|
||||
|
||||
ret, err = runtime.Call(receiver, common.Hex2Bytes(ctx.GlobalString(InputFlag.Name)), &runtimeConfig)
|
||||
ret, leftOverGas, err = runtime.Call(receiver, common.Hex2Bytes(ctx.GlobalString(InputFlag.Name)), &runtimeConfig)
|
||||
}
|
||||
execTime := time.Since(tstart)
|
||||
|
||||
if ctx.GlobalBool(DumpFlag.Name) {
|
||||
statedb.Commit(true)
|
||||
statedb.IntermediateRoot(true)
|
||||
fmt.Println(string(statedb.Dump()))
|
||||
}
|
||||
|
||||
@ -153,8 +206,10 @@ func runCmd(ctx *cli.Context) error {
|
||||
}
|
||||
|
||||
if ctx.GlobalBool(DebugFlag.Name) {
|
||||
fmt.Fprintln(os.Stderr, "#### TRACE ####")
|
||||
vm.WriteTrace(os.Stderr, logger.StructLogs())
|
||||
if debugLogger != nil {
|
||||
fmt.Fprintln(os.Stderr, "#### TRACE ####")
|
||||
vm.WriteTrace(os.Stderr, debugLogger.StructLogs())
|
||||
}
|
||||
fmt.Fprintln(os.Stderr, "#### LOGS ####")
|
||||
vm.WriteLogs(os.Stderr, statedb.Logs())
|
||||
}
|
||||
@ -167,14 +222,18 @@ heap objects: %d
|
||||
allocations: %d
|
||||
total allocations: %d
|
||||
GC calls: %d
|
||||
Gas used: %d
|
||||
|
||||
`, execTime, mem.HeapObjects, mem.Alloc, mem.TotalAlloc, mem.NumGC)
|
||||
`, execTime, mem.HeapObjects, mem.Alloc, mem.TotalAlloc, mem.NumGC, initialGas-leftOverGas)
|
||||
}
|
||||
if tracer != nil {
|
||||
tracer.CaptureEnd(ret, initialGas-leftOverGas, execTime)
|
||||
} else {
|
||||
fmt.Printf("0x%x\n", ret)
|
||||
}
|
||||
|
||||
fmt.Printf("0x%x", ret)
|
||||
if err != nil {
|
||||
fmt.Printf(" error: %v", err)
|
||||
fmt.Printf(" error: %v\n", err)
|
||||
}
|
||||
fmt.Println()
|
||||
return nil
|
||||
}
|
||||
|
@ -233,7 +233,7 @@ func unlockAccount(ctx *cli.Context, ks *keystore.KeyStore, address string, i in
|
||||
return accounts.Account{}, ""
|
||||
}
|
||||
|
||||
// getPassPhrase retrieves the passwor associated with an account, either fetched
|
||||
// getPassPhrase retrieves the password associated with an account, either fetched
|
||||
// from a list of preloaded passphrases, or requested interactively from the user.
|
||||
func getPassPhrase(prompt string, confirmation bool, i int, passwords []string) string {
|
||||
// If a list of passwords was supplied, retrieve from them
|
||||
|
@ -44,21 +44,21 @@ func tmpDatadirWithKeystore(t *testing.T) string {
|
||||
|
||||
func TestAccountListEmpty(t *testing.T) {
|
||||
geth := runGeth(t, "account", "list")
|
||||
geth.expectExit()
|
||||
geth.ExpectExit()
|
||||
}
|
||||
|
||||
func TestAccountList(t *testing.T) {
|
||||
datadir := tmpDatadirWithKeystore(t)
|
||||
geth := runGeth(t, "account", "list", "--datadir", datadir)
|
||||
defer geth.expectExit()
|
||||
defer geth.ExpectExit()
|
||||
if runtime.GOOS == "windows" {
|
||||
geth.expect(`
|
||||
geth.Expect(`
|
||||
Account #0: {7ef5a6135f1fd6a02593eedc869c6d41d934aef8} keystore://{{.Datadir}}\keystore\UTC--2016-03-22T12-57-55.920751759Z--7ef5a6135f1fd6a02593eedc869c6d41d934aef8
|
||||
Account #1: {f466859ead1932d743d622cb74fc058882e8648a} keystore://{{.Datadir}}\keystore\aaa
|
||||
Account #2: {289d485d9771714cce91d3393d764e1311907acc} keystore://{{.Datadir}}\keystore\zzz
|
||||
`)
|
||||
} else {
|
||||
geth.expect(`
|
||||
geth.Expect(`
|
||||
Account #0: {7ef5a6135f1fd6a02593eedc869c6d41d934aef8} keystore://{{.Datadir}}/keystore/UTC--2016-03-22T12-57-55.920751759Z--7ef5a6135f1fd6a02593eedc869c6d41d934aef8
|
||||
Account #1: {f466859ead1932d743d622cb74fc058882e8648a} keystore://{{.Datadir}}/keystore/aaa
|
||||
Account #2: {289d485d9771714cce91d3393d764e1311907acc} keystore://{{.Datadir}}/keystore/zzz
|
||||
@ -68,20 +68,20 @@ Account #2: {289d485d9771714cce91d3393d764e1311907acc} keystore://{{.Datadir}}/k
|
||||
|
||||
func TestAccountNew(t *testing.T) {
|
||||
geth := runGeth(t, "account", "new", "--lightkdf")
|
||||
defer geth.expectExit()
|
||||
geth.expect(`
|
||||
defer geth.ExpectExit()
|
||||
geth.Expect(`
|
||||
Your new account is locked with a password. Please give a password. Do not forget this password.
|
||||
!! Unsupported terminal, password will be echoed.
|
||||
Passphrase: {{.InputLine "foobar"}}
|
||||
Repeat passphrase: {{.InputLine "foobar"}}
|
||||
`)
|
||||
geth.expectRegexp(`Address: \{[0-9a-f]{40}\}\n`)
|
||||
geth.ExpectRegexp(`Address: \{[0-9a-f]{40}\}\n`)
|
||||
}
|
||||
|
||||
func TestAccountNewBadRepeat(t *testing.T) {
|
||||
geth := runGeth(t, "account", "new", "--lightkdf")
|
||||
defer geth.expectExit()
|
||||
geth.expect(`
|
||||
defer geth.ExpectExit()
|
||||
geth.Expect(`
|
||||
Your new account is locked with a password. Please give a password. Do not forget this password.
|
||||
!! Unsupported terminal, password will be echoed.
|
||||
Passphrase: {{.InputLine "something"}}
|
||||
@ -95,8 +95,8 @@ func TestAccountUpdate(t *testing.T) {
|
||||
geth := runGeth(t, "account", "update",
|
||||
"--datadir", datadir, "--lightkdf",
|
||||
"f466859ead1932d743d622cb74fc058882e8648a")
|
||||
defer geth.expectExit()
|
||||
geth.expect(`
|
||||
defer geth.ExpectExit()
|
||||
geth.Expect(`
|
||||
Unlocking account f466859ead1932d743d622cb74fc058882e8648a | Attempt 1/3
|
||||
!! Unsupported terminal, password will be echoed.
|
||||
Passphrase: {{.InputLine "foobar"}}
|
||||
@ -108,8 +108,8 @@ Repeat passphrase: {{.InputLine "foobar2"}}
|
||||
|
||||
func TestWalletImport(t *testing.T) {
|
||||
geth := runGeth(t, "wallet", "import", "--lightkdf", "testdata/guswallet.json")
|
||||
defer geth.expectExit()
|
||||
geth.expect(`
|
||||
defer geth.ExpectExit()
|
||||
geth.Expect(`
|
||||
!! Unsupported terminal, password will be echoed.
|
||||
Passphrase: {{.InputLine "foo"}}
|
||||
Address: {d4584b5f6229b7be90727b0fc8c6b91bb427821f}
|
||||
@ -123,8 +123,8 @@ Address: {d4584b5f6229b7be90727b0fc8c6b91bb427821f}
|
||||
|
||||
func TestWalletImportBadPassword(t *testing.T) {
|
||||
geth := runGeth(t, "wallet", "import", "--lightkdf", "testdata/guswallet.json")
|
||||
defer geth.expectExit()
|
||||
geth.expect(`
|
||||
defer geth.ExpectExit()
|
||||
geth.Expect(`
|
||||
!! Unsupported terminal, password will be echoed.
|
||||
Passphrase: {{.InputLine "wrong"}}
|
||||
Fatal: could not decrypt key with given passphrase
|
||||
@ -137,19 +137,19 @@ func TestUnlockFlag(t *testing.T) {
|
||||
"--datadir", datadir, "--nat", "none", "--nodiscover", "--dev",
|
||||
"--unlock", "f466859ead1932d743d622cb74fc058882e8648a",
|
||||
"js", "testdata/empty.js")
|
||||
geth.expect(`
|
||||
geth.Expect(`
|
||||
Unlocking account f466859ead1932d743d622cb74fc058882e8648a | Attempt 1/3
|
||||
!! Unsupported terminal, password will be echoed.
|
||||
Passphrase: {{.InputLine "foobar"}}
|
||||
`)
|
||||
geth.expectExit()
|
||||
geth.ExpectExit()
|
||||
|
||||
wantMessages := []string{
|
||||
"Unlocked account",
|
||||
"=0xf466859ead1932d743d622cb74fc058882e8648a",
|
||||
}
|
||||
for _, m := range wantMessages {
|
||||
if !strings.Contains(geth.stderrText(), m) {
|
||||
if !strings.Contains(geth.StderrText(), m) {
|
||||
t.Errorf("stderr text does not contain %q", m)
|
||||
}
|
||||
}
|
||||
@ -160,8 +160,8 @@ func TestUnlockFlagWrongPassword(t *testing.T) {
|
||||
geth := runGeth(t,
|
||||
"--datadir", datadir, "--nat", "none", "--nodiscover", "--dev",
|
||||
"--unlock", "f466859ead1932d743d622cb74fc058882e8648a")
|
||||
defer geth.expectExit()
|
||||
geth.expect(`
|
||||
defer geth.ExpectExit()
|
||||
geth.Expect(`
|
||||
Unlocking account f466859ead1932d743d622cb74fc058882e8648a | Attempt 1/3
|
||||
!! Unsupported terminal, password will be echoed.
|
||||
Passphrase: {{.InputLine "wrong1"}}
|
||||
@ -180,14 +180,14 @@ func TestUnlockFlagMultiIndex(t *testing.T) {
|
||||
"--datadir", datadir, "--nat", "none", "--nodiscover", "--dev",
|
||||
"--unlock", "0,2",
|
||||
"js", "testdata/empty.js")
|
||||
geth.expect(`
|
||||
geth.Expect(`
|
||||
Unlocking account 0 | Attempt 1/3
|
||||
!! Unsupported terminal, password will be echoed.
|
||||
Passphrase: {{.InputLine "foobar"}}
|
||||
Unlocking account 2 | Attempt 1/3
|
||||
Passphrase: {{.InputLine "foobar"}}
|
||||
`)
|
||||
geth.expectExit()
|
||||
geth.ExpectExit()
|
||||
|
||||
wantMessages := []string{
|
||||
"Unlocked account",
|
||||
@ -195,7 +195,7 @@ Passphrase: {{.InputLine "foobar"}}
|
||||
"=0x289d485d9771714cce91d3393d764e1311907acc",
|
||||
}
|
||||
for _, m := range wantMessages {
|
||||
if !strings.Contains(geth.stderrText(), m) {
|
||||
if !strings.Contains(geth.StderrText(), m) {
|
||||
t.Errorf("stderr text does not contain %q", m)
|
||||
}
|
||||
}
|
||||
@ -207,7 +207,7 @@ func TestUnlockFlagPasswordFile(t *testing.T) {
|
||||
"--datadir", datadir, "--nat", "none", "--nodiscover", "--dev",
|
||||
"--password", "testdata/passwords.txt", "--unlock", "0,2",
|
||||
"js", "testdata/empty.js")
|
||||
geth.expectExit()
|
||||
geth.ExpectExit()
|
||||
|
||||
wantMessages := []string{
|
||||
"Unlocked account",
|
||||
@ -215,7 +215,7 @@ func TestUnlockFlagPasswordFile(t *testing.T) {
|
||||
"=0x289d485d9771714cce91d3393d764e1311907acc",
|
||||
}
|
||||
for _, m := range wantMessages {
|
||||
if !strings.Contains(geth.stderrText(), m) {
|
||||
if !strings.Contains(geth.StderrText(), m) {
|
||||
t.Errorf("stderr text does not contain %q", m)
|
||||
}
|
||||
}
|
||||
@ -226,8 +226,8 @@ func TestUnlockFlagPasswordFileWrongPassword(t *testing.T) {
|
||||
geth := runGeth(t,
|
||||
"--datadir", datadir, "--nat", "none", "--nodiscover", "--dev",
|
||||
"--password", "testdata/wrong-passwords.txt", "--unlock", "0,2")
|
||||
defer geth.expectExit()
|
||||
geth.expect(`
|
||||
defer geth.ExpectExit()
|
||||
geth.Expect(`
|
||||
Fatal: Failed to unlock account 0 (could not decrypt key with given passphrase)
|
||||
`)
|
||||
}
|
||||
@ -238,14 +238,14 @@ func TestUnlockFlagAmbiguous(t *testing.T) {
|
||||
"--keystore", store, "--nat", "none", "--nodiscover", "--dev",
|
||||
"--unlock", "f466859ead1932d743d622cb74fc058882e8648a",
|
||||
"js", "testdata/empty.js")
|
||||
defer geth.expectExit()
|
||||
defer geth.ExpectExit()
|
||||
|
||||
// Helper for the expect template, returns absolute keystore path.
|
||||
geth.setTemplateFunc("keypath", func(file string) string {
|
||||
geth.SetTemplateFunc("keypath", func(file string) string {
|
||||
abs, _ := filepath.Abs(filepath.Join(store, file))
|
||||
return abs
|
||||
})
|
||||
geth.expect(`
|
||||
geth.Expect(`
|
||||
Unlocking account f466859ead1932d743d622cb74fc058882e8648a | Attempt 1/3
|
||||
!! Unsupported terminal, password will be echoed.
|
||||
Passphrase: {{.InputLine "foobar"}}
|
||||
@ -257,14 +257,14 @@ Your passphrase unlocked keystore://{{keypath "1"}}
|
||||
In order to avoid this warning, you need to remove the following duplicate key files:
|
||||
keystore://{{keypath "2"}}
|
||||
`)
|
||||
geth.expectExit()
|
||||
geth.ExpectExit()
|
||||
|
||||
wantMessages := []string{
|
||||
"Unlocked account",
|
||||
"=0xf466859ead1932d743d622cb74fc058882e8648a",
|
||||
}
|
||||
for _, m := range wantMessages {
|
||||
if !strings.Contains(geth.stderrText(), m) {
|
||||
if !strings.Contains(geth.StderrText(), m) {
|
||||
t.Errorf("stderr text does not contain %q", m)
|
||||
}
|
||||
}
|
||||
@ -275,14 +275,14 @@ func TestUnlockFlagAmbiguousWrongPassword(t *testing.T) {
|
||||
geth := runGeth(t,
|
||||
"--keystore", store, "--nat", "none", "--nodiscover", "--dev",
|
||||
"--unlock", "f466859ead1932d743d622cb74fc058882e8648a")
|
||||
defer geth.expectExit()
|
||||
defer geth.ExpectExit()
|
||||
|
||||
// Helper for the expect template, returns absolute keystore path.
|
||||
geth.setTemplateFunc("keypath", func(file string) string {
|
||||
geth.SetTemplateFunc("keypath", func(file string) string {
|
||||
abs, _ := filepath.Abs(filepath.Join(store, file))
|
||||
return abs
|
||||
})
|
||||
geth.expect(`
|
||||
geth.Expect(`
|
||||
Unlocking account f466859ead1932d743d622cb74fc058882e8648a | Attempt 1/3
|
||||
!! Unsupported terminal, password will be echoed.
|
||||
Passphrase: {{.InputLine "wrong"}}
|
||||
@ -292,5 +292,5 @@ Multiple key files exist for address f466859ead1932d743d622cb74fc058882e8648a:
|
||||
Testing your passphrase against all of them...
|
||||
Fatal: None of the listed files could be unlocked.
|
||||
`)
|
||||
geth.expectExit()
|
||||
geth.ExpectExit()
|
||||
}
|
||||
|
@ -312,7 +312,7 @@ func dump(ctx *cli.Context) error {
|
||||
fmt.Println("{}")
|
||||
utils.Fatalf("block not found")
|
||||
} else {
|
||||
state, err := state.New(block.Root(), chainDb)
|
||||
state, err := state.New(block.Root(), state.NewDatabase(chainDb))
|
||||
if err != nil {
|
||||
utils.Fatalf("could not create new state: %v", err)
|
||||
}
|
||||
|
@ -33,6 +33,7 @@ import (
|
||||
"github.com/ethereum/go-ethereum/eth"
|
||||
"github.com/ethereum/go-ethereum/node"
|
||||
"github.com/ethereum/go-ethereum/params"
|
||||
whisper "github.com/ethereum/go-ethereum/whisper/whisperv5"
|
||||
"github.com/naoina/toml"
|
||||
)
|
||||
|
||||
@ -42,7 +43,7 @@ var (
|
||||
Name: "dumpconfig",
|
||||
Usage: "Show configuration values",
|
||||
ArgsUsage: "",
|
||||
Flags: append(nodeFlags, rpcFlags...),
|
||||
Flags: append(append(nodeFlags, rpcFlags...), whisperFlags...),
|
||||
Category: "MISCELLANEOUS COMMANDS",
|
||||
Description: `The dumpconfig command shows configuration values.`,
|
||||
}
|
||||
@ -76,6 +77,7 @@ type ethstatsConfig struct {
|
||||
|
||||
type gethConfig struct {
|
||||
Eth eth.Config
|
||||
Shh whisper.Config
|
||||
Node node.Config
|
||||
Ethstats ethstatsConfig
|
||||
}
|
||||
@ -99,8 +101,8 @@ func defaultNodeConfig() node.Config {
|
||||
cfg := node.DefaultConfig
|
||||
cfg.Name = clientIdentifier
|
||||
cfg.Version = params.VersionWithCommit(gitCommit)
|
||||
cfg.HTTPModules = append(cfg.HTTPModules, "eth")
|
||||
cfg.WSModules = append(cfg.WSModules, "eth")
|
||||
cfg.HTTPModules = append(cfg.HTTPModules, "eth", "shh")
|
||||
cfg.WSModules = append(cfg.WSModules, "eth", "shh")
|
||||
cfg.IPCPath = "geth.ipc"
|
||||
return cfg
|
||||
}
|
||||
@ -109,6 +111,7 @@ func makeConfigNode(ctx *cli.Context) (*node.Node, gethConfig) {
|
||||
// Load defaults.
|
||||
cfg := gethConfig{
|
||||
Eth: eth.DefaultConfig,
|
||||
Shh: whisper.DefaultConfig,
|
||||
Node: defaultNodeConfig(),
|
||||
}
|
||||
|
||||
@ -130,19 +133,37 @@ func makeConfigNode(ctx *cli.Context) (*node.Node, gethConfig) {
|
||||
cfg.Ethstats.URL = ctx.GlobalString(utils.EthStatsURLFlag.Name)
|
||||
}
|
||||
|
||||
utils.SetShhConfig(ctx, stack, &cfg.Shh)
|
||||
|
||||
return stack, cfg
|
||||
}
|
||||
|
||||
// enableWhisper returns true in case one of the whisper flags is set.
func enableWhisper(ctx *cli.Context) bool {
	for _, flag := range whisperFlags {
		if ctx.GlobalIsSet(flag.GetName()) {
			return true
		}
	}
	return false
}
|
||||
|
||||
func makeFullNode(ctx *cli.Context) *node.Node {
|
||||
stack, cfg := makeConfigNode(ctx)
|
||||
|
||||
utils.RegisterEthService(stack, &cfg.Eth)
|
||||
|
||||
// Whisper must be explicitly enabled, but is auto-enabled in --dev mode.
|
||||
shhEnabled := ctx.GlobalBool(utils.WhisperEnabledFlag.Name)
|
||||
// Whisper must be explicitly enabled by specifying at least 1 whisper flag or in dev mode
|
||||
shhEnabled := enableWhisper(ctx)
|
||||
shhAutoEnabled := !ctx.GlobalIsSet(utils.WhisperEnabledFlag.Name) && ctx.GlobalIsSet(utils.DevModeFlag.Name)
|
||||
if shhEnabled || shhAutoEnabled {
|
||||
utils.RegisterShhService(stack)
|
||||
if ctx.GlobalIsSet(utils.WhisperMaxMessageSizeFlag.Name) {
|
||||
cfg.Shh.MaxMessageSize = uint32(ctx.Int(utils.WhisperMaxMessageSizeFlag.Name))
|
||||
}
|
||||
if ctx.GlobalIsSet(utils.WhisperMinPOWFlag.Name) {
|
||||
cfg.Shh.MinimumAcceptedPOW = ctx.Float64(utils.WhisperMinPOWFlag.Name)
|
||||
}
|
||||
utils.RegisterShhService(stack, &cfg.Shh)
|
||||
}
|
||||
|
||||
// Add the Ethereum Stats daemon if requested.
|
||||
|
@ -35,7 +35,7 @@ var (
|
||||
Action: utils.MigrateFlags(localConsole),
|
||||
Name: "console",
|
||||
Usage: "Start an interactive JavaScript environment",
|
||||
Flags: append(append(nodeFlags, rpcFlags...), consoleFlags...),
|
||||
Flags: append(append(append(nodeFlags, rpcFlags...), consoleFlags...), whisperFlags...),
|
||||
Category: "CONSOLE COMMANDS",
|
||||
Description: `
|
||||
The Geth console is an interactive shell for the JavaScript runtime environment
|
||||
|
@ -47,15 +47,15 @@ func TestConsoleWelcome(t *testing.T) {
|
||||
"console")
|
||||
|
||||
// Gather all the infos the welcome message needs to contain
|
||||
geth.setTemplateFunc("goos", func() string { return runtime.GOOS })
|
||||
geth.setTemplateFunc("goarch", func() string { return runtime.GOARCH })
|
||||
geth.setTemplateFunc("gover", runtime.Version)
|
||||
geth.setTemplateFunc("gethver", func() string { return params.Version })
|
||||
geth.setTemplateFunc("niltime", func() string { return time.Unix(0, 0).Format(time.RFC1123) })
|
||||
geth.setTemplateFunc("apis", func() string { return ipcAPIs })
|
||||
geth.SetTemplateFunc("goos", func() string { return runtime.GOOS })
|
||||
geth.SetTemplateFunc("goarch", func() string { return runtime.GOARCH })
|
||||
geth.SetTemplateFunc("gover", runtime.Version)
|
||||
geth.SetTemplateFunc("gethver", func() string { return params.Version })
|
||||
geth.SetTemplateFunc("niltime", func() string { return time.Unix(0, 0).Format(time.RFC1123) })
|
||||
geth.SetTemplateFunc("apis", func() string { return ipcAPIs })
|
||||
|
||||
// Verify the actual welcome message to the required template
|
||||
geth.expect(`
|
||||
geth.Expect(`
|
||||
Welcome to the Geth JavaScript console!
|
||||
|
||||
instance: Geth/v{{gethver}}/{{goos}}-{{goarch}}/{{gover}}
|
||||
@ -66,7 +66,7 @@ at block: 0 ({{niltime}})
|
||||
|
||||
> {{.InputLine "exit"}}
|
||||
`)
|
||||
geth.expectExit()
|
||||
geth.ExpectExit()
|
||||
}
|
||||
|
||||
// Tests that a console can be attached to a running node via various means.
|
||||
@ -90,8 +90,8 @@ func TestIPCAttachWelcome(t *testing.T) {
|
||||
time.Sleep(2 * time.Second) // Simple way to wait for the RPC endpoint to open
|
||||
testAttachWelcome(t, geth, "ipc:"+ipc, ipcAPIs)
|
||||
|
||||
geth.interrupt()
|
||||
geth.expectExit()
|
||||
geth.Interrupt()
|
||||
geth.ExpectExit()
|
||||
}
|
||||
|
||||
func TestHTTPAttachWelcome(t *testing.T) {
|
||||
@ -104,8 +104,8 @@ func TestHTTPAttachWelcome(t *testing.T) {
|
||||
time.Sleep(2 * time.Second) // Simple way to wait for the RPC endpoint to open
|
||||
testAttachWelcome(t, geth, "http://localhost:"+port, httpAPIs)
|
||||
|
||||
geth.interrupt()
|
||||
geth.expectExit()
|
||||
geth.Interrupt()
|
||||
geth.ExpectExit()
|
||||
}
|
||||
|
||||
func TestWSAttachWelcome(t *testing.T) {
|
||||
@ -119,29 +119,29 @@ func TestWSAttachWelcome(t *testing.T) {
|
||||
time.Sleep(2 * time.Second) // Simple way to wait for the RPC endpoint to open
|
||||
testAttachWelcome(t, geth, "ws://localhost:"+port, httpAPIs)
|
||||
|
||||
geth.interrupt()
|
||||
geth.expectExit()
|
||||
geth.Interrupt()
|
||||
geth.ExpectExit()
|
||||
}
|
||||
|
||||
func testAttachWelcome(t *testing.T, geth *testgeth, endpoint, apis string) {
|
||||
// Attach to a running geth note and terminate immediately
|
||||
attach := runGeth(t, "attach", endpoint)
|
||||
defer attach.expectExit()
|
||||
attach.stdin.Close()
|
||||
defer attach.ExpectExit()
|
||||
attach.CloseStdin()
|
||||
|
||||
// Gather all the infos the welcome message needs to contain
|
||||
attach.setTemplateFunc("goos", func() string { return runtime.GOOS })
|
||||
attach.setTemplateFunc("goarch", func() string { return runtime.GOARCH })
|
||||
attach.setTemplateFunc("gover", runtime.Version)
|
||||
attach.setTemplateFunc("gethver", func() string { return params.Version })
|
||||
attach.setTemplateFunc("etherbase", func() string { return geth.Etherbase })
|
||||
attach.setTemplateFunc("niltime", func() string { return time.Unix(0, 0).Format(time.RFC1123) })
|
||||
attach.setTemplateFunc("ipc", func() bool { return strings.HasPrefix(endpoint, "ipc") })
|
||||
attach.setTemplateFunc("datadir", func() string { return geth.Datadir })
|
||||
attach.setTemplateFunc("apis", func() string { return apis })
|
||||
attach.SetTemplateFunc("goos", func() string { return runtime.GOOS })
|
||||
attach.SetTemplateFunc("goarch", func() string { return runtime.GOARCH })
|
||||
attach.SetTemplateFunc("gover", runtime.Version)
|
||||
attach.SetTemplateFunc("gethver", func() string { return params.Version })
|
||||
attach.SetTemplateFunc("etherbase", func() string { return geth.Etherbase })
|
||||
attach.SetTemplateFunc("niltime", func() string { return time.Unix(0, 0).Format(time.RFC1123) })
|
||||
attach.SetTemplateFunc("ipc", func() bool { return strings.HasPrefix(endpoint, "ipc") })
|
||||
attach.SetTemplateFunc("datadir", func() string { return geth.Datadir })
|
||||
attach.SetTemplateFunc("apis", func() string { return apis })
|
||||
|
||||
// Verify the actual welcome message to the required template
|
||||
attach.expect(`
|
||||
attach.Expect(`
|
||||
Welcome to the Geth JavaScript console!
|
||||
|
||||
instance: Geth/v{{gethver}}/{{goos}}-{{goarch}}/{{gover}}
|
||||
@ -152,7 +152,7 @@ at block: 0 ({{niltime}}){{if ipc}}
|
||||
|
||||
> {{.InputLine "exit" }}
|
||||
`)
|
||||
attach.expectExit()
|
||||
attach.ExpectExit()
|
||||
}
|
||||
|
||||
// trulyRandInt generates a crypto random integer used by the console tests to
|
||||
|
@ -89,7 +89,7 @@ func TestDAOForkBlockNewChain(t *testing.T) {
|
||||
expectVote bool
|
||||
}{
|
||||
// Test DAO Default Mainnet
|
||||
{"", params.MainNetDAOForkBlock, true},
|
||||
{"", params.MainnetChainConfig.DAOForkBlock, true},
|
||||
// test DAO Init Old Privnet
|
||||
{daoOldGenesis, nil, false},
|
||||
// test DAO Default No Fork Privnet
|
||||
@ -112,12 +112,12 @@ func testDAOForkBlockNewChain(t *testing.T, test int, genesis string, expectBloc
|
||||
if err := ioutil.WriteFile(json, []byte(genesis), 0600); err != nil {
|
||||
t.Fatalf("test %d: failed to write genesis file: %v", test, err)
|
||||
}
|
||||
runGeth(t, "--datadir", datadir, "init", json).cmd.Wait()
|
||||
runGeth(t, "--datadir", datadir, "init", json).WaitExit()
|
||||
} else {
|
||||
// Force chain initialization
|
||||
args := []string{"--port", "0", "--maxpeers", "0", "--nodiscover", "--nat", "none", "--ipcdisable", "--datadir", datadir}
|
||||
geth := runGeth(t, append(args, []string{"--exec", "2+2", "console"}...)...)
|
||||
geth.cmd.Wait()
|
||||
geth.WaitExit()
|
||||
}
|
||||
// Retrieve the DAO config flag from the database
|
||||
path := filepath.Join(datadir, "geth", "chaindata")
|
||||
|
@ -97,14 +97,14 @@ func TestCustomGenesis(t *testing.T) {
|
||||
if err := ioutil.WriteFile(json, []byte(tt.genesis), 0600); err != nil {
|
||||
t.Fatalf("test %d: failed to write genesis file: %v", i, err)
|
||||
}
|
||||
runGeth(t, "--datadir", datadir, "init", json).cmd.Wait()
|
||||
runGeth(t, "--datadir", datadir, "init", json).WaitExit()
|
||||
|
||||
// Query the custom genesis block
|
||||
geth := runGeth(t,
|
||||
"--datadir", datadir, "--maxpeers", "0", "--port", "0",
|
||||
"--nodiscover", "--nat", "none", "--ipcdisable",
|
||||
"--exec", tt.query, "console")
|
||||
geth.expectRegexp(tt.result)
|
||||
geth.expectExit()
|
||||
geth.ExpectRegexp(tt.result)
|
||||
geth.ExpectExit()
|
||||
}
|
||||
}
|
||||
|
@ -66,6 +66,7 @@ var (
|
||||
utils.EthashDatasetDirFlag,
|
||||
utils.EthashDatasetsInMemoryFlag,
|
||||
utils.EthashDatasetsOnDiskFlag,
|
||||
utils.TxPoolNoLocalsFlag,
|
||||
utils.TxPoolPriceLimitFlag,
|
||||
utils.TxPoolPriceBumpFlag,
|
||||
utils.TxPoolAccountSlotsFlag,
|
||||
@ -95,7 +96,6 @@ var (
|
||||
utils.NetrestrictFlag,
|
||||
utils.NodeKeyFileFlag,
|
||||
utils.NodeKeyHexFlag,
|
||||
utils.WhisperEnabledFlag,
|
||||
utils.DevModeFlag,
|
||||
utils.TestnetFlag,
|
||||
utils.RinkebyFlag,
|
||||
@ -125,6 +125,12 @@ var (
|
||||
utils.IPCDisabledFlag,
|
||||
utils.IPCPathFlag,
|
||||
}
|
||||
|
||||
whisperFlags = []cli.Flag{
|
||||
utils.WhisperEnabledFlag,
|
||||
utils.WhisperMaxMessageSizeFlag,
|
||||
utils.WhisperMinPOWFlag,
|
||||
}
|
||||
)
|
||||
|
||||
func init() {
|
||||
@ -161,6 +167,7 @@ func init() {
|
||||
app.Flags = append(app.Flags, rpcFlags...)
|
||||
app.Flags = append(app.Flags, consoleFlags...)
|
||||
app.Flags = append(app.Flags, debug.Flags...)
|
||||
app.Flags = append(app.Flags, whisperFlags...)
|
||||
|
||||
app.Before = func(ctx *cli.Context) error {
|
||||
runtime.GOMAXPROCS(runtime.NumCPU())
|
||||
@ -252,10 +259,12 @@ func startNode(ctx *cli.Context, stack *node.Node) {
|
||||
}()
|
||||
// Start auxiliary services if enabled
|
||||
if ctx.GlobalBool(utils.MiningEnabledFlag.Name) {
|
||||
// Mining only makes sense if a full Ethereum node is running
|
||||
var ethereum *eth.Ethereum
|
||||
if err := stack.Service(ðereum); err != nil {
|
||||
utils.Fatalf("ethereum service not running: %v", err)
|
||||
}
|
||||
// Use a reduced number of threads if requested
|
||||
if threads := ctx.GlobalInt(utils.MinerThreadsFlag.Name); threads > 0 {
|
||||
type threaded interface {
|
||||
SetThreads(threads int)
|
||||
@ -264,6 +273,8 @@ func startNode(ctx *cli.Context, stack *node.Node) {
|
||||
th.SetThreads(threads)
|
||||
}
|
||||
}
|
||||
// Set the gas price to the limits from the CLI and start mining
|
||||
ethereum.TxPool().SetGasPrice(utils.GlobalBig(ctx, utils.GasPriceFlag.Name))
|
||||
if err := ethereum.StartMining(true); err != nil {
|
||||
utils.Fatalf("Failed to start mining: %v", err)
|
||||
}
|
||||
|
@ -17,18 +17,13 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"os/exec"
|
||||
"regexp"
|
||||
"sync"
|
||||
"testing"
|
||||
"text/template"
|
||||
"time"
|
||||
|
||||
"github.com/docker/docker/pkg/reexec"
|
||||
"github.com/ethereum/go-ethereum/internal/cmdtest"
|
||||
)
|
||||
|
||||
func tmpdir(t *testing.T) string {
|
||||
@ -40,36 +35,37 @@ func tmpdir(t *testing.T) string {
|
||||
}
|
||||
|
||||
type testgeth struct {
|
||||
// For total convenience, all testing methods are available.
|
||||
*testing.T
|
||||
// template variables for expect
|
||||
Datadir string
|
||||
Executable string
|
||||
Etherbase string
|
||||
Func template.FuncMap
|
||||
*cmdtest.TestCmd
|
||||
|
||||
removeDatadir bool
|
||||
cmd *exec.Cmd
|
||||
stdout *bufio.Reader
|
||||
stdin io.WriteCloser
|
||||
stderr *testlogger
|
||||
// template variables for expect
|
||||
Datadir string
|
||||
Etherbase string
|
||||
}
|
||||
|
||||
func init() {
|
||||
// Run the app if we're the child process for runGeth.
|
||||
if os.Getenv("GETH_TEST_CHILD") != "" {
|
||||
// Run the app if we've been exec'd as "geth-test" in runGeth.
|
||||
reexec.Register("geth-test", func() {
|
||||
if err := app.Run(os.Args); err != nil {
|
||||
fmt.Fprintln(os.Stderr, err)
|
||||
os.Exit(1)
|
||||
}
|
||||
os.Exit(0)
|
||||
})
|
||||
}
|
||||
|
||||
func TestMain(m *testing.M) {
|
||||
// check if we have been reexec'd
|
||||
if reexec.Init() {
|
||||
return
|
||||
}
|
||||
os.Exit(m.Run())
|
||||
}
|
||||
|
||||
// spawns geth with the given command line args. If the args don't set --datadir, the
|
||||
// child g gets a temporary data directory.
|
||||
func runGeth(t *testing.T, args ...string) *testgeth {
|
||||
tt := &testgeth{T: t, Executable: os.Args[0]}
|
||||
tt := &testgeth{}
|
||||
tt.TestCmd = cmdtest.NewTestCmd(t, tt)
|
||||
for i, arg := range args {
|
||||
switch {
|
||||
case arg == "-datadir" || arg == "--datadir":
|
||||
@ -84,215 +80,19 @@ func runGeth(t *testing.T, args ...string) *testgeth {
|
||||
}
|
||||
if tt.Datadir == "" {
|
||||
tt.Datadir = tmpdir(t)
|
||||
tt.removeDatadir = true
|
||||
tt.Cleanup = func() { os.RemoveAll(tt.Datadir) }
|
||||
args = append([]string{"-datadir", tt.Datadir}, args...)
|
||||
// Remove the temporary datadir if something fails below.
|
||||
defer func() {
|
||||
if t.Failed() {
|
||||
os.RemoveAll(tt.Datadir)
|
||||
tt.Cleanup()
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
// Boot "geth". This actually runs the test binary but the init function
|
||||
// will prevent any tests from running.
|
||||
tt.stderr = &testlogger{t: t}
|
||||
tt.cmd = exec.Command(os.Args[0], args...)
|
||||
tt.cmd.Env = append(os.Environ(), "GETH_TEST_CHILD=1")
|
||||
tt.cmd.Stderr = tt.stderr
|
||||
stdout, err := tt.cmd.StdoutPipe()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
tt.stdout = bufio.NewReader(stdout)
|
||||
if tt.stdin, err = tt.cmd.StdinPipe(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := tt.cmd.Start(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
// Boot "geth". This actually runs the test binary but the TestMain
|
||||
// function will prevent any tests from running.
|
||||
tt.Run("geth-test", args...)
|
||||
|
||||
return tt
|
||||
}
|
||||
|
||||
// InputLine writes the given text to the childs stdin.
|
||||
// This method can also be called from an expect template, e.g.:
|
||||
//
|
||||
// geth.expect(`Passphrase: {{.InputLine "password"}}`)
|
||||
func (tt *testgeth) InputLine(s string) string {
|
||||
io.WriteString(tt.stdin, s+"\n")
|
||||
return ""
|
||||
}
|
||||
|
||||
func (tt *testgeth) setTemplateFunc(name string, fn interface{}) {
|
||||
if tt.Func == nil {
|
||||
tt.Func = make(map[string]interface{})
|
||||
}
|
||||
tt.Func[name] = fn
|
||||
}
|
||||
|
||||
// expect runs its argument as a template, then expects the
|
||||
// child process to output the result of the template within 5s.
|
||||
//
|
||||
// If the template starts with a newline, the newline is removed
|
||||
// before matching.
|
||||
func (tt *testgeth) expect(tplsource string) {
|
||||
// Generate the expected output by running the template.
|
||||
tpl := template.Must(template.New("").Funcs(tt.Func).Parse(tplsource))
|
||||
wantbuf := new(bytes.Buffer)
|
||||
if err := tpl.Execute(wantbuf, tt); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
// Trim exactly one newline at the beginning. This makes tests look
|
||||
// much nicer because all expect strings are at column 0.
|
||||
want := bytes.TrimPrefix(wantbuf.Bytes(), []byte("\n"))
|
||||
if err := tt.matchExactOutput(want); err != nil {
|
||||
tt.Fatal(err)
|
||||
}
|
||||
tt.Logf("Matched stdout text:\n%s", want)
|
||||
}
|
||||
|
||||
func (tt *testgeth) matchExactOutput(want []byte) error {
|
||||
buf := make([]byte, len(want))
|
||||
n := 0
|
||||
tt.withKillTimeout(func() { n, _ = io.ReadFull(tt.stdout, buf) })
|
||||
buf = buf[:n]
|
||||
if n < len(want) || !bytes.Equal(buf, want) {
|
||||
// Grab any additional buffered output in case of mismatch
|
||||
// because it might help with debugging.
|
||||
buf = append(buf, make([]byte, tt.stdout.Buffered())...)
|
||||
tt.stdout.Read(buf[n:])
|
||||
// Find the mismatch position.
|
||||
for i := 0; i < n; i++ {
|
||||
if want[i] != buf[i] {
|
||||
return fmt.Errorf("Output mismatch at ◊:\n---------------- (stdout text)\n%s◊%s\n---------------- (expected text)\n%s",
|
||||
buf[:i], buf[i:n], want)
|
||||
}
|
||||
}
|
||||
if n < len(want) {
|
||||
return fmt.Errorf("Not enough output, got until ◊:\n---------------- (stdout text)\n%s\n---------------- (expected text)\n%s◊%s",
|
||||
buf, want[:n], want[n:])
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// expectRegexp expects the child process to output text matching the
|
||||
// given regular expression within 5s.
|
||||
//
|
||||
// Note that an arbitrary amount of output may be consumed by the
|
||||
// regular expression. This usually means that expect cannot be used
|
||||
// after expectRegexp.
|
||||
func (tt *testgeth) expectRegexp(resource string) (*regexp.Regexp, []string) {
|
||||
var (
|
||||
re = regexp.MustCompile(resource)
|
||||
rtee = &runeTee{in: tt.stdout}
|
||||
matches []int
|
||||
)
|
||||
tt.withKillTimeout(func() { matches = re.FindReaderSubmatchIndex(rtee) })
|
||||
output := rtee.buf.Bytes()
|
||||
if matches == nil {
|
||||
tt.Fatalf("Output did not match:\n---------------- (stdout text)\n%s\n---------------- (regular expression)\n%s",
|
||||
output, resource)
|
||||
return re, nil
|
||||
}
|
||||
tt.Logf("Matched stdout text:\n%s", output)
|
||||
var submatch []string
|
||||
for i := 0; i < len(matches); i += 2 {
|
||||
submatch = append(submatch, string(output[i:i+1]))
|
||||
}
|
||||
return re, submatch
|
||||
}
|
||||
|
||||
// expectExit expects the child process to exit within 5s without
|
||||
// printing any additional text on stdout.
|
||||
func (tt *testgeth) expectExit() {
|
||||
var output []byte
|
||||
tt.withKillTimeout(func() {
|
||||
output, _ = ioutil.ReadAll(tt.stdout)
|
||||
})
|
||||
tt.cmd.Wait()
|
||||
if tt.removeDatadir {
|
||||
os.RemoveAll(tt.Datadir)
|
||||
}
|
||||
if len(output) > 0 {
|
||||
tt.Errorf("Unmatched stdout text:\n%s", output)
|
||||
}
|
||||
}
|
||||
|
||||
func (tt *testgeth) interrupt() {
|
||||
tt.cmd.Process.Signal(os.Interrupt)
|
||||
}
|
||||
|
||||
// stderrText returns any stderr output written so far.
|
||||
// The returned text holds all log lines after expectExit has
|
||||
// returned.
|
||||
func (tt *testgeth) stderrText() string {
|
||||
tt.stderr.mu.Lock()
|
||||
defer tt.stderr.mu.Unlock()
|
||||
return tt.stderr.buf.String()
|
||||
}
|
||||
|
||||
func (tt *testgeth) withKillTimeout(fn func()) {
|
||||
timeout := time.AfterFunc(5*time.Second, func() {
|
||||
tt.Log("killing the child process (timeout)")
|
||||
tt.cmd.Process.Kill()
|
||||
if tt.removeDatadir {
|
||||
os.RemoveAll(tt.Datadir)
|
||||
}
|
||||
})
|
||||
defer timeout.Stop()
|
||||
fn()
|
||||
}
|
||||
|
||||
// testlogger logs all written lines via t.Log and also
|
||||
// collects them for later inspection.
|
||||
type testlogger struct {
|
||||
t *testing.T
|
||||
mu sync.Mutex
|
||||
buf bytes.Buffer
|
||||
}
|
||||
|
||||
func (tl *testlogger) Write(b []byte) (n int, err error) {
|
||||
lines := bytes.Split(b, []byte("\n"))
|
||||
for _, line := range lines {
|
||||
if len(line) > 0 {
|
||||
tl.t.Logf("(stderr) %s", line)
|
||||
}
|
||||
}
|
||||
tl.mu.Lock()
|
||||
tl.buf.Write(b)
|
||||
tl.mu.Unlock()
|
||||
return len(b), err
|
||||
}
|
||||
|
||||
// runeTee collects text read through it into buf.
|
||||
type runeTee struct {
|
||||
in interface {
|
||||
io.Reader
|
||||
io.ByteReader
|
||||
io.RuneReader
|
||||
}
|
||||
buf bytes.Buffer
|
||||
}
|
||||
|
||||
func (rtee *runeTee) Read(b []byte) (n int, err error) {
|
||||
n, err = rtee.in.Read(b)
|
||||
rtee.buf.Write(b[:n])
|
||||
return n, err
|
||||
}
|
||||
|
||||
func (rtee *runeTee) ReadRune() (r rune, size int, err error) {
|
||||
r, size, err = rtee.in.ReadRune()
|
||||
if err == nil {
|
||||
rtee.buf.WriteRune(r)
|
||||
}
|
||||
return r, size, err
|
||||
}
|
||||
|
||||
func (rtee *runeTee) ReadByte() (b byte, err error) {
|
||||
b, err = rtee.in.ReadByte()
|
||||
if err == nil {
|
||||
rtee.buf.WriteByte(b)
|
||||
}
|
||||
return b, err
|
||||
}
|
||||
|
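// Sketch (illustrative, not part of this changeset) of a test written against the
// new cmdtest-backed helpers used above; Expect/ExpectRegexp/ExpectExit are the
// exported replacements for the removed local expect helpers, and the command and
// expected output here are made up.
func TestVersionSketch(t *testing.T) {
	geth := runGeth(t, "version")
	defer geth.ExpectExit()
	geth.ExpectRegexp(`Geth`)
}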
@ -95,6 +95,7 @@ var AppHelpFlagGroups = []flagGroup{
|
||||
{
|
||||
Name: "TRANSACTION POOL",
|
||||
Flags: []cli.Flag{
|
||||
utils.TxPoolNoLocalsFlag,
|
||||
utils.TxPoolPriceLimitFlag,
|
||||
utils.TxPoolPriceBumpFlag,
|
||||
utils.TxPoolAccountSlotsFlag,
|
||||
@ -187,6 +188,10 @@ var AppHelpFlagGroups = []flagGroup{
|
||||
utils.NoCompactionFlag,
|
||||
}, debug.Flags...),
|
||||
},
|
||||
{
|
||||
Name: "WHISPER (EXPERIMENTAL)",
|
||||
Flags: whisperFlags,
|
||||
},
|
||||
{
|
||||
Name: "DEPRECATED",
|
||||
Flags: []cli.Flag{
|
||||
@ -195,10 +200,7 @@ var AppHelpFlagGroups = []flagGroup{
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "EXPERIMENTAL",
|
||||
Flags: []cli.Flag{
|
||||
utils.WhisperEnabledFlag,
|
||||
},
|
||||
Name: "MISC",
|
||||
},
|
||||
}
|
||||
|
||||
|
@ -165,8 +165,7 @@ func (w *wizard) deployFaucet() {
|
||||
}
|
||||
// Load up the credential needed to release funds
|
||||
if infos.node.keyJSON != "" {
|
||||
var key keystore.Key
|
||||
if err := json.Unmarshal([]byte(infos.node.keyJSON), &key); err != nil {
|
||||
if key, err := keystore.DecryptKey([]byte(infos.node.keyJSON), infos.node.keyPass); err != nil {
|
||||
infos.node.keyJSON, infos.node.keyPass = "", ""
|
||||
} else {
|
||||
fmt.Println()
|
||||
|
@ -17,21 +17,25 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/ecdsa"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"math/big"
|
||||
"os"
|
||||
"os/signal"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/accounts"
|
||||
"github.com/ethereum/go-ethereum/accounts/keystore"
|
||||
"github.com/ethereum/go-ethereum/cmd/utils"
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/console"
|
||||
"github.com/ethereum/go-ethereum/contracts/ens"
|
||||
"github.com/ethereum/go-ethereum/crypto"
|
||||
"github.com/ethereum/go-ethereum/ethclient"
|
||||
"github.com/ethereum/go-ethereum/internal/debug"
|
||||
@ -40,6 +44,7 @@ import (
|
||||
"github.com/ethereum/go-ethereum/p2p"
|
||||
"github.com/ethereum/go-ethereum/p2p/discover"
|
||||
"github.com/ethereum/go-ethereum/params"
|
||||
"github.com/ethereum/go-ethereum/rpc"
|
||||
"github.com/ethereum/go-ethereum/swarm"
|
||||
bzzapi "github.com/ethereum/go-ethereum/swarm/api"
|
||||
"gopkg.in/urfave/cli.v1"
|
||||
@ -87,15 +92,23 @@ var (
|
||||
Name: "swap",
|
||||
Usage: "Swarm SWAP enabled (default false)",
|
||||
}
|
||||
SwarmSwapAPIFlag = cli.StringFlag{
|
||||
Name: "swap-api",
|
||||
Usage: "URL of the Ethereum API provider to use to settle SWAP payments",
|
||||
}
|
||||
SwarmSyncEnabledFlag = cli.BoolTFlag{
|
||||
Name: "sync",
|
||||
Usage: "Swarm Syncing enabled (default true)",
|
||||
}
|
||||
EthAPIFlag = cli.StringFlag{
|
||||
Name: "ethapi",
|
||||
Usage: "URL of the Ethereum API provider",
|
||||
EnsAPIFlag = cli.StringFlag{
|
||||
Name: "ens-api",
|
||||
Usage: "URL of the Ethereum API provider to use for ENS record lookups",
|
||||
Value: node.DefaultIPCEndpoint("geth"),
|
||||
}
|
||||
EnsAddrFlag = cli.StringFlag{
|
||||
Name: "ens-addr",
|
||||
Usage: "ENS contract address (default is detected as testnet or mainnet using --ens-api)",
|
||||
}
|
||||
SwarmApiFlag = cli.StringFlag{
|
||||
Name: "bzzapi",
|
||||
Usage: "Swarm HTTP endpoint",
|
||||
@ -125,6 +138,12 @@ var (
|
||||
Name: "corsdomain",
|
||||
Usage: "Domain on which to send Access-Control-Allow-Origin header (multiple domains can be supplied separated by a ',')",
|
||||
}
|
||||
|
||||
// the following flags are deprecated and should be removed in the future
|
||||
DeprecatedEthAPIFlag = cli.StringFlag{
|
||||
Name: "ethapi",
|
||||
Usage: "DEPRECATED: please use --ens-api and --swap-api",
|
||||
}
|
||||
)
|
||||
|
||||
var defaultNodeConfig = node.DefaultConfig
|
||||
@ -249,9 +268,11 @@ Cleans database of corrupted entries.
|
||||
utils.PasswordFileFlag,
|
||||
// bzzd-specific flags
|
||||
CorsStringFlag,
|
||||
EthAPIFlag,
|
||||
EnsAPIFlag,
|
||||
EnsAddrFlag,
|
||||
SwarmConfigPathFlag,
|
||||
SwarmSwapEnabledFlag,
|
||||
SwarmSwapAPIFlag,
|
||||
SwarmSyncEnabledFlag,
|
||||
SwarmListenAddrFlag,
|
||||
SwarmPortFlag,
|
||||
@ -265,6 +286,8 @@ Cleans database of corrupted entries.
|
||||
SwarmUploadDefaultPath,
|
||||
SwarmUpFromStdinFlag,
|
||||
SwarmUploadMimeType,
|
||||
//deprecated flags
|
||||
DeprecatedEthAPIFlag,
|
||||
}
|
||||
app.Flags = append(app.Flags, debug.Flags...)
|
||||
app.Before = func(ctx *cli.Context) error {
|
||||
@ -299,6 +322,11 @@ func version(ctx *cli.Context) error {
|
||||
}
|
||||
|
||||
func bzzd(ctx *cli.Context) error {
|
||||
// exit if the deprecated --ethapi flag is set
|
||||
if ctx.GlobalString(DeprecatedEthAPIFlag.Name) != "" {
|
||||
utils.Fatalf("--ethapi is no longer a valid command line flag, please use --ens-api and/or --swap-api.")
|
||||
}
|
||||
|
||||
cfg := defaultNodeConfig
|
||||
utils.SetNodeConfig(ctx, &cfg)
|
||||
stack, err := node.New(&cfg)
|
||||
@ -333,6 +361,38 @@ func bzzd(ctx *cli.Context) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// detectEnsAddr determines the ENS contract address by getting both the
// version and genesis hash using the client and matching them to either
// mainnet or testnet addresses
func detectEnsAddr(client *rpc.Client) (common.Address, error) {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
|
||||
defer cancel()
|
||||
|
||||
var version string
|
||||
if err := client.CallContext(ctx, &version, "net_version"); err != nil {
|
||||
return common.Address{}, err
|
||||
}
|
||||
|
||||
block, err := ethclient.NewClient(client).BlockByNumber(ctx, big.NewInt(0))
|
||||
if err != nil {
|
||||
return common.Address{}, err
|
||||
}
|
||||
|
||||
switch {
|
||||
|
||||
case version == "1" && block.Hash() == params.MainnetGenesisHash:
|
||||
log.Info("using Mainnet ENS contract address", "addr", ens.MainNetAddress)
|
||||
return ens.MainNetAddress, nil
|
||||
|
||||
case version == "3" && block.Hash() == params.TestnetGenesisHash:
|
||||
log.Info("using Testnet ENS contract address", "addr", ens.TestNetAddress)
|
||||
return ens.TestNetAddress, nil
|
||||
|
||||
default:
|
||||
return common.Address{}, fmt.Errorf("unknown version and genesis hash: %s %s", version, block.Hash())
|
||||
}
|
||||
}
|
||||
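As a rough illustration only, this is how detectEnsAddr would typically be driven: dial the --ens-api endpoint over RPC and fall back to auto-detection when no --ens-addr override is given. This sketch assumes it sits next to the function above (same package, with the common and rpc imports already present); the endpoint value is a placeholder.

// Sketch: resolve the ENS registry address for a configured endpoint.
func resolveEnsRoot(ensapi, override string) (common.Address, error) {
	if override != "" {
		return common.HexToAddress(override), nil
	}
	client, err := rpc.Dial(ensapi) // e.g. an IPC path or http://127.0.0.1:8545
	if err != nil {
		return common.Address{}, err
	}
	defer client.Close()
	return detectEnsAddr(client)
}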
|
||||
func registerBzzService(ctx *cli.Context, stack *node.Node) {
|
||||
prvkey := getAccount(ctx, stack)
|
||||
|
||||
@ -356,20 +416,48 @@ func registerBzzService(ctx *cli.Context, stack *node.Node) {
|
||||
swapEnabled := ctx.GlobalBool(SwarmSwapEnabledFlag.Name)
|
||||
syncEnabled := ctx.GlobalBoolT(SwarmSyncEnabledFlag.Name)
|
||||
|
||||
ethapi := ctx.GlobalString(EthAPIFlag.Name)
|
||||
swapapi := ctx.GlobalString(SwarmSwapAPIFlag.Name)
|
||||
if swapEnabled && swapapi == "" {
|
||||
utils.Fatalf("SWAP is enabled but --swap-api is not set")
|
||||
}
|
||||
|
||||
ensapi := ctx.GlobalString(EnsAPIFlag.Name)
|
||||
ensAddr := ctx.GlobalString(EnsAddrFlag.Name)
|
||||
|
||||
cors := ctx.GlobalString(CorsStringFlag.Name)
|
||||
|
||||
boot := func(ctx *node.ServiceContext) (node.Service, error) {
|
||||
var client *ethclient.Client
|
||||
if len(ethapi) > 0 {
|
||||
client, err = ethclient.Dial(ethapi)
|
||||
var swapClient *ethclient.Client
|
||||
if swapapi != "" {
|
||||
log.Info("connecting to SWAP API", "url", swapapi)
|
||||
swapClient, err = ethclient.Dial(swapapi)
|
||||
if err != nil {
|
||||
utils.Fatalf("Can't connect: %v", err)
|
||||
return nil, fmt.Errorf("error connecting to SWAP API %s: %s", swapapi, err)
|
||||
}
|
||||
} else {
|
||||
swapEnabled = false
|
||||
}
|
||||
return swarm.NewSwarm(ctx, client, bzzconfig, swapEnabled, syncEnabled, cors)
|
||||
|
||||
var ensClient *ethclient.Client
|
||||
if ensapi != "" {
|
||||
log.Info("connecting to ENS API", "url", ensapi)
|
||||
client, err := rpc.Dial(ensapi)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error connecting to ENS API %s: %s", ensapi, err)
|
||||
}
|
||||
ensClient = ethclient.NewClient(client)
|
||||
|
||||
if ensAddr != "" {
|
||||
bzzconfig.EnsRoot = common.HexToAddress(ensAddr)
|
||||
} else {
|
||||
ensAddr, err := detectEnsAddr(client)
|
||||
if err == nil {
|
||||
bzzconfig.EnsRoot = ensAddr
|
||||
} else {
|
||||
log.Warn(fmt.Sprintf("could not determine ENS contract address, using default %s", bzzconfig.EnsRoot), "err", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return swarm.NewSwarm(ctx, swapClient, ensClient, bzzconfig, swapEnabled, syncEnabled, cors)
|
||||
}
|
||||
if err := stack.Register(boot); err != nil {
|
||||
utils.Fatalf("Failed to register the Swarm service: %v", err)
|
||||
|
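For readers less familiar with the node.Service plumbing used by registerBzzService, here is a minimal sketch of registering a service constructor on a node stack. The no-op service is a stand-in for illustration, not the Swarm service.

package main

import (
	"github.com/ethereum/go-ethereum/node"
	"github.com/ethereum/go-ethereum/p2p"
	"github.com/ethereum/go-ethereum/rpc"
)

// noopService satisfies node.Service but does nothing.
type noopService struct{}

func (*noopService) Protocols() []p2p.Protocol { return nil }
func (*noopService) APIs() []rpc.API           { return nil }
func (*noopService) Start(*p2p.Server) error   { return nil }
func (*noopService) Stop() error               { return nil }

func main() {
	cfg := node.DefaultConfig
	stack, err := node.New(&cfg)
	if err != nil {
		panic(err)
	}
	// Constructors run when the stack starts; returning an error aborts startup.
	if err := stack.Register(func(ctx *node.ServiceContext) (node.Service, error) {
		return &noopService{}, nil
	}); err != nil {
		panic(err)
	}
}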
255 cmd/swarm/run_test.go Normal file
@ -0,0 +1,255 @@
|
||||
// Copyright 2016 The go-ethereum Authors
|
||||
// This file is part of go-ethereum.
|
||||
//
|
||||
// go-ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// go-ethereum is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/docker/docker/pkg/reexec"
|
||||
"github.com/ethereum/go-ethereum/accounts/keystore"
|
||||
"github.com/ethereum/go-ethereum/internal/cmdtest"
|
||||
"github.com/ethereum/go-ethereum/node"
|
||||
"github.com/ethereum/go-ethereum/p2p"
|
||||
"github.com/ethereum/go-ethereum/rpc"
|
||||
"github.com/ethereum/go-ethereum/swarm"
|
||||
)
|
||||
|
||||
func init() {
|
||||
// Run the app if we've been exec'd as "swarm-test" in runSwarm.
|
||||
reexec.Register("swarm-test", func() {
|
||||
if err := app.Run(os.Args); err != nil {
|
||||
fmt.Fprintln(os.Stderr, err)
|
||||
os.Exit(1)
|
||||
}
|
||||
os.Exit(0)
|
||||
})
|
||||
}
|
||||
|
||||
func TestMain(m *testing.M) {
|
||||
// check if we have been reexec'd
|
||||
if reexec.Init() {
|
||||
return
|
||||
}
|
||||
os.Exit(m.Run())
|
||||
}
|
||||
|
||||
func runSwarm(t *testing.T, args ...string) *cmdtest.TestCmd {
|
||||
tt := cmdtest.NewTestCmd(t, nil)
|
||||
|
||||
// Boot "swarm". This actually runs the test binary but the TestMain
|
||||
// function will prevent any tests from running.
|
||||
tt.Run("swarm-test", args...)
|
||||
|
||||
return tt
|
||||
}
|
||||
|
||||
type testCluster struct {
|
||||
Nodes []*testNode
|
||||
TmpDir string
|
||||
}
|
||||
|
||||
// newTestCluster starts a test swarm cluster of the given size.
//
// A temporary directory is created and each node gets a data directory inside
// it.
//
// Each node listens on 127.0.0.1 with random ports for both the HTTP and p2p
// ports (assigned by first listening on 127.0.0.1:0 and then passing the ports
// as flags).
//
// When starting more than one node, they are connected together using the
// admin SetPeer RPC method.
func newTestCluster(t *testing.T, size int) *testCluster {
|
||||
cluster := &testCluster{}
|
||||
defer func() {
|
||||
if t.Failed() {
|
||||
cluster.Shutdown()
|
||||
}
|
||||
}()
|
||||
|
||||
tmpdir, err := ioutil.TempDir("", "swarm-test")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
cluster.TmpDir = tmpdir
|
||||
|
||||
// start the nodes
|
||||
cluster.Nodes = make([]*testNode, 0, size)
|
||||
for i := 0; i < size; i++ {
|
||||
dir := filepath.Join(cluster.TmpDir, fmt.Sprintf("swarm%02d", i))
|
||||
if err := os.Mkdir(dir, 0700); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
node := newTestNode(t, dir)
|
||||
node.Name = fmt.Sprintf("swarm%02d", i)
|
||||
|
||||
cluster.Nodes = append(cluster.Nodes, node)
|
||||
}
|
||||
|
||||
if size == 1 {
|
||||
return cluster
|
||||
}
|
||||
|
||||
// connect the nodes together
|
||||
for _, node := range cluster.Nodes {
|
||||
if err := node.Client.Call(nil, "admin_addPeer", cluster.Nodes[0].Enode); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
// wait until all nodes have the correct number of peers
|
||||
outer:
|
||||
for _, node := range cluster.Nodes {
|
||||
var peers []*p2p.PeerInfo
|
||||
for start := time.Now(); time.Since(start) < time.Minute; time.Sleep(50 * time.Millisecond) {
|
||||
if err := node.Client.Call(&peers, "admin_peers"); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if len(peers) == len(cluster.Nodes)-1 {
|
||||
continue outer
|
||||
}
|
||||
}
|
||||
t.Fatalf("%s only has %d / %d peers", node.Name, len(peers), len(cluster.Nodes)-1)
|
||||
}
|
||||
|
||||
return cluster
|
||||
}
|
||||
|
||||
func (c *testCluster) Shutdown() {
|
||||
for _, node := range c.Nodes {
|
||||
node.Shutdown()
|
||||
}
|
||||
os.RemoveAll(c.TmpDir)
|
||||
}
|
||||
|
||||
type testNode struct {
|
||||
Name string
|
||||
Addr string
|
||||
URL string
|
||||
Enode string
|
||||
Dir string
|
||||
Client *rpc.Client
|
||||
Cmd *cmdtest.TestCmd
|
||||
}
|
||||
|
||||
const testPassphrase = "swarm-test-passphrase"
|
||||
|
||||
func newTestNode(t *testing.T, dir string) *testNode {
|
||||
// create key
|
||||
conf := &node.Config{
|
||||
DataDir: dir,
|
||||
IPCPath: "bzzd.ipc",
|
||||
}
|
||||
n, err := node.New(conf)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
account, err := n.AccountManager().Backends(keystore.KeyStoreType)[0].(*keystore.KeyStore).NewAccount(testPassphrase)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
node := &testNode{Dir: dir}
|
||||
|
||||
// use a unique IPCPath when running tests on Windows
|
||||
if runtime.GOOS == "windows" {
|
||||
conf.IPCPath = fmt.Sprintf("bzzd-%s.ipc", account.Address.String())
|
||||
}
|
||||
|
||||
// assign ports
|
||||
httpPort, err := assignTCPPort()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
p2pPort, err := assignTCPPort()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// start the node
|
||||
node.Cmd = runSwarm(t,
|
||||
"--port", p2pPort,
|
||||
"--nodiscover",
|
||||
"--datadir", dir,
|
||||
"--ipcpath", conf.IPCPath,
|
||||
"--ens-api", "",
|
||||
"--bzzaccount", account.Address.String(),
|
||||
"--bzznetworkid", "321",
|
||||
"--bzzport", httpPort,
|
||||
"--verbosity", "6",
|
||||
)
|
||||
node.Cmd.InputLine(testPassphrase)
|
||||
defer func() {
|
||||
if t.Failed() {
|
||||
node.Shutdown()
|
||||
}
|
||||
}()
|
||||
|
||||
// wait for the node to start
|
||||
for start := time.Now(); time.Since(start) < 10*time.Second; time.Sleep(50 * time.Millisecond) {
|
||||
node.Client, err = rpc.Dial(conf.IPCEndpoint())
|
||||
if err == nil {
|
||||
break
|
||||
}
|
||||
}
|
||||
if node.Client == nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// load info
|
||||
var info swarm.Info
|
||||
if err := node.Client.Call(&info, "bzz_info"); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
node.Addr = net.JoinHostPort("127.0.0.1", info.Port)
|
||||
node.URL = "http://" + node.Addr
|
||||
|
||||
var nodeInfo p2p.NodeInfo
|
||||
if err := node.Client.Call(&nodeInfo, "admin_nodeInfo"); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
node.Enode = fmt.Sprintf("enode://%s@127.0.0.1:%s", nodeInfo.ID, p2pPort)
|
||||
|
||||
return node
|
||||
}
|
||||
|
||||
func (n *testNode) Shutdown() {
|
||||
if n.Cmd != nil {
|
||||
n.Cmd.Kill()
|
||||
}
|
||||
}
|
||||
|
||||
func assignTCPPort() (string, error) {
|
||||
l, err := net.Listen("tcp", "127.0.0.1:0")
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
l.Close()
|
||||
_, port, err := net.SplitHostPort(l.Addr().String())
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return port, nil
|
||||
}
|
@ -18,6 +18,7 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
@ -87,24 +88,32 @@ func upload(ctx *cli.Context) {
|
||||
if err != nil {
|
||||
utils.Fatalf("Error opening file: %s", err)
|
||||
}
|
||||
var hash string
|
||||
|
||||
// define a function which either uploads a directory or single file
|
||||
// based on the type of the file being uploaded
|
||||
var doUpload func() (hash string, err error)
|
||||
if stat.IsDir() {
|
||||
if !recursive {
|
||||
utils.Fatalf("Argument is a directory and recursive upload is disabled")
|
||||
doUpload = func() (string, error) {
|
||||
if !recursive {
|
||||
return "", errors.New("Argument is a directory and recursive upload is disabled")
|
||||
}
|
||||
return client.UploadDirectory(file, defaultPath, "")
|
||||
}
|
||||
hash, err = client.UploadDirectory(file, defaultPath, "")
|
||||
} else {
|
||||
if mimeType == "" {
|
||||
mimeType = detectMimeType(file)
|
||||
doUpload = func() (string, error) {
|
||||
f, err := swarm.Open(file)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("error opening file: %s", err)
|
||||
}
|
||||
defer f.Close()
|
||||
if mimeType == "" {
|
||||
mimeType = detectMimeType(file)
|
||||
}
|
||||
f.ContentType = mimeType
|
||||
return client.Upload(f, "")
|
||||
}
|
||||
f, err := swarm.Open(file)
|
||||
if err != nil {
|
||||
utils.Fatalf("Error opening file: %s", err)
|
||||
}
|
||||
defer f.Close()
|
||||
f.ContentType = mimeType
|
||||
hash, err = client.Upload(f, "")
|
||||
}
|
||||
hash, err := doUpload()
|
||||
if err != nil {
|
||||
utils.Fatalf("Upload failed: %s", err)
|
||||
}
|
||||
|
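The upload refactor above funnels both paths through a single doUpload closure so that error handling is written once. The same shape, reduced to a self-contained sketch with stand-in upload functions (the hash value and error text are illustrative):

package main

import (
	"errors"
	"fmt"
	"os"
)

func main() {
	stat, err := os.Stat(os.Args[1])
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}

	// Choose the strategy once; run it and handle errors once.
	var doUpload func() (string, error)
	if stat.IsDir() {
		doUpload = func() (string, error) { return "", errors.New("recursive upload disabled in this sketch") }
	} else {
		doUpload = func() (string, error) { return "stand-in-hash", nil }
	}

	hash, err := doUpload()
	if err != nil {
		fmt.Fprintln(os.Stderr, "upload failed:", err)
		os.Exit(1)
	}
	fmt.Println(hash)
}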
78 cmd/swarm/upload_test.go Normal file
@ -0,0 +1,78 @@
|
||||
// Copyright 2016 The go-ethereum Authors
|
||||
// This file is part of go-ethereum.
|
||||
//
|
||||
// go-ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// go-ethereum is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"os"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// TestCLISwarmUp tests that running 'swarm up' makes the resulting file
|
||||
// available from all nodes via the HTTP API
|
||||
func TestCLISwarmUp(t *testing.T) {
|
||||
t.Skip("flaky test")
|
||||
|
||||
// start 3 node cluster
|
||||
t.Log("starting 3 node cluster")
|
||||
cluster := newTestCluster(t, 3)
|
||||
defer cluster.Shutdown()
|
||||
|
||||
// create a tmp file
|
||||
tmp, err := ioutil.TempFile("", "swarm-test")
|
||||
assertNil(t, err)
|
||||
defer tmp.Close()
|
||||
defer os.Remove(tmp.Name())
|
||||
_, err = io.WriteString(tmp, "data")
|
||||
assertNil(t, err)
|
||||
|
||||
// upload the file with 'swarm up' and expect a hash
|
||||
t.Log("uploading file with 'swarm up'")
|
||||
up := runSwarm(t, "--bzzapi", cluster.Nodes[0].URL, "up", tmp.Name())
|
||||
_, matches := up.ExpectRegexp(`[a-f\d]{64}`)
|
||||
up.ExpectExit()
|
||||
hash := matches[0]
|
||||
t.Logf("file uploaded with hash %s", hash)
|
||||
|
||||
// get the file from the HTTP API of each node
|
||||
for _, node := range cluster.Nodes {
|
||||
t.Logf("getting file from %s", node.Name)
|
||||
res, err := http.Get(node.URL + "/bzz:/" + hash)
|
||||
assertNil(t, err)
|
||||
assertHTTPResponse(t, res, http.StatusOK, "data")
|
||||
}
|
||||
}
|
||||
|
||||
func assertNil(t *testing.T, err error) {
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func assertHTTPResponse(t *testing.T, res *http.Response, expectedStatus int, expectedBody string) {
|
||||
defer res.Body.Close()
|
||||
if res.StatusCode != expectedStatus {
|
||||
t.Fatalf("expected HTTP status %d, got %s", expectedStatus, res.Status)
|
||||
}
|
||||
data, err := ioutil.ReadAll(res.Body)
|
||||
assertNil(t, err)
|
||||
if string(data) != expectedBody {
|
||||
t.Fatalf("expected HTTP body %q, got %q", expectedBody, data)
|
||||
}
|
||||
}
|
@ -209,6 +209,10 @@ var (
|
||||
Value: eth.DefaultConfig.EthashDatasetsOnDisk,
|
||||
}
|
||||
// Transaction pool settings
|
||||
TxPoolNoLocalsFlag = cli.BoolFlag{
|
||||
Name: "txpool.nolocals",
|
||||
Usage: "Disables price exemptions for locally submitted transactions",
|
||||
}
|
||||
TxPoolPriceLimitFlag = cli.Uint64Flag{
|
||||
Name: "txpool.pricelimit",
|
||||
Usage: "Minimum gas price limit to enforce for acceptance into the pool",
|
||||
@ -440,11 +444,6 @@ var (
|
||||
Usage: "Restricts network communication to the given IP networks (CIDR masks)",
|
||||
}
|
||||
|
||||
WhisperEnabledFlag = cli.BoolFlag{
|
||||
Name: "shh",
|
||||
Usage: "Enable Whisper",
|
||||
}
|
||||
|
||||
// ATM the url is left to the user and deployment to
|
||||
JSpathFlag = cli.StringFlag{
|
||||
Name: "jspath",
|
||||
@ -463,6 +462,20 @@ var (
|
||||
Usage: "Suggested gas price is the given percentile of a set of recent transaction gas prices",
|
||||
Value: eth.DefaultConfig.GPO.Percentile,
|
||||
}
|
||||
WhisperEnabledFlag = cli.BoolFlag{
|
||||
Name: "shh",
|
||||
Usage: "Enable Whisper",
|
||||
}
|
||||
WhisperMaxMessageSizeFlag = cli.IntFlag{
|
||||
Name: "shh.maxmessagesize",
|
||||
Usage: "Max message size accepted",
|
||||
Value: int(whisper.DefaultMaxMessageSize),
|
||||
}
|
||||
WhisperMinPOWFlag = cli.Float64Flag{
|
||||
Name: "shh.pow",
|
||||
Usage: "Minimum POW accepted",
|
||||
Value: whisper.DefaultMinimumPoW,
|
||||
}
|
||||
)
|
||||
|
||||
// MakeDataDir retrieves the currently requested data directory, terminating
|
||||
@ -822,6 +835,9 @@ func setGPO(ctx *cli.Context, cfg *gasprice.Config) {
|
||||
}
|
||||
|
||||
func setTxPool(ctx *cli.Context, cfg *core.TxPoolConfig) {
|
||||
if ctx.GlobalIsSet(TxPoolNoLocalsFlag.Name) {
|
||||
cfg.NoLocals = ctx.GlobalBool(TxPoolNoLocalsFlag.Name)
|
||||
}
|
||||
if ctx.GlobalIsSet(TxPoolPriceLimitFlag.Name) {
|
||||
cfg.PriceLimit = ctx.GlobalUint64(TxPoolPriceLimitFlag.Name)
|
||||
}
|
||||
@ -878,6 +894,16 @@ func checkExclusive(ctx *cli.Context, flags ...cli.Flag) {
|
||||
}
|
||||
}
|
||||
|
||||
// SetShhConfig applies shh-related command line flags to the config.
|
||||
func SetShhConfig(ctx *cli.Context, stack *node.Node, cfg *whisper.Config) {
|
||||
if ctx.GlobalIsSet(WhisperMaxMessageSizeFlag.Name) {
|
||||
cfg.MaxMessageSize = uint32(ctx.GlobalUint(WhisperMaxMessageSizeFlag.Name))
|
||||
}
|
||||
if ctx.GlobalIsSet(WhisperMinPOWFlag.Name) {
|
||||
cfg.MinimumAcceptedPOW = ctx.GlobalFloat64(WhisperMinPOWFlag.Name)
|
||||
}
|
||||
}
|
||||
|
||||
// SetEthConfig applies eth-related command line flags to the config.
|
||||
func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *eth.Config) {
|
||||
// Avoid conflicting network flags
|
||||
@ -983,8 +1009,10 @@ func RegisterEthService(stack *node.Node, cfg *eth.Config) {
|
||||
}
|
||||
|
||||
// RegisterShhService configures Whisper and adds it to the given node.
|
||||
func RegisterShhService(stack *node.Node) {
|
||||
if err := stack.Register(func(*node.ServiceContext) (node.Service, error) { return whisper.New(), nil }); err != nil {
|
||||
func RegisterShhService(stack *node.Node, cfg *whisper.Config) {
|
||||
if err := stack.Register(func(n *node.ServiceContext) (node.Service, error) {
|
||||
return whisper.New(cfg), nil
|
||||
}); err != nil {
|
||||
Fatalf("Failed to register the Whisper service: %v", err)
|
||||
}
|
||||
}
|
||||
|
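Taken together, the new helpers let a command build a whisper.Config from the defaults, apply the --shh.* overrides, and register the service. A hedged sketch of that wiring follows; it is a fragment meant to live in a command's main package, and the whisperv5 import path plus the surrounding setup are assumptions rather than part of this diff.

// Assumed imports: cmd/utils, node, gopkg.in/urfave/cli.v1, and
// whisper "github.com/ethereum/go-ethereum/whisper/whisperv5".
func registerWhisper(ctx *cli.Context, stack *node.Node) {
	cfg := whisper.Config{
		MaxMessageSize:     whisper.DefaultMaxMessageSize,
		MinimumAcceptedPOW: whisper.DefaultMinimumPoW,
	}
	// Apply --shh.maxmessagesize and --shh.pow overrides, if set.
	utils.SetShhConfig(ctx, stack, &cfg)
	if ctx.GlobalBool(utils.WhisperEnabledFlag.Name) {
		utils.RegisterShhService(stack, &cfg)
	}
}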
@ -87,7 +87,7 @@ var (
|
||||
argVerbosity = flag.Int("verbosity", int(log.LvlError), "log verbosity level")
|
||||
argTTL = flag.Uint("ttl", 30, "time-to-live for messages in seconds")
|
||||
argWorkTime = flag.Uint("work", 5, "work time in seconds")
|
||||
argMaxSize = flag.Int("maxsize", whisper.DefaultMaxMessageLength, "max size of message")
|
||||
argMaxSize = flag.Uint("maxsize", uint(whisper.DefaultMaxMessageSize), "max size of message")
|
||||
argPoW = flag.Float64("pow", whisper.DefaultMinimumPoW, "PoW for normal messages in float format (e.g. 2.7)")
|
||||
argServerPoW = flag.Float64("mspow", whisper.DefaultMinimumPoW, "PoW requirement for Mail Server request")
|
||||
|
||||
@ -198,6 +198,11 @@ func initialize() {
|
||||
peers = append(peers, peer)
|
||||
}
|
||||
|
||||
cfg := &whisper.Config{
|
||||
MaxMessageSize: uint32(*argMaxSize),
|
||||
MinimumAcceptedPOW: *argPoW,
|
||||
}
|
||||
|
||||
if *mailServerMode {
|
||||
if len(msPassword) == 0 {
|
||||
msPassword, err = console.Stdin.PromptPassword("Please enter the Mail Server password: ")
|
||||
@ -205,11 +210,12 @@ func initialize() {
|
||||
utils.Fatalf("Failed to read Mail Server password: %s", err)
|
||||
}
|
||||
}
|
||||
shh = whisper.New()
|
||||
|
||||
shh = whisper.New(cfg)
|
||||
shh.RegisterServer(&mailServer)
|
||||
mailServer.Init(shh, *argDBPath, msPassword, *argServerPoW)
|
||||
} else {
|
||||
shh = whisper.New()
|
||||
shh = whisper.New(cfg)
|
||||
}
|
||||
|
||||
if *argPoW != whisper.DefaultMinimumPoW {
|
||||
@ -219,8 +225,8 @@ func initialize() {
|
||||
}
|
||||
}
|
||||
|
||||
if *argMaxSize != whisper.DefaultMaxMessageLength {
|
||||
err := shh.SetMaxMessageLength(*argMaxSize)
|
||||
if uint32(*argMaxSize) != whisper.DefaultMaxMessageSize {
|
||||
err := shh.SetMaxMessageSize(uint32(*argMaxSize))
|
||||
if err != nil {
|
||||
utils.Fatalf("Failed to set max message size: %s", err)
|
||||
}
|
||||
|
@ -32,7 +32,6 @@ package hexutil
|
||||
|
||||
import (
|
||||
"encoding/hex"
|
||||
"errors"
|
||||
"fmt"
|
||||
"math/big"
|
||||
"strconv"
|
||||
@ -41,17 +40,23 @@ import (
|
||||
const uintBits = 32 << (uint64(^uint(0)) >> 63)
|
||||
|
||||
var (
|
||||
ErrEmptyString = errors.New("empty hex string")
|
||||
ErrMissingPrefix = errors.New("missing 0x prefix for hex data")
|
||||
ErrSyntax = errors.New("invalid hex")
|
||||
ErrEmptyNumber = errors.New("hex number has no digits after 0x")
|
||||
ErrLeadingZero = errors.New("hex number has leading zero digits after 0x")
|
||||
ErrOddLength = errors.New("hex string has odd length")
|
||||
ErrUint64Range = errors.New("hex number does not fit into 64 bits")
|
||||
ErrUintRange = fmt.Errorf("hex number does not fit into %d bits", uintBits)
|
||||
ErrBig256Range = errors.New("hex number does not fit into 256 bits")
|
||||
ErrEmptyString = &decError{"empty hex string"}
|
||||
ErrSyntax = &decError{"invalid hex string"}
|
||||
ErrMissingPrefix = &decError{"hex string without 0x prefix"}
|
||||
ErrOddLength = &decError{"hex string of odd length"}
|
||||
ErrEmptyNumber = &decError{"hex string \"0x\""}
|
||||
ErrLeadingZero = &decError{"hex number with leading zero digits"}
|
||||
ErrUint64Range = &decError{"hex number > 64 bits"}
|
||||
ErrUintRange = &decError{fmt.Sprintf("hex number > %d bits", uintBits)}
|
||||
ErrBig256Range = &decError{"hex number > 256 bits"}
|
||||
)
|
||||
|
||||
type decError struct{ msg string }
|
||||
|
||||
func (err decError) Error() string {
|
||||
return string(err.msg)
|
||||
}
|
||||
|
||||
// Decode decodes a hex string with 0x prefix.
|
||||
func Decode(input string) ([]byte, error) {
|
||||
if len(input) == 0 {
|
||||
|
@ -18,15 +18,19 @@ package hexutil
|
||||
|
||||
import (
|
||||
"encoding/hex"
|
||||
"errors"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"math/big"
|
||||
"reflect"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
var (
|
||||
textZero = []byte(`0x0`)
|
||||
errNonString = errors.New("cannot unmarshal non-string as hex data")
|
||||
textZero = []byte(`0x0`)
|
||||
bytesT = reflect.TypeOf(Bytes(nil))
|
||||
bigT = reflect.TypeOf((*Big)(nil))
|
||||
uintT = reflect.TypeOf(Uint(0))
|
||||
uint64T = reflect.TypeOf(Uint64(0))
|
||||
)
|
||||
|
||||
// Bytes marshals/unmarshals as a JSON string with 0x prefix.
|
||||
@ -44,9 +48,9 @@ func (b Bytes) MarshalText() ([]byte, error) {
|
||||
// UnmarshalJSON implements json.Unmarshaler.
|
||||
func (b *Bytes) UnmarshalJSON(input []byte) error {
|
||||
if !isString(input) {
|
||||
return errNonString
|
||||
return errNonString(bytesT)
|
||||
}
|
||||
return b.UnmarshalText(input[1 : len(input)-1])
|
||||
return wrapTypeError(b.UnmarshalText(input[1:len(input)-1]), bytesT)
|
||||
}
|
||||
|
||||
// UnmarshalText implements encoding.TextUnmarshaler.
|
||||
@ -69,6 +73,16 @@ func (b Bytes) String() string {
|
||||
return Encode(b)
|
||||
}
|
||||
|
||||
// UnmarshalFixedJSON decodes the input as a string with 0x prefix. The length of out
// determines the required input length. This function is commonly used to implement the
// UnmarshalJSON method for fixed-size types.
func UnmarshalFixedJSON(typ reflect.Type, input, out []byte) error {
|
||||
if !isString(input) {
|
||||
return errNonString(typ)
|
||||
}
|
||||
return wrapTypeError(UnmarshalFixedText(typ.String(), input[1:len(input)-1], out), typ)
|
||||
}
|
||||
|
||||
// UnmarshalFixedText decodes the input as a string with 0x prefix. The length of out
|
||||
// determines the required input length. This function is commonly used to implement the
|
||||
// UnmarshalText method for fixed-size types.
|
||||
@ -127,9 +141,9 @@ func (b Big) MarshalText() ([]byte, error) {
|
||||
// UnmarshalJSON implements json.Unmarshaler.
|
||||
func (b *Big) UnmarshalJSON(input []byte) error {
|
||||
if !isString(input) {
|
||||
return errNonString
|
||||
return errNonString(bigT)
|
||||
}
|
||||
return b.UnmarshalText(input[1 : len(input)-1])
|
||||
return wrapTypeError(b.UnmarshalText(input[1:len(input)-1]), bigT)
|
||||
}
|
||||
|
||||
// UnmarshalText implements encoding.TextUnmarshaler
|
||||
@ -189,9 +203,9 @@ func (b Uint64) MarshalText() ([]byte, error) {
|
||||
// UnmarshalJSON implements json.Unmarshaler.
|
||||
func (b *Uint64) UnmarshalJSON(input []byte) error {
|
||||
if !isString(input) {
|
||||
return errNonString
|
||||
return errNonString(uint64T)
|
||||
}
|
||||
return b.UnmarshalText(input[1 : len(input)-1])
|
||||
return wrapTypeError(b.UnmarshalText(input[1:len(input)-1]), uint64T)
|
||||
}
|
||||
|
||||
// UnmarshalText implements encoding.TextUnmarshaler
|
||||
@ -233,9 +247,9 @@ func (b Uint) MarshalText() ([]byte, error) {
|
||||
// UnmarshalJSON implements json.Unmarshaler.
|
||||
func (b *Uint) UnmarshalJSON(input []byte) error {
|
||||
if !isString(input) {
|
||||
return errNonString
|
||||
return errNonString(uintT)
|
||||
}
|
||||
return b.UnmarshalText(input[1 : len(input)-1])
|
||||
return wrapTypeError(b.UnmarshalText(input[1:len(input)-1]), uintT)
|
||||
}
|
||||
|
||||
// UnmarshalText implements encoding.TextUnmarshaler.
|
||||
@ -295,3 +309,14 @@ func checkNumberText(input []byte) (raw []byte, err error) {
|
||||
}
|
||||
return input, nil
|
||||
}
|
||||
|
||||
func wrapTypeError(err error, typ reflect.Type) error {
|
||||
if _, ok := err.(*decError); ok {
|
||||
return &json.UnmarshalTypeError{Value: err.Error(), Type: typ}
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func errNonString(typ reflect.Type) error {
|
||||
return &json.UnmarshalTypeError{Value: "non-string", Type: typ}
|
||||
}
|
||||
|
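With the error values now wrapped into *json.UnmarshalTypeError, callers can tell "bad hex" apart from other JSON failures by type. A small sketch of what a caller sees; the struct and input are illustrative, and the exact error wording follows the messages defined above.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/ethereum/go-ethereum/common/hexutil"
)

type payload struct {
	Data hexutil.Bytes `json:"data"`
}

func main() {
	var p payload
	// "01" is a JSON string but lacks the 0x prefix, so decoding fails with a
	// *json.UnmarshalTypeError naming hexutil.Bytes.
	err := json.Unmarshal([]byte(`{"data": "01"}`), &p)
	if typeErr, ok := err.(*json.UnmarshalTypeError); ok {
		fmt.Println(typeErr.Value, "->", typeErr.Type)
	}
}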
@ -62,12 +62,12 @@ var errJSONEOF = errors.New("unexpected end of JSON input")
|
||||
var unmarshalBytesTests = []unmarshalTest{
|
||||
// invalid encoding
|
||||
{input: "", wantErr: errJSONEOF},
|
||||
{input: "null", wantErr: errNonString},
|
||||
{input: "10", wantErr: errNonString},
|
||||
{input: `"0"`, wantErr: ErrMissingPrefix},
|
||||
{input: `"0x0"`, wantErr: ErrOddLength},
|
||||
{input: `"0xxx"`, wantErr: ErrSyntax},
|
||||
{input: `"0x01zz01"`, wantErr: ErrSyntax},
|
||||
{input: "null", wantErr: errNonString(bytesT)},
|
||||
{input: "10", wantErr: errNonString(bytesT)},
|
||||
{input: `"0"`, wantErr: wrapTypeError(ErrMissingPrefix, bytesT)},
|
||||
{input: `"0x0"`, wantErr: wrapTypeError(ErrOddLength, bytesT)},
|
||||
{input: `"0xxx"`, wantErr: wrapTypeError(ErrSyntax, bytesT)},
|
||||
{input: `"0x01zz01"`, wantErr: wrapTypeError(ErrSyntax, bytesT)},
|
||||
|
||||
// valid encoding
|
||||
{input: `""`, want: referenceBytes("")},
|
||||
@ -127,16 +127,16 @@ func TestMarshalBytes(t *testing.T) {
|
||||
var unmarshalBigTests = []unmarshalTest{
|
||||
// invalid encoding
|
||||
{input: "", wantErr: errJSONEOF},
|
||||
{input: "null", wantErr: errNonString},
|
||||
{input: "10", wantErr: errNonString},
|
||||
{input: `"0"`, wantErr: ErrMissingPrefix},
|
||||
{input: `"0x"`, wantErr: ErrEmptyNumber},
|
||||
{input: `"0x01"`, wantErr: ErrLeadingZero},
|
||||
{input: `"0xx"`, wantErr: ErrSyntax},
|
||||
{input: `"0x1zz01"`, wantErr: ErrSyntax},
|
||||
{input: "null", wantErr: errNonString(bigT)},
|
||||
{input: "10", wantErr: errNonString(bigT)},
|
||||
{input: `"0"`, wantErr: wrapTypeError(ErrMissingPrefix, bigT)},
|
||||
{input: `"0x"`, wantErr: wrapTypeError(ErrEmptyNumber, bigT)},
|
||||
{input: `"0x01"`, wantErr: wrapTypeError(ErrLeadingZero, bigT)},
|
||||
{input: `"0xx"`, wantErr: wrapTypeError(ErrSyntax, bigT)},
|
||||
{input: `"0x1zz01"`, wantErr: wrapTypeError(ErrSyntax, bigT)},
|
||||
{
|
||||
input: `"0x10000000000000000000000000000000000000000000000000000000000000000"`,
|
||||
wantErr: ErrBig256Range,
|
||||
wantErr: wrapTypeError(ErrBig256Range, bigT),
|
||||
},
|
||||
|
||||
// valid encoding
|
||||
@ -208,14 +208,14 @@ func TestMarshalBig(t *testing.T) {
|
||||
var unmarshalUint64Tests = []unmarshalTest{
|
||||
// invalid encoding
|
||||
{input: "", wantErr: errJSONEOF},
|
||||
{input: "null", wantErr: errNonString},
|
||||
{input: "10", wantErr: errNonString},
|
||||
{input: `"0"`, wantErr: ErrMissingPrefix},
|
||||
{input: `"0x"`, wantErr: ErrEmptyNumber},
|
||||
{input: `"0x01"`, wantErr: ErrLeadingZero},
|
||||
{input: `"0xfffffffffffffffff"`, wantErr: ErrUint64Range},
|
||||
{input: `"0xx"`, wantErr: ErrSyntax},
|
||||
{input: `"0x1zz01"`, wantErr: ErrSyntax},
|
||||
{input: "null", wantErr: errNonString(uint64T)},
|
||||
{input: "10", wantErr: errNonString(uint64T)},
|
||||
{input: `"0"`, wantErr: wrapTypeError(ErrMissingPrefix, uint64T)},
|
||||
{input: `"0x"`, wantErr: wrapTypeError(ErrEmptyNumber, uint64T)},
|
||||
{input: `"0x01"`, wantErr: wrapTypeError(ErrLeadingZero, uint64T)},
|
||||
{input: `"0xfffffffffffffffff"`, wantErr: wrapTypeError(ErrUint64Range, uint64T)},
|
||||
{input: `"0xx"`, wantErr: wrapTypeError(ErrSyntax, uint64T)},
|
||||
{input: `"0x1zz01"`, wantErr: wrapTypeError(ErrSyntax, uint64T)},
|
||||
|
||||
// valid encoding
|
||||
{input: `""`, want: uint64(0)},
|
||||
@ -298,15 +298,15 @@ var (
|
||||
var unmarshalUintTests = []unmarshalTest{
|
||||
// invalid encoding
|
||||
{input: "", wantErr: errJSONEOF},
|
||||
{input: "null", wantErr: errNonString},
|
||||
{input: "10", wantErr: errNonString},
|
||||
{input: `"0"`, wantErr: ErrMissingPrefix},
|
||||
{input: `"0x"`, wantErr: ErrEmptyNumber},
|
||||
{input: `"0x01"`, wantErr: ErrLeadingZero},
|
||||
{input: `"0x100000000"`, want: uint(maxUint33bits), wantErr32bit: ErrUintRange},
|
||||
{input: `"0xfffffffffffffffff"`, wantErr: ErrUintRange},
|
||||
{input: `"0xx"`, wantErr: ErrSyntax},
|
||||
{input: `"0x1zz01"`, wantErr: ErrSyntax},
|
||||
{input: "null", wantErr: errNonString(uintT)},
|
||||
{input: "10", wantErr: errNonString(uintT)},
|
||||
{input: `"0"`, wantErr: wrapTypeError(ErrMissingPrefix, uintT)},
|
||||
{input: `"0x"`, wantErr: wrapTypeError(ErrEmptyNumber, uintT)},
|
||||
{input: `"0x01"`, wantErr: wrapTypeError(ErrLeadingZero, uintT)},
|
||||
{input: `"0x100000000"`, want: uint(maxUint33bits), wantErr32bit: wrapTypeError(ErrUintRange, uintT)},
|
||||
{input: `"0xfffffffffffffffff"`, wantErr: wrapTypeError(ErrUintRange, uintT)},
|
||||
{input: `"0xx"`, wantErr: wrapTypeError(ErrSyntax, uintT)},
|
||||
{input: `"0x1zz01"`, wantErr: wrapTypeError(ErrSyntax, uintT)},
|
||||
|
||||
// valid encoding
|
||||
{input: `""`, want: uint(0)},
|
||||
@ -317,7 +317,7 @@ var unmarshalUintTests = []unmarshalTest{
|
||||
{input: `"0x1122aaff"`, want: uint(0x1122aaff)},
|
||||
{input: `"0xbbb"`, want: uint(0xbbb)},
|
||||
{input: `"0xffffffff"`, want: uint(0xffffffff)},
|
||||
{input: `"0xffffffffffffffff"`, want: uint(maxUint64bits), wantErr32bit: ErrUintRange},
|
||||
{input: `"0xffffffffffffffff"`, want: uint(maxUint64bits), wantErr32bit: wrapTypeError(ErrUintRange, uintT)},
|
||||
}
|
||||
|
||||
func TestUnmarshalUint(t *testing.T) {
|
||||
|
@ -130,6 +130,34 @@ func PaddedBigBytes(bigint *big.Int, n int) []byte {
|
||||
return ret
|
||||
}
|
||||
|
||||
// bigEndianByteAt returns the byte at position n,
|
||||
// in Big-Endian encoding
|
||||
// So n==0 returns the least significant byte
|
||||
func bigEndianByteAt(bigint *big.Int, n int) byte {
|
||||
words := bigint.Bits()
|
||||
// Check word-bucket the byte will reside in
|
||||
i := n / wordBytes
|
||||
if i >= len(words) {
|
||||
return byte(0)
|
||||
}
|
||||
word := words[i]
|
||||
// Offset of the byte
|
||||
shift := 8 * uint(n%wordBytes)
|
||||
|
||||
return byte(word >> shift)
|
||||
}
|
||||
|
||||
// Byte returns the byte at position n,
|
||||
// with the supplied padlength in Little-Endian encoding.
|
||||
// n==0 returns the MSB
|
||||
// Example: bigint '5', padlength 32, n=31 => 5
|
||||
func Byte(bigint *big.Int, padlength, n int) byte {
|
||||
if n >= padlength {
|
||||
return byte(0)
|
||||
}
|
||||
return bigEndianByteAt(bigint, padlength-1-n)
|
||||
}
|
||||
|
||||
// ReadBits encodes the absolute value of bigint as big-endian bytes. Callers must ensure
|
||||
// that buf has enough space. If buf is too short the result will be incomplete.
|
||||
func ReadBits(bigint *big.Int, buf []byte) {
|
||||
|
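As a quick illustration of the two indexing conventions introduced above (bigEndianByteAt counts from the least significant byte, while Byte counts from the most significant end of the padded value), here is a short sketch mirroring the example in the Byte comment. It assumes these helpers live in the common/math package, as the surrounding hunks suggest.

package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/common/math"
)

func main() {
	five := big.NewInt(5)
	// With a 32-byte padded view of the value 5, the last index (31) holds 0x05.
	fmt.Println(math.Byte(five, 32, 31)) // 5
	fmt.Println(math.Byte(five, 32, 0))  // 0
}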
@ -21,6 +21,8 @@ import (
|
||||
"encoding/hex"
|
||||
"math/big"
|
||||
"testing"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
)
|
||||
|
||||
func TestHexOrDecimal256(t *testing.T) {
|
||||
@ -133,8 +135,44 @@ func TestPaddedBigBytes(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkPaddedBigBytes(b *testing.B) {
|
||||
func BenchmarkPaddedBigBytesLargePadding(b *testing.B) {
|
||||
bigint := MustParseBig256("123456789123456789123456789123456789")
|
||||
for i := 0; i < b.N; i++ {
|
||||
PaddedBigBytes(bigint, 200)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkPaddedBigBytesSmallPadding(b *testing.B) {
|
||||
bigint := MustParseBig256("0x18F8F8F1000111000110011100222004330052300000000000000000FEFCF3CC")
|
||||
for i := 0; i < b.N; i++ {
|
||||
PaddedBigBytes(bigint, 5)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkPaddedBigBytesSmallOnePadding(b *testing.B) {
|
||||
bigint := MustParseBig256("0x18F8F8F1000111000110011100222004330052300000000000000000FEFCF3CC")
|
||||
for i := 0; i < b.N; i++ {
|
||||
PaddedBigBytes(bigint, 32)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkByteAtBrandNew(b *testing.B) {
|
||||
bigint := MustParseBig256("0x18F8F8F1000111000110011100222004330052300000000000000000FEFCF3CC")
|
||||
for i := 0; i < b.N; i++ {
|
||||
bigEndianByteAt(bigint, 15)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkByteAt(b *testing.B) {
|
||||
bigint := MustParseBig256("0x18F8F8F1000111000110011100222004330052300000000000000000FEFCF3CC")
|
||||
for i := 0; i < b.N; i++ {
|
||||
bigEndianByteAt(bigint, 15)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkByteAtOld(b *testing.B) {
|
||||
|
||||
bigint := MustParseBig256("0x18F8F8F1000111000110011100222004330052300000000000000000FEFCF3CC")
|
||||
for i := 0; i < b.N; i++ {
|
||||
PaddedBigBytes(bigint, 32)
|
||||
}
|
||||
@ -174,6 +212,65 @@ func TestU256(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestBigEndianByteAt(t *testing.T) {
|
||||
tests := []struct {
|
||||
x string
|
||||
y int
|
||||
exp byte
|
||||
}{
|
||||
{"00", 0, 0x00},
|
||||
{"01", 1, 0x00},
|
||||
{"00", 1, 0x00},
|
||||
{"01", 0, 0x01},
|
||||
{"0000000000000000000000000000000000000000000000000000000000102030", 0, 0x30},
|
||||
{"0000000000000000000000000000000000000000000000000000000000102030", 1, 0x20},
|
||||
{"ABCDEF0908070605040302010000000000000000000000000000000000000000", 31, 0xAB},
|
||||
{"ABCDEF0908070605040302010000000000000000000000000000000000000000", 32, 0x00},
|
||||
{"ABCDEF0908070605040302010000000000000000000000000000000000000000", 500, 0x00},
|
||||
}
|
||||
for _, test := range tests {
|
||||
v := new(big.Int).SetBytes(common.Hex2Bytes(test.x))
|
||||
actual := bigEndianByteAt(v, test.y)
|
||||
if actual != test.exp {
|
||||
t.Fatalf("Expected [%v] %v:th byte to be %v, was %v.", test.x, test.y, test.exp, actual)
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
func TestLittleEndianByteAt(t *testing.T) {
|
||||
tests := []struct {
|
||||
x string
|
||||
y int
|
||||
exp byte
|
||||
}{
|
||||
{"00", 0, 0x00},
|
||||
{"01", 1, 0x00},
|
||||
{"00", 1, 0x00},
|
||||
{"01", 0, 0x00},
|
||||
{"0000000000000000000000000000000000000000000000000000000000102030", 0, 0x00},
|
||||
{"0000000000000000000000000000000000000000000000000000000000102030", 1, 0x00},
|
||||
{"ABCDEF0908070605040302010000000000000000000000000000000000000000", 31, 0x00},
|
||||
{"ABCDEF0908070605040302010000000000000000000000000000000000000000", 32, 0x00},
|
||||
{"ABCDEF0908070605040302010000000000000000000000000000000000000000", 0, 0xAB},
|
||||
{"ABCDEF0908070605040302010000000000000000000000000000000000000000", 1, 0xCD},
|
||||
{"00CDEF090807060504030201ffffffffffffffffffffffffffffffffffffffff", 0, 0x00},
|
||||
{"00CDEF090807060504030201ffffffffffffffffffffffffffffffffffffffff", 1, 0xCD},
|
||||
{"0000000000000000000000000000000000000000000000000000000000102030", 31, 0x30},
|
||||
{"0000000000000000000000000000000000000000000000000000000000102030", 30, 0x20},
|
||||
{"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", 32, 0x0},
|
||||
{"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", 31, 0xFF},
|
||||
{"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", 0xFFFF, 0x0},
|
||||
}
|
||||
for _, test := range tests {
|
||||
v := new(big.Int).SetBytes(common.Hex2Bytes(test.x))
|
||||
actual := Byte(v, 32, test.y)
|
||||
if actual != test.exp {
|
||||
t.Fatalf("Expected [%v] %v:th byte to be %v, was %v.", test.x, test.y, test.exp, actual)
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
func TestS256(t *testing.T) {
|
||||
tests := []struct{ x, y *big.Int }{
|
||||
{x: big.NewInt(0), y: big.NewInt(0)},
|
||||
|
@ -31,6 +31,11 @@ const (
|
||||
AddressLength = 20
|
||||
)
|
||||
|
||||
var (
|
||||
hashT = reflect.TypeOf(Hash{})
|
||||
addressT = reflect.TypeOf(Address{})
|
||||
)
|
||||
|
||||
// Hash represents the 32 byte Keccak256 hash of arbitrary data.
|
||||
type Hash [HashLength]byte
|
||||
|
||||
@ -72,6 +77,11 @@ func (h *Hash) UnmarshalText(input []byte) error {
|
||||
return hexutil.UnmarshalFixedText("Hash", input, h[:])
|
||||
}
|
||||
|
||||
// UnmarshalJSON parses a hash in hex syntax.
|
||||
func (h *Hash) UnmarshalJSON(input []byte) error {
|
||||
return hexutil.UnmarshalFixedJSON(hashT, input, h[:])
|
||||
}
|
||||
|
||||
// MarshalText returns the hex representation of h.
|
||||
func (h Hash) MarshalText() ([]byte, error) {
|
||||
return hexutil.Bytes(h[:]).MarshalText()
|
||||
@ -194,6 +204,11 @@ func (a *Address) UnmarshalText(input []byte) error {
|
||||
return hexutil.UnmarshalFixedText("Address", input, a[:])
|
||||
}
|
||||
|
||||
// UnmarshalJSON parses a hash in hex syntax.
|
||||
func (a *Address) UnmarshalJSON(input []byte) error {
|
||||
return hexutil.UnmarshalFixedJSON(addressT, input, a[:])
|
||||
}
|
||||
|
||||
// UnprefixedHash allows marshaling an Address without 0x prefix.
|
||||
type UnprefixedAddress Address
|
||||
|
||||
|
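With UnmarshalJSON now defined on Hash and Address, fixed-size types decode directly from JSON and report type-aware errors, matching the test expectations further down. A short sketch:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/ethereum/go-ethereum/common"
)

func main() {
	var addr common.Address
	if err := json.Unmarshal([]byte(`"0x0000000000000000000000000000000000000001"`), &addr); err != nil {
		panic(err)
	}
	fmt.Println(addr.Hex())

	// A wrong-length string now yields an error that names common.Address.
	err := json.Unmarshal([]byte(`"0x01"`), &addr)
	fmt.Println(err)
}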
@ -21,8 +21,6 @@ import (
|
||||
"math/big"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
)
|
||||
|
||||
func TestBytesConversion(t *testing.T) {
|
||||
@ -43,10 +41,10 @@ func TestHashJsonValidation(t *testing.T) {
|
||||
Size int
|
||||
Error string
|
||||
}{
|
||||
{"", 62, hexutil.ErrMissingPrefix.Error()},
|
||||
{"0x", 66, "hex string has length 66, want 64 for Hash"},
|
||||
{"0x", 63, hexutil.ErrOddLength.Error()},
|
||||
{"0x", 0, "hex string has length 0, want 64 for Hash"},
|
||||
{"", 62, "json: cannot unmarshal hex string without 0x prefix into Go value of type common.Hash"},
|
||||
{"0x", 66, "hex string has length 66, want 64 for common.Hash"},
|
||||
{"0x", 63, "json: cannot unmarshal hex string of odd length into Go value of type common.Hash"},
|
||||
{"0x", 0, "hex string has length 0, want 64 for common.Hash"},
|
||||
{"0x", 64, ""},
|
||||
{"0X", 64, ""},
|
||||
}
|
||||
|
@ -33,102 +33,18 @@ func (s *CompressionRleSuite) TestDecompressSimple(c *checker.C) {
|
||||
res, err := Decompress([]byte{token, 0xfd})
|
||||
c.Assert(err, checker.IsNil)
|
||||
c.Assert(res, checker.DeepEquals, exp)
|
||||
// if bytes.Compare(res, exp) != 0 {
|
||||
// t.Error("empty sha3", res)
|
||||
// }
|
||||
|
||||
exp = []byte{0x56, 0xe8, 0x1f, 0x17, 0x1b, 0xcc, 0x55, 0xa6, 0xff, 0x83, 0x45, 0xe6, 0x92, 0xc0, 0xf8, 0x6e, 0x5b, 0x48, 0xe0, 0x1b, 0x99, 0x6c, 0xad, 0xc0, 0x1, 0x62, 0x2f, 0xb5, 0xe3, 0x63, 0xb4, 0x21}
|
||||
res, err = Decompress([]byte{token, 0xfe})
|
||||
c.Assert(err, checker.IsNil)
|
||||
c.Assert(res, checker.DeepEquals, exp)
|
||||
// if bytes.Compare(res, exp) != 0 {
|
||||
// t.Error("0x80 sha3", res)
|
||||
// }
|
||||
|
||||
res, err = Decompress([]byte{token, 0xff})
|
||||
c.Assert(err, checker.IsNil)
|
||||
c.Assert(res, checker.DeepEquals, []byte{token})
|
||||
// if bytes.Compare(res, []byte{token}) != 0 {
|
||||
// t.Error("token", res)
|
||||
// }
|
||||
|
||||
res, err = Decompress([]byte{token, 12})
|
||||
c.Assert(err, checker.IsNil)
|
||||
c.Assert(res, checker.DeepEquals, make([]byte, 10))
|
||||
// if bytes.Compare(res, make([]byte, 10)) != 0 {
|
||||
// t.Error("10 * zero", res)
|
||||
// }
|
||||
|
||||
}
|
||||
|
||||
// func TestDecompressMulti(t *testing.T) {
|
||||
// res, err := Decompress([]byte{token, 0xfd, token, 0xfe, token, 12})
|
||||
// if err != nil {
|
||||
// t.Error(err)
|
||||
// }
|
||||
|
||||
// var exp []byte
|
||||
// exp = append(exp, crypto.Keccak256([]byte(""))...)
|
||||
// exp = append(exp, crypto.Keccak256([]byte{0x80})...)
|
||||
// exp = append(exp, make([]byte, 10)...)
|
||||
|
||||
// if bytes.Compare(res, res) != 0 {
|
||||
// t.Error("Expected", exp, "result", res)
|
||||
// }
|
||||
// }
|
||||
|
||||
// func TestCompressSimple(t *testing.T) {
|
||||
// res := Compress([]byte{0, 0, 0, 0, 0})
|
||||
// if bytes.Compare(res, []byte{token, 7}) != 0 {
|
||||
// t.Error("5 * zero", res)
|
||||
// }
|
||||
|
||||
// res = Compress(crypto.Keccak256([]byte("")))
|
||||
// if bytes.Compare(res, []byte{token, emptyShaToken}) != 0 {
|
||||
// t.Error("empty sha", res)
|
||||
// }
|
||||
|
||||
// res = Compress(crypto.Keccak256([]byte{0x80}))
|
||||
// if bytes.Compare(res, []byte{token, emptyListShaToken}) != 0 {
|
||||
// t.Error("empty list sha", res)
|
||||
// }
|
||||
|
||||
// res = Compress([]byte{token})
|
||||
// if bytes.Compare(res, []byte{token, tokenToken}) != 0 {
|
||||
// t.Error("token", res)
|
||||
// }
|
||||
// }
|
||||
|
||||
// func TestCompressMulti(t *testing.T) {
|
||||
// in := []byte{0, 0, 0, 0, 0}
|
||||
// in = append(in, crypto.Keccak256([]byte(""))...)
|
||||
// in = append(in, crypto.Keccak256([]byte{0x80})...)
|
||||
// in = append(in, token)
|
||||
// res := Compress(in)
|
||||
|
||||
// exp := []byte{token, 7, token, emptyShaToken, token, emptyListShaToken, token, tokenToken}
|
||||
// if bytes.Compare(res, exp) != 0 {
|
||||
// t.Error("expected", exp, "got", res)
|
||||
// }
|
||||
// }
|
||||
|
||||
// func TestCompressDecompress(t *testing.T) {
|
||||
// var in []byte
|
||||
|
||||
// for i := 0; i < 20; i++ {
|
||||
// in = append(in, []byte{0, 0, 0, 0, 0}...)
|
||||
// in = append(in, crypto.Keccak256([]byte(""))...)
|
||||
// in = append(in, crypto.Keccak256([]byte{0x80})...)
|
||||
// in = append(in, []byte{123, 2, 19, 89, 245, 254, 255, token, 98, 233}...)
|
||||
// in = append(in, token)
|
||||
// }
|
||||
|
||||
// c := Compress(in)
|
||||
// d, err := Decompress(c)
|
||||
// if err != nil {
|
||||
// t.Error(err)
|
||||
// }
|
||||
|
||||
// if bytes.Compare(d, in) != 0 {
|
||||
// t.Error("multi failed\n", d, "\n", in)
|
||||
// }
|
||||
// }
|
||||
|
@ -76,7 +76,7 @@ var (
|
||||
errUnknownBlock = errors.New("unknown block")
|
||||
|
||||
// errInvalidCheckpointBeneficiary is returned if a checkpoint/epoch transition
|
||||
// block has a beneficiary set to non zeroes.
|
||||
// block has a beneficiary set to non-zeroes.
|
||||
errInvalidCheckpointBeneficiary = errors.New("beneficiary in checkpoint block non-zero")
|
||||
|
||||
// errInvalidVote is returned if a nonce value is something else that the two
|
||||
@ -84,7 +84,7 @@ var (
|
||||
errInvalidVote = errors.New("vote nonce not 0x00..0 or 0xff..f")
|
||||
|
||||
// errInvalidCheckpointVote is returned if a checkpoint/epoch transition block
|
||||
// has a vote nonce set to non zeroes.
|
||||
// has a vote nonce set to non-zeroes.
|
||||
errInvalidCheckpointVote = errors.New("vote nonce in checkpoint block non-zero")
|
||||
|
||||
// errMissingVanity is returned if a block's extra-data section is shorter than
|
||||
@ -99,12 +99,12 @@ var (
|
||||
// their extra-data fields.
|
||||
errExtraSigners = errors.New("non-checkpoint block contains extra signer list")
|
||||
|
||||
// drrInvalidCheckpointSigners is returned if a checkpoint block contains an
|
||||
// errInvalidCheckpointSigners is returned if a checkpoint block contains an
|
||||
// invalid list of signers (i.e. non divisible by 20 bytes, or not the correct
|
||||
// ones).
|
||||
drrInvalidCheckpointSigners = errors.New("invalid signer list on checkpoint block")
|
||||
errInvalidCheckpointSigners = errors.New("invalid signer list on checkpoint block")
|
||||
|
||||
// errInvalidMixDigest is returned if a block's mix digest is non zero.
|
||||
// errInvalidMixDigest is returned if a block's mix digest is non-zero.
|
||||
errInvalidMixDigest = errors.New("non-zero mix digest")
|
||||
|
||||
// errInvalidUncleHash is returned if a block contains an non-empty uncle list.
|
||||
@ -122,7 +122,7 @@ var (
|
||||
// be modified via out-of-range or non-contiguous headers.
|
||||
errInvalidVotingChain = errors.New("invalid voting chain")
|
||||
|
||||
// errUnauthorized is returned if a header is signed by a non authorized entity.
|
||||
// errUnauthorized is returned if a header is signed by a non-authorized entity.
|
||||
errUnauthorized = errors.New("unauthorized")
|
||||
)
|
||||
|
||||
@ -297,7 +297,7 @@ func (c *Clique) verifyHeader(chain consensus.ChainReader, header *types.Header,
|
||||
return errExtraSigners
|
||||
}
|
||||
if checkpoint && signersBytes%common.AddressLength != 0 {
|
||||
return drrInvalidCheckpointSigners
|
||||
return errInvalidCheckpointSigners
|
||||
}
|
||||
// Ensure that the mix digest is zero as we don't have fork protection currently
|
||||
if header.MixDigest != (common.Hash{}) {
|
||||
@ -353,7 +353,7 @@ func (c *Clique) verifyCascadingFields(chain consensus.ChainReader, header *type
|
||||
}
|
||||
extraSuffix := len(header.Extra) - extraSeal
|
||||
if !bytes.Equal(header.Extra[extraVanity:extraSuffix], signers) {
|
||||
return drrInvalidCheckpointSigners
|
||||
return errInvalidCheckpointSigners
|
||||
}
|
||||
}
|
||||
// All basic checks passed, verify the seal and return
|
||||
@ -467,7 +467,6 @@ func (c *Clique) verifySeal(chain consensus.ChainReader, header *types.Header, p
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
c.recents.Add(snap.Hash, snap)
|
||||
|
||||
// Resolve the authorization key and check against signers
|
||||
signer, err := ecrecover(header, c.signatures)
|
||||
@ -479,13 +478,13 @@ func (c *Clique) verifySeal(chain consensus.ChainReader, header *types.Header, p
|
||||
}
|
||||
for seen, recent := range snap.Recents {
|
||||
if recent == signer {
|
||||
// Signer is among recents, only fail if the current block doens't shift it out
|
||||
// Signer is among recents, only fail if the current block doesn't shift it out
|
||||
if limit := uint64(len(snap.Signers)/2 + 1); seen > number-limit {
|
||||
return errUnauthorized
|
||||
}
|
||||
}
|
||||
}
|
||||
// Ensure that the difficulty corresponts to the turn-ness of the signer
|
||||
// Ensure that the difficulty corresponds to the turn-ness of the signer
|
||||
inturn := snap.inturn(header.Number.Uint64(), signer)
|
||||
if inturn && header.Difficulty.Cmp(diffInTurn) != 0 {
|
||||
return errInvalidDifficulty
|
||||
@ -499,18 +498,29 @@ func (c *Clique) verifySeal(chain consensus.ChainReader, header *types.Header, p
|
||||
// Prepare implements consensus.Engine, preparing all the consensus fields of the
|
||||
// header for running the transactions on top.
|
||||
func (c *Clique) Prepare(chain consensus.ChainReader, header *types.Header) error {
|
||||
// If the block isn't a checkpoint, cast a random vote (good enough fror now)
|
||||
// If the block isn't a checkpoint, cast a random vote (good enough for now)
|
||||
header.Coinbase = common.Address{}
|
||||
header.Nonce = types.BlockNonce{}
|
||||
|
||||
number := header.Number.Uint64()
|
||||
|
||||
// Assemble the voting snapshot to check which votes make sense
|
||||
snap, err := c.snapshot(chain, number-1, header.ParentHash, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if number%c.config.Epoch != 0 {
|
||||
c.lock.RLock()
|
||||
if len(c.proposals) > 0 {
|
||||
addresses := make([]common.Address, 0, len(c.proposals))
|
||||
for address := range c.proposals {
|
||||
|
||||
// Gather all the proposals that make sense voting on
|
||||
addresses := make([]common.Address, 0, len(c.proposals))
|
||||
for address, authorize := range c.proposals {
|
||||
if snap.validVote(address, authorize) {
|
||||
addresses = append(addresses, address)
|
||||
}
|
||||
}
|
||||
// If there's pending proposals, cast a vote on them
|
||||
if len(addresses) > 0 {
|
||||
header.Coinbase = addresses[rand.Intn(len(addresses))]
|
||||
if c.proposals[header.Coinbase] {
|
||||
copy(header.Nonce[:], nonceAuthVote)
|
||||
@ -520,11 +530,7 @@ func (c *Clique) Prepare(chain consensus.ChainReader, header *types.Header) erro
|
||||
}
|
||||
c.lock.RUnlock()
|
||||
}
|
||||
// Assemble the voting snapshot and set the correct difficulty
|
||||
snap, err := c.snapshot(chain, number-1, header.ParentHash, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// Set the correct difficulty
|
||||
header.Difficulty = diffNoTurn
|
||||
if snap.inturn(header.Number.Uint64(), c.signer) {
|
||||
header.Difficulty = diffInTurn
|
||||
@ -601,10 +607,10 @@ func (c *Clique) Seal(chain consensus.ChainReader, block *types.Block, stop <-ch
|
||||
if _, authorized := snap.Signers[signer]; !authorized {
|
||||
return nil, errUnauthorized
|
||||
}
|
||||
// If we're amongs the recent signers, wait for the next block
|
||||
// If we're amongst the recent signers, wait for the next block
|
||||
for seen, recent := range snap.Recents {
|
||||
if recent == signer {
|
||||
// Signer is among recents, only wait if the current block doens't shift it out
|
||||
// Signer is among recents, only wait if the current block doesn't shift it out
|
||||
if limit := uint64(len(snap.Signers)/2 + 1); number < limit || seen > number-limit {
|
||||
log.Info("Signed recently, must wait for others")
|
||||
<-stop
|
||||
|
@ -39,7 +39,7 @@ type Vote struct {
|
||||
// Tally is a simple vote tally to keep the current score of votes. Votes that
|
||||
// go against the proposal aren't counted since it's equivalent to not voting.
|
||||
type Tally struct {
|
||||
Authorize bool `json:"authorize"` // Whether the vote it about authorizing or kicking someone
|
||||
Authorize bool `json:"authorize"` // Whether the vote is about authorizing or kicking someone
|
||||
Votes int `json:"votes"` // Number of votes until now wanting to pass the proposal
|
||||
}
|
||||
|
||||
@ -56,7 +56,7 @@ type Snapshot struct {
|
||||
Tally map[common.Address]Tally `json:"tally"` // Current vote tally to avoid recalculating
|
||||
}
|
||||
|
||||
// newSnapshot create a new snapshot with the specified startup parameters. This
|
||||
// newSnapshot creates a new snapshot with the specified startup parameters. This
|
||||
// method does not initialize the set of recent signers, so only ever use if for
|
||||
// the genesis block.
|
||||
func newSnapshot(config *params.CliqueConfig, sigcache *lru.ARCCache, number uint64, hash common.Hash, signers []common.Address) *Snapshot {
|
||||
@ -126,11 +126,17 @@ func (s *Snapshot) copy() *Snapshot {
|
||||
return cpy
|
||||
}
|
||||
|
||||
// validVote returns whether it makes sense to cast the specified vote in the
|
||||
// given snapshot context (e.g. don't try to add an already authorized signer).
|
||||
func (s *Snapshot) validVote(address common.Address, authorize bool) bool {
|
||||
_, signer := s.Signers[address]
|
||||
return (signer && !authorize) || (!signer && authorize)
|
||||
}
|
||||
|
||||
// cast adds a new vote into the tally.
|
||||
func (s *Snapshot) cast(address common.Address, authorize bool) bool {
|
||||
// Ensure the vote is meaningful
|
||||
_, signer := s.Signers[address]
|
||||
if (signer && authorize) || (!signer && !authorize) {
|
||||
if !s.validVote(address, authorize) {
|
||||
return false
|
||||
}
|
||||
// Cast the vote into an existing or new tally
|
||||
|
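The validVote helper simply rejects votes that would be no-ops: authorizing an existing signer or deauthorizing a non-signer. Since the method is unexported, the sketch below restates the rule as a standalone truth table rather than calling the clique package directly.

package main

import "fmt"

// validVote reproduces the rule from clique's Snapshot.validVote: a vote only
// makes sense if it would change the signer set.
func validVote(isSigner, authorize bool) bool {
	return (isSigner && !authorize) || (!isSigner && authorize)
}

func main() {
	fmt.Println(validVote(true, true))   // false: already authorized
	fmt.Println(validVote(true, false))  // true:  propose removal
	fmt.Println(validVote(false, true))  // true:  propose addition
	fmt.Println(validVote(false, false)) // false: already not a signer
}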
@ -243,7 +243,7 @@ func TestVoting(t *testing.T) {
|
||||
},
|
||||
results: []string{"A", "B"},
|
||||
}, {
|
||||
// Cascading changes are not allowed, only the the account being voted on may change
|
||||
// Cascading changes are not allowed, only the account being voted on may change
|
||||
signers: []string{"A", "B", "C", "D"},
|
||||
votes: []testerVote{
|
||||
{signer: "A", voted: "C", auth: false},
|
||||
@ -293,7 +293,7 @@ func TestVoting(t *testing.T) {
|
||||
results: []string{"A", "B", "C"},
|
||||
}, {
|
||||
// Ensure that pending votes don't survive authorization status changes. This
|
||||
// corner case can only appear if a signer is quickly added, remove and then
|
||||
// corner case can only appear if a signer is quickly added, removed and then
|
||||
// readded (or the inverse), while one of the original voters dropped. If a
|
||||
// past vote is left cached in the system somewhere, this will interfere with
|
||||
// the final signer outcome.
|
||||
|
@ -79,8 +79,7 @@ type Engine interface {
|
||||
|
||||
// Finalize runs any post-transaction state modifications (e.g. block rewards)
|
||||
// and assembles the final block.
|
||||
//
|
||||
// Note, the block header and state database might be updated to reflect any
|
||||
// Note: The block header and state database might be updated to reflect any
|
||||
// consensus rules that happen at finalization (e.g. block rewards).
|
||||
Finalize(chain ChainReader, header *types.Header, state *state.StateDB, txs []*types.Transaction,
|
||||
uncles []*types.Header, receipts []*types.Receipt) (*types.Block, error)
|
||||
|
@ -53,7 +53,6 @@ type hasher func(dest []byte, data []byte)
|
||||
|
||||
// makeHasher creates a repetitive hasher, allowing the same hash data structures
|
||||
// to be reused between hash runs instead of requiring new ones to be created.
|
||||
//
|
||||
// The returned function is not thread safe!
|
||||
func makeHasher(h hash.Hash) hasher {
|
||||
return func(dest []byte, data []byte) {
|
||||
@ -82,7 +81,6 @@ func seedHash(block uint64) []byte {
|
||||
// memory, then performing two passes of Sergio Demian Lerner's RandMemoHash
|
||||
// algorithm from Strict Memory Hard Hashing Functions (2014). The output is a
|
||||
// set of 524288 64-byte values.
|
||||
//
|
||||
// This method places the result into dest in machine byte order.
|
||||
func generateCache(dest []uint32, epoch uint64, seed []byte) {
|
||||
// Print some debug logs to allow analysis on low end devices
|
||||
@ -220,7 +218,6 @@ func generateDatasetItem(cache []uint32, index uint32, keccak512 hasher) []byte
|
||||
}
|
||||
|
||||
// generateDataset generates the entire ethash dataset for mining.
|
||||
//
|
||||
// This method places the result into dest in machine byte order.
|
||||
func generateDataset(dest []uint32, epoch uint64, cache []uint32) {
|
||||
// Print some debug logs to allow analysis on low end devices
|
||||
|
@ -20,7 +20,7 @@ package ethash
|
||||
|
||||
import "testing"
|
||||
|
||||
// Tests whether the dataset size calculator work correctly by cross checking the
|
||||
// Tests whether the dataset size calculator works correctly by cross checking the
|
||||
// hard coded lookup table with the value generated by it.
|
||||
func TestSizeCalculations(t *testing.T) {
|
||||
var tests []uint64
|
||||
|
@ -218,7 +218,6 @@ func (ethash *Ethash) VerifyUncles(chain consensus.ChainReader, block *types.Blo
|
||||
|
||||
// verifyHeader checks whether a header conforms to the consensus rules of the
|
||||
// stock Ethereum ethash engine.
|
||||
//
|
||||
// See YP section 4.3.4. "Block Header Validity"
|
||||
func (ethash *Ethash) verifyHeader(chain consensus.ChainReader, header, parent *types.Header, uncle bool, seal bool) error {
|
||||
// Ensure that the header's extra-data section is of a reasonable size
|
||||
@ -286,7 +285,6 @@ func (ethash *Ethash) verifyHeader(chain consensus.ChainReader, header, parent *
|
||||
// CalcDifficulty is the difficulty adjustment algorithm. It returns
|
||||
// the difficulty that a new block should have when created at time
|
||||
// given the parent block's time and difficulty.
|
||||
//
|
||||
// TODO (karalabe): Move the chain maker into this package and make this private!
|
||||
func CalcDifficulty(config *params.ChainConfig, time uint64, parent *types.Header) *big.Int {
|
||||
next := new(big.Int).Add(parent.Number, common.Big1)
|
||||
@ -462,7 +460,6 @@ var (
|
||||
// AccumulateRewards credits the coinbase of the given block with the mining
|
||||
// reward. The total reward consists of the static block reward and rewards for
|
||||
// included uncles. The coinbase of each uncle block is also rewarded.
|
||||
//
|
||||
// TODO (karalabe): Move the chain maker into this package and make this private!
|
||||
func AccumulateRewards(state *state.StateDB, header *types.Header, uncles []*types.Header) {
|
||||
reward := new(big.Int).Set(blockReward)
|
||||
|
@ -20,6 +20,7 @@ import (
|
||||
"encoding/json"
|
||||
"math/big"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common/math"
|
||||
@ -57,9 +58,9 @@ func (d *diffTest) UnmarshalJSON(b []byte) (err error) {
|
||||
}
|
||||
|
||||
func TestCalcDifficulty(t *testing.T) {
|
||||
file, err := os.Open("../../tests/files/BasicTests/difficulty.json")
|
||||
file, err := os.Open(filepath.Join("..", "..", "tests", "testdata", "BasicTests", "difficulty.json"))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
t.Skip(err)
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
|
@ -308,14 +308,14 @@ func (d *dataset) release() {
|
||||
|
||||
// MakeCache generates a new ethash cache and optionally stores it to disk.
|
||||
func MakeCache(block uint64, dir string) {
|
||||
c := cache{epoch: block/epochLength + 1}
|
||||
c := cache{epoch: block / epochLength}
|
||||
c.generate(dir, math.MaxInt32, false)
|
||||
c.release()
|
||||
}
|
||||
|
||||
// MakeDataset generates a new ethash dataset and optionally stores it to disk.
|
||||
func MakeDataset(block uint64, dir string) {
|
||||
d := dataset{epoch: block/epochLength + 1}
|
||||
d := dataset{epoch: block / epochLength}
|
||||
d.generate(dir, math.MaxInt32, false)
|
||||
d.release()
|
||||
}
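
Not part of the diff: the two hunks above drop the off-by-one in the cache/dataset epoch. A small sketch, assuming the usual ethash epoch length of 30000 blocks, showing how plain integer division maps block numbers onto epochs:

package main

import "fmt"

// epochLength is an assumption here (the ethash spec uses 30000 blocks per epoch).
const epochLength = 30000

func main() {
	for _, block := range []uint64{0, 29999, 30000, 59999, 60000} {
		// Corrected mapping: blocks 0..29999 -> epoch 0, 30000..59999 -> epoch 1, ...
		fmt.Printf("block %6d -> epoch %d\n", block, block/epochLength)
	}
}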

@@ -355,7 +355,7 @@ type Ethash struct {
// New creates a full sized ethash PoW scheme.
func New(cachedir string, cachesinmem, cachesondisk int, dagdir string, dagsinmem, dagsondisk int) *Ethash {
if cachesinmem <= 0 {
log.Warn("One ethash cache must alwast be in memory", "requested", cachesinmem)
log.Warn("One ethash cache must always be in memory", "requested", cachesinmem)
cachesinmem = 1
}
if cachedir != "" && cachesondisk > 0 {
@@ -412,7 +412,7 @@ func NewFakeDelayer(delay time.Duration) *Ethash {
return &Ethash{fakeMode: true, fakeDelay: delay}
}

// NewFullFaker creates a ethash consensus engine with a full fake scheme that
// NewFullFaker creates an ethash consensus engine with a full fake scheme that
// accepts all blocks as valid, without checking any consensus rules whatsoever.
func NewFullFaker() *Ethash {
return &Ethash{fakeMode: true, fakeFull: true}

@@ -54,7 +54,7 @@ func VerifyDAOHeaderExtraData(config *params.ChainConfig, header *types.Header)
if header.Number.Cmp(config.DAOForkBlock) < 0 || header.Number.Cmp(limit) >= 0 {
return nil
}
// Depending whether we support or oppose the fork, validate the extra-data contents
// Depending on whether we support or oppose the fork, validate the extra-data contents
if config.DAOForkSupport {
if !bytes.Equal(header.Extra, params.DAOForkBlockExtra) {
return ErrBadProDAOExtra

@@ -2,7 +2,7 @@ FROM alpine:3.5

RUN \
apk add --update go git make gcc musl-dev linux-headers ca-certificates && \
git clone --depth 1 --branch release/1.5 https://github.com/ethereum/go-ethereum && \
git clone --depth 1 --branch release/1.6 https://github.com/ethereum/go-ethereum && \
(cd go-ethereum && make geth) && \
cp go-ethereum/build/bin/geth /geth && \
apk del go git make gcc musl-dev linux-headers && \

@@ -49,7 +49,7 @@ import (
// TODO(zelig): watch peer solvency and notify of bouncing cheques
// TODO(zelig): enable paying with cheque by signing off

// Some functionality require interacting with the blockchain:
// Some functionality requires interacting with the blockchain:
// * setting current balance on peer's chequebook
// * sending the transaction to cash the cheque
// * depositing ether to the chequebook
@@ -100,13 +100,13 @@ type Chequebook struct {
// persisted fields
balance *big.Int // not synced with blockchain
contractAddr common.Address // contract address
sent map[common.Address]*big.Int //tallies for beneficiarys
sent map[common.Address]*big.Int //tallies for beneficiaries

txhash string // tx hash of last deposit tx
threshold *big.Int // threshold that triggers autodeposit if not nil
buffer *big.Int // buffer to keep on top of balance for fork protection

log log.Logger // contextual logger with the contrac address embedded
log log.Logger // contextual logger with the contract address embedded
}

func (self *Chequebook) String() string {
@@ -442,7 +442,7 @@ type Inbox struct {
maxUncashed *big.Int // threshold that triggers autocashing
cashed *big.Int // cumulative amount cashed
cheque *Cheque // last cheque, nil if none yet received
log log.Logger // contextual logger with the contrac address embedded
log log.Logger // contextual logger with the contract address embedded
}

// NewInbox creates an Inbox. An Inboxes is not persisted, the cumulative sum is updated
@@ -509,9 +509,8 @@ func (self *Inbox) AutoCash(cashInterval time.Duration, maxUncashed *big.Int) {
self.autoCash(cashInterval)
}

// autoCash starts a loop that periodically clears the last check
// autoCash starts a loop that periodically clears the last cheque
// if the peer is trusted. Clearing period could be 24h or a week.
//
// The caller must hold self.lock.
func (self *Inbox) autoCash(cashInterval time.Duration) {
if self.quit != nil {
@@ -557,10 +556,10 @@ func (self *Inbox) Receive(promise swap.Promise) (*big.Int, error) {

var sum *big.Int
if self.cheque == nil {
// the sum is checked against the blockchain once a check is received
// the sum is checked against the blockchain once a cheque is received
tally, err := self.session.Sent(self.beneficiary)
if err != nil {
return nil, fmt.Errorf("inbox: error calling backend to set amount: %v", err)
return nil, fmt.Errorf("inbox: error calling backend to set amount: %v", err)
}
sum = tally
} else {

@@ -414,21 +414,10 @@ func TestCash(t *testing.T) {
t.Fatalf("expected no error, got %v", err)
}
backend.Commit()
// expBalance := big.NewInt(2)
// gotBalance := backend.BalanceAt(addr1)
// if gotBalance.Cmp(expBalance) != 0 {
// t.Fatalf("expected beneficiary balance %v, got %v", expBalance, gotBalance)
// }
// after 3x interval time and 2 cheques received, exactly one cashing tx is sent
time.Sleep(4 * interval)
backend.Commit()

// expBalance = big.NewInt(4)
// gotBalance = backend.BalanceAt(addr1)
// if gotBalance.Cmp(expBalance) != 0 {
// t.Fatalf("expected beneficiary balance %v, got %v", expBalance, gotBalance)
// }

// after stopping autocash no more tx are sent
ch2, err := chbook.Issue(addr1, amount)
if err != nil {
@@ -441,11 +430,6 @@ func TestCash(t *testing.T) {
}
time.Sleep(2 * interval)
backend.Commit()
// expBalance = big.NewInt(4)
// gotBalance = backend.BalanceAt(addr1)
// if gotBalance.Cmp(expBalance) != 0 {
// t.Fatalf("expected beneficiary balance %v, got %v", expBalance, gotBalance)
// }

// autocash below 1
chbook.balance = big.NewInt(2)
@@ -456,11 +440,6 @@ func TestCash(t *testing.T) {
t.Fatalf("expected no error, got %v", err)
}
backend.Commit()
// expBalance = big.NewInt(4)
// gotBalance = backend.BalanceAt(addr1)
// if gotBalance.Cmp(expBalance) != 0 {
// t.Fatalf("expected beneficiary balance %v, got %v", expBalance, gotBalance)
// }

ch4, err := chbook.Issue(addr1, amount)
if err != nil {
@@ -479,13 +458,6 @@ func TestCash(t *testing.T) {
}
backend.Commit()

// 2 checks of amount 1 received, exactly 1 tx is sent
// expBalance = big.NewInt(6)
// gotBalance = backend.BalanceAt(addr1)
// if gotBalance.Cmp(expBalance) != 0 {
// t.Fatalf("expected beneficiary balance %v, got %v", expBalance, gotBalance)
// }

// autochash on receipt when maxUncashed is 0
chbook.balance = new(big.Int).Set(common.Big2)
chbox.AutoCash(0, common.Big0)
@@ -495,11 +467,6 @@ func TestCash(t *testing.T) {
t.Fatalf("expected no error, got %v", err)
}
backend.Commit()
// expBalance = big.NewInt(5)
// gotBalance = backend.BalanceAt(addr1)
// if gotBalance.Cmp(expBalance) != 0 {
// t.Fatalf("expected beneficiary balance %v, got %v", expBalance, gotBalance)
// }

ch6, err := chbook.Issue(addr1, amount)
if err != nil {
@@ -511,21 +478,11 @@ func TestCash(t *testing.T) {
t.Fatalf("expected no error, got %v", err)
}
backend.Commit()
// expBalance = big.NewInt(4)
// gotBalance = backend.BalanceAt(addr1)
// if gotBalance.Cmp(expBalance) != 0 {
// t.Fatalf("expected beneficiary balance %v, got %v", expBalance, gotBalance)
// }

_, err = chbox.Receive(ch6)
if err != nil {
t.Fatalf("expected no error, got %v", err)
}
backend.Commit()
// expBalance = big.NewInt(6)
// gotBalance = backend.BalanceAt(addr1)
// if gotBalance.Cmp(expBalance) != 0 {
// t.Fatalf("expected beneficiary balance %v, got %v", expBalance, gotBalance)
// }

}

@@ -3,12 +3,12 @@

## Usage

Full documentation for the Ethereum Name Service [can be found as EIP 137](https://github.com/ethereum/EIPs/issues/137).
This package offers a simple binding that streamlines the registration arbitrary utf8 domain names to swarm content hashes.
This package offers a simple binding that streamlines the registration of arbitrary UTF8 domain names to swarm content hashes.

## Development

The SOL file in contract subdirectory implements the ENS root registry, a simple
first-in-first-served registrar for the root namespace, and a simple resolver contract;
first-in, first-served registrar for the root namespace, and a simple resolver contract;
they're used in tests, and can be used to deploy these contracts for your own purposes.

The solidity source code can be found at [github.com/arachnid/ens/](https://github.com/arachnid/ens/).

@@ -29,6 +29,11 @@ import (
"github.com/ethereum/go-ethereum/crypto"
)

var (
MainNetAddress = common.HexToAddress("0x314159265dD8dbb310642f98f50C066173C1259b")
TestNetAddress = common.HexToAddress("0x112234455c3a32fd11230c42e7bccd4a84e02010")
)

// swarm domain name registry and resolver
type ENS struct {
*contract.ENSSession
@@ -52,7 +57,7 @@ func NewENS(transactOpts *bind.TransactOpts, contractAddr common.Address, contra
}, nil
}

// DeployENS deploys an instance of the ENS nameservice, with a 'first in first served' root registrar.
// DeployENS deploys an instance of the ENS nameservice, with a 'first-in, first-served' root registrar.
func DeployENS(transactOpts *bind.TransactOpts, contractBackend bind.ContractBackend) (*ENS, error) {
// Deploy the ENS registry
ensAddr, _, _, err := contract.DeployENS(transactOpts, contractBackend, transactOpts.From)

@@ -79,7 +79,7 @@ func TestSignerPromotion(t *testing.T) {
// Gradually promote the keys, until all are authorized
keys = append([]*ecdsa.PrivateKey{key}, keys...)
for i := 1; i < len(keys); i++ {
// Check that no votes are accepted from the not yet authed user
// Check that no votes are accepted from the not yet authorized user
if _, err := oracle.Promote(bind.NewKeyedTransactor(keys[i]), common.Address{}); err != nil {
t.Fatalf("Iter #%d: failed invalid promotion attempt: %v", i, err)
}
@@ -216,7 +216,7 @@ func TestVersionRelease(t *testing.T) {
// Gradually push releases, always requiring more signers than previously
keys = append([]*ecdsa.PrivateKey{key}, keys...)
for i := 1; i < len(keys); i++ {
// Check that no votes are accepted from the not yet authed user
// Check that no votes are accepted from the not yet authorized user
if _, err := oracle.Release(bind.NewKeyedTransactor(keys[i]), 0, 0, 0, [20]byte{0}); err != nil {
t.Fatalf("Iter #%d: failed invalid release attempt: %v", i, err)
}

@@ -52,16 +52,10 @@ func NewBlockValidator(config *params.ChainConfig, blockchain *BlockChain, engin
// validated at this point.
func (v *BlockValidator) ValidateBody(block *types.Block) error {
// Check whether the block's known, and if not, that it's linkable
if v.bc.HasBlock(block.Hash()) {
if _, err := state.New(block.Root(), v.bc.chainDb); err == nil {
return ErrKnownBlock
}
if v.bc.HasBlockAndState(block.Hash()) {
return ErrKnownBlock
}
parent := v.bc.GetBlock(block.ParentHash(), block.NumberU64()-1)
if parent == nil {
return consensus.ErrUnknownAncestor
}
if _, err := state.New(parent.Root(), v.bc.chainDb); err != nil {
if !v.bc.HasBlockAndState(block.ParentHash()) {
return consensus.ErrUnknownAncestor
}
// Header validity is known at this point, check the uncles and transactions

@@ -92,7 +92,7 @@ type BlockChain struct {
currentBlock *types.Block // Current head of the block chain
currentFastBlock *types.Block // Current head of the fast-sync chain (may be above the block chain!)

stateCache *state.StateDB // State database to reuse between imports (contains state cache)
stateCache state.Database // State database to reuse between imports (contains state cache)
bodyCache *lru.Cache // Cache for the most recent block bodies
bodyRLPCache *lru.Cache // Cache for the most recent block bodies in RLP encoded format
blockCache *lru.Cache // Cache for the most recent entire blocks
@@ -125,6 +125,7 @@ func NewBlockChain(chainDb ethdb.Database, config *params.ChainConfig, engine co
bc := &BlockChain{
config: config,
chainDb: chainDb,
stateCache: state.NewDatabase(chainDb),
eventMux: mux,
quit: make(chan struct{}),
bodyCache: bodyCache,
@@ -190,7 +191,7 @@ func (bc *BlockChain) loadLastState() error {
return bc.Reset()
}
// Make sure the state associated with the block is available
if _, err := state.New(currentBlock.Root(), bc.chainDb); err != nil {
if _, err := state.New(currentBlock.Root(), bc.stateCache); err != nil {
// Dangling block without a state associated, init from scratch
log.Warn("Head state missing, resetting chain", "number", currentBlock.Number(), "hash", currentBlock.Hash())
return bc.Reset()
@@ -214,12 +215,6 @@ func (bc *BlockChain) loadLastState() error {
bc.currentFastBlock = block
}
}
// Initialize a statedb cache to ensure singleton account bloom filter generation
statedb, err := state.New(bc.currentBlock.Root(), bc.chainDb)
if err != nil {
return err
}
bc.stateCache = statedb

// Issue a status log for the user
headerTd := bc.GetTd(currentHeader.Hash(), currentHeader.Number.Uint64())
@@ -261,7 +256,7 @@ func (bc *BlockChain) SetHead(head uint64) error {
bc.currentBlock = bc.GetBlock(currentHeader.Hash(), currentHeader.Number.Uint64())
}
if bc.currentBlock != nil {
if _, err := state.New(bc.currentBlock.Root(), bc.chainDb); err != nil {
if _, err := state.New(bc.currentBlock.Root(), bc.stateCache); err != nil {
// Rewound state missing, rolled back to before pivot, reset to genesis
bc.currentBlock = nil
}
@@ -384,7 +379,7 @@ func (bc *BlockChain) State() (*state.StateDB, error) {

// StateAt returns a new mutable state based on a particular point in time.
func (bc *BlockChain) StateAt(root common.Hash) (*state.StateDB, error) {
return bc.stateCache.New(root)
return state.New(root, bc.stateCache)
}

// Reset purges the entire blockchain, restoring it to its genesis state.
@@ -531,7 +526,7 @@ func (bc *BlockChain) HasBlockAndState(hash common.Hash) bool {
return false
}
// Ensure the associated state is also present
_, err := state.New(block.Root(), bc.chainDb)
_, err := bc.stateCache.OpenTrie(block.Root())
return err == nil
}

@@ -959,31 +954,30 @@ func (bc *BlockChain) InsertChain(chain types.Blocks) (int, error) {
}
// Create a new statedb using the parent block and report an
// error if it fails.
switch {
case i == 0:
err = bc.stateCache.Reset(bc.GetBlock(block.ParentHash(), block.NumberU64()-1).Root())
default:
err = bc.stateCache.Reset(chain[i-1].Root())
var parent *types.Block
if i == 0 {
parent = bc.GetBlock(block.ParentHash(), block.NumberU64()-1)
} else {
parent = chain[i-1]
}
state, err := state.New(parent.Root(), bc.stateCache)
if err != nil {
bc.reportBlock(block, nil, err)
return i, err
}
// Process block using the parent state as reference point.
receipts, logs, usedGas, err := bc.processor.Process(block, bc.stateCache, bc.vmConfig)
receipts, logs, usedGas, err := bc.processor.Process(block, state, bc.vmConfig)
if err != nil {
bc.reportBlock(block, receipts, err)
return i, err
}
// Validate the state using the default validator
err = bc.Validator().ValidateState(block, bc.GetBlock(block.ParentHash(), block.NumberU64()-1), bc.stateCache, receipts, usedGas)
err = bc.Validator().ValidateState(block, parent, state, receipts, usedGas)
if err != nil {
bc.reportBlock(block, receipts, err)
return i, err
}
// Write state changes to database
_, err = bc.stateCache.Commit(bc.config.IsEIP158(block.Number()))
if err != nil {
if _, err = state.CommitTo(bc.chainDb, bc.config.IsEIP158(block.Number())); err != nil {
return i, err
}

@@ -1021,7 +1015,7 @@ func (bc *BlockChain) InsertChain(chain types.Blocks) (int, error) {
return i, err
}
// Write hash preimages
if err := WritePreimages(bc.chainDb, block.NumberU64(), bc.stateCache.Preimages()); err != nil {
if err := WritePreimages(bc.chainDb, block.NumberU64(), state.Preimages()); err != nil {
return i, err
}
case SideStatTy:
@@ -1079,7 +1073,7 @@ func (st *insertStats) report(chain []*types.Block, index int) {
}
log.Info("Imported new chain segment", context...)

*st = insertStats{startTime: now, lastIndex: index}
*st = insertStats{startTime: now, lastIndex: index + 1}
}
}

@@ -131,7 +131,7 @@ func testBlockChainImport(chain types.Blocks, blockchain *BlockChain) error {
}
return err
}
statedb, err := state.New(blockchain.GetBlockByHash(block.ParentHash()).Root(), blockchain.chainDb)
statedb, err := state.New(blockchain.GetBlockByHash(block.ParentHash()).Root(), blockchain.stateCache)
if err != nil {
return err
}
@@ -148,7 +148,7 @@ func testBlockChainImport(chain types.Blocks, blockchain *BlockChain) error {
blockchain.mu.Lock()
WriteTd(blockchain.chainDb, block.Hash(), block.NumberU64(), new(big.Int).Add(block.Difficulty(), blockchain.GetTdByHash(block.ParentHash())))
WriteBlock(blockchain.chainDb, block)
statedb.Commit(false)
statedb.CommitTo(blockchain.chainDb, false)
blockchain.mu.Unlock()
}
return nil
@@ -1131,7 +1131,7 @@ func TestEIP161AccountRemoval(t *testing.T) {
if _, err := blockchain.InsertChain(types.Blocks{blocks[0]}); err != nil {
t.Fatal(err)
}
if !blockchain.stateCache.Exist(theAddr) {
if st, _ := blockchain.State(); !st.Exist(theAddr) {
t.Error("expected account to exist")
}

@@ -1139,7 +1139,7 @@ func TestEIP161AccountRemoval(t *testing.T) {
if _, err := blockchain.InsertChain(types.Blocks{blocks[1]}); err != nil {
t.Fatal(err)
}
if blockchain.stateCache.Exist(theAddr) {
if st, _ := blockchain.State(); st.Exist(theAddr) {
t.Error("account should not exist")
}

@@ -1147,7 +1147,7 @@ func TestEIP161AccountRemoval(t *testing.T) {
if _, err := blockchain.InsertChain(types.Blocks{blocks[2]}); err != nil {
t.Fatal(err)
}
if blockchain.stateCache.Exist(theAddr) {
if st, _ := blockchain.State(); st.Exist(theAddr) {
t.Error("account should not exist")
}
}

@@ -181,7 +181,7 @@ func GenerateChain(config *params.ChainConfig, parent *types.Block, db ethdb.Dat
gen(i, b)
}
ethash.AccumulateRewards(statedb, h, b.uncles)
root, err := statedb.Commit(config.IsEIP158(h.Number))
root, err := statedb.CommitTo(db, config.IsEIP158(h.Number))
if err != nil {
panic(fmt.Sprintf("state write error: %v", err))
}
@@ -189,7 +189,7 @@ func GenerateChain(config *params.ChainConfig, parent *types.Block, db ethdb.Dat
return types.NewBlock(h, b.txs, b.uncles, b.receipts), b.receipts
}
for i := 0; i < n; i++ {
statedb, err := state.New(parent.Root(), db)
statedb, err := state.New(parent.Root(), state.NewDatabase(db))
if err != nil {
panic(err)
}

@@ -24,9 +24,6 @@ import (
// TxPreEvent is posted when a transaction enters the transaction pool.
type TxPreEvent struct{ Tx *types.Transaction }

// TxPostEvent is posted when a transaction has been processed.
type TxPostEvent struct{ Tx *types.Transaction }

// PendingLogsEvent is posted pre mining and notifies of pending logs.
type PendingLogsEvent struct {
Logs []*types.Log
@@ -54,17 +51,4 @@ type ChainSideEvent struct {
Block *types.Block
}

type PendingBlockEvent struct {
Block *types.Block
Logs []*types.Log
}

type ChainUncleEvent struct {
Block *types.Block
}

type ChainHeadEvent struct{ Block *types.Block }

// Mining operation events
type StartMining struct{}
type TopMining struct{}

@@ -13,24 +13,27 @@ import (
"github.com/ethereum/go-ethereum/params"
)

var _ = (*genesisSpecMarshaling)(nil)

func (g Genesis) MarshalJSON() ([]byte, error) {
type Genesis struct {
Config *params.ChainConfig `json:"config"`
Nonce math.HexOrDecimal64 `json:"nonce"`
Timestamp math.HexOrDecimal64 `json:"timestamp"`
ParentHash common.Hash `json:"parentHash"`
ExtraData hexutil.Bytes `json:"extraData"`
GasLimit math.HexOrDecimal64 `json:"gasLimit" gencodec:"required"`
Difficulty *math.HexOrDecimal256 `json:"difficulty" gencodec:"required"`
Mixhash common.Hash `json:"mixHash"`
Coinbase common.Address `json:"coinbase"`
Alloc map[common.UnprefixedAddress]GenesisAccount `json:"alloc" gencodec:"required"`
Number uint64 `json:"number"`
GasUsed math.HexOrDecimal64 `json:"gasUsed"`
ParentHash common.Hash `json:"parentHash"`
}
var enc Genesis
enc.Config = g.Config
enc.Nonce = math.HexOrDecimal64(g.Nonce)
enc.Timestamp = math.HexOrDecimal64(g.Timestamp)
enc.ParentHash = g.ParentHash
enc.ExtraData = g.ExtraData
enc.GasLimit = math.HexOrDecimal64(g.GasLimit)
enc.Difficulty = (*math.HexOrDecimal256)(g.Difficulty)
@@ -42,6 +45,9 @@ func (g Genesis) MarshalJSON() ([]byte, error) {
enc.Alloc[common.UnprefixedAddress(k)] = v
}
}
enc.Number = g.Number
enc.GasUsed = math.HexOrDecimal64(g.GasUsed)
enc.ParentHash = g.ParentHash
return json.Marshal(&enc)
}

@@ -50,13 +56,15 @@ func (g *Genesis) UnmarshalJSON(input []byte) error {
Config *params.ChainConfig `json:"config"`
Nonce *math.HexOrDecimal64 `json:"nonce"`
Timestamp *math.HexOrDecimal64 `json:"timestamp"`
ParentHash *common.Hash `json:"parentHash"`
ExtraData hexutil.Bytes `json:"extraData"`
GasLimit *math.HexOrDecimal64 `json:"gasLimit" gencodec:"required"`
Difficulty *math.HexOrDecimal256 `json:"difficulty" gencodec:"required"`
Mixhash *common.Hash `json:"mixHash"`
Coinbase *common.Address `json:"coinbase"`
Alloc map[common.UnprefixedAddress]GenesisAccount `json:"alloc" gencodec:"required"`
Number *uint64 `json:"number"`
GasUsed *math.HexOrDecimal64 `json:"gasUsed"`
ParentHash *common.Hash `json:"parentHash"`
}
var dec Genesis
if err := json.Unmarshal(input, &dec); err != nil {
@@ -71,9 +79,6 @@ func (g *Genesis) UnmarshalJSON(input []byte) error {
if dec.Timestamp != nil {
g.Timestamp = uint64(*dec.Timestamp)
}
if dec.ParentHash != nil {
g.ParentHash = *dec.ParentHash
}
if dec.ExtraData != nil {
g.ExtraData = dec.ExtraData
}
@@ -98,5 +103,14 @@ func (g *Genesis) UnmarshalJSON(input []byte) error {
for k, v := range dec.Alloc {
g.Alloc[common.Address(k)] = v
}
if dec.Number != nil {
g.Number = *dec.Number
}
if dec.GasUsed != nil {
g.GasUsed = uint64(*dec.GasUsed)
}
if dec.ParentHash != nil {
g.ParentHash = *dec.ParentHash
}
return nil
}

@@ -12,27 +12,37 @@ import (
"github.com/ethereum/go-ethereum/common/math"
)

var _ = (*genesisAccountMarshaling)(nil)

func (g GenesisAccount) MarshalJSON() ([]byte, error) {
type GenesisAccount struct {
Code hexutil.Bytes `json:"code,omitempty"`
Storage map[common.Hash]common.Hash `json:"storage,omitempty"`
Balance *math.HexOrDecimal256 `json:"balance" gencodec:"required"`
Nonce math.HexOrDecimal64 `json:"nonce,omitempty"`
Code hexutil.Bytes `json:"code,omitempty"`
Storage map[storageJSON]storageJSON `json:"storage,omitempty"`
Balance *math.HexOrDecimal256 `json:"balance" gencodec:"required"`
Nonce math.HexOrDecimal64 `json:"nonce,omitempty"`
PrivateKey hexutil.Bytes `json:"secretKey,omitempty"`
}
var enc GenesisAccount
enc.Code = g.Code
enc.Storage = g.Storage
if g.Storage != nil {
enc.Storage = make(map[storageJSON]storageJSON, len(g.Storage))
for k, v := range g.Storage {
enc.Storage[storageJSON(k)] = storageJSON(v)
}
}
enc.Balance = (*math.HexOrDecimal256)(g.Balance)
enc.Nonce = math.HexOrDecimal64(g.Nonce)
enc.PrivateKey = g.PrivateKey
return json.Marshal(&enc)
}

func (g *GenesisAccount) UnmarshalJSON(input []byte) error {
type GenesisAccount struct {
Code hexutil.Bytes `json:"code,omitempty"`
Storage map[common.Hash]common.Hash `json:"storage,omitempty"`
Balance *math.HexOrDecimal256 `json:"balance" gencodec:"required"`
Nonce *math.HexOrDecimal64 `json:"nonce,omitempty"`
Code hexutil.Bytes `json:"code,omitempty"`
Storage map[storageJSON]storageJSON `json:"storage,omitempty"`
Balance *math.HexOrDecimal256 `json:"balance" gencodec:"required"`
Nonce *math.HexOrDecimal64 `json:"nonce,omitempty"`
PrivateKey hexutil.Bytes `json:"secretKey,omitempty"`
}
var dec GenesisAccount
if err := json.Unmarshal(input, &dec); err != nil {
@@ -42,7 +52,10 @@ func (g *GenesisAccount) UnmarshalJSON(input []byte) error {
g.Code = dec.Code
}
if dec.Storage != nil {
g.Storage = dec.Storage
g.Storage = make(map[common.Hash]common.Hash, len(dec.Storage))
for k, v := range dec.Storage {
g.Storage[common.Hash(k)] = common.Hash(v)
}
}
if dec.Balance == nil {
return errors.New("missing required field 'balance' for GenesisAccount")
@@ -51,5 +64,8 @@ func (g *GenesisAccount) UnmarshalJSON(input []byte) error {
if dec.Nonce != nil {
g.Nonce = uint64(*dec.Nonce)
}
if dec.PrivateKey != nil {
g.PrivateKey = dec.PrivateKey
}
return nil
}

@@ -17,6 +17,9 @@
package core

import (
"bytes"
"encoding/hex"
"encoding/json"
"errors"
"fmt"
"math/big"
@@ -44,24 +47,42 @@ type Genesis struct {
Config *params.ChainConfig `json:"config"`
Nonce uint64 `json:"nonce"`
Timestamp uint64 `json:"timestamp"`
ParentHash common.Hash `json:"parentHash"`
ExtraData []byte `json:"extraData"`
GasLimit uint64 `json:"gasLimit" gencodec:"required"`
Difficulty *big.Int `json:"difficulty" gencodec:"required"`
Mixhash common.Hash `json:"mixHash"`
Coinbase common.Address `json:"coinbase"`
Alloc GenesisAlloc `json:"alloc" gencodec:"required"`

// These fields are used for consensus tests. Please don't use them
// in actual genesis blocks.
Number uint64 `json:"number"`
GasUsed uint64 `json:"gasUsed"`
ParentHash common.Hash `json:"parentHash"`
}

// GenesisAlloc specifies the initial state that is part of the genesis block.
type GenesisAlloc map[common.Address]GenesisAccount

func (ga *GenesisAlloc) UnmarshalJSON(data []byte) error {
m := make(map[common.UnprefixedAddress]GenesisAccount)
if err := json.Unmarshal(data, &m); err != nil {
return err
}
*ga = make(GenesisAlloc)
for addr, a := range m {
(*ga)[common.Address(addr)] = a
}
return nil
}
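
Not part of the diff: a hedged sketch of how the GenesisAlloc decoder above is exercised; the addresses and balances are made up, and the import path assumes the go-ethereum 1.6 tree.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/ethereum/go-ethereum/core"
)

func main() {
	// Keys are unprefixed hex addresses, as accepted by GenesisAlloc.UnmarshalJSON above.
	blob := []byte(`{
		"0000000000000000000000000000000000000001": {"balance": "1"},
		"0000000000000000000000000000000000000002": {"balance": "0x20"}
	}`)

	var alloc core.GenesisAlloc
	if err := json.Unmarshal(blob, &alloc); err != nil {
		panic(err)
	}
	for addr, account := range alloc {
		fmt.Println(addr.Hex(), account.Balance)
	}
}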

// GenesisAccount is an account in the state of the genesis block.
type GenesisAccount struct {
Code []byte `json:"code,omitempty"`
Storage map[common.Hash]common.Hash `json:"storage,omitempty"`
Balance *big.Int `json:"balance" gencodec:"required"`
Nonce uint64 `json:"nonce,omitempty"`
Code []byte `json:"code,omitempty"`
Storage map[common.Hash]common.Hash `json:"storage,omitempty"`
Balance *big.Int `json:"balance" gencodec:"required"`
Nonce uint64 `json:"nonce,omitempty"`
PrivateKey []byte `json:"secretKey,omitempty"` // for tests
}

// field type overrides for gencodec
@@ -70,13 +91,38 @@ type genesisSpecMarshaling struct {
Timestamp math.HexOrDecimal64
ExtraData hexutil.Bytes
GasLimit math.HexOrDecimal64
GasUsed math.HexOrDecimal64
Difficulty *math.HexOrDecimal256
Alloc map[common.UnprefixedAddress]GenesisAccount
}

type genesisAccountMarshaling struct {
Code hexutil.Bytes
Balance *math.HexOrDecimal256
Nonce math.HexOrDecimal64
Code hexutil.Bytes
Balance *math.HexOrDecimal256
Nonce math.HexOrDecimal64
Storage map[storageJSON]storageJSON
PrivateKey hexutil.Bytes
}

// storageJSON represents a 256 bit byte array, but allows less than 256 bits when
// unmarshaling from hex.
type storageJSON common.Hash

func (h *storageJSON) UnmarshalText(text []byte) error {
text = bytes.TrimPrefix(text, []byte("0x"))
if len(text) > 64 {
return fmt.Errorf("too many hex characters in storage key/value %q", text)
}
offset := len(h) - len(text)/2 // pad on the left
if _, err := hex.Decode(h[offset:], text); err != nil {
fmt.Println(err)
return fmt.Errorf("invalid hex storage key/value %q", text)
}
return nil
}

func (h storageJSON) MarshalText() ([]byte, error) {
return hexutil.Bytes(h[:]).MarshalText()
}
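
Not part of the diff: a standalone sketch of the left-padding behaviour UnmarshalText implements above, with the logic copied into a plain helper so it can run without the rest of the package.

package main

import (
	"bytes"
	"encoding/hex"
	"fmt"
)

// pad32 mirrors storageJSON.UnmarshalText: a short hex string fills the
// low-order bytes of a 32-byte value, i.e. it is padded on the left.
func pad32(text []byte) ([32]byte, error) {
	var h [32]byte
	text = bytes.TrimPrefix(text, []byte("0x"))
	if len(text) > 64 {
		return h, fmt.Errorf("too many hex characters in %q", text)
	}
	offset := len(h) - len(text)/2 // pad on the left
	_, err := hex.Decode(h[offset:], text)
	return h, err
}

func main() {
	v, _ := pad32([]byte("0x12ab"))
	fmt.Printf("%x\n", v) // 0000...12ab, 32 bytes in total
}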

// GenesisMismatchError is raised when trying to overwrite an existing
@@ -143,7 +189,7 @@ func SetupGenesisBlock(db ethdb.Database, genesis *Genesis) (*params.ChainConfig
// Special case: don't change the existing config of a non-mainnet chain if no new
// config is supplied. These chains would get AllProtocolChanges (and a compat error)
// if we just continued here.
if genesis == nil && stored != params.MainNetGenesisHash {
if genesis == nil && stored != params.MainnetGenesisHash {
return storedcfg, stored, nil
}

@@ -164,9 +210,9 @@ func (g *Genesis) configOrDefault(ghash common.Hash) *params.ChainConfig {
switch {
case g != nil:
return g.Config
case ghash == params.MainNetGenesisHash:
case ghash == params.MainnetGenesisHash:
return params.MainnetChainConfig
case ghash == params.TestNetGenesisHash:
case ghash == params.TestnetGenesisHash:
return params.TestnetChainConfig
default:
return params.AllProtocolChanges
@@ -176,7 +222,7 @@ func (g *Genesis) configOrDefault(ghash common.Hash) *params.ChainConfig {
// ToBlock creates the block and state of a genesis specification.
func (g *Genesis) ToBlock() (*types.Block, *state.StateDB) {
db, _ := ethdb.NewMemDatabase()
statedb, _ := state.New(common.Hash{}, db)
statedb, _ := state.New(common.Hash{}, state.NewDatabase(db))
for addr, account := range g.Alloc {
statedb.AddBalance(addr, account.Balance)
statedb.SetCode(addr, account.Code)
@@ -187,11 +233,13 @@ func (g *Genesis) ToBlock() (*types.Block, *state.StateDB) {
}
root := statedb.IntermediateRoot(false)
head := &types.Header{
Number: new(big.Int).SetUint64(g.Number),
Nonce: types.EncodeNonce(g.Nonce),
Time: new(big.Int).SetUint64(g.Timestamp),
ParentHash: g.ParentHash,
Extra: g.ExtraData,
GasLimit: new(big.Int).SetUint64(g.GasLimit),
GasUsed: new(big.Int).SetUint64(g.GasUsed),
Difficulty: g.Difficulty,
MixDigest: g.Mixhash,
Coinbase: g.Coinbase,
@@ -210,6 +258,9 @@ func (g *Genesis) ToBlock() (*types.Block, *state.StateDB) {
// The block is committed as the canonical head block.
func (g *Genesis) Commit(db ethdb.Database) (*types.Block, error) {
block, statedb := g.ToBlock()
if block.Number().Sign() != 0 {
return nil, fmt.Errorf("can't commit genesis block with number > 0")
}
if _, err := statedb.CommitTo(db, false); err != nil {
return nil, fmt.Errorf("cannot write state: %v", err)
}

@@ -32,12 +32,12 @@ import (

func TestDefaultGenesisBlock(t *testing.T) {
block, _ := DefaultGenesisBlock().ToBlock()
if block.Hash() != params.MainNetGenesisHash {
t.Errorf("wrong mainnet genesis hash, got %v, want %v", block.Hash(), params.MainNetGenesisHash)
if block.Hash() != params.MainnetGenesisHash {
t.Errorf("wrong mainnet genesis hash, got %v, want %v", block.Hash(), params.MainnetGenesisHash)
}
block, _ = DefaultTestnetGenesisBlock().ToBlock()
if block.Hash() != params.TestNetGenesisHash {
t.Errorf("wrong testnet genesis hash, got %v, want %v", block.Hash(), params.TestNetGenesisHash)
if block.Hash() != params.TestnetGenesisHash {
t.Errorf("wrong testnet genesis hash, got %v, want %v", block.Hash(), params.TestnetGenesisHash)
}
}

@@ -73,7 +73,7 @@ func TestSetupGenesis(t *testing.T) {
fn: func(db ethdb.Database) (*params.ChainConfig, common.Hash, error) {
return SetupGenesisBlock(db, nil)
},
wantHash: params.MainNetGenesisHash,
wantHash: params.MainnetGenesisHash,
wantConfig: params.MainnetChainConfig,
},
{
@@ -82,7 +82,7 @@ func TestSetupGenesis(t *testing.T) {
DefaultGenesisBlock().MustCommit(db)
return SetupGenesisBlock(db, nil)
},
wantHash: params.MainNetGenesisHash,
wantHash: params.MainnetGenesisHash,
wantConfig: params.MainnetChainConfig,
},
{
@@ -100,8 +100,8 @@ func TestSetupGenesis(t *testing.T) {
customg.MustCommit(db)
return SetupGenesisBlock(db, DefaultTestnetGenesisBlock())
},
wantErr: &GenesisMismatchError{Stored: customghash, New: params.TestNetGenesisHash},
wantHash: params.TestNetGenesisHash,
wantErr: &GenesisMismatchError{Stored: customghash, New: params.TestnetGenesisHash},
wantHash: params.TestnetGenesisHash,
wantConfig: params.TestnetChainConfig,
},
{

core/state/database.go (new file, 154 lines)
@@ -0,0 +1,154 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package state

import (
"fmt"
"sync"

"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/trie"
lru "github.com/hashicorp/golang-lru"
)

// Trie cache generation limit after which to evic trie nodes from memory.
var MaxTrieCacheGen = uint16(120)

const (
// Number of past tries to keep. This value is chosen such that
// reasonable chain reorg depths will hit an existing trie.
maxPastTries = 12

// Number of codehash->size associations to keep.
codeSizeCacheSize = 100000
)

// Database wraps access to tries and contract code.
type Database interface {
// Accessing tries:
// OpenTrie opens the main account trie.
// OpenStorageTrie opens the storage trie of an account.
OpenTrie(root common.Hash) (Trie, error)
OpenStorageTrie(addrHash, root common.Hash) (Trie, error)
// Accessing contract code:
ContractCode(addrHash, codeHash common.Hash) ([]byte, error)
ContractCodeSize(addrHash, codeHash common.Hash) (int, error)
// CopyTrie returns an independent copy of the given trie.
CopyTrie(Trie) Trie
}

// Trie is a Ethereum Merkle Trie.
type Trie interface {
TryGet(key []byte) ([]byte, error)
TryUpdate(key, value []byte) error
TryDelete(key []byte) error
CommitTo(trie.DatabaseWriter) (common.Hash, error)
Hash() common.Hash
NodeIterator(startKey []byte) trie.NodeIterator
GetKey([]byte) []byte // TODO(fjl): remove this when SecureTrie is removed
}

// NewDatabase creates a backing store for state. The returned database is safe for
// concurrent use and retains cached trie nodes in memory.
func NewDatabase(db ethdb.Database) Database {
csc, _ := lru.New(codeSizeCacheSize)
return &cachingDB{db: db, codeSizeCache: csc}
}

type cachingDB struct {
db ethdb.Database
mu sync.Mutex
pastTries []*trie.SecureTrie
codeSizeCache *lru.Cache
}

func (db *cachingDB) OpenTrie(root common.Hash) (Trie, error) {
db.mu.Lock()
defer db.mu.Unlock()

for i := len(db.pastTries) - 1; i >= 0; i-- {
if db.pastTries[i].Hash() == root {
return cachedTrie{db.pastTries[i].Copy(), db}, nil
}
}
tr, err := trie.NewSecure(root, db.db, MaxTrieCacheGen)
if err != nil {
return nil, err
}
return cachedTrie{tr, db}, nil
}

func (db *cachingDB) pushTrie(t *trie.SecureTrie) {
db.mu.Lock()
defer db.mu.Unlock()

if len(db.pastTries) >= maxPastTries {
copy(db.pastTries, db.pastTries[1:])
db.pastTries[len(db.pastTries)-1] = t
} else {
db.pastTries = append(db.pastTries, t)
}
}

func (db *cachingDB) OpenStorageTrie(addrHash, root common.Hash) (Trie, error) {
return trie.NewSecure(root, db.db, 0)
}

func (db *cachingDB) CopyTrie(t Trie) Trie {
switch t := t.(type) {
case cachedTrie:
return cachedTrie{t.SecureTrie.Copy(), db}
case *trie.SecureTrie:
return t.Copy()
default:
panic(fmt.Errorf("unknown trie type %T", t))
}
}

func (db *cachingDB) ContractCode(addrHash, codeHash common.Hash) ([]byte, error) {
code, err := db.db.Get(codeHash[:])
if err == nil {
db.codeSizeCache.Add(codeHash, len(code))
}
return code, err
}

func (db *cachingDB) ContractCodeSize(addrHash, codeHash common.Hash) (int, error) {
if cached, ok := db.codeSizeCache.Get(codeHash); ok {
return cached.(int), nil
}
code, err := db.ContractCode(addrHash, codeHash)
if err == nil {
db.codeSizeCache.Add(codeHash, len(code))
}
return len(code), err
}

// cachedTrie inserts its trie into a cachingDB on commit.
type cachedTrie struct {
*trie.SecureTrie
db *cachingDB
}

func (m cachedTrie) CommitTo(dbw trie.DatabaseWriter) (common.Hash, error) {
root, err := m.SecureTrie.CommitTo(dbw)
if err == nil {
m.db.pushTrie(m.SecureTrie)
}
return root, err
}
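
Not part of the diff: a minimal usage sketch of the new state.Database wrapper, pieced together from the call sites changed elsewhere in this compare (state.NewDatabase, state.New, StateDB.CommitTo); paths and values are illustrative.

package main

import (
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/state"
	"github.com/ethereum/go-ethereum/ethdb"
)

func main() {
	// In-memory key-value store backing the state.
	db, _ := ethdb.NewMemDatabase()

	// Wrap it in the caching state.Database introduced by this file,
	// then open an empty state on top of it.
	statedb, _ := state.New(common.Hash{}, state.NewDatabase(db))

	// Mutate the state and flush it back to the underlying database.
	addr := common.HexToAddress("0x0000000000000000000000000000000000000001")
	statedb.AddBalance(addr, big.NewInt(1))
	if _, err := statedb.CommitTo(db, false); err != nil {
		panic(err)
	}
}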
|
@ -41,7 +41,7 @@ type Dump struct {
|
||||
|
||||
func (self *StateDB) RawDump() Dump {
|
||||
dump := Dump{
|
||||
Root: common.Bytes2Hex(self.trie.Root()),
|
||||
Root: fmt.Sprintf("%x", self.trie.Hash()),
|
||||
Accounts: make(map[string]DumpAccount),
|
||||
}
|
||||
|
||||
|
@ -19,7 +19,6 @@ package state
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"math/big"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/rlp"
|
||||
@ -105,16 +104,11 @@ func (it *NodeIterator) step() error {
|
||||
return nil
|
||||
}
|
||||
// Otherwise we've reached an account node, initiate data iteration
|
||||
var account struct {
|
||||
Nonce uint64
|
||||
Balance *big.Int
|
||||
Root common.Hash
|
||||
CodeHash []byte
|
||||
}
|
||||
var account Account
|
||||
if err := rlp.Decode(bytes.NewReader(it.stateIt.LeafBlob()), &account); err != nil {
|
||||
return err
|
||||
}
|
||||
dataTrie, err := trie.New(account.Root, it.state.db)
|
||||
dataTrie, err := it.state.db.OpenStorageTrie(common.BytesToHash(it.stateIt.LeafKey()), account.Root)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -124,7 +118,8 @@ func (it *NodeIterator) step() error {
|
||||
}
|
||||
if !bytes.Equal(account.CodeHash, emptyCodeHash) {
|
||||
it.codeHash = common.BytesToHash(account.CodeHash)
|
||||
it.code, err = it.state.db.Get(account.CodeHash)
|
||||
addrHash := common.BytesToHash(it.stateIt.LeafKey())
|
||||
it.code, err = it.state.db.ContractCode(addrHash, common.BytesToHash(account.CodeHash))
|
||||
if err != nil {
|
||||
return fmt.Errorf("code %x: %v", account.CodeHash, err)
|
||||
}
|
||||
|
@ -21,13 +21,12 @@ import (
|
||||
"testing"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/ethdb"
|
||||
)
|
||||
|
||||
// Tests that the node iterator indeed walks over the entire database contents.
|
||||
func TestNodeIteratorCoverage(t *testing.T) {
|
||||
// Create some arbitrary test state to iterate
|
||||
db, root, _ := makeTestState()
|
||||
db, mem, root, _ := makeTestState()
|
||||
|
||||
state, err := New(root, db)
|
||||
if err != nil {
|
||||
@ -40,13 +39,14 @@ func TestNodeIteratorCoverage(t *testing.T) {
|
||||
hashes[it.Hash] = struct{}{}
|
||||
}
|
||||
}
|
||||
|
||||
// Cross check the hashes and the database itself
|
||||
for hash := range hashes {
|
||||
if _, err := db.Get(hash.Bytes()); err != nil {
|
||||
if _, err := mem.Get(hash.Bytes()); err != nil {
|
||||
t.Errorf("failed to retrieve reported node %x: %v", hash, err)
|
||||
}
|
||||
}
|
||||
for _, key := range db.(*ethdb.MemDatabase).Keys() {
|
||||
for _, key := range mem.Keys() {
|
||||
if bytes.HasPrefix(key, []byte("secure-key-")) {
|
||||
continue
|
||||
}
|
||||
|
@ -27,7 +27,7 @@ var addr = common.BytesToAddress([]byte("test"))
|
||||
|
||||
func create() (*ManagedState, *account) {
|
||||
db, _ := ethdb.NewMemDatabase()
|
||||
statedb, _ := New(common.Hash{}, db)
|
||||
statedb, _ := New(common.Hash{}, NewDatabase(db))
|
||||
ms := ManageState(statedb)
|
||||
ms.StateDB.SetNonce(addr, 100)
|
||||
ms.accounts[addr] = newAccount(ms.StateDB.getStateObject(addr))
|
||||
|
@ -62,9 +62,10 @@ func (self Storage) Copy() Storage {
|
||||
// Account values can be accessed and modified through the object.
|
||||
// Finally, call CommitTrie to write the modified storage trie into a database.
|
||||
type stateObject struct {
|
||||
address common.Address // Ethereum address of this account
|
||||
data Account
|
||||
db *StateDB
|
||||
address common.Address
|
||||
addrHash common.Hash // hash of ethereum address of the account
|
||||
data Account
|
||||
db *StateDB
|
||||
|
||||
// DB error.
|
||||
// State objects are used by the consensus core and VM which are
|
||||
@ -74,8 +75,8 @@ type stateObject struct {
|
||||
dbErr error
|
||||
|
||||
// Write caches.
|
||||
trie *trie.SecureTrie // storage trie, which becomes non-nil on first access
|
||||
code Code // contract bytecode, which gets set when code is loaded
|
||||
trie Trie // storage trie, which becomes non-nil on first access
|
||||
code Code // contract bytecode, which gets set when code is loaded
|
||||
|
||||
cachedStorage Storage // Storage entry cache to avoid duplicate reads
|
||||
dirtyStorage Storage // Storage entries that need to be flushed to disk
|
||||
@ -112,7 +113,15 @@ func newObject(db *StateDB, address common.Address, data Account, onDirty func(a
|
||||
if data.CodeHash == nil {
|
||||
data.CodeHash = emptyCodeHash
|
||||
}
|
||||
return &stateObject{db: db, address: address, data: data, cachedStorage: make(Storage), dirtyStorage: make(Storage), onDirty: onDirty}
|
||||
return &stateObject{
|
||||
db: db,
|
||||
address: address,
|
||||
addrHash: crypto.Keccak256Hash(address[:]),
|
||||
data: data,
|
||||
cachedStorage: make(Storage),
|
||||
dirtyStorage: make(Storage),
|
||||
onDirty: onDirty,
|
||||
}
|
||||
}
|
||||
|
||||
// EncodeRLP implements rlp.Encoder.
|
||||
@ -148,12 +157,12 @@ func (c *stateObject) touch() {
|
||||
c.touched = true
|
||||
}
|
||||
|
||||
func (c *stateObject) getTrie(db trie.Database) *trie.SecureTrie {
|
||||
func (c *stateObject) getTrie(db Database) Trie {
|
||||
if c.trie == nil {
|
||||
var err error
|
||||
c.trie, err = trie.NewSecure(c.data.Root, db, 0)
|
||||
c.trie, err = db.OpenStorageTrie(c.addrHash, c.data.Root)
|
||||
if err != nil {
|
||||
c.trie, _ = trie.NewSecure(common.Hash{}, db, 0)
|
||||
c.trie, _ = db.OpenStorageTrie(c.addrHash, common.Hash{})
|
||||
c.setError(fmt.Errorf("can't create storage trie: %v", err))
|
||||
}
|
||||
}
|
||||
@ -161,13 +170,18 @@ func (c *stateObject) getTrie(db trie.Database) *trie.SecureTrie {
|
||||
}
|
||||
|
||||
// GetState returns a value in account storage.
|
||||
func (self *stateObject) GetState(db trie.Database, key common.Hash) common.Hash {
|
||||
func (self *stateObject) GetState(db Database, key common.Hash) common.Hash {
|
||||
value, exists := self.cachedStorage[key]
|
||||
if exists {
|
||||
return value
|
||||
}
|
||||
// Load from DB in case it is missing.
|
||||
if enc := self.getTrie(db).Get(key[:]); len(enc) > 0 {
|
||||
enc, err := self.getTrie(db).TryGet(key[:])
|
||||
if err != nil {
|
||||
self.setError(err)
|
||||
return common.Hash{}
|
||||
}
|
||||
if len(enc) > 0 {
|
||||
_, content, _, err := rlp.Split(enc)
|
||||
if err != nil {
|
||||
self.setError(err)
|
||||
@ -181,7 +195,7 @@ func (self *stateObject) GetState(db trie.Database, key common.Hash) common.Hash
|
||||
}
|
||||
|
||||
// SetState updates a value in account storage.
|
||||
func (self *stateObject) SetState(db trie.Database, key, value common.Hash) {
|
||||
func (self *stateObject) SetState(db Database, key, value common.Hash) {
|
||||
self.db.journal = append(self.db.journal, storageChange{
|
||||
account: &self.address,
|
||||
key: key,
|
||||
@ -201,30 +215,30 @@ func (self *stateObject) setState(key, value common.Hash) {
|
||||
}
|
||||
|
||||
// updateTrie writes cached storage modifications into the object's storage trie.
|
||||
func (self *stateObject) updateTrie(db trie.Database) *trie.SecureTrie {
|
||||
func (self *stateObject) updateTrie(db Database) Trie {
|
||||
tr := self.getTrie(db)
|
||||
for key, value := range self.dirtyStorage {
|
||||
delete(self.dirtyStorage, key)
|
||||
if (value == common.Hash{}) {
|
||||
tr.Delete(key[:])
|
||||
self.setError(tr.TryDelete(key[:]))
|
||||
continue
|
||||
}
|
||||
// Encoding []byte cannot fail, ok to ignore the error.
|
||||
v, _ := rlp.EncodeToBytes(bytes.TrimLeft(value[:], "\x00"))
|
||||
tr.Update(key[:], v)
|
||||
self.setError(tr.TryUpdate(key[:], v))
|
||||
}
|
||||
return tr
|
||||
}
|
||||
|
||||
// UpdateRoot sets the trie root to the current root hash of
|
||||
func (self *stateObject) updateRoot(db trie.Database) {
|
||||
func (self *stateObject) updateRoot(db Database) {
|
||||
self.updateTrie(db)
|
||||
self.data.Root = self.trie.Hash()
|
||||
}
|
||||
|
||||
// CommitTrie the storage trie of the object to dwb.
|
||||
// This updates the trie root.
|
||||
func (self *stateObject) CommitTrie(db trie.Database, dbw trie.DatabaseWriter) error {
|
||||
func (self *stateObject) CommitTrie(db Database, dbw trie.DatabaseWriter) error {
|
||||
self.updateTrie(db)
|
||||
if self.dbErr != nil {
|
||||
return self.dbErr
|
||||
@ -282,9 +296,7 @@ func (c *stateObject) ReturnGas(gas *big.Int) {}
|
||||
func (self *stateObject) deepCopy(db *StateDB, onDirty func(addr common.Address)) *stateObject {
|
||||
stateObject := newObject(db, self.address, self.data, onDirty)
|
||||
if self.trie != nil {
|
||||
// A shallow copy makes the two tries independent.
|
||||
cpy := *self.trie
|
||||
stateObject.trie = &cpy
|
||||
stateObject.trie = db.db.CopyTrie(self.trie)
|
||||
}
|
||||
stateObject.code = self.code
|
||||
stateObject.dirtyStorage = self.dirtyStorage.Copy()
|
||||
@ -305,14 +317,14 @@ func (c *stateObject) Address() common.Address {
|
||||
}
|
||||
|
||||
// Code returns the contract code associated with this object, if any.
|
||||
func (self *stateObject) Code(db trie.Database) []byte {
|
||||
func (self *stateObject) Code(db Database) []byte {
|
||||
if self.code != nil {
|
||||
return self.code
|
||||
}
|
||||
if bytes.Equal(self.CodeHash(), emptyCodeHash) {
|
||||
return nil
|
||||
}
|
||||
code, err := db.Get(self.CodeHash())
|
||||
code, err := db.ContractCode(self.addrHash, common.BytesToHash(self.CodeHash()))
|
||||
if err != nil {
|
||||
self.setError(fmt.Errorf("can't load code hash %x: %v", self.CodeHash(), err))
|
||||
}
|
||||
|
@ -21,14 +21,14 @@ import (
|
||||
"math/big"
|
||||
"testing"
|
||||
|
||||
checker "gopkg.in/check.v1"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/crypto"
|
||||
"github.com/ethereum/go-ethereum/ethdb"
|
||||
checker "gopkg.in/check.v1"
|
||||
)
|
||||
|
||||
type StateSuite struct {
|
||||
db *ethdb.MemDatabase
|
||||
state *StateDB
|
||||
}
|
||||
|
||||
@ -48,7 +48,7 @@ func (s *StateSuite) TestDump(c *checker.C) {
|
||||
// write some of them to the trie
|
||||
s.state.updateStateObject(obj1)
|
||||
s.state.updateStateObject(obj2)
|
||||
s.state.Commit(false)
|
||||
s.state.CommitTo(s.db, false)
|
||||
|
||||
// check that dump contains the state objects that are in trie
|
||||
got := string(s.state.Dump())
|
||||
@ -87,23 +87,20 @@ func (s *StateSuite) TestDump(c *checker.C) {
|
||||
}
|
||||
|
||||
func (s *StateSuite) SetUpTest(c *checker.C) {
|
||||
db, _ := ethdb.NewMemDatabase()
|
||||
s.state, _ = New(common.Hash{}, db)
|
||||
s.db, _ = ethdb.NewMemDatabase()
|
||||
s.state, _ = New(common.Hash{}, NewDatabase(s.db))
|
||||
}
|
||||
|
||||
func TestNull(t *testing.T) {
|
||||
db, _ := ethdb.NewMemDatabase()
|
||||
state, _ := New(common.Hash{}, db)
|
||||
|
||||
func (s *StateSuite) TestNull(c *checker.C) {
|
||||
address := common.HexToAddress("0x823140710bf13990e4500136726d8b55")
|
||||
state.CreateAccount(address)
|
||||
s.state.CreateAccount(address)
|
||||
//value := common.FromHex("0x823140710bf13990e4500136726d8b55")
|
||||
var value common.Hash
|
||||
state.SetState(address, common.Hash{}, value)
|
||||
state.Commit(false)
|
||||
value = state.GetState(address, common.Hash{})
|
||||
s.state.SetState(address, common.Hash{}, value)
|
||||
s.state.CommitTo(s.db, false)
|
||||
value = s.state.GetState(address, common.Hash{})
|
||||
if !common.EmptyHash(value) {
|
||||
t.Errorf("expected empty hash. got %x", value)
|
||||
c.Errorf("expected empty hash. got %x", value)
|
||||
}
|
||||
}
|
||||
|
||||
@ -129,17 +126,15 @@ func (s *StateSuite) TestSnapshot(c *checker.C) {
|
||||
c.Assert(data1, checker.DeepEquals, res)
|
||||
}
|
||||
|
||||
func TestSnapshotEmpty(t *testing.T) {
|
||||
db, _ := ethdb.NewMemDatabase()
|
||||
state, _ := New(common.Hash{}, db)
|
||||
state.RevertToSnapshot(state.Snapshot())
|
||||
func (s *StateSuite) TestSnapshotEmpty(c *checker.C) {
|
||||
s.state.RevertToSnapshot(s.state.Snapshot())
|
||||
}
|
||||
|
||||
// use testing instead of checker because checker does not support
|
||||
// printing/logging in tests (-check.vv does not work)
|
||||
func TestSnapshot2(t *testing.T) {
|
||||
db, _ := ethdb.NewMemDatabase()
|
||||
state, _ := New(common.Hash{}, db)
|
||||
state, _ := New(common.Hash{}, NewDatabase(db))
|
||||
|
||||
stateobjaddr0 := toAddr([]byte("so0"))
|
||||
stateobjaddr1 := toAddr([]byte("so1"))
|
||||
@ -160,7 +155,7 @@ func TestSnapshot2(t *testing.T) {
|
||||
so0.deleted = false
|
||||
state.setStateObject(so0)
|
||||
|
||||
root, _ := state.Commit(false)
|
||||
root, _ := state.CommitTo(db, false)
|
||||
state.Reset(root)
|
||||
|
||||
// and one with deleted == true
|
||||
@ -182,8 +177,8 @@ func TestSnapshot2(t *testing.T) {
|
||||
|
||||
so0Restored := state.getStateObject(stateobjaddr0)
|
||||
// Update lazily-loaded values before comparing.
|
||||
so0Restored.GetState(db, storageaddr)
|
||||
so0Restored.Code(db)
|
||||
so0Restored.GetState(state.db, storageaddr)
|
||||
so0Restored.Code(state.db)
|
||||
// non-deleted is equal (restored)
|
||||
compareStateObjects(so0Restored, so0, t)
|
||||
|
||||
|
@ -26,23 +26,9 @@ import (
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/crypto"
|
||||
"github.com/ethereum/go-ethereum/ethdb"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"github.com/ethereum/go-ethereum/rlp"
|
||||
"github.com/ethereum/go-ethereum/trie"
|
||||
lru "github.com/hashicorp/golang-lru"
|
||||
)
|
||||
|
||||
// Trie cache generation limit after which to evict trie nodes from memory.
|
||||
var MaxTrieCacheGen = uint16(120)
|
||||
|
||||
const (
|
||||
// Number of past tries to keep. This value is chosen such that
|
||||
// reasonable chain reorg depths will hit an existing trie.
|
||||
maxPastTries = 12
|
||||
|
||||
// Number of codehash->size associations to keep.
|
||||
codeSizeCacheSize = 100000
|
||||
)
|
||||
|
||||
type revision struct {
|
||||
@ -56,16 +42,21 @@ type revision struct {
|
||||
// * Contracts
|
||||
// * Accounts
|
||||
type StateDB struct {
|
||||
db ethdb.Database
|
||||
trie *trie.SecureTrie
|
||||
pastTries []*trie.SecureTrie
|
||||
codeSizeCache *lru.Cache
|
||||
db Database
|
||||
trie Trie
|
||||
|
||||
// This map holds 'live' objects, which will get modified while processing a state transition.
|
||||
stateObjects map[common.Address]*stateObject
|
||||
stateObjectsDirty map[common.Address]struct{}
|
||||
stateObjectsDestructed map[common.Address]struct{}
|
||||
|
||||
// DB error.
|
||||
// State objects are used by the consensus core and VM which are
|
||||
// unable to deal with database-level errors. Any error that occurs
|
||||
// during a database read is memoized here and will eventually be returned
|
||||
// by StateDB.Commit.
|
||||
dbErr error
|
||||
|
||||
// The refund counter, also used by state transitioning.
|
||||
refund *big.Int
|
||||
|
||||
@ -86,16 +77,14 @@ type StateDB struct {
|
||||
}
|
||||
|
||||
// Create a new state from a given trie
|
||||
func New(root common.Hash, db ethdb.Database) (*StateDB, error) {
|
||||
tr, err := trie.NewSecure(root, db, MaxTrieCacheGen)
|
||||
func New(root common.Hash, db Database) (*StateDB, error) {
|
||||
tr, err := db.OpenTrie(root)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
csc, _ := lru.New(codeSizeCacheSize)
|
||||
return &StateDB{
|
||||
db: db,
|
||||
trie: tr,
|
||||
codeSizeCache: csc,
|
||||
stateObjects: make(map[common.Address]*stateObject),
|
||||
stateObjectsDirty: make(map[common.Address]struct{}),
|
||||
stateObjectsDestructed: make(map[common.Address]struct{}),
|
||||
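For reference, the constructor and commit calls used throughout the updated tests combine into the following pattern. This is only a sketch of what the diff itself shows (NewDatabase wrapping an ethdb backend, CommitTo writing the tries back to it), not new functionality; the example address is arbitrary.

package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/state"
	"github.com/ethereum/go-ethereum/ethdb"
)

func main() {
	// Back the state by an in-memory key/value store, wrapped in the new
	// state.Database used by the updated tests in this change.
	mem, _ := ethdb.NewMemDatabase()
	statedb, _ := state.New(common.Hash{}, state.NewDatabase(mem))

	addr := common.HexToAddress("0x0000000000000000000000000000000000000001")
	statedb.AddBalance(addr, big.NewInt(42))

	// CommitTo flushes the modified tries into the given database writer.
	root, err := statedb.CommitTo(mem, false)
	if err != nil {
		panic(err)
	}
	fmt.Printf("state root: %x\n", root)
}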
@ -105,36 +94,21 @@ func New(root common.Hash, db ethdb.Database) (*StateDB, error) {
|
||||
}, nil
|
||||
}
|
||||
|
||||
// New creates a new statedb by reusing any journalled tries to avoid costly
|
||||
// disk io.
|
||||
func (self *StateDB) New(root common.Hash) (*StateDB, error) {
|
||||
self.lock.Lock()
|
||||
defer self.lock.Unlock()
|
||||
|
||||
tr, err := self.openTrie(root)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
// setError remembers the first non-nil error it is called with.
|
||||
func (self *StateDB) setError(err error) {
|
||||
if self.dbErr == nil {
|
||||
self.dbErr = err
|
||||
}
|
||||
return &StateDB{
|
||||
db: self.db,
|
||||
trie: tr,
|
||||
codeSizeCache: self.codeSizeCache,
|
||||
stateObjects: make(map[common.Address]*stateObject),
|
||||
stateObjectsDirty: make(map[common.Address]struct{}),
|
||||
stateObjectsDestructed: make(map[common.Address]struct{}),
|
||||
refund: new(big.Int),
|
||||
logs: make(map[common.Hash][]*types.Log),
|
||||
preimages: make(map[common.Hash][]byte),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (self *StateDB) Error() error {
|
||||
return self.dbErr
|
||||
}
|
||||
|
||||
// Reset clears out all ephemeral state objects from the state db, but keeps
|
||||
// the underlying state trie to avoid reloading data for the next operations.
|
||||
func (self *StateDB) Reset(root common.Hash) error {
|
||||
self.lock.Lock()
|
||||
defer self.lock.Unlock()
|
||||
|
||||
tr, err := self.openTrie(root)
|
||||
tr, err := self.db.OpenTrie(root)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -149,34 +123,9 @@ func (self *StateDB) Reset(root common.Hash) error {
|
||||
self.logSize = 0
|
||||
self.preimages = make(map[common.Hash][]byte)
|
||||
self.clearJournalAndRefund()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// openTrie creates a trie. It reuses an existing trie from the journal
// if one is available.
|
||||
func (self *StateDB) openTrie(root common.Hash) (*trie.SecureTrie, error) {
|
||||
for i := len(self.pastTries) - 1; i >= 0; i-- {
|
||||
if self.pastTries[i].Hash() == root {
|
||||
tr := *self.pastTries[i]
|
||||
return &tr, nil
|
||||
}
|
||||
}
|
||||
return trie.NewSecure(root, self.db, MaxTrieCacheGen)
|
||||
}
|
||||
|
||||
func (self *StateDB) pushTrie(t *trie.SecureTrie) {
|
||||
self.lock.Lock()
|
||||
defer self.lock.Unlock()
|
||||
|
||||
if len(self.pastTries) >= maxPastTries {
|
||||
copy(self.pastTries, self.pastTries[1:])
|
||||
self.pastTries[len(self.pastTries)-1] = t
|
||||
} else {
|
||||
self.pastTries = append(self.pastTries, t)
|
||||
}
|
||||
}
|
||||
|
||||
func (self *StateDB) AddLog(log *types.Log) {
|
||||
self.journal = append(self.journal, addLogChange{txhash: self.thash})
|
||||
|
||||
@ -254,10 +203,7 @@ func (self *StateDB) GetNonce(addr common.Address) uint64 {
|
||||
func (self *StateDB) GetCode(addr common.Address) []byte {
|
||||
stateObject := self.getStateObject(addr)
|
||||
if stateObject != nil {
|
||||
code := stateObject.Code(self.db)
|
||||
key := common.BytesToHash(stateObject.CodeHash())
|
||||
self.codeSizeCache.Add(key, len(code))
|
||||
return code
|
||||
return stateObject.Code(self.db)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@ -267,13 +213,12 @@ func (self *StateDB) GetCodeSize(addr common.Address) int {
|
||||
if stateObject == nil {
|
||||
return 0
|
||||
}
|
||||
key := common.BytesToHash(stateObject.CodeHash())
|
||||
if cached, ok := self.codeSizeCache.Get(key); ok {
|
||||
return cached.(int)
|
||||
if stateObject.code != nil {
|
||||
return len(stateObject.code)
|
||||
}
|
||||
size := len(stateObject.Code(self.db))
|
||||
if stateObject.dbErr == nil {
|
||||
self.codeSizeCache.Add(key, size)
|
||||
size, err := self.db.ContractCodeSize(stateObject.addrHash, common.BytesToHash(stateObject.CodeHash()))
|
||||
if err != nil {
|
||||
self.setError(err)
|
||||
}
|
||||
return size
|
||||
}
|
||||
@ -296,7 +241,7 @@ func (self *StateDB) GetState(a common.Address, b common.Hash) common.Hash {
|
||||
|
||||
// StorageTrie returns the storage trie of an account.
|
||||
// The return value is a copy and is nil for non-existent accounts.
|
||||
func (self *StateDB) StorageTrie(a common.Address) *trie.SecureTrie {
|
||||
func (self *StateDB) StorageTrie(a common.Address) Trie {
|
||||
stateObject := self.getStateObject(a)
|
||||
if stateObject == nil {
|
||||
return nil
|
||||
@ -394,14 +339,14 @@ func (self *StateDB) updateStateObject(stateObject *stateObject) {
|
||||
if err != nil {
|
||||
panic(fmt.Errorf("can't encode object at %x: %v", addr[:], err))
|
||||
}
|
||||
self.trie.Update(addr[:], data)
|
||||
self.setError(self.trie.TryUpdate(addr[:], data))
|
||||
}
|
||||
|
||||
// deleteStateObject removes the given object from the state trie.
|
||||
func (self *StateDB) deleteStateObject(stateObject *stateObject) {
|
||||
stateObject.deleted = true
|
||||
addr := stateObject.Address()
|
||||
self.trie.Delete(addr[:])
|
||||
self.setError(self.trie.TryDelete(addr[:]))
|
||||
}
|
||||
|
||||
// Retrieve a state object given by the address. Returns nil if not found.
|
||||
@ -415,8 +360,9 @@ func (self *StateDB) getStateObject(addr common.Address) (stateObject *stateObje
|
||||
}
|
||||
|
||||
// Load the object from the database.
|
||||
enc := self.trie.Get(addr[:])
|
||||
enc, err := self.trie.TryGet(addr[:])
|
||||
if len(enc) == 0 {
|
||||
self.setError(err)
|
||||
return nil
|
||||
}
|
||||
var data Account
|
||||
@ -512,8 +458,6 @@ func (self *StateDB) Copy() *StateDB {
|
||||
state := &StateDB{
|
||||
db: self.db,
|
||||
trie: self.trie,
|
||||
pastTries: self.pastTries,
|
||||
codeSizeCache: self.codeSizeCache,
|
||||
stateObjects: make(map[common.Address]*stateObject, len(self.stateObjectsDirty)),
|
||||
stateObjectsDirty: make(map[common.Address]struct{}, len(self.stateObjectsDirty)),
|
||||
stateObjectsDestructed: make(map[common.Address]struct{}, len(self.stateObjectsDestructed)),
|
||||
@ -636,23 +580,6 @@ func (s *StateDB) DeleteSuicides() {
|
||||
}
|
||||
}
|
||||
|
||||
// Commit commits all state changes to the database.
|
||||
func (s *StateDB) Commit(deleteEmptyObjects bool) (root common.Hash, err error) {
|
||||
root, batch := s.CommitBatch(deleteEmptyObjects)
|
||||
return root, batch.Write()
|
||||
}
|
||||
|
||||
// CommitBatch commits all state changes to a write batch but does not
|
||||
// execute the batch. It is used to validate state changes against
|
||||
// the root hash stored in a block.
|
||||
func (s *StateDB) CommitBatch(deleteEmptyObjects bool) (root common.Hash, batch ethdb.Batch) {
|
||||
batch = s.db.NewBatch()
|
||||
root, _ = s.CommitTo(batch, deleteEmptyObjects)
|
||||
|
||||
log.Debug("Trie cache stats after commit", "misses", trie.CacheMisses(), "unloads", trie.CacheUnloads())
|
||||
return root, batch
|
||||
}
|
||||
|
||||
func (s *StateDB) clearJournalAndRefund() {
|
||||
s.journal = nil
|
||||
s.validRevisions = s.validRevisions[:0]
|
||||
@ -690,8 +617,6 @@ func (s *StateDB) CommitTo(dbw trie.DatabaseWriter, deleteEmptyObjects bool) (ro
|
||||
}
|
||||
// Write trie changes.
|
||||
root, err = s.trie.CommitTo(dbw)
|
||||
if err == nil {
|
||||
s.pushTrie(s.trie)
|
||||
}
|
||||
log.Debug("Trie cache stats after commit", "misses", trie.CacheMisses(), "unloads", trie.CacheUnloads())
|
||||
return root, err
|
||||
}
|
||||
|
@ -28,6 +28,8 @@ import (
|
||||
"testing"
|
||||
"testing/quick"
|
||||
|
||||
check "gopkg.in/check.v1"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/ethdb"
|
||||
@ -38,7 +40,7 @@ import (
|
||||
func TestUpdateLeaks(t *testing.T) {
|
||||
// Create an empty state database
|
||||
db, _ := ethdb.NewMemDatabase()
|
||||
state, _ := New(common.Hash{}, db)
|
||||
state, _ := New(common.Hash{}, NewDatabase(db))
|
||||
|
||||
// Update it with some accounts
|
||||
for i := byte(0); i < 255; i++ {
|
||||
@ -66,8 +68,8 @@ func TestIntermediateLeaks(t *testing.T) {
|
||||
// Create two state databases, one transitioning to the final state, the other final from the beginning
|
||||
transDb, _ := ethdb.NewMemDatabase()
|
||||
finalDb, _ := ethdb.NewMemDatabase()
|
||||
transState, _ := New(common.Hash{}, transDb)
|
||||
finalState, _ := New(common.Hash{}, finalDb)
|
||||
transState, _ := New(common.Hash{}, NewDatabase(transDb))
|
||||
finalState, _ := New(common.Hash{}, NewDatabase(finalDb))
|
||||
|
||||
modify := func(state *StateDB, addr common.Address, i, tweak byte) {
|
||||
state.SetBalance(addr, big.NewInt(int64(11*i)+int64(tweak)))
|
||||
@ -95,10 +97,10 @@ func TestIntermediateLeaks(t *testing.T) {
|
||||
}
|
||||
|
||||
// Commit and cross check the databases.
|
||||
if _, err := transState.Commit(false); err != nil {
|
||||
if _, err := transState.CommitTo(transDb, false); err != nil {
|
||||
t.Fatalf("failed to commit transition state: %v", err)
|
||||
}
|
||||
if _, err := finalState.Commit(false); err != nil {
|
||||
if _, err := finalState.CommitTo(finalDb, false); err != nil {
|
||||
t.Fatalf("failed to commit final state: %v", err)
|
||||
}
|
||||
for _, key := range finalDb.Keys() {
|
||||
@ -282,7 +284,7 @@ func (test *snapshotTest) run() bool {
|
||||
// Run all actions and create snapshots.
|
||||
var (
|
||||
db, _ = ethdb.NewMemDatabase()
|
||||
state, _ = New(common.Hash{}, db)
|
||||
state, _ = New(common.Hash{}, NewDatabase(db))
|
||||
snapshotRevs = make([]int, len(test.snapshots))
|
||||
sindex = 0
|
||||
)
|
||||
@ -297,7 +299,7 @@ func (test *snapshotTest) run() bool {
|
||||
// Revert all snapshots in reverse order. Each revert must yield a state
|
||||
// that is equivalent to fresh state with all actions up the snapshot applied.
|
||||
for sindex--; sindex >= 0; sindex-- {
|
||||
checkstate, _ := New(common.Hash{}, db)
|
||||
checkstate, _ := New(common.Hash{}, NewDatabase(db))
|
||||
for _, action := range test.actions[:test.snapshots[sindex]] {
|
||||
action.fn(action, checkstate)
|
||||
}
|
||||
@ -354,21 +356,19 @@ func (test *snapshotTest) checkEqual(state, checkstate *StateDB) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func TestTouchDelete(t *testing.T) {
|
||||
db, _ := ethdb.NewMemDatabase()
|
||||
state, _ := New(common.Hash{}, db)
|
||||
state.GetOrNewStateObject(common.Address{})
|
||||
root, _ := state.Commit(false)
|
||||
state.Reset(root)
|
||||
func (s *StateSuite) TestTouchDelete(c *check.C) {
|
||||
s.state.GetOrNewStateObject(common.Address{})
|
||||
root, _ := s.state.CommitTo(s.db, false)
|
||||
s.state.Reset(root)
|
||||
|
||||
snapshot := state.Snapshot()
|
||||
state.AddBalance(common.Address{}, new(big.Int))
|
||||
if len(state.stateObjectsDirty) != 1 {
|
||||
t.Fatal("expected one dirty state object")
|
||||
snapshot := s.state.Snapshot()
|
||||
s.state.AddBalance(common.Address{}, new(big.Int))
|
||||
if len(s.state.stateObjectsDirty) != 1 {
|
||||
c.Fatal("expected one dirty state object")
|
||||
}
|
||||
|
||||
state.RevertToSnapshot(snapshot)
|
||||
if len(state.stateObjectsDirty) != 0 {
|
||||
t.Fatal("expected no dirty state object")
|
||||
s.state.RevertToSnapshot(snapshot)
|
||||
if len(s.state.stateObjectsDirty) != 0 {
|
||||
c.Fatal("expected no dirty state object")
|
||||
}
|
||||
}
|
||||
|
@ -59,10 +59,16 @@ func (s *StateSync) Missing(max int) []common.Hash {
|
||||
}
|
||||
|
||||
// Process injects a batch of retrieved trie nodes data, returning if something
|
||||
// was committed to the database and also the index of an entry if processing of
|
||||
// was committed to the memcache and also the index of an entry if processing of
|
||||
// it failed.
|
||||
func (s *StateSync) Process(list []trie.SyncResult, dbw trie.DatabaseWriter) (bool, int, error) {
|
||||
return (*trie.TrieSync)(s).Process(list, dbw)
|
||||
func (s *StateSync) Process(list []trie.SyncResult) (bool, int, error) {
|
||||
return (*trie.TrieSync)(s).Process(list)
|
||||
}
|
||||
|
||||
// Commit flushes the data stored in the internal memcache out to persistent
|
||||
// storage, returning the number of items written and any error that occurred.
|
||||
func (s *StateSync) Commit(dbw trie.DatabaseWriter) (int, error) {
|
||||
return (*trie.TrieSync)(s).Commit(dbw)
|
||||
}
|
||||
|
||||
// Pending returns the number of state entries currently pending for download.
|
||||
|
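Taken together, the Process/Commit split means a sync driver now stages fetched nodes in memory and flushes them explicitly. Below is a minimal sketch of that loop as the updated tests exercise it; the fetchNode helper is hypothetical (in practice node data arrives from remote peers), and the NewStateSync constructor call is assumed to keep its existing shape.

package statesync

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/state"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/trie"
)

// SyncState drives a state download: Process stages fetched nodes in the
// scheduler's memcache, Commit flushes them to persistent storage.
func SyncState(root common.Hash, dst ethdb.Database, fetchNode func(common.Hash) ([]byte, error)) error {
	sched := state.NewStateSync(root, dst)
	for queue := sched.Missing(100); len(queue) > 0; queue = sched.Missing(100) {
		results := make([]trie.SyncResult, len(queue))
		for i, hash := range queue {
			data, err := fetchNode(hash)
			if err != nil {
				return err
			}
			results[i] = trie.SyncResult{Hash: hash, Data: data}
		}
		// Stage the retrieved nodes in the scheduler's internal memcache.
		if _, index, err := sched.Process(results); err != nil {
			return fmt.Errorf("failed to process result #%d: %v", index, err)
		}
		// Flush the staged data out to the destination database.
		if _, err := sched.Commit(dst); err != nil {
			return err
		}
	}
	return nil
}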
@ -36,9 +36,10 @@ type testAccount struct {
|
||||
}
|
||||
|
||||
// makeTestState create a sample test state to test node-wise reconstruction.
|
||||
func makeTestState() (ethdb.Database, common.Hash, []*testAccount) {
|
||||
func makeTestState() (Database, *ethdb.MemDatabase, common.Hash, []*testAccount) {
|
||||
// Create an empty state
|
||||
db, _ := ethdb.NewMemDatabase()
|
||||
mem, _ := ethdb.NewMemDatabase()
|
||||
db := NewDatabase(mem)
|
||||
state, _ := New(common.Hash{}, db)
|
||||
|
||||
// Fill it with some arbitrary data
|
||||
@ -60,17 +61,17 @@ func makeTestState() (ethdb.Database, common.Hash, []*testAccount) {
|
||||
state.updateStateObject(obj)
|
||||
accounts = append(accounts, acc)
|
||||
}
|
||||
root, _ := state.Commit(false)
|
||||
root, _ := state.CommitTo(mem, false)
|
||||
|
||||
// Return the generated state
|
||||
return db, root, accounts
|
||||
return db, mem, root, accounts
|
||||
}
|
||||
|
||||
// checkStateAccounts cross references a reconstructed state with an expected
|
||||
// account array.
|
||||
func checkStateAccounts(t *testing.T, db ethdb.Database, root common.Hash, accounts []*testAccount) {
|
||||
// Check root availability and state contents
|
||||
state, err := New(root, db)
|
||||
state, err := New(root, NewDatabase(db))
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create state trie at %x: %v", root, err)
|
||||
}
|
||||
@ -90,13 +91,28 @@ func checkStateAccounts(t *testing.T, db ethdb.Database, root common.Hash, accou
|
||||
}
|
||||
}
|
||||
|
||||
// checkStateConsistency checks that all nodes in a state trie are indeed present.
|
||||
// checkTrieConsistency checks that all nodes in a (sub-)trie are indeed present.
|
||||
func checkTrieConsistency(db ethdb.Database, root common.Hash) error {
|
||||
if v, _ := db.Get(root[:]); v == nil {
|
||||
return nil // Consider a non existent state consistent.
|
||||
}
|
||||
trie, err := trie.New(root, db)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
it := trie.NodeIterator(nil)
|
||||
for it.Next(true) {
|
||||
}
|
||||
return it.Error()
|
||||
}
|
||||
|
||||
// checkStateConsistency checks that all data of a state root is present.
|
||||
func checkStateConsistency(db ethdb.Database, root common.Hash) error {
|
||||
// Create and iterate a state trie rooted in a sub-node
|
||||
if _, err := db.Get(root.Bytes()); err != nil {
|
||||
return nil // Consider a non existent state consistent
|
||||
return nil // Consider a non existent state consistent.
|
||||
}
|
||||
state, err := New(root, db)
|
||||
state, err := New(root, NewDatabase(db))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -122,7 +138,7 @@ func TestIterativeStateSyncBatched(t *testing.T) { testIterativeStateSync(t,
|
||||
|
||||
func testIterativeStateSync(t *testing.T, batch int) {
|
||||
// Create a random state to copy
|
||||
srcDb, srcRoot, srcAccounts := makeTestState()
|
||||
_, srcMem, srcRoot, srcAccounts := makeTestState()
|
||||
|
||||
// Create a destination state and sync with the scheduler
|
||||
dstDb, _ := ethdb.NewMemDatabase()
|
||||
@ -132,15 +148,18 @@ func testIterativeStateSync(t *testing.T, batch int) {
|
||||
for len(queue) > 0 {
|
||||
results := make([]trie.SyncResult, len(queue))
|
||||
for i, hash := range queue {
|
||||
data, err := srcDb.Get(hash.Bytes())
|
||||
data, err := srcMem.Get(hash.Bytes())
|
||||
if err != nil {
|
||||
t.Fatalf("failed to retrieve node data for %x: %v", hash, err)
|
||||
}
|
||||
results[i] = trie.SyncResult{Hash: hash, Data: data}
|
||||
}
|
||||
if _, index, err := sched.Process(results, dstDb); err != nil {
|
||||
if _, index, err := sched.Process(results); err != nil {
|
||||
t.Fatalf("failed to process result #%d: %v", index, err)
|
||||
}
|
||||
if index, err := sched.Commit(dstDb); err != nil {
|
||||
t.Fatalf("failed to commit data #%d: %v", index, err)
|
||||
}
|
||||
queue = append(queue[:0], sched.Missing(batch)...)
|
||||
}
|
||||
// Cross check that the two states are in sync
|
||||
@ -151,7 +170,7 @@ func testIterativeStateSync(t *testing.T, batch int) {
|
||||
// partial results are returned, and the others sent only later.
|
||||
func TestIterativeDelayedStateSync(t *testing.T) {
|
||||
// Create a random state to copy
|
||||
srcDb, srcRoot, srcAccounts := makeTestState()
|
||||
_, srcMem, srcRoot, srcAccounts := makeTestState()
|
||||
|
||||
// Create a destination state and sync with the scheduler
|
||||
dstDb, _ := ethdb.NewMemDatabase()
|
||||
@ -162,15 +181,18 @@ func TestIterativeDelayedStateSync(t *testing.T) {
|
||||
// Sync only half of the scheduled nodes
|
||||
results := make([]trie.SyncResult, len(queue)/2+1)
|
||||
for i, hash := range queue[:len(results)] {
|
||||
data, err := srcDb.Get(hash.Bytes())
|
||||
data, err := srcMem.Get(hash.Bytes())
|
||||
if err != nil {
|
||||
t.Fatalf("failed to retrieve node data for %x: %v", hash, err)
|
||||
}
|
||||
results[i] = trie.SyncResult{Hash: hash, Data: data}
|
||||
}
|
||||
if _, index, err := sched.Process(results, dstDb); err != nil {
|
||||
if _, index, err := sched.Process(results); err != nil {
|
||||
t.Fatalf("failed to process result #%d: %v", index, err)
|
||||
}
|
||||
if index, err := sched.Commit(dstDb); err != nil {
|
||||
t.Fatalf("failed to commit data #%d: %v", index, err)
|
||||
}
|
||||
queue = append(queue[len(results):], sched.Missing(0)...)
|
||||
}
|
||||
// Cross check that the two states are in sync
|
||||
@ -185,7 +207,7 @@ func TestIterativeRandomStateSyncBatched(t *testing.T) { testIterativeRandomS
|
||||
|
||||
func testIterativeRandomStateSync(t *testing.T, batch int) {
|
||||
// Create a random state to copy
|
||||
srcDb, srcRoot, srcAccounts := makeTestState()
|
||||
_, srcMem, srcRoot, srcAccounts := makeTestState()
|
||||
|
||||
// Create a destination state and sync with the scheduler
|
||||
dstDb, _ := ethdb.NewMemDatabase()
|
||||
@ -199,16 +221,19 @@ func testIterativeRandomStateSync(t *testing.T, batch int) {
|
||||
// Fetch all the queued nodes in a random order
|
||||
results := make([]trie.SyncResult, 0, len(queue))
|
||||
for hash := range queue {
|
||||
data, err := srcDb.Get(hash.Bytes())
|
||||
data, err := srcMem.Get(hash.Bytes())
|
||||
if err != nil {
|
||||
t.Fatalf("failed to retrieve node data for %x: %v", hash, err)
|
||||
}
|
||||
results = append(results, trie.SyncResult{Hash: hash, Data: data})
|
||||
}
|
||||
// Feed the retrieved results back and queue new tasks
|
||||
if _, index, err := sched.Process(results, dstDb); err != nil {
|
||||
if _, index, err := sched.Process(results); err != nil {
|
||||
t.Fatalf("failed to process result #%d: %v", index, err)
|
||||
}
|
||||
if index, err := sched.Commit(dstDb); err != nil {
|
||||
t.Fatalf("failed to commit data #%d: %v", index, err)
|
||||
}
|
||||
queue = make(map[common.Hash]struct{})
|
||||
for _, hash := range sched.Missing(batch) {
|
||||
queue[hash] = struct{}{}
|
||||
@ -222,7 +247,7 @@ func testIterativeRandomStateSync(t *testing.T, batch int) {
|
||||
// partial results are returned (even those randomly), the others sent only later.
|
||||
func TestIterativeRandomDelayedStateSync(t *testing.T) {
|
||||
// Create a random state to copy
|
||||
srcDb, srcRoot, srcAccounts := makeTestState()
|
||||
_, srcMem, srcRoot, srcAccounts := makeTestState()
|
||||
|
||||
// Create a destination state and sync with the scheduler
|
||||
dstDb, _ := ethdb.NewMemDatabase()
|
||||
@ -238,7 +263,7 @@ func TestIterativeRandomDelayedStateSync(t *testing.T) {
|
||||
for hash := range queue {
|
||||
delete(queue, hash)
|
||||
|
||||
data, err := srcDb.Get(hash.Bytes())
|
||||
data, err := srcMem.Get(hash.Bytes())
|
||||
if err != nil {
|
||||
t.Fatalf("failed to retrieve node data for %x: %v", hash, err)
|
||||
}
|
||||
@ -249,9 +274,12 @@ func TestIterativeRandomDelayedStateSync(t *testing.T) {
|
||||
}
|
||||
}
|
||||
// Feed the retrieved results back and queue new tasks
|
||||
if _, index, err := sched.Process(results, dstDb); err != nil {
|
||||
if _, index, err := sched.Process(results); err != nil {
|
||||
t.Fatalf("failed to process result #%d: %v", index, err)
|
||||
}
|
||||
if index, err := sched.Commit(dstDb); err != nil {
|
||||
t.Fatalf("failed to commit data #%d: %v", index, err)
|
||||
}
|
||||
for _, hash := range sched.Missing(0) {
|
||||
queue[hash] = struct{}{}
|
||||
}
|
||||
@ -264,7 +292,9 @@ func TestIterativeRandomDelayedStateSync(t *testing.T) {
|
||||
// the database.
|
||||
func TestIncompleteStateSync(t *testing.T) {
|
||||
// Create a random state to copy
|
||||
srcDb, srcRoot, srcAccounts := makeTestState()
|
||||
_, srcMem, srcRoot, srcAccounts := makeTestState()
|
||||
|
||||
checkTrieConsistency(srcMem, srcRoot)
|
||||
|
||||
// Create a destination state and sync with the scheduler
|
||||
dstDb, _ := ethdb.NewMemDatabase()
|
||||
@ -276,34 +306,34 @@ func TestIncompleteStateSync(t *testing.T) {
|
||||
// Fetch a batch of state nodes
|
||||
results := make([]trie.SyncResult, len(queue))
|
||||
for i, hash := range queue {
|
||||
data, err := srcDb.Get(hash.Bytes())
|
||||
data, err := srcMem.Get(hash.Bytes())
|
||||
if err != nil {
|
||||
t.Fatalf("failed to retrieve node data for %x: %v", hash, err)
|
||||
}
|
||||
results[i] = trie.SyncResult{Hash: hash, Data: data}
|
||||
}
|
||||
// Process each of the state nodes
|
||||
if _, index, err := sched.Process(results, dstDb); err != nil {
|
||||
if _, index, err := sched.Process(results); err != nil {
|
||||
t.Fatalf("failed to process result #%d: %v", index, err)
|
||||
}
|
||||
if index, err := sched.Commit(dstDb); err != nil {
|
||||
t.Fatalf("failed to commit data #%d: %v", index, err)
|
||||
}
|
||||
for _, result := range results {
|
||||
added = append(added, result.Hash)
|
||||
}
|
||||
// Check that all known sub-tries in the synced state is complete
|
||||
for _, root := range added {
|
||||
// Skim through the accounts and make sure the root hash is not a code node
|
||||
codeHash := false
|
||||
// Check that all known sub-tries added so far are complete or missing entirely.
|
||||
checkSubtries:
|
||||
for _, hash := range added {
|
||||
for _, acc := range srcAccounts {
|
||||
if root == crypto.Keccak256Hash(acc.code) {
|
||||
codeHash = true
|
||||
break
|
||||
if hash == crypto.Keccak256Hash(acc.code) {
|
||||
continue checkSubtries // skip trie check of code nodes.
|
||||
}
|
||||
}
|
||||
// If the root is a real trie node, check consistency
|
||||
if !codeHash {
|
||||
if err := checkStateConsistency(dstDb, root); err != nil {
|
||||
t.Fatalf("state inconsistent: %v", err)
|
||||
}
|
||||
// Can't use checkStateConsistency here because subtrie keys may have odd
|
||||
// length and crash in LeafKey.
|
||||
if err := checkTrieConsistency(dstDb, hash); err != nil {
|
||||
t.Fatalf("state inconsistent: %v", err)
|
||||
}
|
||||
}
|
||||
// Fetch the next batch to retrieve
|
||||
|
@ -243,7 +243,7 @@ func (st *StateTransition) TransitionDb() (ret []byte, requiredGas, usedGas *big
|
||||
ret, st.gas, vmerr = evm.Call(sender, st.to().Address(), st.data, st.gas, st.value)
|
||||
}
|
||||
if vmerr != nil {
|
||||
log.Debug("VM returned with error", "err", err)
|
||||
log.Debug("VM returned with error", "err", vmerr)
|
||||
// The only possible consensus-error would be if there wasn't
|
||||
// sufficient balance to make the transfer happen. The first
|
||||
// balance transfer may never fail.
|
||||
|
@ -420,18 +420,16 @@ func (l *txPricedList) Removed() {
|
||||
heap.Init(l.items)
|
||||
}
|
||||
|
||||
// Discard finds all the transactions below the given price threshold, drops them
|
||||
// Cap finds all the transactions below the given price threshold, drops them
|
||||
// from the priced list and returns them for further removal from the entire pool.
|
||||
func (l *txPricedList) Cap(threshold *big.Int, local *txSet) types.Transactions {
|
||||
func (l *txPricedList) Cap(threshold *big.Int, local *accountSet) types.Transactions {
|
||||
drop := make(types.Transactions, 0, 128) // Remote underpriced transactions to drop
|
||||
save := make(types.Transactions, 0, 64) // Local underpriced transactions to keep
|
||||
|
||||
for len(*l.items) > 0 {
|
||||
// Discard stale transactions if found during cleanup
|
||||
tx := heap.Pop(l.items).(*types.Transaction)
|
||||
|
||||
hash := tx.Hash()
|
||||
if _, ok := (*l.all)[hash]; !ok {
|
||||
if _, ok := (*l.all)[tx.Hash()]; !ok {
|
||||
l.stales--
|
||||
continue
|
||||
}
|
||||
@ -440,7 +438,7 @@ func (l *txPricedList) Cap(threshold *big.Int, local *txSet) types.Transactions
|
||||
break
|
||||
}
|
||||
// Non stale transaction found, discard unless local
|
||||
if local.contains(hash) {
|
||||
if local.containsTx(tx) {
|
||||
save = append(save, tx)
|
||||
} else {
|
||||
drop = append(drop, tx)
|
||||
@ -454,9 +452,9 @@ func (l *txPricedList) Cap(threshold *big.Int, local *txSet) types.Transactions
|
||||
|
||||
// Underpriced checks whether a transaction is cheaper than (or as cheap as) the
|
||||
// lowest priced transaction currently being tracked.
|
||||
func (l *txPricedList) Underpriced(tx *types.Transaction, local *txSet) bool {
|
||||
func (l *txPricedList) Underpriced(tx *types.Transaction, local *accountSet) bool {
|
||||
// Local transactions cannot be underpriced
|
||||
if local.contains(tx.Hash()) {
|
||||
if local.containsTx(tx) {
|
||||
return false
|
||||
}
|
||||
// Discard stale price points if found at the heap start
|
||||
@ -479,22 +477,20 @@ func (l *txPricedList) Underpriced(tx *types.Transaction, local *txSet) bool {
|
||||
}
|
||||
|
||||
// Discard finds a number of most underpriced transactions, removes them from the
|
||||
// priced list and returs them for further removal from the entire pool.
|
||||
func (l *txPricedList) Discard(count int, local *txSet) types.Transactions {
|
||||
// priced list and returns them for further removal from the entire pool.
|
||||
func (l *txPricedList) Discard(count int, local *accountSet) types.Transactions {
|
||||
drop := make(types.Transactions, 0, count) // Remote underpriced transactions to drop
|
||||
save := make(types.Transactions, 0, 64) // Local underpriced transactions to keep
|
||||
|
||||
for len(*l.items) > 0 && count > 0 {
|
||||
// Discard stale transactions if found during cleanup
|
||||
tx := heap.Pop(l.items).(*types.Transaction)
|
||||
|
||||
hash := tx.Hash()
|
||||
if _, ok := (*l.all)[hash]; !ok {
|
||||
if _, ok := (*l.all)[tx.Hash()]; !ok {
|
||||
l.stales--
|
||||
continue
|
||||
}
|
||||
// Non stale transaction found, discard unless local
|
||||
if local.contains(hash) {
|
||||
if local.containsTx(tx) {
|
||||
save = append(save, tx)
|
||||
} else {
|
||||
drop = append(drop, tx)
|
||||
|
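Cap, Underpriced and Discard above all lean on the same idea: keep the pool's transactions in a min-heap ordered by gas price so the cheapest eviction candidate is always on top, skipping stale entries as they surface. The snippet below is a stripped-down illustration of that heap only, not the actual txPricedList (which additionally tracks local senders and the shared all map); the tx type is a stand-in for *types.Transaction.

package main

import (
	"container/heap"
	"fmt"
	"math/big"
)

// tx is a stand-in for *types.Transaction, carrying only a gas price.
type tx struct {
	hash  string
	price *big.Int
}

// priceHeap keeps the cheapest transaction on top so the pool can always pop
// the best candidate for dropping when it is over its limits.
type priceHeap []*tx

func (h priceHeap) Len() int            { return len(h) }
func (h priceHeap) Less(i, j int) bool  { return h[i].price.Cmp(h[j].price) < 0 }
func (h priceHeap) Swap(i, j int)       { h[i], h[j] = h[j], h[i] }
func (h *priceHeap) Push(x interface{}) { *h = append(*h, x.(*tx)) }
func (h *priceHeap) Pop() interface{} {
	old := *h
	n := len(old)
	item := old[n-1]
	*h = old[:n-1]
	return item
}

func main() {
	h := &priceHeap{
		{"a", big.NewInt(30)},
		{"b", big.NewInt(10)},
		{"c", big.NewInt(20)},
	}
	heap.Init(h)
	// Discarding the two most underpriced entries pops "b" then "c".
	for i := 0; i < 2; i++ {
		fmt.Println(heap.Pop(h).(*tx).hash)
	}
}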
core/tx_pool.go (406 changed lines)
@ -35,16 +35,41 @@ import (
|
||||
)
|
||||
|
||||
var (
|
||||
// Transaction Pool Errors
|
||||
ErrInvalidSender = errors.New("invalid sender")
|
||||
ErrNonce = errors.New("nonce too low")
|
||||
ErrUnderpriced = errors.New("transaction underpriced")
|
||||
// ErrInvalidSender is returned if the transaction contains an invalid signature.
|
||||
ErrInvalidSender = errors.New("invalid sender")
|
||||
|
||||
// ErrNonceTooLow is returned if the nonce of a transaction is lower than the
|
||||
// one present in the local chain.
|
||||
ErrNonceTooLow = errors.New("nonce too low")
|
||||
|
||||
// ErrUnderpriced is returned if a transaction's gas price is below the minimum
|
||||
// configured for the transaction pool.
|
||||
ErrUnderpriced = errors.New("transaction underpriced")
|
||||
|
||||
// ErrReplaceUnderpriced is returned if a transaction is attempted to be replaced
|
||||
// with a different one without the required price bump.
|
||||
ErrReplaceUnderpriced = errors.New("replacement transaction underpriced")
|
||||
ErrBalance = errors.New("insufficient balance")
|
||||
ErrInsufficientFunds = errors.New("insufficient funds for gas * price + value")
|
||||
ErrIntrinsicGas = errors.New("intrinsic gas too low")
|
||||
ErrGasLimit = errors.New("exceeds block gas limit")
|
||||
ErrNegativeValue = errors.New("negative value")
|
||||
|
||||
// ErrInsufficientFunds is returned if the total cost of executing a transaction
|
||||
// is higher than the balance of the user's account.
|
||||
ErrInsufficientFunds = errors.New("insufficient funds for gas * price + value")
|
||||
|
||||
// ErrIntrinsicGas is returned if the transaction is specified to use less gas
|
||||
// than required to start the invocation.
|
||||
ErrIntrinsicGas = errors.New("intrinsic gas too low")
|
||||
|
||||
// ErrGasLimit is returned if a transaction's requested gas limit exceeds the
|
||||
// maximum allowance of the current block.
|
||||
ErrGasLimit = errors.New("exceeds block gas limit")
|
||||
|
||||
// ErrNegativeValue is a sanity error to ensure no one is able to specify a
|
||||
// transaction with a negative value.
|
||||
ErrNegativeValue = errors.New("negative value")
|
||||
|
||||
// ErrOversizedData is returned if the input data of a transaction is greater
|
||||
// than some meaningful limit a user might use. This is not a consensus error
|
||||
// making the transaction invalid, rather a DOS protection.
|
||||
ErrOversizedData = errors.New("oversized data")
|
||||
)
|
||||
|
||||
var (
|
||||
@ -54,16 +79,16 @@ var (
|
||||
|
||||
var (
|
||||
// Metrics for the pending pool
|
||||
pendingDiscardCounter = metrics.NewCounter("txpool/pending/discard")
|
||||
pendingReplaceCounter = metrics.NewCounter("txpool/pending/replace")
|
||||
pendingRLCounter = metrics.NewCounter("txpool/pending/ratelimit") // Dropped due to rate limiting
|
||||
pendingNofundsCounter = metrics.NewCounter("txpool/pending/nofunds") // Dropped due to out-of-funds
|
||||
pendingDiscardCounter = metrics.NewCounter("txpool/pending/discard")
|
||||
pendingReplaceCounter = metrics.NewCounter("txpool/pending/replace")
|
||||
pendingRateLimitCounter = metrics.NewCounter("txpool/pending/ratelimit") // Dropped due to rate limiting
|
||||
pendingNofundsCounter = metrics.NewCounter("txpool/pending/nofunds") // Dropped due to out-of-funds
|
||||
|
||||
// Metrics for the queued pool
|
||||
queuedDiscardCounter = metrics.NewCounter("txpool/queued/discard")
|
||||
queuedReplaceCounter = metrics.NewCounter("txpool/queued/replace")
|
||||
queuedRLCounter = metrics.NewCounter("txpool/queued/ratelimit") // Dropped due to rate limiting
|
||||
queuedNofundsCounter = metrics.NewCounter("txpool/queued/nofunds") // Dropped due to out-of-funds
|
||||
queuedDiscardCounter = metrics.NewCounter("txpool/queued/discard")
|
||||
queuedReplaceCounter = metrics.NewCounter("txpool/queued/replace")
|
||||
queuedRateLimitCounter = metrics.NewCounter("txpool/queued/ratelimit") // Dropped due to rate limiting
|
||||
queuedNofundsCounter = metrics.NewCounter("txpool/queued/nofunds") // Dropped due to out-of-funds
|
||||
|
||||
// General tx metrics
|
||||
invalidTxCounter = metrics.NewCounter("txpool/invalid")
|
||||
@ -74,6 +99,8 @@ type stateFn func() (*state.StateDB, error)
|
||||
|
||||
// TxPoolConfig are the configuration parameters of the transaction pool.
|
||||
type TxPoolConfig struct {
|
||||
NoLocals bool // Whether local transaction handling should be disabled
|
||||
|
||||
PriceLimit uint64 // Minimum gas price to enforce for acceptance into the pool
|
||||
PriceBump uint64 // Minimum price bump percentage to replace an already existing transaction (nonce)
|
||||
|
||||
@ -130,7 +157,7 @@ type TxPool struct {
|
||||
gasPrice *big.Int
|
||||
eventMux *event.TypeMux
|
||||
events *event.TypeMuxSubscription
|
||||
locals *txSet
|
||||
locals *accountSet
|
||||
signer types.Signer
|
||||
mu sync.RWMutex
|
||||
|
||||
@ -166,10 +193,10 @@ func NewTxPool(config TxPoolConfig, chainconfig *params.ChainConfig, eventMux *e
|
||||
gasLimit: gasLimitFn,
|
||||
gasPrice: new(big.Int).SetUint64(config.PriceLimit),
|
||||
pendingState: nil,
|
||||
locals: newTxSet(),
|
||||
events: eventMux.Subscribe(ChainHeadEvent{}, RemovedTransactionEvent{}),
|
||||
quit: make(chan struct{}),
|
||||
}
|
||||
pool.locals = newAccountSet(pool.signer)
|
||||
pool.priced = newTxPricedList(&pool.all)
|
||||
pool.resetState()
|
||||
|
||||
@ -212,7 +239,7 @@ func (pool *TxPool) eventLoop() {
|
||||
pool.mu.Unlock()
|
||||
|
||||
case RemovedTransactionEvent:
|
||||
pool.AddBatch(ev.Txs)
|
||||
pool.addTxs(ev.Txs, false)
|
||||
}
|
||||
|
||||
// Handle stats reporting ticks
|
||||
@ -251,7 +278,7 @@ func (pool *TxPool) resetState() {
|
||||
}
|
||||
// Check the queue and move transactions over to the pending if possible
|
||||
// or remove those that have become invalid
|
||||
pool.promoteExecutables(currentState)
|
||||
pool.promoteExecutables(currentState, nil)
|
||||
}
|
||||
|
||||
// Stop terminates the transaction pool.
|
||||
@ -339,17 +366,6 @@ func (pool *TxPool) Pending() (map[common.Address]types.Transactions, error) {
|
||||
pool.mu.Lock()
|
||||
defer pool.mu.Unlock()
|
||||
|
||||
state, err := pool.currentState()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// check queue first
|
||||
pool.promoteExecutables(state)
|
||||
|
||||
// invalidate any txs
|
||||
pool.demoteUnexecutables(state)
|
||||
|
||||
pending := make(map[common.Address]types.Transactions)
|
||||
for addr, list := range pool.pending {
|
||||
pending[addr] = list.Flatten()
|
||||
@ -357,61 +373,49 @@ func (pool *TxPool) Pending() (map[common.Address]types.Transactions, error) {
|
||||
return pending, nil
|
||||
}
|
||||
|
||||
// SetLocal marks a transaction as local, skipping gas price
|
||||
// check against local miner minimum in the future
|
||||
func (pool *TxPool) SetLocal(tx *types.Transaction) {
|
||||
pool.mu.Lock()
|
||||
defer pool.mu.Unlock()
|
||||
pool.locals.add(tx.Hash())
|
||||
}
|
||||
|
||||
// validateTx checks whether a transaction is valid according
|
||||
// to the consensus rules.
|
||||
func (pool *TxPool) validateTx(tx *types.Transaction) error {
|
||||
local := pool.locals.contains(tx.Hash())
|
||||
// Drop transactions under our own minimal accepted gas price
|
||||
if !local && pool.gasPrice.Cmp(tx.GasPrice()) > 0 {
|
||||
return ErrUnderpriced
|
||||
// validateTx checks whether a transaction is valid according to the consensus
|
||||
// rules and adheres to some heuristic limits of the local node (price and size).
|
||||
func (pool *TxPool) validateTx(tx *types.Transaction, local bool) error {
|
||||
// Heuristic limit, reject transactions over 32KB to prevent DOS attacks
|
||||
if tx.Size() > 32*1024 {
|
||||
return ErrOversizedData
|
||||
}
|
||||
|
||||
currentState, err := pool.currentState()
|
||||
if err != nil {
|
||||
return err
|
||||
// Transactions can't be negative. This may never happen using RLP decoded
|
||||
// transactions but may occur if you create a transaction using the RPC.
|
||||
if tx.Value().Sign() < 0 {
|
||||
return ErrNegativeValue
|
||||
}
|
||||
|
||||
// Ensure the transaction doesn't exceed the current block limit gas.
|
||||
if pool.gasLimit().Cmp(tx.Gas()) < 0 {
|
||||
return ErrGasLimit
|
||||
}
|
||||
// Make sure the transaction is signed properly
|
||||
from, err := types.Sender(pool.signer, tx)
|
||||
if err != nil {
|
||||
return ErrInvalidSender
|
||||
}
|
||||
// Last but not least check for nonce errors
|
||||
// Drop non-local transactions under our own minimal accepted gas price
|
||||
local = local || pool.locals.contains(from) // account may be local even if the transaction arrived from the network
|
||||
if !local && pool.gasPrice.Cmp(tx.GasPrice()) > 0 {
|
||||
return ErrUnderpriced
|
||||
}
|
||||
// Ensure the transaction adheres to nonce ordering
|
||||
currentState, err := pool.currentState()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if currentState.GetNonce(from) > tx.Nonce() {
|
||||
return ErrNonce
|
||||
return ErrNonceTooLow
|
||||
}
|
||||
|
||||
// Check the transaction doesn't exceed the current
|
||||
// block limit gas.
|
||||
if pool.gasLimit().Cmp(tx.Gas()) < 0 {
|
||||
return ErrGasLimit
|
||||
}
|
||||
|
||||
// Transactions can't be negative. This may never happen
|
||||
// using RLP decoded transactions but may occur if you create
|
||||
// a transaction using the RPC for example.
|
||||
if tx.Value().Sign() < 0 {
|
||||
return ErrNegativeValue
|
||||
}
|
||||
|
||||
// Transactor should have enough funds to cover the costs
|
||||
// cost == V + GP * GL
|
||||
if currentState.GetBalance(from).Cmp(tx.Cost()) < 0 {
|
||||
return ErrInsufficientFunds
|
||||
}
|
||||
|
||||
intrGas := IntrinsicGas(tx.Data(), tx.To() == nil, pool.homestead)
|
||||
if tx.Gas().Cmp(intrGas) < 0 {
|
||||
return ErrIntrinsicGas
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -419,7 +423,11 @@ func (pool *TxPool) validateTx(tx *types.Transaction) error {
|
||||
// later pending promotion and execution. If the transaction is a replacement for
|
||||
// an already pending or queued one, it overwrites the previous and returns this
|
||||
// so outer code doesn't uselessly call promote.
|
||||
func (pool *TxPool) add(tx *types.Transaction) (bool, error) {
|
||||
//
|
||||
// If a newly added transaction is marked as local, its sending account will be
|
||||
// whitelisted, preventing any associated transaction from being dropped out of
|
||||
// the pool due to pricing constraints.
|
||||
func (pool *TxPool) add(tx *types.Transaction, local bool) (bool, error) {
|
||||
// If the transaction is already known, discard it
|
||||
hash := tx.Hash()
|
||||
if pool.all[hash] != nil {
|
||||
@ -427,7 +435,7 @@ func (pool *TxPool) add(tx *types.Transaction) (bool, error) {
|
||||
return false, fmt.Errorf("known transaction: %x", hash)
|
||||
}
|
||||
// If the transaction fails basic validation, discard it
|
||||
if err := pool.validateTx(tx); err != nil {
|
||||
if err := pool.validateTx(tx, local); err != nil {
|
||||
log.Trace("Discarding invalid transaction", "hash", hash, "err", err)
|
||||
invalidTxCounter.Inc(1)
|
||||
return false, err
|
||||
@ -469,11 +477,14 @@ func (pool *TxPool) add(tx *types.Transaction) (bool, error) {
|
||||
log.Trace("Pooled new executable transaction", "hash", hash, "from", from, "to", tx.To())
|
||||
return old != nil, nil
|
||||
}
|
||||
// New transaction isn't replacing a pending one, push into queue
|
||||
// New transaction isn't replacing a pending one, push into queue and potentially mark local
|
||||
replace, err := pool.enqueueTx(hash, tx)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if local {
|
||||
pool.locals.add(from)
|
||||
}
|
||||
log.Trace("Pooled new future transaction", "hash", hash, "from", from, "to", tx.To())
|
||||
return replace, nil
|
||||
}
|
||||
@ -541,52 +552,83 @@ func (pool *TxPool) promoteTx(addr common.Address, hash common.Hash, tx *types.T
|
||||
go pool.eventMux.Post(TxPreEvent{tx})
|
||||
}
|
||||
|
||||
// Add queues a single transaction in the pool if it is valid.
|
||||
func (pool *TxPool) Add(tx *types.Transaction) error {
|
||||
// AddLocal enqueues a single transaction into the pool if it is valid, marking
|
||||
// the sender as a local one in the mean time, ensuring it goes around the local
|
||||
// pricing constraints.
|
||||
func (pool *TxPool) AddLocal(tx *types.Transaction) error {
|
||||
return pool.addTx(tx, !pool.config.NoLocals)
|
||||
}
|
||||
|
||||
// AddRemote enqueues a single transaction into the pool if it is valid. If the
|
||||
// sender is not among the locally tracked ones, full pricing constraints will
|
||||
// apply.
|
||||
func (pool *TxPool) AddRemote(tx *types.Transaction) error {
|
||||
return pool.addTx(tx, false)
|
||||
}
|
||||
|
||||
// AddLocals enqueues a batch of transactions into the pool if they are valid,
|
||||
// marking the senders as local ones in the mean time, ensuring they go around
|
||||
// the local pricing constraints.
|
||||
func (pool *TxPool) AddLocals(txs []*types.Transaction) error {
|
||||
return pool.addTxs(txs, !pool.config.NoLocals)
|
||||
}
|
||||
|
||||
// AddRemotes enqueues a batch of transactions into the pool if they are valid.
|
||||
// If the senders are not among the locally tracked ones, full pricing constraints
|
||||
// will apply.
|
||||
func (pool *TxPool) AddRemotes(txs []*types.Transaction) error {
|
||||
return pool.addTxs(txs, false)
|
||||
}
|
||||
|
||||
// addTx enqueues a single transaction into the pool if it is valid.
|
||||
func (pool *TxPool) addTx(tx *types.Transaction, local bool) error {
|
||||
pool.mu.Lock()
|
||||
defer pool.mu.Unlock()
|
||||
|
||||
// Try to inject the transaction and update any state
|
||||
replace, err := pool.add(tx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
state, err := pool.currentState()
|
||||
replace, err := pool.add(tx, local)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// If we added a new transaction, run promotion checks and return
|
||||
if !replace {
|
||||
pool.promoteExecutables(state)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// AddBatch attempts to queue a batch of transactions.
|
||||
func (pool *TxPool) AddBatch(txs []*types.Transaction) error {
|
||||
pool.mu.Lock()
|
||||
defer pool.mu.Unlock()
|
||||
|
||||
// Add the batch of transaction, tracking the accepted ones
|
||||
replaced, added := true, 0
|
||||
for _, tx := range txs {
|
||||
if replace, err := pool.add(tx); err == nil {
|
||||
added++
|
||||
if !replace {
|
||||
replaced = false
|
||||
}
|
||||
}
|
||||
}
|
||||
// Only reprocess the internal state if something was actually added
|
||||
if added > 0 {
|
||||
state, err := pool.currentState()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !replaced {
|
||||
pool.promoteExecutables(state)
|
||||
from, _ := types.Sender(pool.signer, tx) // already validated
|
||||
pool.promoteExecutables(state, []common.Address{from})
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// addTxs attempts to queue a batch of transactions if they are valid.
|
||||
func (pool *TxPool) addTxs(txs []*types.Transaction, local bool) error {
|
||||
pool.mu.Lock()
|
||||
defer pool.mu.Unlock()
|
||||
|
||||
// Add the batch of transaction, tracking the accepted ones
|
||||
dirty := make(map[common.Address]struct{})
|
||||
for _, tx := range txs {
|
||||
if replace, err := pool.add(tx, local); err == nil {
|
||||
if !replace {
|
||||
from, _ := types.Sender(pool.signer, tx) // already validated
|
||||
dirty[from] = struct{}{}
|
||||
}
|
||||
}
|
||||
}
|
||||
// Only reprocess the internal state if something was actually added
|
||||
if len(dirty) > 0 {
|
||||
state, err := pool.currentState()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
addrs := make([]common.Address, 0, len(dirty))
|
||||
for addr, _ := range dirty {
|
||||
addrs = append(addrs, addr)
|
||||
}
|
||||
pool.promoteExecutables(state, addrs)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
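The practical upshot of the new entry points is that callers declare a transaction's origin up front instead of flagging it afterwards via SetLocal. A minimal usage sketch, assuming an already-constructed pool and already-signed transactions (the submit helper is hypothetical):

package txpoolexample

import (
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/core/types"
)

// submit illustrates the split introduced here: transactions originating from
// this node's own users go through AddLocal, which whitelists the sender so its
// transactions are exempt from the pool's pricing and eviction limits, while
// gossip from the network goes through AddRemote and gets the full constraints.
func submit(pool *core.TxPool, own, gossip *types.Transaction) error {
	if err := pool.AddLocal(own); err != nil {
		return err
	}
	return pool.AddRemote(gossip)
}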
@ -646,8 +688,9 @@ func (pool *TxPool) removeTx(hash common.Hash) {
|
||||
}
|
||||
// Update the account nonce if needed
|
||||
if nonce := tx.Nonce(); pool.pendingState.GetNonce(addr) > nonce {
|
||||
pool.pendingState.SetNonce(addr, tx.Nonce())
|
||||
pool.pendingState.SetNonce(addr, nonce)
|
||||
}
|
||||
return
|
||||
}
|
||||
}
|
||||
// Transaction is in the future queue
|
||||
@ -662,12 +705,22 @@ func (pool *TxPool) removeTx(hash common.Hash) {
|
||||
// promoteExecutables moves transactions that have become processable from the
|
||||
// future queue to the set of pending transactions. During this process, all
|
||||
// invalidated transactions (low nonce, low balance) are deleted.
|
||||
func (pool *TxPool) promoteExecutables(state *state.StateDB) {
|
||||
func (pool *TxPool) promoteExecutables(state *state.StateDB, accounts []common.Address) {
|
||||
gaslimit := pool.gasLimit()
|
||||
|
||||
// Gather all the accounts potentially needing updates
|
||||
if accounts == nil {
|
||||
accounts = make([]common.Address, 0, len(pool.queue))
|
||||
for addr, _ := range pool.queue {
|
||||
accounts = append(accounts, addr)
|
||||
}
|
||||
}
|
||||
// Iterate over all accounts and promote any executable transactions
|
||||
queued := uint64(0)
|
||||
for addr, list := range pool.queue {
|
||||
for _, addr := range accounts {
|
||||
list := pool.queue[addr]
|
||||
if list == nil {
|
||||
continue // Just in case someone calls with a non existing account
|
||||
}
|
||||
// Drop all transactions that are deemed too old (low nonce)
|
||||
for _, tx := range list.Forward(state.GetNonce(addr)) {
|
||||
hash := tx.Hash()
|
||||
@ -691,15 +744,15 @@ func (pool *TxPool) promoteExecutables(state *state.StateDB) {
|
||||
pool.promoteTx(addr, hash, tx)
|
||||
}
|
||||
// Drop all transactions over the allowed limit
|
||||
for _, tx := range list.Cap(int(pool.config.AccountQueue)) {
|
||||
hash := tx.Hash()
|
||||
log.Trace("Removed cap-exceeding queued transaction", "hash", hash)
|
||||
delete(pool.all, hash)
|
||||
pool.priced.Removed()
|
||||
queuedRLCounter.Inc(1)
|
||||
if !pool.locals.contains(addr) {
|
||||
for _, tx := range list.Cap(int(pool.config.AccountQueue)) {
|
||||
hash := tx.Hash()
|
||||
delete(pool.all, hash)
|
||||
pool.priced.Removed()
|
||||
queuedRateLimitCounter.Inc(1)
|
||||
log.Trace("Removed cap-exceeding queued transaction", "hash", hash)
|
||||
}
|
||||
}
|
||||
queued += uint64(list.Len())
|
||||
|
||||
// Delete the entire queue entry if it became empty.
|
||||
if list.Empty() {
|
||||
delete(pool.queue, addr)
|
||||
@ -716,14 +769,8 @@ func (pool *TxPool) promoteExecutables(state *state.StateDB) {
|
||||
spammers := prque.New()
|
||||
for addr, list := range pool.pending {
|
||||
// Only evict transactions from high rollers
|
||||
if uint64(list.Len()) > pool.config.AccountSlots {
|
||||
// Skip local accounts as pools should maintain backlogs for themselves
|
||||
for _, tx := range list.txs.items {
|
||||
if !pool.locals.contains(tx.Hash()) {
|
||||
spammers.Push(addr, float32(list.Len()))
|
||||
}
|
||||
break // Checking on transaction for locality is enough
|
||||
}
|
||||
if !pool.locals.contains(addr) && uint64(list.Len()) > pool.config.AccountSlots {
|
||||
spammers.Push(addr, float32(list.Len()))
|
||||
}
|
||||
}
|
||||
// Gradually drop transactions from offenders
|
||||
@ -742,7 +789,18 @@ func (pool *TxPool) promoteExecutables(state *state.StateDB) {
|
||||
for pending > pool.config.GlobalSlots && pool.pending[offenders[len(offenders)-2]].Len() > threshold {
|
||||
for i := 0; i < len(offenders)-1; i++ {
|
||||
list := pool.pending[offenders[i]]
|
||||
list.Cap(list.Len() - 1)
|
||||
for _, tx := range list.Cap(list.Len() - 1) {
|
||||
// Drop the transaction from the global pools too
|
||||
hash := tx.Hash()
|
||||
delete(pool.all, hash)
|
||||
pool.priced.Removed()
|
||||
|
||||
// Update the account nonce to the dropped transaction
|
||||
if nonce := tx.Nonce(); pool.pendingState.GetNonce(offenders[i]) > nonce {
|
||||
pool.pendingState.SetNonce(offenders[i], nonce)
|
||||
}
|
||||
log.Trace("Removed fairness-exceeding pending transaction", "hash", hash)
|
||||
}
|
||||
pending--
|
||||
}
|
||||
}
|
||||
@ -753,24 +811,41 @@ func (pool *TxPool) promoteExecutables(state *state.StateDB) {
|
||||
for pending > pool.config.GlobalSlots && uint64(pool.pending[offenders[len(offenders)-1]].Len()) > pool.config.AccountSlots {
|
||||
for _, addr := range offenders {
|
||||
list := pool.pending[addr]
|
||||
list.Cap(list.Len() - 1)
|
||||
for _, tx := range list.Cap(list.Len() - 1) {
|
||||
// Drop the transaction from the global pools too
|
||||
hash := tx.Hash()
|
||||
delete(pool.all, hash)
|
||||
pool.priced.Removed()
|
||||
|
||||
// Update the account nonce to the dropped transaction
|
||||
if nonce := tx.Nonce(); pool.pendingState.GetNonce(addr) > nonce {
|
||||
pool.pendingState.SetNonce(addr, nonce)
|
||||
}
|
||||
log.Trace("Removed fairness-exceeding pending transaction", "hash", hash)
|
||||
}
|
||||
pending--
|
||||
}
|
||||
}
|
||||
}
|
||||
pendingRLCounter.Inc(int64(pendingBeforeCap - pending))
|
||||
pendingRateLimitCounter.Inc(int64(pendingBeforeCap - pending))
|
||||
}
|
||||
// If we've queued more transactions than the hard limit, drop oldest ones
|
||||
queued := uint64(0)
|
||||
for _, list := range pool.queue {
|
||||
queued += uint64(list.Len())
|
||||
}
|
||||
if queued > pool.config.GlobalQueue {
|
||||
// Sort all accounts with queued transactions by heartbeat
|
||||
addresses := make(addresssByHeartbeat, 0, len(pool.queue))
|
||||
for addr := range pool.queue {
|
||||
addresses = append(addresses, addressByHeartbeat{addr, pool.beats[addr]})
|
||||
if !pool.locals.contains(addr) { // don't drop locals
|
||||
addresses = append(addresses, addressByHeartbeat{addr, pool.beats[addr]})
|
||||
}
|
||||
}
|
||||
sort.Sort(addresses)
|
||||
|
||||
// Drop transactions until the total is below the limit
|
||||
for drop := queued - pool.config.GlobalQueue; drop > 0; {
|
||||
// Drop transactions until the total is below the limit or only locals remain
|
||||
for drop := queued - pool.config.GlobalQueue; drop > 0 && len(addresses) > 0; {
|
||||
addr := addresses[len(addresses)-1]
|
||||
list := pool.queue[addr.address]
|
||||
|
||||
@ -782,7 +857,7 @@ func (pool *TxPool) promoteExecutables(state *state.StateDB) {
|
||||
pool.removeTx(tx.Hash())
|
||||
}
|
||||
drop -= size
|
||||
queuedRLCounter.Inc(int64(size))
|
||||
queuedRateLimitCounter.Inc(int64(size))
|
||||
continue
|
||||
}
|
||||
// Otherwise drop only last few transactions
|
||||
@ -790,7 +865,7 @@ func (pool *TxPool) promoteExecutables(state *state.StateDB) {
|
||||
for i := len(txs) - 1; i >= 0 && drop > 0; i-- {
|
||||
pool.removeTx(txs[i].Hash())
|
||||
drop--
|
||||
queuedRLCounter.Inc(1)
|
||||
queuedRateLimitCounter.Inc(1)
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -849,6 +924,11 @@ func (pool *TxPool) expirationLoop() {
|
||||
case <-evict.C:
|
||||
pool.mu.Lock()
|
||||
for addr := range pool.queue {
|
||||
// Skip local transactions from the eviction mechanism
|
||||
if pool.locals.contains(addr) {
|
||||
continue
|
||||
}
|
||||
// Any non-locals old enough should be removed
|
||||
if time.Since(pool.beats[addr]) > pool.config.Lifetime {
|
||||
for _, tx := range pool.queue[addr].Flatten() {
|
||||
pool.removeTx(tx.Hash())
|
||||
@ -875,48 +955,38 @@ func (a addresssByHeartbeat) Len() int { return len(a) }
|
||||
func (a addresssByHeartbeat) Less(i, j int) bool { return a[i].heartbeat.Before(a[j].heartbeat) }
|
||||
func (a addresssByHeartbeat) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
|
||||
|
||||
// txSet represents a set of transaction hashes in which entries
|
||||
// are automatically dropped after txSetDuration time
|
||||
type txSet struct {
|
||||
txMap map[common.Hash]struct{}
|
||||
txOrd map[uint64]txOrdType
|
||||
addPtr, delPtr uint64
|
||||
// accountSet is simply a set of addresses to check for existence, and a signer
|
||||
// capable of deriving addresses from transactions.
|
||||
type accountSet struct {
|
||||
accounts map[common.Address]struct{}
|
||||
signer types.Signer
|
||||
}
|
||||
|
||||
const txSetDuration = time.Hour * 2
|
||||
|
||||
// txOrdType represents an entry in the time-ordered list of transaction hashes
|
||||
type txOrdType struct {
|
||||
hash common.Hash
|
||||
time time.Time
|
||||
}
|
||||
|
||||
// newTxSet creates a new transaction set
|
||||
func newTxSet() *txSet {
|
||||
return &txSet{
|
||||
txMap: make(map[common.Hash]struct{}),
|
||||
txOrd: make(map[uint64]txOrdType),
|
||||
// newAccountSet creates a new address set with an associated signer for sender
|
||||
// derivations.
|
||||
func newAccountSet(signer types.Signer) *accountSet {
|
||||
return &accountSet{
|
||||
accounts: make(map[common.Address]struct{}),
|
||||
signer: signer,
|
||||
}
|
||||
}
|
||||
|
||||
// contains returns true if the set contains the given transaction hash
|
||||
// (not thread safe, should be called from a locked environment)
|
||||
func (ts *txSet) contains(hash common.Hash) bool {
|
||||
_, ok := ts.txMap[hash]
|
||||
return ok
|
||||
// contains checks if a given address is contained within the set.
|
||||
func (as *accountSet) contains(addr common.Address) bool {
|
||||
_, exist := as.accounts[addr]
|
||||
return exist
|
||||
}
|
||||
|
||||
// add adds a transaction hash to the set, then removes entries older than txSetDuration
|
||||
// (not thread safe, should be called from a locked environment)
|
||||
func (ts *txSet) add(hash common.Hash) {
|
||||
ts.txMap[hash] = struct{}{}
|
||||
now := time.Now()
|
||||
ts.txOrd[ts.addPtr] = txOrdType{hash: hash, time: now}
|
||||
ts.addPtr++
|
||||
delBefore := now.Add(-txSetDuration)
|
||||
for ts.delPtr < ts.addPtr && ts.txOrd[ts.delPtr].time.Before(delBefore) {
|
||||
delete(ts.txMap, ts.txOrd[ts.delPtr].hash)
|
||||
delete(ts.txOrd, ts.delPtr)
|
||||
ts.delPtr++
|
||||
// containsTx checks if the sender of a given tx is within the set. If the sender
|
||||
// cannot be derived, this method returns false.
|
||||
func (as *accountSet) containsTx(tx *types.Transaction) bool {
|
||||
if addr, err := types.Sender(as.signer, tx); err == nil {
|
||||
return as.contains(addr)
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// add inserts a new address into the set to track.
|
||||
func (as *accountSet) add(addr common.Address) {
|
||||
as.accounts[addr] = struct{}{}
|
||||
}
|
||||
|
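accountSet replaces the time-based txSet with a plain address set plus a signer, so membership can be checked by address or directly against a transaction's sender. A standalone sketch of the same shape, using strings in place of common.Address and a stubbed sender-derivation callback instead of types.Sender (all names below are illustrative, not the pool's API):

package main

import "fmt"

// addrSet mirrors the accountSet idea above with plain types.
type addrSet struct {
	accounts map[string]struct{} // struct{} values cost nothing per entry
}

func newAddrSet() *addrSet {
	return &addrSet{accounts: make(map[string]struct{})}
}

func (s *addrSet) add(addr string) { s.accounts[addr] = struct{}{} }

func (s *addrSet) contains(addr string) bool {
	_, ok := s.accounts[addr]
	return ok
}

// containsTx mirrors accountSet.containsTx: derive the sender, then test
// membership; an underivable sender simply reports false.
func (s *addrSet) containsTx(deriveSender func() (string, error)) bool {
	if addr, err := deriveSender(); err == nil {
		return s.contains(addr)
	}
	return false
}

func main() {
	locals := newAddrSet()
	locals.add("0xabc")
	fmt.Println(locals.contains("0xabc"))                                          // true
	fmt.Println(locals.containsTx(func() (string, error) { return "0xdef", nil })) // false
}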
File diff suppressed because it is too large
@@ -381,7 +381,7 @@ func (b *Block) Hash() common.Hash {
 	if hash := b.hash.Load(); hash != nil {
 		return hash.(common.Hash)
 	}
-	v := rlpHash(b.header)
+	v := b.header.Hash()
 	b.hash.Store(v)
 	return v
 }
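The change above swaps rlpHash(b.header) for b.header.Hash(), which produces the same header hash; the compute-once caching through atomic.Value is untouched. A generic sketch of that cache-on-first-use pattern with sync/atomic, using SHA-256 as a stand-in for the real RLP hash (cachedHasher is a made-up type, not geth's Block):

package main

import (
	"crypto/sha256"
	"fmt"
	"sync/atomic"
)

// cachedHasher caches an expensive hash after the first computation, in the
// same spirit as Block.Hash above.
type cachedHasher struct {
	payload []byte
	hash    atomic.Value // stores a [32]byte once computed
}

func (c *cachedHasher) Hash() [32]byte {
	if h := c.hash.Load(); h != nil {
		return h.([32]byte) // fast path: already computed
	}
	v := sha256.Sum256(c.payload) // stand-in for the real RLP hash
	c.hash.Store(v)
	return v
}

func main() {
	c := &cachedHasher{payload: []byte("header bytes")}
	fmt.Printf("%x\n", c.Hash())
	fmt.Printf("%x\n", c.Hash()) // second call hits the cache
}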
@@ -28,7 +28,7 @@ import (
 )
 
 var (
-	ErrInvalidChainId = errors.New("invalid chaid id for signer")
+	ErrInvalidChainId = errors.New("invalid chain id for signer")
 
 	errAbstractSigner     = errors.New("abstract signer")
 	abstractSignerAddress = common.HexToAddress("ffffffffffffffffffffffffffffffffffffffff")
@@ -22,41 +22,39 @@ import (
 	"github.com/ethereum/go-ethereum/common"
 )
 
-var bigMaxUint64 = new(big.Int).SetUint64(^uint64(0))
-
 // destinations stores one map per contract (keyed by hash of code).
 // The maps contain an entry for each location of a JUMPDEST
 // instruction.
-type destinations map[common.Hash]map[uint64]struct{}
+type destinations map[common.Hash][]byte
 
 // has checks whether code has a JUMPDEST at dest.
 func (d destinations) has(codehash common.Hash, code []byte, dest *big.Int) bool {
-	// PC cannot go beyond len(code) and certainly can't be bigger than 64bits.
+	// PC cannot go beyond len(code) and certainly can't be bigger than 63bits.
 	// Don't bother checking for JUMPDEST in that case.
-	if dest.Cmp(bigMaxUint64) > 0 {
+	udest := dest.Uint64()
+	if dest.BitLen() >= 63 || udest >= uint64(len(code)) {
 		return false
 	}
 
 	m, analysed := d[codehash]
 	if !analysed {
 		m = jumpdests(code)
 		d[codehash] = m
 	}
-	_, ok := m[dest.Uint64()]
-	return ok
+	return (m[udest/8] & (1 << (udest % 8))) != 0
 }
 
 // jumpdests creates a map that contains an entry for each
 // PC location that is a JUMPDEST instruction.
-func jumpdests(code []byte) map[uint64]struct{} {
-	m := make(map[uint64]struct{})
+func jumpdests(code []byte) []byte {
+	m := make([]byte, len(code)/8+1)
 	for pc := uint64(0); pc < uint64(len(code)); pc++ {
-		var op OpCode = OpCode(code[pc])
-		switch op {
-		case PUSH1, PUSH2, PUSH3, PUSH4, PUSH5, PUSH6, PUSH7, PUSH8, PUSH9, PUSH10, PUSH11, PUSH12, PUSH13, PUSH14, PUSH15, PUSH16, PUSH17, PUSH18, PUSH19, PUSH20, PUSH21, PUSH22, PUSH23, PUSH24, PUSH25, PUSH26, PUSH27, PUSH28, PUSH29, PUSH30, PUSH31, PUSH32:
+		op := OpCode(code[pc])
+		if op == JUMPDEST {
+			m[pc/8] |= 1 << (pc % 8)
+		} else if op >= PUSH1 && op <= PUSH32 {
 			a := uint64(op) - uint64(PUSH1) + 1
 			pc += a
-		case JUMPDEST:
-			m[pc] = struct{}{}
 		}
 	}
 	return m
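The rewrite above packs the JUMPDEST analysis into a bit vector: bit pc is set when code[pc] is a real JUMPDEST (not PUSH immediate data), and the lookup in has becomes a mask test instead of a map probe. A standalone sketch of the same arithmetic, with the relevant opcode values hard-coded rather than imported from the vm package:

package main

import "fmt"

const (
	opPUSH1    = 0x60 // standard EVM opcode values, inlined for the sketch
	opPUSH32   = 0x7f
	opJUMPDEST = 0x5b
)

// jumpdestBits marks every valid JUMPDEST position in a bit vector of
// len(code)/8+1 bytes, skipping bytes that are PUSH immediate data.
func jumpdestBits(code []byte) []byte {
	bits := make([]byte, len(code)/8+1)
	for pc := uint64(0); pc < uint64(len(code)); pc++ {
		op := code[pc]
		if op == opJUMPDEST {
			bits[pc/8] |= 1 << (pc % 8)
		} else if op >= opPUSH1 && op <= opPUSH32 {
			pc += uint64(op) - opPUSH1 + 1 // skip the push's immediate bytes
		}
	}
	return bits
}

// hasJumpdest mirrors destinations.has: a cheap mask test on the bit vector
// (dest must already be range-checked, as the hunk above does).
func hasJumpdest(bits []byte, dest uint64) bool {
	return bits[dest/8]&(1<<(dest%8)) != 0
}

func main() {
	// PUSH1 0x5b; JUMPDEST — the 0x5b at offset 1 is data, the one at 2 is real.
	code := []byte{opPUSH1, 0x5b, opJUMPDEST}
	bits := jumpdestBits(code)
	fmt.Println(hasJumpdest(bits, 1)) // false: inside PUSH data
	fmt.Println(hasJumpdest(bits, 2)) // true: actual JUMPDEST
}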
@@ -17,7 +17,6 @@
 package vm
 
 import (
-	gmath "math"
 	"math/big"
 
 	"github.com/ethereum/go-ethereum/common"
@@ -28,15 +27,20 @@ import (
 // memoryGasCosts calculates the quadratic gas for memory expansion. It does so
 // only for the memory region that is expanded, not the total memory.
 func memoryGasCost(mem *Memory, newMemSize uint64) (uint64, error) {
-	// The maximum that will fit in a uint64 is max_word_count - 1
-	// anything above that will result in an overflow.
-	if newMemSize > gmath.MaxUint64-32 {
-		return 0, errGasUintOverflow
-	}
 
 	if newMemSize == 0 {
 		return 0, nil
 	}
+	// The maximum that will fit in a uint64 is max_word_count - 1
+	// anything above that will result in an overflow.
+	// Additionally, a newMemSize which results in a
+	// newMemSizeWords larger than 0x7ffffffff will cause the square operation
+	// to overflow.
+	// The constant 0xffffffffe0 is the highest number that can be used without
+	// overflowing the gas calculation
+	if newMemSize > 0xffffffffe0 {
+		return 0, errGasUintOverflow
+	}
 
 	newMemSizeWords := toWordSize(newMemSize)
 	newMemSize = newMemSizeWords * 32
Some files were not shown because too many files have changed in this diff.