Compare commits
76 Commits
SHA1:
94ad694a26, 4c6953606e, fc0638f9d8, 4b11f207cb, 7e5c49cafa, efcfa2209b, aa18aad7ad, 594328c112,
2e6b9c141b, b38cea6654, dd083aa34e, f213a9d8e8, 6826b2040f, 2a3657d8d9, 566af6ef92, 290e851f57,
8f96d66241, 5782164a35, 27f657478f, 756b62988c, f61e203c10, 56ed6152a1, dc7f202ecd, 1d42061e2c,
b6135a72dd, 7d59c5c58d, 8aa4597c9e, 57ba1824ac, c89f4352d0, 6a00a3ade1, f821b0188a, d79f2f2656,
130bccc763, ae9ed5c420, a1c201a5ac, 844e911129, 4b9de75623, 2d7d7ef2fe, 14d5033c9d, d52a693f80,
258cc73ea9, 79b7b5eeaa, b4fbcd5060, 488528e9e4, 8110671960, 32bb280179, 4536b993ff, 586eddfd09,
d46da273c6, ecd7199c43, 1c20313a6a, cfa999f006, c74a575725, 9672a62a38, 572da73d4d, 0f722df2d9,
1b77d5090d, e62c2aeb1b, 4880868c88, c3d5250473, e0dc45fce2, 48cc36ce83, 123aa659e4, cdcbb2f160,
db62979514, 5137c04ccf, a20d3fc362, 3d6d828caf, 6a543607ef, c1a4dcfc87, 70b8b54cd2, c88c89fd9e,
b06f44ecc2, 87ae0df476, 5127ec10cb, 18580e152c
Makefile (6 changes)

@@ -13,7 +13,7 @@ GOBIN = build/bin
GO ?= latest

geth:
	build/env.sh go install -v $(shell build/flags.sh) ./cmd/geth
	build/env.sh go build -i -v $(shell build/flags.sh) -o $(GOBIN)/geth ./cmd/geth
	@echo "Done building."
	@echo "Run \"$(GOBIN)/geth\" to launch geth."

@@ -103,7 +103,9 @@ evm:
	@echo "Run \"$(GOBIN)/evm to start the evm."

all:
	build/env.sh go install -v $(shell build/flags.sh) ./...
	for cmd in `ls ./cmd/`; do \
		build/env.sh go build -i -v $(shell build/flags.sh) -o $(GOBIN)/$$cmd ./cmd/$$cmd; \
	done

test: all
	build/env.sh go test ./...

README.md (69 changes)

@@ -9,7 +9,7 @@ master | [](https://godoc.org/github.com/ethereum/go-ethereum)
)](https://godoc.org/github.com/ethereum/go-ethereum)
[](https://gitter.im/ethereum/go-ethereum?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge)

## Automated development builds
@@ -38,36 +38,47 @@ Once the dependencies are installed, run

## Executables

Go Ethereum comes with several wrappers/executables found in
[the `cmd` directory](https://github.com/ethereum/go-ethereum/tree/develop/cmd):
The go-ethereum project comes with several wrappers/executables found in the `cmd` directory.

Command | |
----------|---------|
`geth` | Ethereum CLI (ethereum command line interface client) |
`bootnode` | runs a bootstrap node for the Discovery Protocol |
`ethtest` | test tool which runs with the [tests](https://github.com/ethereum/tests) suite: `/path/to/test.json > ethtest --test BlockTests --stdin`.
`evm` | is a generic Ethereum Virtual Machine: `evm -code 60ff60ff -gas 10000 -price 0 -dump`. See `-h` for a detailed description. |
`disasm` | disassembles EVM code: `echo "6001" | disasm` |
`rlpdump` | prints RLP structures |

## Command line options

`geth` can be configured via command line options, environment variables and config files.

To get the options available:

    geth help

For further details on options, see the [wiki](https://github.com/ethereum/go-ethereum/wiki/Command-Line-Options)
| Command | Description |
|:----------:|-------------|
| **`geth`** | Our main Ethereum CLI client. It is the entry point into the Ethereum network (main-, test- or private net), capable of running as a full node (default), an archive node (retaining all historical state) or a light node (retrieving data live). It can be used by other processes as a gateway into the Ethereum network via JSON RPC endpoints exposed on top of HTTP, WebSocket and/or IPC transports. Please see our [Command Line Options](https://github.com/ethereum/go-ethereum/wiki/Command-Line-Options) wiki page for details. |
| `abigen` | Source code generator to convert Ethereum contract definitions into easy-to-use, compile-time type-safe Go packages. It operates on plain [Ethereum contract ABIs](https://github.com/ethereum/wiki/wiki/Ethereum-Contract-ABI) with expanded functionality if the contract bytecode is also available. However, it also accepts Solidity source files, making development much more streamlined. Please see our [Native DApps](https://github.com/ethereum/go-ethereum/wiki/Native-DApps:-Go-bindings-to-Ethereum-contracts) wiki page for details. |
| `bootnode` | Stripped-down version of our Ethereum client implementation that only takes part in the network node discovery protocol, but does not run any of the higher level application protocols. It can be used as a lightweight bootstrap node to aid in finding peers in private networks. |
| `disasm` | Bytecode disassembler to convert EVM (Ethereum Virtual Machine) bytecode into more user-friendly assembly-like opcodes (e.g. `echo "6001" | disasm`). For details on the individual opcodes, please see pages 22-30 of the [Ethereum Yellow Paper](http://gavwood.com/paper.pdf). |
| `evm` | Developer utility version of the EVM (Ethereum Virtual Machine) that is capable of running bytecode snippets within a configurable environment and execution mode. Its purpose is to allow isolated, fine-grained debugging of EVM opcodes (e.g. `evm --code 60ff60ff --debug`). |
| `gethrpctest` | Developer utility tool to support our [ethereum/rpc-test](https://github.com/ethereum/rpc-tests) test suite which validates baseline conformity to the [Ethereum JSON RPC](https://github.com/ethereum/wiki/wiki/JSON-RPC) specs. Please see the [test suite's readme](https://github.com/ethereum/rpc-tests/blob/master/README.md) for details. |
| `rlpdump` | Developer utility tool to convert binary RLP ([Recursive Length Prefix](https://github.com/ethereum/wiki/wiki/RLP)) dumps (data encoding used by the Ethereum protocol both network- and consensus-wise) to a more user-friendly hierarchical representation (e.g. `rlpdump --hex CE0183FFFFFFC4C304050583616263`). |

## Contribution

If you'd like to contribute to go-ethereum please fork, fix, commit and
send a pull request. Commits which do not comply with the coding standards
are ignored (use gofmt!). If you send pull requests make absolutely sure that you
commit on the `develop` branch and that you do not merge to master.
Commits that are directly based on master are simply ignored.
Thank you for considering to help out with the source code! We welcome contributions from
anyone on the internet, and are grateful for even the smallest of fixes!

See [Developers' Guide](https://github.com/ethereum/go-ethereum/wiki/Developers'-Guide)
for more details on configuring your environment, testing, and
dependency management.
If you'd like to contribute to go-ethereum, please fork, fix, commit and send a pull request
for the maintainers to review and merge into the main code base. If you wish to submit more
complex changes though, please check up with the core devs first on [our gitter channel](https://gitter.im/ethereum/go-ethereum)
to ensure those changes are in line with the general philosophy of the project and/or get some
early feedback which can make both your efforts much lighter as well as our review and merge
procedures quick and simple.

Please make sure your contributions adhere to our coding guidelines:

* Code must adhere to the official Go [formatting](https://golang.org/doc/effective_go.html#formatting) guidelines (i.e. uses [gofmt](https://golang.org/cmd/gofmt/)).
* Code must be documented adhering to the official Go [commentary](https://golang.org/doc/effective_go.html#commentary) guidelines.
* Pull requests need to be based on and opened against the `develop` branch.
* Commit messages should be prefixed with the package(s) they modify.
  * E.g. "eth, rpc: make trace configs optional"

Please see the [Developers' Guide](https://github.com/ethereum/go-ethereum/wiki/Developers'-Guide)
for more details on configuring your environment, managing project dependencies and testing procedures.

## License

The go-ethereum library (i.e. all code outside of the `cmd` directory) is licensed under the
[GNU Lesser General Public License v3.0](http://www.gnu.org/licenses/lgpl-3.0.en.html), also
included in our repository in the `COPYING.LESSER` file.

The go-ethereum binaries (i.e. all code inside of the `cmd` directory) are licensed under the
[GNU General Public License v3.0](http://www.gnu.org/licenses/gpl-3.0.en.html), also included
in our repository in the `COPYING` file.

@@ -48,42 +48,6 @@ func JSON(reader io.Reader) (ABI, error) {
	return abi, nil
}

// tests, tests whether the given input would result in a successful
// call. Checks argument list count and matches input to `input`.
func (abi ABI) pack(method Method, args ...interface{}) ([]byte, error) {
	// variable input is the output appended at the end of packed
	// output. This is used for strings and bytes types input.
	var variableInput []byte

	var ret []byte
	for i, a := range args {
		input := method.Inputs[i]
		// pack the input
		packed, err := input.Type.pack(a)
		if err != nil {
			return nil, fmt.Errorf("`%s` %v", method.Name, err)
		}

		// check for a slice type (string, bytes, slice)
		if input.Type.T == StringTy || input.Type.T == BytesTy || input.Type.IsSlice {
			// calculate the offset
			offset := len(method.Inputs)*32 + len(variableInput)
			// set the offset
			ret = append(ret, packNum(reflect.ValueOf(offset), UintTy)...)
			// Append the packed output to the variable input. The variable input
			// will be appended at the end of the input.
			variableInput = append(variableInput, packed...)
		} else {
			// append the packed value to the input
			ret = append(ret, packed...)
		}
	}
	// append the variable input at the end of the packed input
	ret = append(ret, variableInput...)

	return ret, nil
}

// Pack the given method name to conform the ABI. Method call's data
// will consist of method_id, args0, arg1, ... argN. Method id consists
// of 4 bytes and arguments are all 32 bytes.
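A minimal, standalone sketch of the head/tail layout the packing loop above produces for one dynamic argument: a 32-byte head word carrying the tail offset (`len(method.Inputs)*32 + len(variableInput)`), then the length word and the padded elements. The `pad32` helper is illustrative and not part of the package.

```go
package main

import (
	"fmt"
	"math/big"
)

// pad32 left-pads b to a 32-byte word (illustrative helper).
func pad32(b []byte) []byte {
	out := make([]byte, 32)
	copy(out[32-len(b):], b)
	return out
}

func main() {
	// Encoding of a single dynamic argument, e.g. uint256[]{1, 2}:
	// head = offset of the tail (1 input * 32 bytes), tail = length + elements.
	head := pad32(big.NewInt(32).Bytes())
	tail := append(pad32(big.NewInt(2).Bytes()), // number of elements
		append(pad32(big.NewInt(1).Bytes()), pad32(big.NewInt(2).Bytes())...)...)

	fmt.Printf("%x\n", append(head, tail...))
	// 0000…0020 0000…0002 0000…0001 0000…0002
}
```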
@@ -102,11 +66,7 @@ func (abi ABI) Pack(name string, args ...interface{}) ([]byte, error) {
		}
		method = m
	}
	// Make sure arguments match up and pack them
	if len(args) != len(method.Inputs) {
		return nil, fmt.Errorf("argument count mismatch: %d for %d", len(args), len(method.Inputs))
	}
	arguments, err := abi.pack(method, args...)
	arguments, err := method.pack(method, args...)
	if err != nil {
		return nil, err
	}
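A short usage sketch of the public entry point touched here: `abi.Pack` prepends the 4-byte method id and then packs the arguments as above. The ABI definition is illustrative, modelled on the `send(uint256)` entry used in this PR's test data.

```go
package main

import (
	"fmt"
	"math/big"
	"strings"

	"github.com/ethereum/go-ethereum/accounts/abi"
)

func main() {
	const def = `[{"type":"function","name":"send","constant":false,"inputs":[{"name":"amount","type":"uint256"}]}]`

	parsed, err := abi.JSON(strings.NewReader(def))
	if err != nil {
		panic(err)
	}
	// 4-byte id of send(uint256) followed by one 32-byte word holding 1.
	calldata, err := parsed.Pack("send", big.NewInt(1))
	if err != nil {
		panic(err)
	}
	fmt.Printf("%x\n", calldata)
}
```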
@@ -126,18 +86,21 @@ func toGoSlice(i int, t Argument, output []byte) (interface{}, error) {
	if index+32 > len(output) {
		return nil, fmt.Errorf("abi: cannot marshal in to go slice: insufficient size output %d require %d", len(output), index+32)
	}
	elem := t.Type.Elem

	// first we need to create a slice of the type
	var refSlice reflect.Value
	switch t.Type.T {
	switch elem.T {
	case IntTy, UintTy, BoolTy: // int, uint, bool can all be of type big int.
		refSlice = reflect.ValueOf([]*big.Int(nil))
	case AddressTy: // address must be of slice Address
		refSlice = reflect.ValueOf([]common.Address(nil))
	case HashTy: // hash must be of slice hash
		refSlice = reflect.ValueOf([]common.Hash(nil))
	case FixedBytesTy:
		refSlice = reflect.ValueOf([]byte(nil))
	default: // no other types are supported
		return nil, fmt.Errorf("abi: unsupported slice type %v", t.Type.T)
		return nil, fmt.Errorf("abi: unsupported slice type %v", elem.T)
	}
	// get the offset which determines the start of this array ...
	offset := int(common.BytesToBig(output[index : index+32]).Uint64())
@@ -164,7 +127,7 @@ func toGoSlice(i int, t Argument, output []byte) (interface{}, error) {
	)

	// set inter to the correct type (cast)
	switch t.Type.T {
	switch elem.T {
	case IntTy, UintTy:
		inter = common.BytesToBig(returnOutput)
	case BoolTy:
@@ -186,7 +149,7 @@ func toGoSlice(i int, t Argument, output []byte) (interface{}, error) {
// argument in T.
func toGoType(i int, t Argument, output []byte) (interface{}, error) {
	// we need to treat slices differently
	if t.Type.IsSlice {
	if (t.Type.IsSlice || t.Type.IsArray) && t.Type.T != BytesTy && t.Type.T != StringTy && t.Type.T != FixedBytesTy {
		return toGoSlice(i, t, output)
	}
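For reference, a hand-rolled sketch of the offset/length bookkeeping that `toGoSlice` performs on the return data of a dynamic array; the values and the `word` helper are purely illustrative.

```go
package main

import (
	"fmt"
	"math/big"
)

// word reads the i-th 32-byte word of the return data as a big integer.
func word(data []byte, i int) *big.Int { return new(big.Int).SetBytes(data[i*32 : (i+1)*32]) }

func main() {
	data := make([]byte, 4*32)
	data[31] = 32  // word 0: offset of the array body
	data[63] = 2   // word 1: number of elements
	data[95] = 10  // element 0
	data[127] = 20 // element 1

	offset := int(word(data, 0).Int64())
	size := int(word(data, 1).Int64())
	fmt.Println(offset, size) // 32 2
	for i := 0; i < size; i++ {
		fmt.Println(new(big.Int).SetBytes(data[offset+32+i*32 : offset+64+i*32])) // 10, 20
	}
}
```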
@@ -217,12 +180,33 @@ func toGoType(i int, t Argument, output []byte) (interface{}, error) {
		returnOutput = output[index : index+32]
	}

	// cast bytes to abi return type
	// convert the bytes to whatever is specified by the ABI.
	switch t.Type.T {
	case IntTy:
		return common.BytesToBig(returnOutput), nil
	case UintTy:
		return common.BytesToBig(returnOutput), nil
	case IntTy, UintTy:
		bigNum := common.BytesToBig(returnOutput)

		// If the type is a integer convert to the integer type
		// specified by the ABI.
		switch t.Type.Kind {
		case reflect.Uint8:
			return uint8(bigNum.Uint64()), nil
		case reflect.Uint16:
			return uint16(bigNum.Uint64()), nil
		case reflect.Uint32:
			return uint32(bigNum.Uint64()), nil
		case reflect.Uint64:
			return uint64(bigNum.Uint64()), nil
		case reflect.Int8:
			return int8(bigNum.Int64()), nil
		case reflect.Int16:
			return int16(bigNum.Int64()), nil
		case reflect.Int32:
			return int32(bigNum.Int64()), nil
		case reflect.Int64:
			return int64(bigNum.Int64()), nil
		case reflect.Ptr:
			return bigNum, nil
		}
	case BoolTy:
		return common.BytesToBig(returnOutput).Uint64() > 0, nil
	case AddressTy:
@@ -254,8 +238,16 @@ func (abi ABI) Unpack(v interface{}, name string, output []byte) error {
		return fmt.Errorf("abi: unmarshalling empty output")
	}

	value := reflect.ValueOf(v).Elem()
	typ := value.Type()
	// make sure the passed value is a pointer
	valueOf := reflect.ValueOf(v)
	if reflect.Ptr != valueOf.Kind() {
		return fmt.Errorf("abi: Unpack(non-pointer %T)", v)
	}

	var (
		value = valueOf.Elem()
		typ   = value.Type()
	)

	if len(method.Outputs) > 1 {
		switch value.Kind() {
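A hedged sketch combining the two behaviours changed above: `Unpack` now rejects non-pointer destinations up front, and sized integer outputs land in native Go integer types instead of `*big.Int`. The ABI definition and the fake return data are illustrative.

```go
package main

import (
	"fmt"
	"strings"

	"github.com/ethereum/go-ethereum/accounts/abi"
)

func main() {
	const def = `[{"type":"function","name":"num","constant":true,"outputs":[{"type":"uint32"}]}]`
	parsed, err := abi.JSON(strings.NewReader(def))
	if err != nil {
		panic(err)
	}

	ret := make([]byte, 32)
	ret[31] = 1 // simulated return data: one 32-byte word holding 1

	var out uint32
	fmt.Println(parsed.Unpack(out, "num", ret))  // abi: Unpack(non-pointer uint32)
	fmt.Println(parsed.Unpack(&out, "num", ret)) // <nil>
	fmt.Println(out)                             // 1
}
```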
@@ -284,6 +276,25 @@ func (abi ABI) Unpack(v interface{}, name string, output []byte) error {
			return fmt.Errorf("abi: cannot marshal tuple in to slice %T (only []interface{} is supported)", v)
		}

		// if the slice already contains values, set those instead of the interface slice itself.
		if value.Len() > 0 {
			if len(method.Outputs) > value.Len() {
				return fmt.Errorf("abi: cannot marshal in to slices of unequal size (require: %v, got: %v)", len(method.Outputs), value.Len())
			}

			for i := 0; i < len(method.Outputs); i++ {
				marshalledValue, err := toGoType(i, method.Outputs[i], output)
				if err != nil {
					return err
				}
				reflectValue := reflect.ValueOf(marshalledValue)
				if err := set(value.Index(i).Elem(), reflectValue, method.Outputs[i]); err != nil {
					return err
				}
			}
			return nil
		}

		// create a new slice and start appending the unmarshalled
		// values to the new interface slice.
		z := reflect.MakeSlice(typ, 0, len(method.Outputs))
@@ -312,32 +323,6 @@ func (abi ABI) Unpack(v interface{}, name string, output []byte) error {
	return nil
}
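Usage sketch of the new pre-filled interface-slice path: when the caller passes pointers inside the `[]interface{}`, `Unpack` writes through them instead of replacing the slice. This mirrors `TestUnpackSetInterfaceSlice` further down in this diff; the ABI definition and return bytes are illustrative.

```go
package main

import (
	"fmt"
	"strings"

	"github.com/ethereum/go-ethereum/accounts/abi"
)

func main() {
	const def = `[{"type":"function","name":"ints","outputs":[{"type":"uint8"},{"type":"uint8"}]}]`
	parsed, err := abi.JSON(strings.NewReader(def))
	if err != nil {
		panic(err)
	}

	// Two 32-byte return words holding 1 and 2.
	ret := make([]byte, 64)
	ret[31], ret[63] = 1, 2

	var a, b uint8
	out := []interface{}{&a, &b}
	if err := parsed.Unpack(&out, "ints", ret); err != nil {
		panic(err)
	}
	fmt.Println(a, b) // 1 2
}
```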
// set attempts to assign src to dst by either setting, copying or otherwise.
//
// set is a bit more lenient when it comes to assignment and doesn't force an as
// strict ruleset as bare `reflect` does.
func set(dst, src reflect.Value, output Argument) error {
	dstType := dst.Type()
	srcType := src.Type()

	switch {
	case dstType.AssignableTo(src.Type()):
		dst.Set(src)
	case dstType.Kind() == reflect.Array && srcType.Kind() == reflect.Slice:
		if !dstType.Elem().AssignableTo(r_byte) {
			return fmt.Errorf("abi: cannot unmarshal %v in to array of elem %v", src.Type(), dstType.Elem())
		}

		if dst.Len() < output.Type.Size {
			return fmt.Errorf("abi: cannot unmarshal src (len=%d) in to dst (len=%d)", output.Type.Size, dst.Len())
		}
		reflect.Copy(dst, src)
	default:
		return fmt.Errorf("abi: cannot unmarshal %v in to %v", src.Type(), dst.Type())
	}
	return nil
}

func (abi *ABI) UnmarshalJSON(data []byte) error {
	var fields []struct {
		Type string
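A standalone illustration of the reflect mechanics behind the array branch of `set()`: it is what lets a `bytes32` return value be copied into a fixed-size Go array such as `[32]byte`. This does not call the unexported `set` itself; it only reproduces the same `reflect.Copy` step.

```go
package main

import (
	"fmt"
	"reflect"
)

func main() {
	src := make([]byte, 32)
	src[0] = 1 // pretend this is the 32-byte word returned by the EVM

	var dst [32]byte
	dstVal := reflect.ValueOf(&dst).Elem() // addressable array value

	if dstVal.Kind() == reflect.Array && reflect.TypeOf(src).Kind() == reflect.Slice {
		reflect.Copy(dstVal, reflect.ValueOf(src)) // same copy set() performs
	}
	fmt.Println(dst[0]) // 1
}
```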
@ -29,66 +29,422 @@ import (
|
||||
"github.com/ethereum/go-ethereum/crypto"
|
||||
)
|
||||
|
||||
// formatSliceOutput adds padding to the values and prepends the element count
|
||||
func formatSliceOutput(v ...[]byte) []byte {
|
||||
off := common.LeftPadBytes(big.NewInt(int64(len(v))).Bytes(), 32)
|
||||
output := append(off, make([]byte, 0, len(v)*32)...)
|
||||
|
||||
for _, value := range v {
|
||||
output = append(output, common.LeftPadBytes(value, 32)...)
|
||||
}
|
||||
return output
|
||||
}
|
||||
|
||||
// quick helper padding
|
||||
func pad(input []byte, size int, left bool) []byte {
|
||||
if left {
|
||||
return common.LeftPadBytes(input, size)
|
||||
}
|
||||
return common.RightPadBytes(input, size)
|
||||
}
|
||||
|
||||
func TestTypeCheck(t *testing.T) {
|
||||
for i, test := range []struct {
|
||||
typ string
|
||||
input interface{}
|
||||
err string
|
||||
}{
|
||||
{"uint", big.NewInt(1), ""},
|
||||
{"int", big.NewInt(1), ""},
|
||||
{"uint30", big.NewInt(1), ""},
|
||||
{"uint30", uint8(1), "abi: cannot use uint8 as type ptr as argument"},
|
||||
{"uint16", uint16(1), ""},
|
||||
{"uint16", uint8(1), "abi: cannot use uint8 as type uint16 as argument"},
|
||||
{"uint16[]", []uint16{1, 2, 3}, ""},
|
||||
{"uint16[]", [3]uint16{1, 2, 3}, ""},
|
||||
{"uint16[]", []uint32{1, 2, 3}, "abi: cannot use []uint32 as type []uint16 as argument"},
|
||||
{"uint16[3]", [3]uint32{1, 2, 3}, "abi: cannot use [3]uint32 as type [3]uint16 as argument"},
|
||||
{"uint16[3]", [4]uint16{1, 2, 3}, "abi: cannot use [4]uint16 as type [3]uint16 as argument"},
|
||||
{"uint16[3]", []uint16{1, 2, 3}, ""},
|
||||
{"uint16[3]", []uint16{1, 2, 3, 4}, "abi: cannot use [4]uint16 as type [3]uint16 as argument"},
|
||||
{"address[]", []common.Address{common.Address{1}}, ""},
|
||||
{"address[1]", []common.Address{common.Address{1}}, ""},
|
||||
{"address[1]", [1]common.Address{common.Address{1}}, ""},
|
||||
{"address[2]", [1]common.Address{common.Address{1}}, "abi: cannot use [1]array as type [2]array as argument"},
|
||||
{"bytes32", [32]byte{}, ""},
|
||||
{"bytes32", [33]byte{}, "abi: cannot use [33]uint8 as type [32]uint8 as argument"},
|
||||
{"bytes32", common.Hash{1}, ""},
|
||||
{"bytes31", [31]byte{}, ""},
|
||||
{"bytes31", [32]byte{}, "abi: cannot use [32]uint8 as type [31]uint8 as argument"},
|
||||
{"bytes", []byte{0, 1}, ""},
|
||||
{"bytes", [2]byte{0, 1}, ""},
|
||||
{"bytes", common.Hash{1}, ""},
|
||||
{"string", "hello world", ""},
|
||||
{"bytes32[]", [][32]byte{[32]byte{}}, ""},
|
||||
} {
|
||||
typ, err := NewType(test.typ)
|
||||
if err != nil {
|
||||
t.Fatal("unexpected parse error:", err)
|
||||
}
|
||||
|
||||
err = typeCheck(typ, reflect.ValueOf(test.input))
|
||||
if err != nil && len(test.err) == 0 {
|
||||
t.Errorf("%d failed. Expected no err but got: %v", i, err)
|
||||
continue
|
||||
}
|
||||
if err == nil && len(test.err) != 0 {
|
||||
t.Errorf("%d failed. Expected err: %v but got none", i, test.err)
|
||||
continue
|
||||
}
|
||||
|
||||
if err != nil && len(test.err) != 0 && err.Error() != test.err {
|
||||
t.Errorf("%d failed. Expected err: '%v' got err: '%v'", i, test.err, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestSimpleMethodUnpack(t *testing.T) {
|
||||
for i, test := range []struct {
|
||||
def string // definition of the **output** ABI params
|
||||
marshalledOutput []byte // evm return data
|
||||
expectedOut interface{} // the expected output
|
||||
outVar string // the output variable (e.g. uint32, *big.Int, etc)
|
||||
err string // empty or error if expected
|
||||
}{
|
||||
{
|
||||
`[ { "type": "uint32" } ]`,
|
||||
pad([]byte{1}, 32, true),
|
||||
uint32(1),
|
||||
"uint32",
|
||||
"",
|
||||
},
|
||||
{
|
||||
`[ { "type": "uint32" } ]`,
|
||||
pad([]byte{1}, 32, true),
|
||||
nil,
|
||||
"uint16",
|
||||
"abi: cannot unmarshal uint32 in to uint16",
|
||||
},
|
||||
{
|
||||
`[ { "type": "uint17" } ]`,
|
||||
pad([]byte{1}, 32, true),
|
||||
nil,
|
||||
"uint16",
|
||||
"abi: cannot unmarshal *big.Int in to uint16",
|
||||
},
|
||||
{
|
||||
`[ { "type": "uint17" } ]`,
|
||||
pad([]byte{1}, 32, true),
|
||||
big.NewInt(1),
|
||||
"*big.Int",
|
||||
"",
|
||||
},
|
||||
|
||||
{
|
||||
`[ { "type": "int32" } ]`,
|
||||
pad([]byte{1}, 32, true),
|
||||
int32(1),
|
||||
"int32",
|
||||
"",
|
||||
},
|
||||
{
|
||||
`[ { "type": "int32" } ]`,
|
||||
pad([]byte{1}, 32, true),
|
||||
nil,
|
||||
"int16",
|
||||
"abi: cannot unmarshal int32 in to int16",
|
||||
},
|
||||
{
|
||||
`[ { "type": "int17" } ]`,
|
||||
pad([]byte{1}, 32, true),
|
||||
nil,
|
||||
"int16",
|
||||
"abi: cannot unmarshal *big.Int in to int16",
|
||||
},
|
||||
{
|
||||
`[ { "type": "int17" } ]`,
|
||||
pad([]byte{1}, 32, true),
|
||||
big.NewInt(1),
|
||||
"*big.Int",
|
||||
"",
|
||||
},
|
||||
|
||||
{
|
||||
`[ { "type": "address" } ]`,
|
||||
pad(pad([]byte{1}, 20, false), 32, true),
|
||||
common.Address{1},
|
||||
"address",
|
||||
"",
|
||||
},
|
||||
{
|
||||
`[ { "type": "bytes32" } ]`,
|
||||
pad([]byte{1}, 32, false),
|
||||
pad([]byte{1}, 32, false),
|
||||
"bytes",
|
||||
"",
|
||||
},
|
||||
{
|
||||
`[ { "type": "bytes32" } ]`,
|
||||
pad([]byte{1}, 32, false),
|
||||
pad([]byte{1}, 32, false),
|
||||
"hash",
|
||||
"",
|
||||
},
|
||||
{
|
||||
`[ { "type": "bytes32" } ]`,
|
||||
pad([]byte{1}, 32, false),
|
||||
pad([]byte{1}, 32, false),
|
||||
"interface",
|
||||
"",
|
||||
},
|
||||
} {
|
||||
abiDefinition := fmt.Sprintf(`[{ "name" : "method", "outputs": %s}]`, test.def)
|
||||
abi, err := JSON(strings.NewReader(abiDefinition))
|
||||
if err != nil {
|
||||
t.Errorf("%d failed. %v", i, err)
|
||||
continue
|
||||
}
|
||||
|
||||
var outvar interface{}
|
||||
switch test.outVar {
|
||||
case "uint8":
|
||||
var v uint8
|
||||
err = abi.Unpack(&v, "method", test.marshalledOutput)
|
||||
outvar = v
|
||||
case "uint16":
|
||||
var v uint16
|
||||
err = abi.Unpack(&v, "method", test.marshalledOutput)
|
||||
outvar = v
|
||||
case "uint32":
|
||||
var v uint32
|
||||
err = abi.Unpack(&v, "method", test.marshalledOutput)
|
||||
outvar = v
|
||||
case "uint64":
|
||||
var v uint64
|
||||
err = abi.Unpack(&v, "method", test.marshalledOutput)
|
||||
outvar = v
|
||||
case "int8":
|
||||
var v int8
|
||||
err = abi.Unpack(&v, "method", test.marshalledOutput)
|
||||
outvar = v
|
||||
case "int16":
|
||||
var v int16
|
||||
err = abi.Unpack(&v, "method", test.marshalledOutput)
|
||||
outvar = v
|
||||
case "int32":
|
||||
var v int32
|
||||
err = abi.Unpack(&v, "method", test.marshalledOutput)
|
||||
outvar = v
|
||||
case "int64":
|
||||
var v int64
|
||||
err = abi.Unpack(&v, "method", test.marshalledOutput)
|
||||
outvar = v
|
||||
case "*big.Int":
|
||||
var v *big.Int
|
||||
err = abi.Unpack(&v, "method", test.marshalledOutput)
|
||||
outvar = v
|
||||
case "address":
|
||||
var v common.Address
|
||||
err = abi.Unpack(&v, "method", test.marshalledOutput)
|
||||
outvar = v
|
||||
case "bytes":
|
||||
var v []byte
|
||||
err = abi.Unpack(&v, "method", test.marshalledOutput)
|
||||
outvar = v
|
||||
case "hash":
|
||||
var v common.Hash
|
||||
err = abi.Unpack(&v, "method", test.marshalledOutput)
|
||||
outvar = v
|
||||
case "interface":
|
||||
err = abi.Unpack(&outvar, "method", test.marshalledOutput)
|
||||
default:
|
||||
t.Errorf("unsupported type '%v' please add it to the switch statement in this test", test.outVar)
|
||||
continue
|
||||
}
|
||||
|
||||
if err != nil && len(test.err) == 0 {
|
||||
t.Errorf("%d failed. Expected no err but got: %v", i, err)
|
||||
continue
|
||||
}
|
||||
if err == nil && len(test.err) != 0 {
|
||||
t.Errorf("%d failed. Expected err: %v but got none", i, test.err)
|
||||
continue
|
||||
}
|
||||
if err != nil && len(test.err) != 0 && err.Error() != test.err {
|
||||
t.Errorf("%d failed. Expected err: '%v' got err: '%v'", i, test.err, err)
|
||||
continue
|
||||
}
|
||||
|
||||
if err == nil {
|
||||
// bit of an ugly hack for hash type but I don't feel like finding a proper solution
|
||||
if test.outVar == "hash" {
|
||||
tmp := outvar.(common.Hash) // without assignment it's unaddressable
|
||||
outvar = tmp[:]
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(test.expectedOut, outvar) {
|
||||
t.Errorf("%d failed. Output error: expected %v, got %v", i, test.expectedOut, outvar)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestUnpackSetInterfaceSlice(t *testing.T) {
|
||||
var (
|
||||
var1 = new(uint8)
|
||||
var2 = new(uint8)
|
||||
)
|
||||
out := []interface{}{var1, var2}
|
||||
abi, err := JSON(strings.NewReader(`[{"type":"function", "name":"ints", "outputs":[{"type":"uint8"}, {"type":"uint8"}]}]`))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
marshalledReturn := append(pad([]byte{1}, 32, true), pad([]byte{2}, 32, true)...)
|
||||
err = abi.Unpack(&out, "ints", marshalledReturn)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if *var1 != 1 {
|
||||
t.Errorf("expected var1 to be 1, got", *var1)
|
||||
}
|
||||
if *var2 != 2 {
|
||||
t.Errorf("expected var2 to be 2, got", *var2)
|
||||
}
|
||||
|
||||
out = []interface{}{var1}
|
||||
err = abi.Unpack(&out, "ints", marshalledReturn)
|
||||
|
||||
expErr := "abi: cannot marshal in to slices of unequal size (require: 2, got: 1)"
|
||||
if err == nil || err.Error() != expErr {
|
||||
t.Error("expected err:", expErr, "Got:", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestPack(t *testing.T) {
|
||||
for i, test := range []struct {
|
||||
typ string
|
||||
|
||||
input interface{}
|
||||
output []byte
|
||||
}{
|
||||
{"uint16", uint16(2), pad([]byte{2}, 32, true)},
|
||||
{"uint16[]", []uint16{1, 2}, formatSliceOutput([]byte{1}, []byte{2})},
|
||||
{"bytes20", [20]byte{1}, pad([]byte{1}, 32, false)},
|
||||
{"uint256[]", []*big.Int{big.NewInt(1), big.NewInt(2)}, formatSliceOutput([]byte{1}, []byte{2})},
|
||||
{"address[]", []common.Address{common.Address{1}, common.Address{2}}, formatSliceOutput(pad([]byte{1}, 20, false), pad([]byte{2}, 20, false))},
|
||||
{"bytes32[]", []common.Hash{common.Hash{1}, common.Hash{2}}, formatSliceOutput(pad([]byte{1}, 32, false), pad([]byte{2}, 32, false))},
|
||||
} {
|
||||
typ, err := NewType(test.typ)
|
||||
if err != nil {
|
||||
t.Fatal("unexpected parse error:", err)
|
||||
}
|
||||
|
||||
output, err := typ.pack(reflect.ValueOf(test.input))
|
||||
if err != nil {
|
||||
t.Fatal("unexpected pack error:", err)
|
||||
}
|
||||
|
||||
if !bytes.Equal(output, test.output) {
|
||||
t.Errorf("%d failed. Expected bytes: '%x' Got: '%x'", i, test.output, output)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestMethodPack(t *testing.T) {
|
||||
abi, err := JSON(strings.NewReader(jsondata2))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
sig := abi.Methods["slice"].Id()
|
||||
sig = append(sig, common.LeftPadBytes([]byte{32}, 32)...)
|
||||
sig = append(sig, common.LeftPadBytes([]byte{2}, 32)...)
|
||||
sig = append(sig, common.LeftPadBytes([]byte{1}, 32)...)
|
||||
sig = append(sig, common.LeftPadBytes([]byte{2}, 32)...)
|
||||
|
||||
packed, err := abi.Pack("slice", []uint32{1, 2})
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if !bytes.Equal(packed, sig) {
|
||||
t.Errorf("expected %x got %x", sig, packed)
|
||||
}
|
||||
|
||||
var addrA, addrB = common.Address{1}, common.Address{2}
|
||||
sig = abi.Methods["sliceAddress"].Id()
|
||||
sig = append(sig, common.LeftPadBytes([]byte{32}, 32)...)
|
||||
sig = append(sig, common.LeftPadBytes([]byte{2}, 32)...)
|
||||
sig = append(sig, common.LeftPadBytes(addrA[:], 32)...)
|
||||
sig = append(sig, common.LeftPadBytes(addrB[:], 32)...)
|
||||
|
||||
packed, err = abi.Pack("sliceAddress", []common.Address{addrA, addrB})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !bytes.Equal(packed, sig) {
|
||||
t.Errorf("expected %x got %x", sig, packed)
|
||||
}
|
||||
|
||||
var addrC, addrD = common.Address{3}, common.Address{4}
|
||||
sig = abi.Methods["sliceMultiAddress"].Id()
|
||||
sig = append(sig, common.LeftPadBytes([]byte{64}, 32)...)
|
||||
sig = append(sig, common.LeftPadBytes([]byte{160}, 32)...)
|
||||
sig = append(sig, common.LeftPadBytes([]byte{2}, 32)...)
|
||||
sig = append(sig, common.LeftPadBytes(addrA[:], 32)...)
|
||||
sig = append(sig, common.LeftPadBytes(addrB[:], 32)...)
|
||||
sig = append(sig, common.LeftPadBytes([]byte{2}, 32)...)
|
||||
sig = append(sig, common.LeftPadBytes(addrC[:], 32)...)
|
||||
sig = append(sig, common.LeftPadBytes(addrD[:], 32)...)
|
||||
|
||||
packed, err = abi.Pack("sliceMultiAddress", []common.Address{addrA, addrB}, []common.Address{addrC, addrD})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !bytes.Equal(packed, sig) {
|
||||
t.Errorf("expected %x got %x", sig, packed)
|
||||
}
|
||||
|
||||
sig = abi.Methods["slice256"].Id()
|
||||
sig = append(sig, common.LeftPadBytes([]byte{32}, 32)...)
|
||||
sig = append(sig, common.LeftPadBytes([]byte{2}, 32)...)
|
||||
sig = append(sig, common.LeftPadBytes([]byte{1}, 32)...)
|
||||
sig = append(sig, common.LeftPadBytes([]byte{2}, 32)...)
|
||||
|
||||
packed, err = abi.Pack("slice256", []*big.Int{big.NewInt(1), big.NewInt(2)})
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if !bytes.Equal(packed, sig) {
|
||||
t.Errorf("expected %x got %x", sig, packed)
|
||||
}
|
||||
}
|
||||
|
||||
const jsondata = `
|
||||
[
|
||||
{ "type" : "function", "name" : "balance", "const" : true },
|
||||
{ "type" : "function", "name" : "send", "const" : false, "inputs" : [ { "name" : "amount", "type" : "uint256" } ] }
|
||||
{ "type" : "function", "name" : "balance", "constant" : true },
|
||||
{ "type" : "function", "name" : "send", "constant" : false, "inputs" : [ { "name" : "amount", "type" : "uint256" } ] }
|
||||
]`
|
||||
|
||||
const jsondata2 = `
|
||||
[
|
||||
{ "type" : "function", "name" : "balance", "const" : true },
|
||||
{ "type" : "function", "name" : "send", "const" : false, "inputs" : [ { "name" : "amount", "type" : "uint256" } ] },
|
||||
{ "type" : "function", "name" : "test", "const" : false, "inputs" : [ { "name" : "number", "type" : "uint32" } ] },
|
||||
{ "type" : "function", "name" : "string", "const" : false, "inputs" : [ { "name" : "inputs", "type" : "string" } ] },
|
||||
{ "type" : "function", "name" : "bool", "const" : false, "inputs" : [ { "name" : "inputs", "type" : "bool" } ] },
|
||||
{ "type" : "function", "name" : "address", "const" : false, "inputs" : [ { "name" : "inputs", "type" : "address" } ] },
|
||||
{ "type" : "function", "name" : "string32", "const" : false, "inputs" : [ { "name" : "inputs", "type" : "string32" } ] },
|
||||
{ "type" : "function", "name" : "uint64[2]", "const" : false, "inputs" : [ { "name" : "inputs", "type" : "uint64[2]" } ] },
|
||||
{ "type" : "function", "name" : "uint64[]", "const" : false, "inputs" : [ { "name" : "inputs", "type" : "uint64[]" } ] },
|
||||
{ "type" : "function", "name" : "foo", "const" : false, "inputs" : [ { "name" : "inputs", "type" : "uint32" } ] },
|
||||
{ "type" : "function", "name" : "bar", "const" : false, "inputs" : [ { "name" : "inputs", "type" : "uint32" }, { "name" : "string", "type" : "uint16" } ] },
|
||||
{ "type" : "function", "name" : "slice", "const" : false, "inputs" : [ { "name" : "inputs", "type" : "uint32[2]" } ] },
|
||||
{ "type" : "function", "name" : "slice256", "const" : false, "inputs" : [ { "name" : "inputs", "type" : "uint256[2]" } ] },
|
||||
{ "type" : "function", "name" : "sliceAddress", "const" : false, "inputs" : [ { "name" : "inputs", "type" : "address[]" } ] },
|
||||
{ "type" : "function", "name" : "sliceMultiAddress", "const" : false, "inputs" : [ { "name" : "a", "type" : "address[]" }, { "name" : "b", "type" : "address[]" } ] }
|
||||
{ "type" : "function", "name" : "balance", "constant" : true },
|
||||
{ "type" : "function", "name" : "send", "constant" : false, "inputs" : [ { "name" : "amount", "type" : "uint256" } ] },
|
||||
{ "type" : "function", "name" : "test", "constant" : false, "inputs" : [ { "name" : "number", "type" : "uint32" } ] },
|
||||
{ "type" : "function", "name" : "string", "constant" : false, "inputs" : [ { "name" : "inputs", "type" : "string" } ] },
|
||||
{ "type" : "function", "name" : "bool", "constant" : false, "inputs" : [ { "name" : "inputs", "type" : "bool" } ] },
|
||||
{ "type" : "function", "name" : "address", "constant" : false, "inputs" : [ { "name" : "inputs", "type" : "address" } ] },
|
||||
{ "type" : "function", "name" : "uint64[2]", "constant" : false, "inputs" : [ { "name" : "inputs", "type" : "uint64[2]" } ] },
|
||||
{ "type" : "function", "name" : "uint64[]", "constant" : false, "inputs" : [ { "name" : "inputs", "type" : "uint64[]" } ] },
|
||||
{ "type" : "function", "name" : "foo", "constant" : false, "inputs" : [ { "name" : "inputs", "type" : "uint32" } ] },
|
||||
{ "type" : "function", "name" : "bar", "constant" : false, "inputs" : [ { "name" : "inputs", "type" : "uint32" }, { "name" : "string", "type" : "uint16" } ] },
|
||||
{ "type" : "function", "name" : "slice", "constant" : false, "inputs" : [ { "name" : "inputs", "type" : "uint32[2]" } ] },
|
||||
{ "type" : "function", "name" : "slice256", "constant" : false, "inputs" : [ { "name" : "inputs", "type" : "uint256[2]" } ] },
|
||||
{ "type" : "function", "name" : "sliceAddress", "constant" : false, "inputs" : [ { "name" : "inputs", "type" : "address[]" } ] },
|
||||
{ "type" : "function", "name" : "sliceMultiAddress", "constant" : false, "inputs" : [ { "name" : "a", "type" : "address[]" }, { "name" : "b", "type" : "address[]" } ] }
|
||||
]`
|
||||
|
||||
func TestType(t *testing.T) {
|
||||
typ, err := NewType("uint32")
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
if typ.Kind != reflect.Uint {
|
||||
t.Error("expected uint32 to have kind Ptr")
|
||||
}
|
||||
|
||||
typ, err = NewType("uint32[]")
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
if !typ.IsSlice {
|
||||
t.Error("expected uint32[] to be slice")
|
||||
}
|
||||
if typ.Type != ubig_t {
|
||||
t.Error("expcted uith32[] to have type uint64")
|
||||
}
|
||||
|
||||
typ, err = NewType("uint32[2]")
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
if !typ.IsSlice {
|
||||
t.Error("expected uint32[2] to be slice")
|
||||
}
|
||||
if typ.Type != ubig_t {
|
||||
t.Error("expcted uith32[2] to have type uint64")
|
||||
}
|
||||
if typ.SliceSize != 2 {
|
||||
t.Error("expected uint32[2] to have a size of 2")
|
||||
}
|
||||
}
|
||||
|
||||
func TestReader(t *testing.T) {
|
||||
Uint256, _ := NewType("uint256")
|
||||
exp := ABI{
|
||||
@ -164,21 +520,6 @@ func TestTestString(t *testing.T) {
|
||||
if _, err := abi.Pack("string", "hello world"); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
str10 := string(make([]byte, 10))
|
||||
if _, err := abi.Pack("string32", str10); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
str32 := string(make([]byte, 32))
|
||||
if _, err := abi.Pack("string32", str32); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
str33 := string(make([]byte, 33))
|
||||
if _, err := abi.Pack("string32", str33); err == nil {
|
||||
t.Error("expected str33 to throw out of bound error")
|
||||
}
|
||||
}
|
||||
|
||||
func TestTestBool(t *testing.T) {
|
||||
@ -210,26 +551,10 @@ func TestTestSlice(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestImplicitTypeCasts(t *testing.T) {
|
||||
abi, err := JSON(strings.NewReader(jsondata2))
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
t.FailNow()
|
||||
}
|
||||
|
||||
slice := make([]uint8, 2)
|
||||
_, err = abi.Pack("uint64[2]", slice)
|
||||
expStr := "`uint64[2]` abi: cannot use type uint8 as type uint64"
|
||||
if err.Error() != expStr {
|
||||
t.Errorf("expected %v, got %v", expStr, err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestMethodSignature(t *testing.T) {
|
||||
String, _ := NewType("string")
|
||||
String32, _ := NewType("string32")
|
||||
m := Method{"foo", false, []Argument{Argument{"bar", String32, false}, Argument{"baz", String, false}}, nil}
|
||||
exp := "foo(string32,string)"
|
||||
m := Method{"foo", false, []Argument{Argument{"bar", String, false}, Argument{"baz", String, false}}, nil}
|
||||
exp := "foo(string,string)"
|
||||
if m.Sig() != exp {
|
||||
t.Error("signature mismatch", exp, "!=", m.Sig())
|
||||
}
|
||||
@ -247,28 +572,6 @@ func TestMethodSignature(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestPack(t *testing.T) {
|
||||
abi, err := JSON(strings.NewReader(jsondata2))
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
t.FailNow()
|
||||
}
|
||||
|
||||
sig := crypto.Keccak256([]byte("foo(uint32)"))[:4]
|
||||
sig = append(sig, make([]byte, 32)...)
|
||||
sig[35] = 10
|
||||
|
||||
packed, err := abi.Pack("foo", uint32(10))
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
t.FailNow()
|
||||
}
|
||||
|
||||
if !bytes.Equal(packed, sig) {
|
||||
t.Errorf("expected %x got %x", sig, packed)
|
||||
}
|
||||
}
|
||||
|
||||
func TestMultiPack(t *testing.T) {
|
||||
abi, err := JSON(strings.NewReader(jsondata2))
|
||||
if err != nil {
|
||||
@ -292,77 +595,6 @@ func TestMultiPack(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestPackSlice(t *testing.T) {
|
||||
abi, err := JSON(strings.NewReader(jsondata2))
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
t.FailNow()
|
||||
}
|
||||
|
||||
sig := crypto.Keccak256([]byte("slice(uint32[2])"))[:4]
|
||||
sig = append(sig, common.LeftPadBytes([]byte{32}, 32)...)
|
||||
sig = append(sig, common.LeftPadBytes([]byte{2}, 32)...)
|
||||
sig = append(sig, common.LeftPadBytes([]byte{1}, 32)...)
|
||||
sig = append(sig, common.LeftPadBytes([]byte{2}, 32)...)
|
||||
|
||||
packed, err := abi.Pack("slice", []uint32{1, 2})
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if !bytes.Equal(packed, sig) {
|
||||
t.Errorf("expected %x got %x", sig, packed)
|
||||
}
|
||||
|
||||
var addrA, addrB = common.Address{1}, common.Address{2}
|
||||
sig = abi.Methods["sliceAddress"].Id()
|
||||
sig = append(sig, common.LeftPadBytes([]byte{32}, 32)...)
|
||||
sig = append(sig, common.LeftPadBytes([]byte{2}, 32)...)
|
||||
sig = append(sig, common.LeftPadBytes(addrA[:], 32)...)
|
||||
sig = append(sig, common.LeftPadBytes(addrB[:], 32)...)
|
||||
|
||||
packed, err = abi.Pack("sliceAddress", []common.Address{addrA, addrB})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !bytes.Equal(packed, sig) {
|
||||
t.Errorf("expected %x got %x", sig, packed)
|
||||
}
|
||||
|
||||
var addrC, addrD = common.Address{3}, common.Address{4}
|
||||
sig = abi.Methods["sliceMultiAddress"].Id()
|
||||
sig = append(sig, common.LeftPadBytes([]byte{64}, 32)...)
|
||||
sig = append(sig, common.LeftPadBytes([]byte{160}, 32)...)
|
||||
sig = append(sig, common.LeftPadBytes([]byte{2}, 32)...)
|
||||
sig = append(sig, common.LeftPadBytes(addrA[:], 32)...)
|
||||
sig = append(sig, common.LeftPadBytes(addrB[:], 32)...)
|
||||
sig = append(sig, common.LeftPadBytes([]byte{2}, 32)...)
|
||||
sig = append(sig, common.LeftPadBytes(addrC[:], 32)...)
|
||||
sig = append(sig, common.LeftPadBytes(addrD[:], 32)...)
|
||||
|
||||
packed, err = abi.Pack("sliceMultiAddress", []common.Address{addrA, addrB}, []common.Address{addrC, addrD})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !bytes.Equal(packed, sig) {
|
||||
t.Errorf("expected %x got %x", sig, packed)
|
||||
}
|
||||
|
||||
sig = crypto.Keccak256([]byte("slice256(uint256[2])"))[:4]
|
||||
sig = append(sig, common.LeftPadBytes([]byte{32}, 32)...)
|
||||
sig = append(sig, common.LeftPadBytes([]byte{2}, 32)...)
|
||||
sig = append(sig, common.LeftPadBytes([]byte{1}, 32)...)
|
||||
sig = append(sig, common.LeftPadBytes([]byte{2}, 32)...)
|
||||
|
||||
packed, err = abi.Pack("slice256", []*big.Int{big.NewInt(1), big.NewInt(2)})
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if !bytes.Equal(packed, sig) {
|
||||
t.Errorf("expected %x got %x", sig, packed)
|
||||
}
|
||||
}
|
||||
func ExampleJSON() {
|
||||
const definition = `[{"constant":true,"inputs":[{"name":"","type":"address"}],"name":"isBar","outputs":[{"name":"","type":"bool"}],"type":"function"}]`
|
||||
|
||||
@ -382,9 +614,9 @@ func ExampleJSON() {
|
||||
|
||||
func TestInputVariableInputLength(t *testing.T) {
|
||||
const definition = `[
|
||||
{ "type" : "function", "name" : "strOne", "const" : true, "inputs" : [ { "name" : "str", "type" : "string" } ] },
|
||||
{ "type" : "function", "name" : "bytesOne", "const" : true, "inputs" : [ { "name" : "str", "type" : "bytes" } ] },
|
||||
{ "type" : "function", "name" : "strTwo", "const" : true, "inputs" : [ { "name" : "str", "type" : "string" }, { "name" : "str1", "type" : "string" } ] }
|
||||
{ "type" : "function", "name" : "strOne", "constant" : true, "inputs" : [ { "name" : "str", "type" : "string" } ] },
|
||||
{ "type" : "function", "name" : "bytesOne", "constant" : true, "inputs" : [ { "name" : "str", "type" : "bytes" } ] },
|
||||
{ "type" : "function", "name" : "strTwo", "constant" : true, "inputs" : [ { "name" : "str", "type" : "string" }, { "name" : "str1", "type" : "string" } ] }
|
||||
]`
|
||||
|
||||
abi, err := JSON(strings.NewReader(definition))
|
||||
@ -546,7 +778,7 @@ func TestBareEvents(t *testing.T) {
|
||||
|
||||
func TestMultiReturnWithStruct(t *testing.T) {
|
||||
const definition = `[
|
||||
{ "name" : "multi", "const" : false, "outputs": [ { "name": "Int", "type": "uint256" }, { "name": "String", "type": "string" } ] }]`
|
||||
{ "name" : "multi", "constant" : false, "outputs": [ { "name": "Int", "type": "uint256" }, { "name": "String", "type": "string" } ] }]`
|
||||
|
||||
abi, err := JSON(strings.NewReader(definition))
|
||||
if err != nil {
|
||||
@ -599,7 +831,7 @@ func TestMultiReturnWithStruct(t *testing.T) {
|
||||
|
||||
func TestMultiReturnWithSlice(t *testing.T) {
|
||||
const definition = `[
|
||||
{ "name" : "multi", "const" : false, "outputs": [ { "name": "Int", "type": "uint256" }, { "name": "String", "type": "string" } ] }]`
|
||||
{ "name" : "multi", "constant" : false, "outputs": [ { "name": "Int", "type": "uint256" }, { "name": "String", "type": "string" } ] }]`
|
||||
|
||||
abi, err := JSON(strings.NewReader(definition))
|
||||
if err != nil {
|
||||
@ -635,8 +867,8 @@ func TestMultiReturnWithSlice(t *testing.T) {
|
||||
|
||||
func TestMarshalArrays(t *testing.T) {
|
||||
const definition = `[
|
||||
{ "name" : "bytes32", "const" : false, "outputs": [ { "type": "bytes32" } ] },
|
||||
{ "name" : "bytes10", "const" : false, "outputs": [ { "type": "bytes10" } ] }
|
||||
{ "name" : "bytes32", "constant" : false, "outputs": [ { "type": "bytes32" } ] },
|
||||
{ "name" : "bytes10", "constant" : false, "outputs": [ { "type": "bytes10" } ] }
|
||||
]`
|
||||
|
||||
abi, err := JSON(strings.NewReader(definition))
|
||||
@ -694,14 +926,14 @@ func TestMarshalArrays(t *testing.T) {
|
||||
|
||||
func TestUnmarshal(t *testing.T) {
|
||||
const definition = `[
|
||||
{ "name" : "int", "const" : false, "outputs": [ { "type": "uint256" } ] },
|
||||
{ "name" : "bool", "const" : false, "outputs": [ { "type": "bool" } ] },
|
||||
{ "name" : "bytes", "const" : false, "outputs": [ { "type": "bytes" } ] },
|
||||
{ "name" : "fixed", "const" : false, "outputs": [ { "type": "bytes32" } ] },
|
||||
{ "name" : "multi", "const" : false, "outputs": [ { "type": "bytes" }, { "type": "bytes" } ] },
|
||||
{ "name" : "addressSliceSingle", "const" : false, "outputs": [ { "type": "address[]" } ] },
|
||||
{ "name" : "addressSliceDouble", "const" : false, "outputs": [ { "name": "a", "type": "address[]" }, { "name": "b", "type": "address[]" } ] },
|
||||
{ "name" : "mixedBytes", "const" : true, "outputs": [ { "name": "a", "type": "bytes" }, { "name": "b", "type": "bytes32" } ] }]`
|
||||
{ "name" : "int", "constant" : false, "outputs": [ { "type": "uint256" } ] },
|
||||
{ "name" : "bool", "constant" : false, "outputs": [ { "type": "bool" } ] },
|
||||
{ "name" : "bytes", "constant" : false, "outputs": [ { "type": "bytes" } ] },
|
||||
{ "name" : "fixed", "constant" : false, "outputs": [ { "type": "bytes32" } ] },
|
||||
{ "name" : "multi", "constant" : false, "outputs": [ { "type": "bytes" }, { "type": "bytes" } ] },
|
||||
{ "name" : "addressSliceSingle", "constant" : false, "outputs": [ { "type": "address[]" } ] },
|
||||
{ "name" : "addressSliceDouble", "constant" : false, "outputs": [ { "name": "a", "type": "address[]" }, { "name": "b", "type": "address[]" } ] },
|
||||
{ "name" : "mixedBytes", "constant" : true, "outputs": [ { "name": "a", "type": "bytes" }, { "name": "b", "type": "bytes32" } ] }]`
|
||||
|
||||
abi, err := JSON(strings.NewReader(definition))
|
||||
if err != nil {
|
||||
|
@@ -17,12 +17,22 @@
package bind

import (
	"errors"
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
)

// ErrNoCode is returned by call and transact operations for which the requested
// recipient contract to operate on does not exist in the state db or does not
// have any code associated with it (i.e. suicided).
//
// Please note, this error string is part of the RPC API and is expected by the
// native contract bindings to signal this particular error. Do not change this
// as it will break all dependent code!
var ErrNoCode = errors.New("no contract code at given address")

// ContractCaller defines the methods needed to allow operating with contract on a read
// only basis.
type ContractCaller interface {
@@ -37,7 +47,8 @@ type ContractCaller interface {
// used when the user does not provide some needed values, but rather leaves it up
// to the transactor to decide.
type ContractTransactor interface {
	// Nonce retrieves the current pending nonce associated with an account.
	// PendingAccountNonce retrieves the current pending nonce associated with an
	// account.
	PendingAccountNonce(account common.Address) (uint64, error)

	// SuggestGasPrice retrieves the currently suggested gas price to allow a timely
@@ -52,7 +63,7 @@ type ContractTransactor interface {
	EstimateGasLimit(sender common.Address, contract *common.Address, value *big.Int, data []byte) (*big.Int, error)

	// SendTransaction injects the transaction into the pending pool for execution.
	SendTransaction(*types.Transaction) error
	SendTransaction(tx *types.Transaction) error
}

// ContractBackend defines the methods needed to allow operating with contract
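A minimal sketch of how callers are expected to detect the new sentinel: compare the returned error directly against `bind.ErrNoCode` (the `NonExistent` bind test later in this diff does exactly this). The helper name `isMissingContract` is illustrative; note that identity, not the message string, is what matters, which is why the RPC backend below re-maps the remote message back onto the canonical value.

```go
package main

import (
	"errors"
	"fmt"

	"github.com/ethereum/go-ethereum/accounts/abi/bind"
)

// isMissingContract reports whether a call/transact error means the target
// address simply has no code behind it (hypothetical helper).
func isMissingContract(err error) bool {
	return err == bind.ErrNoCode
}

func main() {
	fmt.Println(isMissingContract(bind.ErrNoCode)) // true
	fmt.Println(isMissingContract(errors.New("no contract code at given address"))) // false: same text, different value
}
```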
@@ -66,10 +66,16 @@ type request struct {
type response struct {
	JSONRPC string          `json:"jsonrpc"` // Version of the JSON RPC protocol, always set to 2.0
	ID      int             `json:"id"`      // Auto incrementing ID number for this request
	Error   json.RawMessage `json:"error"`   // Any error returned by the remote side
	Error   *failure        `json:"error"`   // Any error returned by the remote side
	Result  json.RawMessage `json:"result"`  // Whatever the remote side sends us in reply
}

// failure is a JSON RPC response error field sent back from the API server.
type failure struct {
	Code    int    `json:"code"`    // JSON RPC error code associated with the failure
	Message string `json:"message"` // Specific error message of the failure
}

// request forwards an API request to the RPC server, and parses the response.
//
// This is currently painfully non-concurrent, but it will have to do until we
@@ -96,8 +102,11 @@ func (b *rpcBackend) request(method string, params []interface{}) (json.RawMessa
	if err := b.client.Recv(res); err != nil {
		return nil, err
	}
	if len(res.Error) > 0 {
		return nil, fmt.Errorf("remote error: %s", string(res.Error))
	if res.Error != nil {
		if res.Error.Message == bind.ErrNoCode.Error() {
			return nil, bind.ErrNoCode
		}
		return nil, fmt.Errorf("remote error: %s", res.Error.Message)
	}
	return res.Result, nil
}

@@ -92,6 +92,10 @@ func (b *SimulatedBackend) ContractCall(contract common.Address, data []byte, pe
		block = b.blockchain.CurrentBlock()
		statedb, _ = b.blockchain.State()
	}
	// If there's no code to interact with, respond with an appropriate error
	if code := statedb.GetCode(contract); len(code) == 0 {
		return nil, bind.ErrNoCode
	}
	// Set infinite balance to the fake caller account
	from := statedb.GetOrNewStateObject(common.Address{})
	from.SetBalance(common.MaxBig)
@@ -134,7 +138,12 @@ func (b *SimulatedBackend) EstimateGasLimit(sender common.Address, contract *com
		block   = b.pendingBlock
		statedb = b.pendingState.Copy()
	)

	// If there's no code to interact with, respond with an appropriate error
	if contract != nil {
		if code := statedb.GetCode(*contract); len(code) == 0 {
			return nil, bind.ErrNoCode
		}
	}
	// Set infinite balance to the fake caller account
	from := statedb.GetOrNewStateObject(sender)
	from.SetBalance(common.MaxBig)

@@ -194,12 +194,44 @@ var bindTests = []struct {
		}
	`,
	},
	// Tests that plain values can be properly returned and deserialized
	{
		`Getter`,
		`
			contract Getter {
				function getter() constant returns (string, int, bytes32) {
					return ("Hi", 1, sha3(""));
				}
			}
		`,
		`606060405260dc8060106000396000f3606060405260e060020a6000350463993a04b78114601a575b005b600060605260c0604052600260809081527f486900000000000000000000000000000000000000000000000000000000000060a05260017fc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a47060e0829052610100819052606060c0908152600261012081905281906101409060a09080838184600060046012f1505081517fffff000000000000000000000000000000000000000000000000000000000000169091525050604051610160819003945092505050f3`,
		`[{"constant":true,"inputs":[],"name":"getter","outputs":[{"name":"","type":"string"},{"name":"","type":"int256"},{"name":"","type":"bytes32"}],"type":"function"}]`,
		`
			// Generate a new random account and a funded simulator
			key, _ := crypto.GenerateKey()
			auth := bind.NewKeyedTransactor(key)
			sim := backends.NewSimulatedBackend(core.GenesisAccount{Address: auth.From, Balance: big.NewInt(10000000000)})

			// Deploy a tuple tester contract and execute a structured call on it
			_, _, getter, err := DeployGetter(auth, sim)
			if err != nil {
				t.Fatalf("Failed to deploy getter contract: %v", err)
			}
			sim.Commit()

			if str, num, _, err := getter.Getter(nil); err != nil {
				t.Fatalf("Failed to call anonymous field retriever: %v", err)
			} else if str != "Hi" || num.Cmp(big.NewInt(1)) != 0 {
				t.Fatalf("Retrieved value mismatch: have %v/%v, want %v/%v", str, num, "Hi", 1)
			}
		`,
	},
	// Tests that tuples can be properly returned and deserialized
	{
		`Tupler`,
		`
			contract Tupler {
				function tuple() returns (string a, int b, bytes32 c) {
				function tuple() constant returns (string a, int b, bytes32 c) {
					return ("Hi", 1, sha3(""));
				}
			}
@@ -219,8 +251,10 @@ var bindTests = []struct {
			}
			sim.Commit()

			if _, err := tupler.Tuple(nil); err != nil {
			if res, err := tupler.Tuple(nil); err != nil {
				t.Fatalf("Failed to call structure retriever: %v", err)
			} else if res.A != "Hi" || res.B.Cmp(big.NewInt(1)) != 0 {
				t.Fatalf("Retrieved value mismatch: have %v/%v, want %v/%v", res.A, res.B, "Hi", 1)
			}
		`,
	},
@@ -303,6 +337,34 @@ var bindTests = []struct {
		}
	`,
	},
	// Tests that non-existent contracts are reported as such (though only simulator test)
	{
		`NonExistent`,
		`
			contract NonExistent {
				function String() constant returns(string) {
					return "I don't exist";
				}
			}
		`,
		`6060604052609f8060106000396000f3606060405260e060020a6000350463f97a60058114601a575b005b600060605260c0604052600d60809081527f4920646f6e27742065786973740000000000000000000000000000000000000060a052602060c0908152600d60e081905281906101009060a09080838184600060046012f15050815172ffffffffffffffffffffffffffffffffffffff1916909152505060405161012081900392509050f3`,
		`[{"constant":true,"inputs":[],"name":"String","outputs":[{"name":"","type":"string"}],"type":"function"}]`,
		`
			// Create a simulator and wrap a non-deployed contract
			sim := backends.NewSimulatedBackend()

			nonexistent, err := NewNonExistent(common.Address{}, sim)
			if err != nil {
				t.Fatalf("Failed to access non-existent contract: %v", err)
			}
			// Ensure that contract calls fail with the appropriate error
			if res, err := nonexistent.String(nil); err == nil {
				t.Fatalf("Call succeeded on non-existent contract: %v", res)
			} else if (err != bind.ErrNoCode) {
				t.Fatalf("Error mismatch: have %v, want %v", err, bind.ErrNoCode)
			}
		`,
	},
}

// Tests that packages generated by the binder can be successfully compiled and

@@ -211,7 +211,7 @@ package {{.Package}}
		{{range $i, $_ := .Normalized.Outputs}}ret{{$i}} = new({{bindtype .Type}})
		{{end}}
	){{end}}
	out := {{if .Structured}}ret{{else}}{{if eq (len .Normalized.Outputs) 1}}ret0{{else}}[]interface{}{
	out := {{if .Structured}}ret{{else}}{{if eq (len .Normalized.Outputs) 1}}ret0{{else}}&[]interface{}{
		{{range $i, $_ := .Normalized.Outputs}}ret{{$i}},
		{{end}}
	}{{end}}{{end}}
accounts/abi/error.go (new file, +79 lines)
@ -0,0 +1,79 @@
|
||||
// Copyright 2016 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package abi
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
)
|
||||
|
||||
// formatSliceString formats the reflection kind with the given slice size
|
||||
// and returns a formatted string representation.
|
||||
func formatSliceString(kind reflect.Kind, sliceSize int) string {
|
||||
if sliceSize == -1 {
|
||||
return fmt.Sprintf("[]%v", kind)
|
||||
}
|
||||
return fmt.Sprintf("[%d]%v", sliceSize, kind)
|
||||
}
|
||||
|
||||
// sliceTypeCheck checks that the given slice can by assigned to the reflection
|
||||
// type in t.
|
||||
func sliceTypeCheck(t Type, val reflect.Value) error {
|
||||
if val.Kind() != reflect.Slice && val.Kind() != reflect.Array {
|
||||
return typeErr(formatSliceString(t.Kind, t.SliceSize), val.Type())
|
||||
}
|
||||
if t.IsArray && val.Len() != t.SliceSize {
|
||||
return typeErr(formatSliceString(t.Elem.Kind, t.SliceSize), formatSliceString(val.Type().Elem().Kind(), val.Len()))
|
||||
}
|
||||
|
||||
if t.Elem.IsSlice {
|
||||
if val.Len() > 0 {
|
||||
return sliceTypeCheck(*t.Elem, val.Index(0))
|
||||
}
|
||||
} else if t.Elem.IsArray {
|
||||
return sliceTypeCheck(*t.Elem, val.Index(0))
|
||||
}
|
||||
|
||||
if elemKind := val.Type().Elem().Kind(); elemKind != t.Elem.Kind {
|
||||
return typeErr(formatSliceString(t.Elem.Kind, t.SliceSize), val.Type())
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// typeCheck checks that the given reflection value can be assigned to the reflection
|
||||
// type in t.
|
||||
func typeCheck(t Type, value reflect.Value) error {
|
||||
if t.IsSlice || t.IsArray {
|
||||
return sliceTypeCheck(t, value)
|
||||
}
|
||||
|
||||
// Check base type validity. Element types will be checked later on.
|
||||
if t.Kind != value.Kind() {
|
||||
return typeErr(t.Kind, value.Kind())
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// varErr returns a formatted error.
|
||||
func varErr(expected, got reflect.Kind) error {
|
||||
return typeErr(expected, got)
|
||||
}
|
||||
|
||||
// typeErr returns a formatted type casting error.
|
||||
func typeErr(expected, got interface{}) error {
|
||||
return fmt.Errorf("abi: cannot use %v as type %v as argument", got, expected)
|
||||
}
|
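A minimal in-package sketch, not part of the change, showing how the new typeCheck helper is expected to behave for a matching and a mismatching value (it assumes NewType and typeCheck work as introduced above):

```go
package abi

import (
	"reflect"
	"testing"
)

// TestTypeCheckSketch is illustrative only: typeCheck should accept a value
// whose reflect.Kind matches the parsed ABI type and reject one that does not.
func TestTypeCheckSketch(t *testing.T) {
	typ, err := NewType("uint32")
	if err != nil {
		t.Fatalf("failed to parse type: %v", err)
	}
	if err := typeCheck(typ, reflect.ValueOf(uint32(1))); err != nil {
		t.Errorf("uint32 value rejected: %v", err)
	}
	if err := typeCheck(typ, reflect.ValueOf("not a number")); err == nil {
		t.Errorf("string value accepted for uint32, want type error")
	}
}
```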
@ -18,6 +18,7 @@ package abi
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strings"
|
||||
|
||||
"github.com/ethereum/go-ethereum/crypto"
|
||||
@ -38,6 +39,44 @@ type Method struct {
|
||||
Outputs []Argument
|
||||
}
|
||||
|
||||
func (m Method) pack(method Method, args ...interface{}) ([]byte, error) {
|
||||
// Make sure arguments match up and pack them
|
||||
if len(args) != len(method.Inputs) {
|
||||
return nil, fmt.Errorf("argument count mismatch: %d for %d", len(args), len(method.Inputs))
|
||||
}
|
||||
// variable input is the output appended at the end of packed
|
||||
// output. This is used for strings and bytes types input.
|
||||
var variableInput []byte
|
||||
|
||||
var ret []byte
|
||||
for i, a := range args {
|
||||
input := method.Inputs[i]
|
||||
// pack the input
|
||||
packed, err := input.Type.pack(reflect.ValueOf(a))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("`%s` %v", method.Name, err)
|
||||
}
|
||||
|
||||
// check for a slice type (string, bytes, slice)
|
||||
if input.Type.requiresLengthPrefix() {
|
||||
// calculate the offset
|
||||
offset := len(method.Inputs)*32 + len(variableInput)
|
||||
// set the offset
|
||||
ret = append(ret, packNum(reflect.ValueOf(offset), UintTy)...)
|
||||
// Append the packed output to the variable input. The variable input
|
||||
// will be appended at the end of the input.
|
||||
variableInput = append(variableInput, packed...)
|
||||
} else {
|
||||
// append the packed value to the input
|
||||
ret = append(ret, packed...)
|
||||
}
|
||||
}
|
||||
// append the variable input at the end of the packed input
|
||||
ret = append(ret, variableInput...)
|
||||
|
||||
return ret, nil
|
||||
}
|
||||
|
||||
// Sig returns the methods string signature according to the ABI spec.
|
||||
//
|
||||
// Example
|
||||
|
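A hedged worked example of the head/tail encoding that Method.pack implements above, using a hypothetical method and arguments:

```go
// Worked example (illustrative, not taken from the diff): packing a
// hypothetical call f(uint256 a, string s) invoked as f(1, "hi") with the
// logic above, where len(method.Inputs) == 2.
//
//   head word 0: a, left-padded to 32 bytes                  -> ...0001
//   head word 1: offset of s = len(method.Inputs)*32 = 0x40  -> ...0040
//   tail word 2: length of "hi"                              -> ...0002
//   tail word 3: "hi", right-padded to 32 bytes
//
// Fixed-size values are written directly into the head in argument order;
// every length-prefixed value only contributes its offset there, while its
// packed payload is appended to variableInput and emitted after all head words.
```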
@ -24,8 +24,8 @@ import (
|
||||
)
|
||||
|
||||
var (
|
||||
big_t = reflect.TypeOf(&big.Int{})
|
||||
ubig_t = reflect.TypeOf(&big.Int{})
|
||||
big_t = reflect.TypeOf(big.Int{})
|
||||
ubig_t = reflect.TypeOf(big.Int{})
|
||||
byte_t = reflect.TypeOf(byte(0))
|
||||
byte_ts = reflect.TypeOf([]byte(nil))
|
||||
uint_t = reflect.TypeOf(uint(0))
|
||||
|
accounts/abi/packing.go (new file, 65 lines)
@@ -0,0 +1,65 @@
|
||||
// Copyright 2016 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package abi
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
)
|
||||
|
||||
// packBytesSlice packs the given bytes as [L, V] as the canonical representation
|
||||
// bytes slice
|
||||
func packBytesSlice(bytes []byte, l int) []byte {
|
||||
len := packNum(reflect.ValueOf(l), UintTy)
|
||||
return append(len, common.RightPadBytes(bytes, (l+31)/32*32)...)
|
||||
}
|
||||
|
||||
// packElement packs the given reflect value according to the abi specification in
|
||||
// t.
|
||||
func packElement(t Type, reflectValue reflect.Value) []byte {
|
||||
switch t.T {
|
||||
case IntTy, UintTy:
|
||||
return packNum(reflectValue, t.T)
|
||||
case StringTy:
|
||||
return packBytesSlice([]byte(reflectValue.String()), reflectValue.Len())
|
||||
case AddressTy:
|
||||
if reflectValue.Kind() == reflect.Array {
|
||||
reflectValue = mustArrayToByteSlice(reflectValue)
|
||||
}
|
||||
|
||||
return common.LeftPadBytes(reflectValue.Bytes(), 32)
|
||||
case BoolTy:
|
||||
if reflectValue.Bool() {
|
||||
return common.LeftPadBytes(common.Big1.Bytes(), 32)
|
||||
} else {
|
||||
return common.LeftPadBytes(common.Big0.Bytes(), 32)
|
||||
}
|
||||
case BytesTy:
|
||||
if reflectValue.Kind() == reflect.Array {
|
||||
reflectValue = mustArrayToByteSlice(reflectValue)
|
||||
}
|
||||
return packBytesSlice(reflectValue.Bytes(), reflectValue.Len())
|
||||
case FixedBytesTy:
|
||||
if reflectValue.Kind() == reflect.Array {
|
||||
reflectValue = mustArrayToByteSlice(reflectValue)
|
||||
}
|
||||
|
||||
return common.RightPadBytes(reflectValue.Bytes(), 32)
|
||||
}
|
||||
panic("abi: fatal error")
|
||||
}
|
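A rough in-package sketch, not part of the change, of the 32-byte words packElement emits for two simple types (it assumes NewType and packElement behave as introduced in this file):

```go
package abi

import (
	"fmt"
	"reflect"

	"github.com/ethereum/go-ethereum/common"
)

// sketchPackElement is illustrative only: booleans and addresses each become a
// single word, left-padded to 32 bytes.
func sketchPackElement() {
	boolType, _ := NewType("bool")
	word := packElement(boolType, reflect.ValueOf(true))
	fmt.Printf("bool:    %d bytes, last byte %d\n", len(word), word[31]) // 32 bytes, 0x01

	addrType, _ := NewType("address")
	word = packElement(addrType, reflect.ValueOf(common.HexToAddress("0x01")))
	fmt.Printf("address: %d bytes\n", len(word)) // one left-padded 32-byte word
}
```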
accounts/abi/reflect.go (new file, 97 lines)
@@ -0,0 +1,97 @@
|
||||
// Copyright 2016 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package abi
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
)
|
||||
|
||||
// indirect recursively dereferences the value until it either gets the value
|
||||
// or finds a big.Int
|
||||
func indirect(v reflect.Value) reflect.Value {
|
||||
if v.Kind() == reflect.Ptr && v.Elem().Type() != big_t {
|
||||
return indirect(v.Elem())
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
// reflectIntKind returns the reflect using the given size and
|
||||
// unsignedness.
|
||||
func reflectIntKind(unsigned bool, size int) reflect.Kind {
|
||||
switch size {
|
||||
case 8:
|
||||
if unsigned {
|
||||
return reflect.Uint8
|
||||
}
|
||||
return reflect.Int8
|
||||
case 16:
|
||||
if unsigned {
|
||||
return reflect.Uint16
|
||||
}
|
||||
return reflect.Int16
|
||||
case 32:
|
||||
if unsigned {
|
||||
return reflect.Uint32
|
||||
}
|
||||
return reflect.Int32
|
||||
case 64:
|
||||
if unsigned {
|
||||
return reflect.Uint64
|
||||
}
|
||||
return reflect.Int64
|
||||
}
|
||||
return reflect.Ptr
|
||||
}
|
||||
|
||||
// mustArrayToBytesSlice creates a new byte slice with the exact same size as value
|
||||
// and copies the bytes in value to the new slice.
|
||||
func mustArrayToByteSlice(value reflect.Value) reflect.Value {
|
||||
slice := reflect.MakeSlice(reflect.TypeOf([]byte{}), value.Len(), value.Len())
|
||||
reflect.Copy(slice, value)
|
||||
return slice
|
||||
}
|
||||
|
||||
// set attempts to assign src to dst by either setting, copying or otherwise.
|
||||
//
|
||||
// set is a bit more lenient when it comes to assignment and doesn't force an as
|
||||
// strict ruleset as bare `reflect` does.
|
||||
func set(dst, src reflect.Value, output Argument) error {
|
||||
dstType := dst.Type()
|
||||
srcType := src.Type()
|
||||
|
||||
switch {
|
||||
case dstType.AssignableTo(src.Type()):
|
||||
dst.Set(src)
|
||||
case dstType.Kind() == reflect.Array && srcType.Kind() == reflect.Slice:
|
||||
if !dstType.Elem().AssignableTo(r_byte) {
|
||||
return fmt.Errorf("abi: cannot unmarshal %v in to array of elem %v", src.Type(), dstType.Elem())
|
||||
}
|
||||
|
||||
if dst.Len() < output.Type.SliceSize {
|
||||
return fmt.Errorf("abi: cannot unmarshal src (len=%d) in to dst (len=%d)", output.Type.SliceSize, dst.Len())
|
||||
}
|
||||
reflect.Copy(dst, src)
|
||||
case dstType.Kind() == reflect.Interface:
|
||||
dst.Set(src)
|
||||
case dstType.Kind() == reflect.Ptr:
|
||||
return set(dst.Elem(), src, output)
|
||||
default:
|
||||
return fmt.Errorf("abi: cannot unmarshal %v in to %v", src.Type(), dst.Type())
|
||||
}
|
||||
return nil
|
||||
}
|
@ -21,8 +21,6 @@ import (
|
||||
"reflect"
|
||||
"regexp"
|
||||
"strconv"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
)
|
||||
|
||||
const (
|
||||
@ -40,53 +38,60 @@ const (
|
||||
|
||||
// Type is the reflection of the supported argument type
|
||||
type Type struct {
|
||||
IsSlice bool
|
||||
SliceSize int
|
||||
IsSlice, IsArray bool
|
||||
SliceSize int
|
||||
|
||||
Elem *Type
|
||||
|
||||
Kind reflect.Kind
|
||||
Type reflect.Type
|
||||
Size int
|
||||
T byte // Our own type checking
|
||||
|
||||
Kind reflect.Kind
|
||||
Type reflect.Type
|
||||
Size int
|
||||
T byte // Our own type checking
|
||||
stringKind string // holds the unparsed string for deriving signatures
|
||||
}
|
||||
|
||||
var (
|
||||
// fullTypeRegex parses the abi types
|
||||
//
|
||||
// Types can be in the format of:
|
||||
//
|
||||
// Input = Type [ "[" [ Number ] "]" ] Name .
|
||||
// Type = [ "u" ] "int" [ Number ] .
|
||||
//
|
||||
// Examples:
|
||||
//
|
||||
// string int uint real
|
||||
// string32 int8 uint8 uint[]
|
||||
// address int256 uint256 real[2]
|
||||
fullTypeRegex = regexp.MustCompile("([a-zA-Z0-9]+)(\\[([0-9]*)?\\])?")
|
||||
typeRegex = regexp.MustCompile("([a-zA-Z]+)([0-9]*)?")
|
||||
// typeRegex parses the abi sub types
|
||||
typeRegex = regexp.MustCompile("([a-zA-Z]+)([0-9]*)?")
|
||||
)
|
||||
|
||||
// NewType returns a fully parsed Type given by the input string or an error if it can't be parsed.
|
||||
//
|
||||
// Strings can be in the format of:
|
||||
//
|
||||
// Input = Type [ "[" [ Number ] "]" ] Name .
|
||||
// Type = [ "u" ] "int" [ Number ] .
|
||||
//
|
||||
// Examples:
|
||||
//
|
||||
// string int uint real
|
||||
// string32 int8 uint8 uint[]
|
||||
// address int256 uint256 real[2]
|
||||
// NewType creates a new reflection type of abi type given in t.
|
||||
func NewType(t string) (typ Type, err error) {
|
||||
// 1. full string 2. type 3. (opt.) is slice 4. (opt.) size
|
||||
// parse the full representation of the abi-type definition; including:
|
||||
// * full string
|
||||
// * type
|
||||
// * is slice
|
||||
// * slice size
|
||||
res := fullTypeRegex.FindAllStringSubmatch(t, -1)[0]
|
||||
|
||||
// check if type is slice and parse type.
|
||||
switch {
|
||||
case res[3] != "":
|
||||
// err is ignored. Already checked for number through the regexp
|
||||
typ.SliceSize, _ = strconv.Atoi(res[3])
|
||||
typ.IsSlice = true
|
||||
typ.IsArray = true
|
||||
case res[2] != "":
|
||||
typ.IsSlice, typ.SliceSize = true, -1
|
||||
case res[0] == "":
|
||||
return Type{}, fmt.Errorf("abi: type parse error: %s", t)
|
||||
}
|
||||
if typ.IsArray || typ.IsSlice {
|
||||
sliceType, err := NewType(res[1])
|
||||
if err != nil {
|
||||
return Type{}, err
|
||||
}
|
||||
typ.Elem = &sliceType
|
||||
typ.stringKind = sliceType.stringKind + t[len(res[1]):]
|
||||
return typ, nil
|
||||
}
|
||||
|
||||
// parse the type and size of the abi-type.
|
||||
parsedType := typeRegex.FindAllStringSubmatch(res[1], -1)[0]
|
||||
@ -106,24 +111,24 @@ func NewType(t string) (typ Type, err error) {
|
||||
varSize = 256
|
||||
t += "256"
|
||||
}
|
||||
typ.stringKind = t
|
||||
|
||||
switch varType {
|
||||
case "int":
|
||||
typ.Kind = reflect.Int
|
||||
typ.Kind = reflectIntKind(false, varSize)
|
||||
typ.Type = big_t
|
||||
typ.Size = varSize
|
||||
typ.T = IntTy
|
||||
case "uint":
|
||||
typ.Kind = reflect.Uint
|
||||
typ.Kind = reflectIntKind(true, varSize)
|
||||
typ.Type = ubig_t
|
||||
typ.Size = varSize
|
||||
typ.T = UintTy
|
||||
case "bool":
|
||||
typ.Kind = reflect.Bool
|
||||
typ.T = BoolTy
|
||||
case "real": // TODO
|
||||
typ.Kind = reflect.Invalid
|
||||
case "address":
|
||||
typ.Kind = reflect.Array
|
||||
typ.Type = address_t
|
||||
typ.Size = 20
|
||||
typ.T = AddressTy
|
||||
@ -131,123 +136,55 @@ func NewType(t string) (typ Type, err error) {
|
||||
typ.Kind = reflect.String
|
||||
typ.Size = -1
|
||||
typ.T = StringTy
|
||||
if varSize > 0 {
|
||||
typ.Size = 32
|
||||
}
|
||||
case "hash":
|
||||
typ.Kind = reflect.Array
|
||||
typ.Size = 32
|
||||
typ.Type = hash_t
|
||||
typ.T = HashTy
|
||||
case "bytes":
|
||||
typ.Kind = reflect.Array
|
||||
typ.Type = byte_ts
|
||||
typ.Size = varSize
|
||||
sliceType, _ := NewType("uint8")
|
||||
typ.Elem = &sliceType
|
||||
if varSize == 0 {
|
||||
typ.IsSlice = true
|
||||
typ.T = BytesTy
|
||||
typ.SliceSize = -1
|
||||
} else {
|
||||
typ.IsArray = true
|
||||
typ.T = FixedBytesTy
|
||||
typ.SliceSize = varSize
|
||||
}
|
||||
default:
|
||||
return Type{}, fmt.Errorf("unsupported arg type: %s", t)
|
||||
}
|
||||
typ.stringKind = t
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// String implements Stringer
|
||||
func (t Type) String() (out string) {
|
||||
return t.stringKind
|
||||
}
|
||||
|
||||
// packBytesSlice packs the given bytes as [L, V] as the canonical representation
|
||||
// bytes slice
|
||||
func packBytesSlice(bytes []byte, l int) []byte {
|
||||
len := packNum(reflect.ValueOf(l), UintTy)
|
||||
return append(len, common.RightPadBytes(bytes, (l+31)/32*32)...)
|
||||
}
|
||||
func (t Type) pack(v reflect.Value) ([]byte, error) {
|
||||
// dereference pointer first if it's a pointer
|
||||
v = indirect(v)
|
||||
|
||||
// Test the given input parameter `v` and checks if it matches certain
|
||||
// criteria
|
||||
// * Big integers are checks for ptr types and if the given value is
|
||||
// assignable
|
||||
// * Integer are checked for size
|
||||
// * Strings, addresses and bytes are checks for type and size
|
||||
func (t Type) pack(v interface{}) ([]byte, error) {
|
||||
value := reflect.ValueOf(v)
|
||||
switch kind := value.Kind(); kind {
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
|
||||
// check input is unsigned
|
||||
if t.Type != ubig_t {
|
||||
return nil, fmt.Errorf("abi: type mismatch: %s for %T", t.Type, v)
|
||||
}
|
||||
|
||||
// no implicit type casting
|
||||
if int(value.Type().Size()*8) != t.Size {
|
||||
return nil, fmt.Errorf("abi: cannot use type %T as type uint%d", v, t.Size)
|
||||
}
|
||||
|
||||
return packNum(value, t.T), nil
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
if t.Type != ubig_t {
|
||||
return nil, fmt.Errorf("type mismatch: %s for %T", t.Type, v)
|
||||
}
|
||||
|
||||
// no implicit type casting
|
||||
if int(value.Type().Size()*8) != t.Size {
|
||||
return nil, fmt.Errorf("abi: cannot use type %T as type uint%d", v, t.Size)
|
||||
}
|
||||
return packNum(value, t.T), nil
|
||||
case reflect.Ptr:
|
||||
// If the value is a ptr do a assign check (only used by
|
||||
// big.Int for now)
|
||||
if t.Type == ubig_t && value.Type() != ubig_t {
|
||||
return nil, fmt.Errorf("type mismatch: %s for %T", t.Type, v)
|
||||
}
|
||||
return packNum(value, t.T), nil
|
||||
case reflect.String:
|
||||
if t.Size > -1 && value.Len() > t.Size {
|
||||
return nil, fmt.Errorf("%v out of bound. %d for %d", value.Kind(), value.Len(), t.Size)
|
||||
}
|
||||
|
||||
return packBytesSlice([]byte(value.String()), value.Len()), nil
|
||||
case reflect.Slice:
|
||||
// Byte slice is a special case, it gets treated as a single value
|
||||
if t.T == BytesTy {
|
||||
return packBytesSlice(value.Bytes(), value.Len()), nil
|
||||
}
|
||||
|
||||
if t.SliceSize > -1 && value.Len() > t.SliceSize {
|
||||
return nil, fmt.Errorf("%v out of bound. %d for %d", value.Kind(), value.Len(), t.Size)
|
||||
}
|
||||
|
||||
// Signed / Unsigned check
|
||||
if value.Type() == big_t && (t.T != IntTy && isSigned(value)) || (t.T == UintTy && isSigned(value)) {
|
||||
return nil, fmt.Errorf("slice of incompatible types.")
|
||||
}
|
||||
if err := typeCheck(t, v); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if (t.IsSlice || t.IsArray) && t.T != BytesTy && t.T != FixedBytesTy {
|
||||
var packed []byte
|
||||
for i := 0; i < value.Len(); i++ {
|
||||
val, err := t.pack(value.Index(i).Interface())
|
||||
for i := 0; i < v.Len(); i++ {
|
||||
val, err := t.Elem.pack(v.Index(i))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
packed = append(packed, val...)
|
||||
}
|
||||
return packBytesSlice(packed, value.Len()), nil
|
||||
case reflect.Bool:
|
||||
if value.Bool() {
|
||||
return common.LeftPadBytes(common.Big1.Bytes(), 32), nil
|
||||
} else {
|
||||
return common.LeftPadBytes(common.Big0.Bytes(), 32), nil
|
||||
}
|
||||
case reflect.Array:
|
||||
if v, ok := value.Interface().(common.Address); ok {
|
||||
return common.LeftPadBytes(v[:], 32), nil
|
||||
} else if v, ok := value.Interface().(common.Hash); ok {
|
||||
return v[:], nil
|
||||
}
|
||||
return packBytesSlice(packed, v.Len()), nil
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("ABI: bad input given %v", value.Kind())
|
||||
return packElement(t, v), nil
|
||||
}
|
||||
|
||||
// requireLengthPrefix returns whether the type requires any sort of length
|
||||
// prefixing.
|
||||
func (t Type) requiresLengthPrefix() bool {
|
||||
return t.T != FixedBytesTy && (t.T == StringTy || t.T == BytesTy || t.IsSlice || t.IsArray)
|
||||
}
|
||||
|
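A small illustrative sketch, not part of the change, of the slice/array flags NewType derives for a few type strings under the parsing rules above:

```go
package abi

import "fmt"

// sketchNewType prints the flags NewType derives for a handful of ABI type
// strings; the exact values assume NewType works as modified in this diff.
func sketchNewType() {
	for _, s := range []string{"uint256", "uint256[]", "uint256[3]", "bytes32"} {
		typ, err := NewType(s)
		if err != nil {
			fmt.Println(s, "->", err)
			continue
		}
		fmt.Printf("%-10s slice=%v array=%v size=%d\n", s, typ.IsSlice, typ.IsArray, typ.SliceSize)
	}
}
```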
@ -284,7 +284,12 @@ func (am *Manager) Import(keyJSON []byte, passphrase, newPassphrase string) (Acc
|
||||
|
||||
// ImportECDSA stores the given key into the key directory, encrypting it with the passphrase.
|
||||
func (am *Manager) ImportECDSA(priv *ecdsa.PrivateKey, passphrase string) (Account, error) {
|
||||
return am.importKey(newKeyFromECDSA(priv), passphrase)
|
||||
key := newKeyFromECDSA(priv)
|
||||
if am.cache.hasAddress(key.Address) {
|
||||
return Account{}, fmt.Errorf("account already exists")
|
||||
}
|
||||
|
||||
return am.importKey(key, passphrase)
|
||||
}
|
||||
|
||||
func (am *Manager) importKey(key *Key, passphrase string) (Account, error) {
|
||||
|
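A hedged usage sketch of the new duplicate check in ImportECDSA; how the Manager is constructed is left out, and the passphrase is purely illustrative:

```go
package example

import (
	"fmt"

	"github.com/ethereum/go-ethereum/accounts"
	"github.com/ethereum/go-ethereum/crypto"
)

// demoDuplicateImport shows the intended behaviour: importing the same ECDSA
// key twice should now fail instead of silently writing a second key file.
func demoDuplicateImport(am *accounts.Manager) error {
	key, err := crypto.GenerateKey()
	if err != nil {
		return err
	}
	if _, err := am.ImportECDSA(key, "passphrase"); err != nil {
		return fmt.Errorf("first import failed: %v", err)
	}
	if _, err := am.ImportECDSA(key, "passphrase"); err == nil {
		return fmt.Errorf("duplicate import unexpectedly succeeded")
	}
	return nil
}
```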
@ -14,7 +14,7 @@
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
// +build darwin,!ios freebsd linux netbsd solaris windows
|
||||
// +build darwin,!ios freebsd linux,!arm64 netbsd solaris windows
|
||||
|
||||
package accounts
|
||||
|
||||
|
@ -14,7 +14,7 @@
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
// +build ios !darwin,!freebsd,!linux,!netbsd,!solaris,!windows
|
||||
// +build ios linux,arm64 !darwin,!freebsd,!linux,!netbsd,!solaris,!windows
|
||||
|
||||
// This is the fallback implementation of directory watching.
|
||||
// It is used on unsupported platforms.
|
||||
|
build/win-ci-compile.bat (new file, 26 lines)
@@ -0,0 +1,26 @@
@echo off
if not exist .\build\win-ci-compile.bat (
  echo This script must be run from the root of the repository.
  exit /b
)
if not defined GOPATH (
  echo GOPATH is not set.
  exit /b
)

set GOPATH=%GOPATH%;%cd%\Godeps\_workspace
set GOBIN=%cd%\build\bin

rem set gitCommit when running from a Git checkout.
set goLinkFlags=""
if exist ".git\HEAD" (
  where /q git
  if not errorlevel 1 (
    for /f %%h in ('git rev-parse HEAD') do (
      set goLinkFlags="-X main.gitCommit=%%h"
    )
  )
)

@echo on
go install -v -ldflags %goLinkFlags% ./...
build/win-ci-test.bat (new file, 15 lines)
@@ -0,0 +1,15 @@
@echo off
if not exist .\build\win-ci-test.bat (
  echo This script must be run from the root of the repository.
  exit /b
)
if not defined GOPATH (
  echo GOPATH is not set.
  exit /b
)

set GOPATH=%GOPATH%;%cd%\Godeps\_workspace
set GOBIN=%cd%\build\bin

@echo on
go test ./...
@ -19,15 +19,12 @@ package main
|
||||
|
||||
import (
|
||||
"crypto/ecdsa"
|
||||
"encoding/hex"
|
||||
"flag"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"os"
|
||||
|
||||
"github.com/ethereum/go-ethereum/cmd/utils"
|
||||
"github.com/ethereum/go-ethereum/crypto"
|
||||
"github.com/ethereum/go-ethereum/logger"
|
||||
"github.com/ethereum/go-ethereum/logger/glog"
|
||||
"github.com/ethereum/go-ethereum/p2p/discover"
|
||||
"github.com/ethereum/go-ethereum/p2p/nat"
|
||||
)
|
||||
@ -43,50 +40,43 @@ func main() {
|
||||
nodeKey *ecdsa.PrivateKey
|
||||
err error
|
||||
)
|
||||
flag.Var(glog.GetVerbosity(), "verbosity", "log verbosity (0-9)")
|
||||
flag.Var(glog.GetVModule(), "vmodule", "log verbosity pattern")
|
||||
glog.SetToStderr(true)
|
||||
flag.Parse()
|
||||
logger.AddLogSystem(logger.NewStdLogSystem(os.Stdout, log.LstdFlags, logger.DebugLevel))
|
||||
|
||||
if *genKey != "" {
|
||||
writeKey(*genKey)
|
||||
key, err := crypto.GenerateKey()
|
||||
if err != nil {
|
||||
utils.Fatalf("could not generate key: %v", err)
|
||||
}
|
||||
if err := crypto.SaveECDSA(*genKey, key); err != nil {
|
||||
utils.Fatalf("%v", err)
|
||||
}
|
||||
os.Exit(0)
|
||||
}
|
||||
|
||||
natm, err := nat.Parse(*natdesc)
|
||||
if err != nil {
|
||||
log.Fatalf("-nat: %v", err)
|
||||
utils.Fatalf("-nat: %v", err)
|
||||
}
|
||||
switch {
|
||||
case *nodeKeyFile == "" && *nodeKeyHex == "":
|
||||
log.Fatal("Use -nodekey or -nodekeyhex to specify a private key")
|
||||
utils.Fatalf("Use -nodekey or -nodekeyhex to specify a private key")
|
||||
case *nodeKeyFile != "" && *nodeKeyHex != "":
|
||||
log.Fatal("Options -nodekey and -nodekeyhex are mutually exclusive")
|
||||
utils.Fatalf("Options -nodekey and -nodekeyhex are mutually exclusive")
|
||||
case *nodeKeyFile != "":
|
||||
if nodeKey, err = crypto.LoadECDSA(*nodeKeyFile); err != nil {
|
||||
log.Fatalf("-nodekey: %v", err)
|
||||
utils.Fatalf("-nodekey: %v", err)
|
||||
}
|
||||
case *nodeKeyHex != "":
|
||||
if nodeKey, err = crypto.HexToECDSA(*nodeKeyHex); err != nil {
|
||||
log.Fatalf("-nodekeyhex: %v", err)
|
||||
utils.Fatalf("-nodekeyhex: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
if _, err := discover.ListenUDP(nodeKey, *listenAddr, natm, ""); err != nil {
|
||||
log.Fatal(err)
|
||||
utils.Fatalf("%v", err)
|
||||
}
|
||||
select {}
|
||||
}
|
||||
|
||||
func writeKey(target string) {
|
||||
key, err := crypto.GenerateKey()
|
||||
if err != nil {
|
||||
log.Fatalf("could not generate key: %v", err)
|
||||
}
|
||||
b := crypto.FromECDSA(key)
|
||||
if target == "-" {
|
||||
fmt.Println(hex.EncodeToString(b))
|
||||
} else {
|
||||
if err := ioutil.WriteFile(target, b, 0600); err != nil {
|
||||
log.Fatal("write error: ", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -123,7 +123,7 @@ func (self *jsre) batch(statement string) {
|
||||
err := self.re.EvalAndPrettyPrint(statement)
|
||||
|
||||
if err != nil {
|
||||
fmt.Printf("error: %v", err)
|
||||
fmt.Printf("%v", jsErrorString(err))
|
||||
}
|
||||
|
||||
if self.atexit != nil {
|
||||
@ -301,21 +301,19 @@ func (self *jsre) preloadJSFiles(ctx *cli.Context) error {
|
||||
for _, file := range jsFiles {
|
||||
filename := common.AbsolutePath(assetPath, strings.TrimSpace(file))
|
||||
if err := self.re.Exec(filename); err != nil {
|
||||
return fmt.Errorf("%s: %v", file, err)
|
||||
return fmt.Errorf("%s: %v", file, jsErrorString(err))
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// exec executes the JS file with the given filename and stops the JSRE
|
||||
func (self *jsre) exec(filename string) error {
|
||||
if err := self.re.Exec(filename); err != nil {
|
||||
self.re.Stop(false)
|
||||
return fmt.Errorf("Javascript Error: %v", err)
|
||||
// jsErrorString adds a backtrace to errors generated by otto.
|
||||
func jsErrorString(err error) string {
|
||||
if ottoErr, ok := err.(*otto.Error); ok {
|
||||
return ottoErr.String()
|
||||
}
|
||||
self.re.Stop(true)
|
||||
return nil
|
||||
return err.Error()
|
||||
}
|
||||
|
||||
func (self *jsre) interactive() {
|
||||
|
@ -18,9 +18,11 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"os/signal"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strconv"
|
||||
@ -40,31 +42,48 @@ import (
|
||||
"github.com/ethereum/go-ethereum/metrics"
|
||||
"github.com/ethereum/go-ethereum/node"
|
||||
"github.com/ethereum/go-ethereum/params"
|
||||
"github.com/ethereum/go-ethereum/release"
|
||||
"github.com/ethereum/go-ethereum/rlp"
|
||||
)
|
||||
|
||||
const (
|
||||
ClientIdentifier = "Geth"
|
||||
Version = "1.4.0-rc"
|
||||
VersionMajor = 1
|
||||
VersionMinor = 4
|
||||
VersionPatch = 0
|
||||
clientIdentifier = "Geth" // Client identifier to advertise over the network
|
||||
versionMajor = 1 // Major version component of the current release
|
||||
versionMinor = 4 // Minor version component of the current release
|
||||
versionPatch = 4 // Patch version component of the current release
|
||||
versionMeta = "stable" // Version metadata to append to the version string
|
||||
|
||||
versionOracle = "0xfa7b9770ca4cb04296cac84f37736d4041251cdf" // Ethereum address of the Geth release oracle
|
||||
)
|
||||
|
||||
var (
|
||||
gitCommit string // set via linker flagg
|
||||
nodeNameVersion string
|
||||
app *cli.App
|
||||
gitCommit string // Git SHA1 commit hash of the release (set via linker flags)
|
||||
verString string // Combined textual representation of all the version components
|
||||
relConfig release.Config // Structured version information and release oracle config
|
||||
app *cli.App
|
||||
)
|
||||
|
||||
func init() {
|
||||
if gitCommit == "" {
|
||||
nodeNameVersion = Version
|
||||
} else {
|
||||
nodeNameVersion = Version + "-" + gitCommit[:8]
|
||||
// Construct the textual version string from the individual components
|
||||
verString = fmt.Sprintf("%d.%d.%d", versionMajor, versionMinor, versionPatch)
|
||||
if versionMeta != "" {
|
||||
verString += "-" + versionMeta
|
||||
}
|
||||
if gitCommit != "" {
|
||||
verString += "-" + gitCommit[:8]
|
||||
}
|
||||
// Construct the version release oracle configuration
|
||||
relConfig.Oracle = common.HexToAddress(versionOracle)
|
||||
|
||||
app = utils.NewApp(Version, "the go-ethereum command line interface")
|
||||
relConfig.Major = uint32(versionMajor)
|
||||
relConfig.Minor = uint32(versionMinor)
|
||||
relConfig.Patch = uint32(versionPatch)
|
||||
|
||||
commit, _ := hex.DecodeString(gitCommit)
|
||||
copy(relConfig.Commit[:], commit)
|
||||
|
||||
// Initialize the CLI app and start Geth
|
||||
app = utils.NewApp(verString, "the go-ethereum command line interface")
|
||||
app.Action = geth
|
||||
app.HideVersion = true // we have a command to print the version
|
||||
app.Commands = []cli.Command{
|
||||
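For illustration, a tiny standalone program that mirrors the version-string assembly in init() above, using assumed component values and a made-up commit hash:

```go
package main

import "fmt"

func main() {
	// Hypothetical values; mirrors the string building in init() above.
	major, minor, patch, meta := 1, 4, 4, "stable"
	gitCommit := "c0ffee12345678900000000000000000deadbeef" // assumed example hash

	v := fmt.Sprintf("%d.%d.%d", major, minor, patch)
	if meta != "" {
		v += "-" + meta
	}
	if gitCommit != "" {
		v += "-" + gitCommit[:8]
	}
	fmt.Println(v) // 1.4.4-stable-c0ffee12
}
```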
@ -205,6 +224,7 @@ JavaScript API. See https://github.com/ethereum/go-ethereum/wiki/Javascipt-Conso
|
||||
utils.NetworkIdFlag,
|
||||
utils.RPCCORSDomainFlag,
|
||||
utils.MetricsEnabledFlag,
|
||||
utils.FakePoWFlag,
|
||||
utils.SolcPathFlag,
|
||||
utils.GpoMinGasPriceFlag,
|
||||
utils.GpoMaxGasPriceFlag,
|
||||
@ -255,7 +275,7 @@ func makeDefaultExtra() []byte {
|
||||
Name string
|
||||
GoVersion string
|
||||
Os string
|
||||
}{uint(VersionMajor<<16 | VersionMinor<<8 | VersionPatch), ClientIdentifier, runtime.Version(), runtime.GOOS}
|
||||
}{uint(versionMajor<<16 | versionMinor<<8 | versionPatch), clientIdentifier, runtime.Version(), runtime.GOOS}
|
||||
extra, err := rlp.EncodeToBytes(clientInfo)
|
||||
if err != nil {
|
||||
glog.V(logger.Warn).Infoln("error setting canonical miner information:", err)
|
||||
@ -273,7 +293,7 @@ func makeDefaultExtra() []byte {
|
||||
// It creates a default node based on the command line arguments and runs it in
|
||||
// blocking mode, waiting for it to be shut down.
|
||||
func geth(ctx *cli.Context) {
|
||||
node := utils.MakeSystemNode(ClientIdentifier, nodeNameVersion, makeDefaultExtra(), ctx)
|
||||
node := utils.MakeSystemNode(clientIdentifier, verString, relConfig, makeDefaultExtra(), ctx)
|
||||
startNode(ctx, node)
|
||||
node.Wait()
|
||||
}
|
||||
@ -337,7 +357,7 @@ func initGenesis(ctx *cli.Context) {
|
||||
// same time.
|
||||
func console(ctx *cli.Context) {
|
||||
// Create and start the node based on the CLI flags
|
||||
node := utils.MakeSystemNode(ClientIdentifier, nodeNameVersion, makeDefaultExtra(), ctx)
|
||||
node := utils.MakeSystemNode(clientIdentifier, verString, relConfig, makeDefaultExtra(), ctx)
|
||||
startNode(ctx, node)
|
||||
|
||||
// Attach to the newly started node, and either execute script or become interactive
|
||||
@ -353,7 +373,7 @@ func console(ctx *cli.Context) {
|
||||
// preload user defined JS files into the console
|
||||
err = repl.preloadJSFiles(ctx)
|
||||
if err != nil {
|
||||
utils.Fatalf("unable to preload JS file %v", err)
|
||||
utils.Fatalf("%v", err)
|
||||
}
|
||||
|
||||
// in case the exec flag holds a JS statement execute it and return
|
||||
@ -370,8 +390,9 @@ func console(ctx *cli.Context) {
|
||||
// of the JavaScript files specified as command arguments.
|
||||
func execScripts(ctx *cli.Context) {
|
||||
// Create and start the node based on the CLI flags
|
||||
node := utils.MakeSystemNode(ClientIdentifier, nodeNameVersion, makeDefaultExtra(), ctx)
|
||||
node := utils.MakeSystemNode(clientIdentifier, verString, relConfig, makeDefaultExtra(), ctx)
|
||||
startNode(ctx, node)
|
||||
defer node.Stop()
|
||||
|
||||
// Attach to the newly started node and execute the given scripts
|
||||
client, err := node.Attach()
|
||||
@ -383,10 +404,24 @@ func execScripts(ctx *cli.Context) {
|
||||
ctx.GlobalString(utils.RPCCORSDomainFlag.Name),
|
||||
client, false)
|
||||
|
||||
// Run all given files.
|
||||
for _, file := range ctx.Args() {
|
||||
repl.exec(file)
|
||||
if err = repl.re.Exec(file); err != nil {
|
||||
break
|
||||
}
|
||||
}
|
||||
node.Stop()
|
||||
if err != nil {
|
||||
utils.Fatalf("JavaScript Error: %v", jsErrorString(err))
|
||||
}
|
||||
// JS files loaded successfully.
|
||||
// Wait for pending callbacks, but stop for Ctrl-C.
|
||||
abort := make(chan os.Signal, 1)
|
||||
signal.Notify(abort, os.Interrupt)
|
||||
go func() {
|
||||
<-abort
|
||||
repl.re.Stop(false)
|
||||
}()
|
||||
repl.re.Stop(true)
|
||||
}
|
||||
|
||||
// startNode boots up the system node and all registered protocols, after which
|
||||
@ -471,11 +506,8 @@ func gpubench(ctx *cli.Context) {
|
||||
}
|
||||
|
||||
func version(c *cli.Context) {
|
||||
fmt.Println(ClientIdentifier)
|
||||
fmt.Println("Version:", Version)
|
||||
if gitCommit != "" {
|
||||
fmt.Println("Git Commit:", gitCommit)
|
||||
}
|
||||
fmt.Println(clientIdentifier)
|
||||
fmt.Println("Version:", verString)
|
||||
fmt.Println("Protocol Versions:", eth.ProtocolVersions)
|
||||
fmt.Println("Network Id:", c.GlobalInt(utils.NetworkIdFlag.Name))
|
||||
fmt.Println("Go Version:", runtime.Version())
|
||||
|
@ -150,8 +150,11 @@ var AppHelpFlagGroups = []flagGroup{
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "LOGGING AND DEBUGGING",
|
||||
Flags: append([]cli.Flag{utils.MetricsEnabledFlag}, debug.Flags...),
|
||||
Name: "LOGGING AND DEBUGGING",
|
||||
Flags: append([]cli.Flag{
|
||||
utils.MetricsEnabledFlag,
|
||||
utils.FakePoWFlag,
|
||||
}, debug.Flags...),
|
||||
},
|
||||
{
|
||||
Name: "EXPERIMENTAL",
|
||||
|
@ -73,15 +73,13 @@ func StartNode(stack *node.Node) {
|
||||
<-sigc
|
||||
glog.V(logger.Info).Infoln("Got interrupt, shutting down...")
|
||||
go stack.Stop()
|
||||
logger.Flush()
|
||||
for i := 10; i > 0; i-- {
|
||||
<-sigc
|
||||
if i > 1 {
|
||||
glog.V(logger.Info).Infoln("Already shutting down, please be patient.")
|
||||
glog.V(logger.Info).Infoln("Interrupt", i-1, "more times to induce panic.")
|
||||
glog.V(logger.Info).Infof("Already shutting down, interrupt %d more times for panic.", i-1)
|
||||
}
|
||||
}
|
||||
glog.V(logger.Error).Infof("Force quitting: this might not end so well.")
|
||||
debug.Exit() // ensure trace and CPU profile data is flushed.
|
||||
debug.LoudPanic("boom")
|
||||
}()
|
||||
}
|
||||
|
cmd/utils/fdlimit_freebsd.go (new file, 54 lines)
@@ -0,0 +1,54 @@
|
||||
// Copyright 2016 The go-ethereum Authors
|
||||
// This file is part of go-ethereum.
|
||||
//
|
||||
// go-ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// go-ethereum is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
// +build freebsd
|
||||
|
||||
package utils
|
||||
|
||||
import "syscall"
|
||||
|
||||
// This file is largely identical to fdlimit_unix.go,
|
||||
// but Rlimit fields have type int64 on FreeBSD so it needs
|
||||
// an extra conversion.
|
||||
|
||||
// raiseFdLimit tries to maximize the file descriptor allowance of this process
|
||||
// to the maximum hard-limit allowed by the OS.
|
||||
func raiseFdLimit(max uint64) error {
|
||||
// Get the current limit
|
||||
var limit syscall.Rlimit
|
||||
if err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &limit); err != nil {
|
||||
return err
|
||||
}
|
||||
// Try to update the limit to the max allowance
|
||||
limit.Cur = limit.Max
|
||||
if limit.Cur > int64(max) {
|
||||
limit.Cur = int64(max)
|
||||
}
|
||||
if err := syscall.Setrlimit(syscall.RLIMIT_NOFILE, &limit); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// getFdLimit retrieves the number of file descriptors allowed to be opened by this
|
||||
// process.
|
||||
func getFdLimit() (int, error) {
|
||||
var limit syscall.Rlimit
|
||||
if err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &limit); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return int(limit.Cur), nil
|
||||
}
|
@ -14,7 +14,7 @@
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
// +build linux darwin
|
||||
// +build linux darwin netbsd openbsd solaris
|
||||
|
||||
package utils
|
||||
|
||||
|
@ -47,6 +47,8 @@ import (
|
||||
"github.com/ethereum/go-ethereum/p2p/discover"
|
||||
"github.com/ethereum/go-ethereum/p2p/nat"
|
||||
"github.com/ethereum/go-ethereum/params"
|
||||
"github.com/ethereum/go-ethereum/pow"
|
||||
"github.com/ethereum/go-ethereum/release"
|
||||
"github.com/ethereum/go-ethereum/rpc"
|
||||
"github.com/ethereum/go-ethereum/whisper"
|
||||
)
|
||||
@ -228,6 +230,10 @@ var (
|
||||
Name: metrics.MetricsEnabledFlag,
|
||||
Usage: "Enable metrics collection and reporting",
|
||||
}
|
||||
FakePoWFlag = cli.BoolFlag{
|
||||
Name: "fakepow",
|
||||
Usage: "Disables proof-of-work verification",
|
||||
}
|
||||
|
||||
// RPC settings
|
||||
RPCEnabledFlag = cli.BoolFlag{
|
||||
@ -636,7 +642,7 @@ func MakePasswordList(ctx *cli.Context) []string {
|
||||
|
||||
// MakeSystemNode sets up a local node, configures the services to launch and
|
||||
// assembles the P2P protocol stack.
|
||||
func MakeSystemNode(name, version string, extra []byte, ctx *cli.Context) *node.Node {
|
||||
func MakeSystemNode(name, version string, relconf release.Config, extra []byte, ctx *cli.Context) *node.Node {
|
||||
// Avoid conflicting network flags
|
||||
networks, netFlags := 0, []cli.BoolFlag{DevModeFlag, TestNetFlag, OlympicFlag}
|
||||
for _, flag := range netFlags {
|
||||
@ -767,7 +773,11 @@ func MakeSystemNode(name, version string, extra []byte, ctx *cli.Context) *node.
|
||||
Fatalf("Failed to register the Whisper service: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
if err := stack.Register(func(ctx *node.ServiceContext) (node.Service, error) {
|
||||
return release.NewReleaseService(ctx, relconf)
|
||||
}); err != nil {
|
||||
Fatalf("Failed to register the Geth release oracle service: %v", err)
|
||||
}
|
||||
return stack
|
||||
}
|
||||
|
||||
@ -842,11 +852,13 @@ func MakeChain(ctx *cli.Context) (chain *core.BlockChain, chainDb ethdb.Database
|
||||
glog.Fatalln(err)
|
||||
}
|
||||
}
|
||||
|
||||
chainConfig := MustMakeChainConfigFromDb(ctx, chainDb)
|
||||
|
||||
var eventMux event.TypeMux
|
||||
chain, err = core.NewBlockChain(chainDb, chainConfig, ethash.New(), &eventMux)
|
||||
pow := pow.PoW(core.FakePow{})
|
||||
if !ctx.GlobalBool(FakePoWFlag.Name) {
|
||||
pow = ethash.New()
|
||||
}
|
||||
chain, err = core.NewBlockChain(chainDb, chainConfig, pow, new(event.TypeMux))
|
||||
if err != nil {
|
||||
Fatalf("Could not start chainmanager: %v", err)
|
||||
}
|
||||
|
@ -167,7 +167,7 @@ func (a Address) MarshalJSON() ([]byte, error) {
|
||||
// Parse address from raw json data
|
||||
func (a *Address) UnmarshalJSON(data []byte) error {
|
||||
if len(data) > 2 && data[0] == '"' && data[len(data)-1] == '"' {
|
||||
data = data[:len(data)-1][1:]
|
||||
data = data[1 : len(data)-1]
|
||||
}
|
||||
|
||||
if len(data) > 2 && data[0] == '0' && data[1] == 'x' {
|
||||
|
@ -16,7 +16,10 @@
|
||||
|
||||
package common
|
||||
|
||||
import "testing"
|
||||
import (
|
||||
"math/big"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestBytesConversion(t *testing.T) {
|
||||
bytes := []byte{5}
|
||||
@ -47,7 +50,38 @@ func TestHashJsonValidation(t *testing.T) {
|
||||
}
|
||||
for i, test := range tests {
|
||||
if err := h.UnmarshalJSON(append([]byte(test.Prefix), make([]byte, test.Size)...)); err != test.Error {
|
||||
t.Error(i, "expected", test.Error, "got", err)
|
||||
t.Errorf("test #%d: error mismatch: have %v, want %v", i, err, test.Error)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestAddressUnmarshalJSON(t *testing.T) {
|
||||
var a Address
|
||||
var tests = []struct {
|
||||
Input string
|
||||
ShouldErr bool
|
||||
Output *big.Int
|
||||
}{
|
||||
{"", true, nil},
|
||||
{`""`, true, nil},
|
||||
{`"0x"`, true, nil},
|
||||
{`"0x00"`, true, nil},
|
||||
{`"0xG000000000000000000000000000000000000000"`, true, nil},
|
||||
{`"0x0000000000000000000000000000000000000000"`, false, big.NewInt(0)},
|
||||
{`"0x0000000000000000000000000000000000000010"`, false, big.NewInt(16)},
|
||||
}
|
||||
for i, test := range tests {
|
||||
err := a.UnmarshalJSON([]byte(test.Input))
|
||||
if err != nil && !test.ShouldErr {
|
||||
t.Errorf("test #%d: unexpected error: %v", i, err)
|
||||
}
|
||||
if err == nil {
|
||||
if test.ShouldErr {
|
||||
t.Errorf("test #%d: expected error, got none", i)
|
||||
}
|
||||
if a.Big().Cmp(test.Output) != 0 {
|
||||
t.Errorf("test #%d: address mismatch: have %v, want %v", i, a.Big(), test.Output)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -292,7 +292,7 @@ func calcDifficultyHomestead(time, parentTime uint64, parentNumber, parentDiff *
|
||||
|
||||
// minimum difficulty can ever be (before exponential factor)
|
||||
if x.Cmp(params.MinimumDifficulty) < 0 {
|
||||
x = params.MinimumDifficulty
|
||||
x.Set(params.MinimumDifficulty)
|
||||
}
|
||||
|
||||
// for the exponential factor
|
||||
@ -325,7 +325,7 @@ func calcDifficultyFrontier(time, parentTime uint64, parentNumber, parentDiff *b
|
||||
diff.Sub(parentDiff, adjust)
|
||||
}
|
||||
if diff.Cmp(params.MinimumDifficulty) < 0 {
|
||||
diff = params.MinimumDifficulty
|
||||
diff.Set(params.MinimumDifficulty)
|
||||
}
|
||||
|
||||
periodCount := new(big.Int).Add(parentNumber, common.Big1)
|
||||
|
@ -1213,3 +1213,6 @@ func (self *BlockChain) GetBlockHashesFromHash(hash common.Hash, max uint64) []c
|
||||
func (self *BlockChain) GetHeaderByNumber(number uint64) *types.Header {
|
||||
return self.hc.GetHeaderByNumber(number)
|
||||
}
|
||||
|
||||
// Config retrieves the blockchain's chain configuration.
|
||||
func (self *BlockChain) Config() *ChainConfig { return self.config }
|
||||
|
@ -43,7 +43,7 @@ func WriteGenesisBlock(chainDb ethdb.Database, reader io.Reader) (*types.Block,
|
||||
}
|
||||
|
||||
var genesis struct {
|
||||
ChainConfig *ChainConfig
|
||||
ChainConfig *ChainConfig `json:"config"`
|
||||
Nonce string
|
||||
Timestamp string
|
||||
ParentHash string
|
||||
|
@ -60,8 +60,7 @@ type stateFn func() (*state.StateDB, error)
|
||||
// two states over time as they are received and processed.
|
||||
type TxPool struct {
|
||||
config *ChainConfig
|
||||
quit chan bool // Quitting channel
|
||||
currentState stateFn // The state function which will allow us to do some pre checks
|
||||
currentState stateFn // The state function which will allow us to do some pre checks
|
||||
pendingState *state.ManagedState
|
||||
gasLimit func() *big.Int // The current gas limit function callback
|
||||
minGasPrice *big.Int
|
||||
@ -72,6 +71,8 @@ type TxPool struct {
|
||||
pending map[common.Hash]*types.Transaction // processable transactions
|
||||
queue map[common.Address]map[common.Hash]*types.Transaction
|
||||
|
||||
wg sync.WaitGroup // for shutdown sync
|
||||
|
||||
homestead bool
|
||||
}
|
||||
|
||||
@ -80,7 +81,6 @@ func NewTxPool(config *ChainConfig, eventMux *event.TypeMux, currentStateFn stat
|
||||
config: config,
|
||||
pending: make(map[common.Hash]*types.Transaction),
|
||||
queue: make(map[common.Address]map[common.Hash]*types.Transaction),
|
||||
quit: make(chan bool),
|
||||
eventMux: eventMux,
|
||||
currentState: currentStateFn,
|
||||
gasLimit: gasLimitFn,
|
||||
@ -90,12 +90,15 @@ func NewTxPool(config *ChainConfig, eventMux *event.TypeMux, currentStateFn stat
|
||||
events: eventMux.Subscribe(ChainHeadEvent{}, GasPriceChanged{}, RemovedTransactionEvent{}),
|
||||
}
|
||||
|
||||
pool.wg.Add(1)
|
||||
go pool.eventLoop()
|
||||
|
||||
return pool
|
||||
}
|
||||
|
||||
func (pool *TxPool) eventLoop() {
|
||||
defer pool.wg.Done()
|
||||
|
||||
// Track chain events. When a chain events occurs (new chain canon block)
|
||||
// we need to know the new state. The new state will help us determine
|
||||
// the nonces in the managed state
|
||||
@ -155,8 +158,8 @@ func (pool *TxPool) resetState() {
|
||||
}
|
||||
|
||||
func (pool *TxPool) Stop() {
|
||||
close(pool.quit)
|
||||
pool.events.Unsubscribe()
|
||||
pool.wg.Wait()
|
||||
glog.V(logger.Info).Infoln("Transaction pool stopped")
|
||||
}
|
||||
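The TxPool changes above add a WaitGroup so Stop only returns once the event loop has actually exited. A generic, self-contained sketch of that shutdown pattern (names are illustrative):

```go
package main

import (
	"sync"
	"time"
)

// worker sketches the shutdown synchronisation used above: the background loop
// registers with a WaitGroup before it starts, and Stop blocks until it returns.
type worker struct {
	quit chan struct{}
	wg   sync.WaitGroup
}

func newWorker() *worker {
	w := &worker{quit: make(chan struct{})}
	w.wg.Add(1)
	go w.loop()
	return w
}

func (w *worker) loop() {
	defer w.wg.Done()
	tick := time.NewTicker(time.Second)
	defer tick.Stop()
	for {
		select {
		case <-tick.C:
			// periodic work would happen here
		case <-w.quit:
			return
		}
	}
}

func (w *worker) Stop() {
	close(w.quit)
	w.wg.Wait() // guarantees loop has exited before Stop returns
}

func main() {
	w := newWorker()
	time.Sleep(50 * time.Millisecond)
	w.Stop()
}
```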
|
||||
|
eth/api.go (54 lines changed)
@@ -18,6 +18,7 @@ package eth
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
@ -51,6 +52,15 @@ import (
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
// errNoCode is returned by call and transact operations for which the requested
|
||||
// recipient contract to operate on does not exist in the state db or does not
|
||||
// have any code associated with it (i.e. suicided).
|
||||
//
|
||||
// Please note, this error string is part of the RPC API and is expected by the
|
||||
// native contract bindings to signal this particular error. Do not change this
|
||||
// as it will break all dependent code!
|
||||
var errNoCode = errors.New("no contract code at given address")
|
||||
|
||||
const defaultGas = uint64(90000)
|
||||
|
||||
// blockByNumber is a commonly used helper function which retrieves and returns
|
||||
@ -97,8 +107,11 @@ type PublicEthereumAPI struct {
|
||||
}
|
||||
|
||||
// NewPublicEthereumAPI creates a new Ethereum protocol API.
|
||||
func NewPublicEthereumAPI(e *Ethereum, gpo *GasPriceOracle) *PublicEthereumAPI {
|
||||
return &PublicEthereumAPI{e, gpo}
|
||||
func NewPublicEthereumAPI(e *Ethereum) *PublicEthereumAPI {
|
||||
return &PublicEthereumAPI{
|
||||
e: e,
|
||||
gpo: e.gpo,
|
||||
}
|
||||
}
|
||||
|
||||
// GasPrice returns a suggestion for a gas price.
|
||||
@ -439,6 +452,16 @@ func (s *PrivateAccountAPI) NewAccount(password string) (common.Address, error)
|
||||
return common.Address{}, err
|
||||
}
|
||||
|
||||
func (s *PrivateAccountAPI) ImportRawKey(privkey string, password string) (common.Address, error) {
|
||||
hexkey, err := hex.DecodeString(privkey)
|
||||
if err != nil {
|
||||
return common.Address{}, err
|
||||
}
|
||||
|
||||
acc, err := s.am.ImportECDSA(crypto.ToECDSA(hexkey), password)
|
||||
return acc.Address, err
|
||||
}
|
||||
|
||||
// UnlockAccount will unlock the account associated with the given address with
|
||||
// the given password for duration seconds. If duration is nil it will use a
|
||||
// default of 300 seconds. It returns an indication if the account was unlocked.
|
||||
@ -694,6 +717,12 @@ func (s *PublicBlockChainAPI) doCall(args CallArgs, blockNr rpc.BlockNumber) (st
|
||||
}
|
||||
stateDb = stateDb.Copy()
|
||||
|
||||
// If there's no code to interact with, respond with an appropriate error
|
||||
if args.To != nil {
|
||||
if code := stateDb.GetCode(*args.To); len(code) == 0 {
|
||||
return "0x", nil, errNoCode
|
||||
}
|
||||
}
|
||||
// Retrieve the account state object to interact with
|
||||
var from *state.StateObject
|
||||
if args.From == (common.Address{}) {
|
||||
@ -888,18 +917,17 @@ type PublicTransactionPoolAPI struct {
|
||||
}
|
||||
|
||||
// NewPublicTransactionPoolAPI creates a new RPC service with methods specific for the transaction pool.
|
||||
func NewPublicTransactionPoolAPI(e *Ethereum, gpo *GasPriceOracle) *PublicTransactionPoolAPI {
|
||||
func NewPublicTransactionPoolAPI(e *Ethereum) *PublicTransactionPoolAPI {
|
||||
api := &PublicTransactionPoolAPI{
|
||||
eventMux: e.EventMux(),
|
||||
gpo: gpo,
|
||||
chainDb: e.ChainDb(),
|
||||
bc: e.BlockChain(),
|
||||
am: e.AccountManager(),
|
||||
txPool: e.TxPool(),
|
||||
miner: e.Miner(),
|
||||
eventMux: e.eventMux,
|
||||
gpo: e.gpo,
|
||||
chainDb: e.chainDb,
|
||||
bc: e.blockchain,
|
||||
am: e.accountManager,
|
||||
txPool: e.txPool,
|
||||
miner: e.miner,
|
||||
pendingTxSubs: make(map[string]rpc.Subscription),
|
||||
}
|
||||
|
||||
go api.subscriptionLoop()
|
||||
|
||||
return api
|
||||
@ -1813,7 +1841,7 @@ func (s *PrivateDebugAPI) TraceTransaction(txHash common.Hash, logger *vm.LogCon
|
||||
}
|
||||
// Mutate the state if we haven't reached the tracing transaction yet
|
||||
if uint64(idx) < txIndex {
|
||||
vmenv := core.NewEnv(stateDb, s.config, s.eth.BlockChain(), msg, parent.Header(), vm.Config{})
|
||||
vmenv := core.NewEnv(stateDb, s.config, s.eth.BlockChain(), msg, block.Header(), vm.Config{})
|
||||
_, _, err := core.ApplyMessage(vmenv, msg, new(core.GasPool).AddGas(tx.Gas()))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("mutation failed: %v", err)
|
||||
@ -1821,7 +1849,7 @@ func (s *PrivateDebugAPI) TraceTransaction(txHash common.Hash, logger *vm.LogCon
|
||||
continue
|
||||
}
|
||||
// Otherwise trace the transaction and return
|
||||
vmenv := core.NewEnv(stateDb, s.config, s.eth.BlockChain(), msg, parent.Header(), vm.Config{Debug: true, Logger: *logger})
|
||||
vmenv := core.NewEnv(stateDb, s.config, s.eth.BlockChain(), msg, block.Header(), vm.Config{Debug: true, Logger: *logger})
|
||||
ret, gas, err := core.ApplyMessage(vmenv, msg, new(core.GasPool).AddGas(tx.Gas()))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("tracing failed: %v", err)
|
||||
|
@ -119,6 +119,7 @@ type Ethereum struct {
|
||||
protocolManager *ProtocolManager
|
||||
SolcPath string
|
||||
solc *compiler.Solidity
|
||||
gpo *GasPriceOracle
|
||||
|
||||
GpoMinGasPrice *big.Int
|
||||
GpoMaxGasPrice *big.Int
|
||||
@ -260,6 +261,8 @@ func New(ctx *node.ServiceContext, config *Config) (*Ethereum, error) {
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
eth.gpo = NewGasPriceOracle(eth)
|
||||
|
||||
newPool := core.NewTxPool(eth.chainConfig, eth.EventMux(), eth.blockchain.State, eth.blockchain.GasLimit)
|
||||
eth.txPool = newPool
|
||||
|
||||
@ -276,34 +279,31 @@ func New(ctx *node.ServiceContext, config *Config) (*Ethereum, error) {
|
||||
// APIs returns the collection of RPC services the ethereum package offers.
|
||||
// NOTE, some of these services probably need to be moved to somewhere else.
|
||||
func (s *Ethereum) APIs() []rpc.API {
|
||||
// share gas price oracle in API's
|
||||
gpo := NewGasPriceOracle(s)
|
||||
|
||||
return []rpc.API{
|
||||
{
|
||||
Namespace: "eth",
|
||||
Version: "1.0",
|
||||
Service: NewPublicEthereumAPI(s, gpo),
|
||||
Service: NewPublicEthereumAPI(s),
|
||||
Public: true,
|
||||
}, {
|
||||
Namespace: "eth",
|
||||
Version: "1.0",
|
||||
Service: NewPublicAccountAPI(s.AccountManager()),
|
||||
Service: NewPublicAccountAPI(s.accountManager),
|
||||
Public: true,
|
||||
}, {
|
||||
Namespace: "personal",
|
||||
Version: "1.0",
|
||||
Service: NewPrivateAccountAPI(s.AccountManager()),
|
||||
Service: NewPrivateAccountAPI(s.accountManager),
|
||||
Public: false,
|
||||
}, {
|
||||
Namespace: "eth",
|
||||
Version: "1.0",
|
||||
Service: NewPublicBlockChainAPI(s.chainConfig, s.BlockChain(), s.Miner(), s.ChainDb(), gpo, s.EventMux(), s.AccountManager()),
|
||||
Service: NewPublicBlockChainAPI(s.chainConfig, s.blockchain, s.miner, s.chainDb, s.gpo, s.eventMux, s.accountManager),
|
||||
Public: true,
|
||||
}, {
|
||||
Namespace: "eth",
|
||||
Version: "1.0",
|
||||
Service: NewPublicTransactionPoolAPI(s, gpo),
|
||||
Service: NewPublicTransactionPoolAPI(s),
|
||||
Public: true,
|
||||
}, {
|
||||
Namespace: "eth",
|
||||
@ -313,7 +313,7 @@ func (s *Ethereum) APIs() []rpc.API {
|
||||
}, {
|
||||
Namespace: "eth",
|
||||
Version: "1.0",
|
||||
Service: downloader.NewPublicDownloaderAPI(s.Downloader(), s.EventMux()),
|
||||
Service: downloader.NewPublicDownloaderAPI(s.protocolManager.downloader, s.eventMux),
|
||||
Public: true,
|
||||
}, {
|
||||
Namespace: "miner",
|
||||
@ -328,7 +328,7 @@ func (s *Ethereum) APIs() []rpc.API {
|
||||
}, {
|
||||
Namespace: "eth",
|
||||
Version: "1.0",
|
||||
Service: filters.NewPublicFilterAPI(s.ChainDb(), s.EventMux()),
|
||||
Service: filters.NewPublicFilterAPI(s.chainDb, s.eventMux),
|
||||
Public: true,
|
||||
}, {
|
||||
Namespace: "admin",
|
||||
@ -351,7 +351,7 @@ func (s *Ethereum) APIs() []rpc.API {
|
||||
}, {
|
||||
Namespace: "admin",
|
||||
Version: "1.0",
|
||||
Service: ethreg.NewPrivateRegistarAPI(s.chainConfig, s.BlockChain(), s.ChainDb(), s.TxPool(), s.AccountManager()),
|
||||
Service: ethreg.NewPrivateRegistarAPI(s.chainConfig, s.blockchain, s.chainDb, s.txPool, s.accountManager),
|
||||
},
|
||||
}
|
||||
}
|
||||
@ -416,6 +416,7 @@ func (s *Ethereum) Stop() error {
|
||||
s.blockchain.Stop()
|
||||
s.protocolManager.Stop()
|
||||
s.txPool.Stop()
|
||||
s.miner.Stop()
|
||||
s.eventMux.Stop()
|
||||
|
||||
s.StopAutoDAG()
|
||||
|
eth/bind.go (new file, 110 lines)
@@ -0,0 +1,110 @@
|
||||
// Copyright 2016 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package eth

import (
    "math/big"

    "github.com/ethereum/go-ethereum/accounts/abi/bind"
    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/core/types"
    "github.com/ethereum/go-ethereum/rlp"
    "github.com/ethereum/go-ethereum/rpc"
)

// ContractBackend implements bind.ContractBackend with direct calls to Ethereum
// internals to support operating on contracts within subprotocols like eth and
// swarm.
//
// Internally this backend uses the already exposed API endpoints of the Ethereum
// object. These should be rewritten to internal Go method calls when the Go API
// is refactored to support a clean library use.
type ContractBackend struct {
    eapi  *PublicEthereumAPI        // Wrapper around the Ethereum object to access metadata
    bcapi *PublicBlockChainAPI      // Wrapper around the blockchain to access chain data
    txapi *PublicTransactionPoolAPI // Wrapper around the transaction pool to access transaction data
}

// NewContractBackend creates a new native contract backend using an existing
// Ethereum object.
func NewContractBackend(eth *Ethereum) *ContractBackend {
    return &ContractBackend{
        eapi:  NewPublicEthereumAPI(eth),
        bcapi: NewPublicBlockChainAPI(eth.chainConfig, eth.blockchain, eth.miner, eth.chainDb, eth.gpo, eth.eventMux, eth.accountManager),
        txapi: NewPublicTransactionPoolAPI(eth),
    }
}

// ContractCall implements bind.ContractCaller, executing an Ethereum contract
// call with the specified data as the input. The pending flag requests execution
// against the pending block, not the stable head of the chain.
func (b *ContractBackend) ContractCall(contract common.Address, data []byte, pending bool) ([]byte, error) {
    // Convert the input args to the API spec
    args := CallArgs{
        To:   &contract,
        Data: common.ToHex(data),
    }
    block := rpc.LatestBlockNumber
    if pending {
        block = rpc.PendingBlockNumber
    }
    // Execute the call and convert the output back to Go types
    out, err := b.bcapi.Call(args, block)
    if err == errNoCode {
        err = bind.ErrNoCode
    }
    return common.FromHex(out), err
}

// PendingAccountNonce implements bind.ContractTransactor, retrieving the current
// pending nonce associated with an account.
func (b *ContractBackend) PendingAccountNonce(account common.Address) (uint64, error) {
    out, err := b.txapi.GetTransactionCount(account, rpc.PendingBlockNumber)
    return out.Uint64(), err
}

// SuggestGasPrice implements bind.ContractTransactor, retrieving the currently
// suggested gas price to allow a timely execution of a transaction.
func (b *ContractBackend) SuggestGasPrice() (*big.Int, error) {
    return b.eapi.GasPrice(), nil
}

// EstimateGasLimit implements bind.ContractTransactor, trying to estimate the gas
// needed to execute a specific transaction based on the current pending state of
// the backend blockchain. There is no guarantee that this is the true gas limit
// requirement as other transactions may be added or removed by miners, but it
// should provide a basis for setting a reasonable default.
func (b *ContractBackend) EstimateGasLimit(sender common.Address, contract *common.Address, value *big.Int, data []byte) (*big.Int, error) {
    out, err := b.bcapi.EstimateGas(CallArgs{
        From:  sender,
        To:    contract,
        Value: *rpc.NewHexNumber(value),
        Data:  common.ToHex(data),
    })
    if err == errNoCode {
        err = bind.ErrNoCode
    }
    return out.BigInt(), err
}

// SendTransaction implements bind.ContractTransactor, injecting the transaction
// into the pending pool for execution.
func (b *ContractBackend) SendTransaction(tx *types.Transaction) error {
    raw, _ := rlp.EncodeToBytes(tx)
    _, err := b.txapi.SendRawTransaction(common.ToHex(raw))
    return err
}
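Editor's note: the doc comment above describes an adapter that satisfies a binary-typed binding interface by translating arguments into the hex-string form of the already exposed RPC-style API. The following is a minimal standalone sketch of that adapter idea only; it is not go-ethereum code, and every name in it (rpcAPI, caller, backend) is illustrative.

```go
package main

import (
    "encoding/hex"
    "fmt"
)

// rpcAPI stands in for a string-based, RPC-style API surface.
type rpcAPI struct{}

func (rpcAPI) Call(toHex, dataHex string, pending bool) (string, error) {
    // A real implementation would execute the call against the chosen block.
    return "0x2a", nil
}

// caller is the narrow, binary-typed interface a binding layer would consume.
type caller interface {
    ContractCall(to [20]byte, data []byte, pending bool) ([]byte, error)
}

// backend adapts rpcAPI to the caller interface, mirroring the hex round-trip above.
type backend struct{ api rpcAPI }

func (b backend) ContractCall(to [20]byte, data []byte, pending bool) ([]byte, error) {
    out, err := b.api.Call("0x"+hex.EncodeToString(to[:]), "0x"+hex.EncodeToString(data), pending)
    if err != nil {
        return nil, err
    }
    return hex.DecodeString(out[2:]) // strip the "0x" prefix again (assumes prefixed output)
}

func main() {
    var c caller = backend{}
    ret, _ := c.ContractCall([20]byte{}, []byte{0x01}, true)
    fmt.Printf("%x\n", ret)
}
```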
@ -34,6 +34,7 @@ import (
    "github.com/ethereum/go-ethereum/event"
    "github.com/ethereum/go-ethereum/logger"
    "github.com/ethereum/go-ethereum/logger/glog"
    "github.com/ethereum/go-ethereum/params"
    "github.com/rcrowley/go-metrics"
)

@ -45,6 +46,8 @@ var (
    MaxReceiptFetch = 256 // Amount of transaction receipts to allow fetching per request
    MaxStateFetch   = 384 // Amount of node state values to allow fetching per request

    MaxForkAncestry = 3 * params.EpochDuration.Uint64() // Maximum chain reorganisation

    hashTTL        = 3 * time.Second     // [eth/61] Time it takes for a hash request to time out
    blockTargetRTT = 3 * time.Second / 2 // [eth/61] Target time for completing a block retrieval request
    blockTTL       = 3 * blockTargetRTT  // [eth/61] Maximum time allowance before a block request is considered expired

@ -79,6 +82,7 @@ var (
    errEmptyHeaderSet   = errors.New("empty header set by peer")
    errPeersUnavailable = errors.New("no peers available or all tried for download")
    errAlreadyInPool    = errors.New("hash already in pool")
    errInvalidAncestor  = errors.New("retrieved ancestor is invalid")
    errInvalidChain     = errors.New("retrieved hash chain is invalid")
    errInvalidBlock     = errors.New("retrieved block is invalid")
    errInvalidBody      = errors.New("retrieved block body is invalid")

@ -266,7 +270,7 @@ func (d *Downloader) Synchronise(id string, head common.Hash, td *big.Int, mode
    case errBusy:
        glog.V(logger.Detail).Infof("Synchronisation already in progress")

    case errTimeout, errBadPeer, errStallingPeer, errEmptyHashSet, errEmptyHeaderSet, errPeersUnavailable, errInvalidChain:
    case errTimeout, errBadPeer, errStallingPeer, errEmptyHashSet, errEmptyHeaderSet, errPeersUnavailable, errInvalidAncestor, errInvalidChain:
        glog.V(logger.Debug).Infof("Removing peer %v: %v", id, err)
        d.dropPeer(id)

@ -353,7 +357,7 @@ func (d *Downloader) syncWithPeer(p *peer, hash common.Hash, td *big.Int) (err e
    if err != nil {
        return err
    }
    origin, err := d.findAncestor61(p)
    origin, err := d.findAncestor61(p, latest)
    if err != nil {
        return err
    }

@ -380,7 +384,7 @@ func (d *Downloader) syncWithPeer(p *peer, hash common.Hash, td *big.Int) (err e
    if err != nil {
        return err
    }
    origin, err := d.findAncestor(p)
    origin, err := d.findAncestor(p, latest)
    if err != nil {
        return err
    }

@ -536,11 +540,19 @@ func (d *Downloader) fetchHeight61(p *peer) (uint64, error) {
// on the correct chain, checking the top N blocks should already get us a match.
// In the rare scenario when we ended up on a long reorganisation (i.e. none of
// the head blocks match), we do a binary search to find the common ancestor.
func (d *Downloader) findAncestor61(p *peer) (uint64, error) {
func (d *Downloader) findAncestor61(p *peer, height uint64) (uint64, error) {
    glog.V(logger.Debug).Infof("%v: looking for common ancestor", p)

    // Request our head blocks to short circuit ancestor location
    head := d.headBlock().NumberU64()
    // Figure out the valid ancestor range to prevent rewrite attacks
    floor, ceil := int64(-1), d.headBlock().NumberU64()
    if ceil >= MaxForkAncestry {
        floor = int64(ceil - MaxForkAncestry)
    }
    // Request the topmost blocks to short circuit binary ancestor lookup
    head := ceil
    if head > height {
        head = height
    }
    from := int64(head) - int64(MaxHashFetch) + 1
    if from < 0 {
        from = 0

@ -600,11 +612,18 @@ func (d *Downloader) findAncestor61(p *peer) (uint64, error) {
    }
    // If the head fetch already found an ancestor, return
    if !common.EmptyHash(hash) {
        if int64(number) <= floor {
            glog.V(logger.Warn).Infof("%v: potential rewrite attack: #%d [%x…] <= #%d limit", p, number, hash[:4], floor)
            return 0, errInvalidAncestor
        }
        glog.V(logger.Debug).Infof("%v: common ancestor: #%d [%x…]", p, number, hash[:4])
        return number, nil
    }
    // Ancestor not found, we need to binary search over our chain
    start, end := uint64(0), head
    if floor > 0 {
        start = uint64(floor)
    }
    for start+1 < end {
        // Split our chain interval in two, and request the hash to cross check
        check := (start + end) / 2

@ -660,6 +679,12 @@ func (d *Downloader) findAncestor61(p *peer) (uint64, error) {
            }
        }
    }
    // Ensure valid ancestry and return
    if int64(start) <= floor {
        glog.V(logger.Warn).Infof("%v: potential rewrite attack: #%d [%x…] <= #%d limit", p, start, hash[:4], floor)
        return 0, errInvalidAncestor
    }
    glog.V(logger.Debug).Infof("%v: common ancestor: #%d [%x…]", p, start, hash[:4])
    return start, nil
}

@ -961,15 +986,23 @@ func (d *Downloader) fetchHeight(p *peer) (uint64, error) {
// on the correct chain, checking the top N links should already get us a match.
// In the rare scenario when we ended up on a long reorganisation (i.e. none of
// the head links match), we do a binary search to find the common ancestor.
func (d *Downloader) findAncestor(p *peer) (uint64, error) {
func (d *Downloader) findAncestor(p *peer, height uint64) (uint64, error) {
    glog.V(logger.Debug).Infof("%v: looking for common ancestor", p)

    // Request our head headers to short circuit ancestor location
    head := d.headHeader().Number.Uint64()
    // Figure out the valid ancestor range to prevent rewrite attacks
    floor, ceil := int64(-1), d.headHeader().Number.Uint64()
    if d.mode == FullSync {
        head = d.headBlock().NumberU64()
        ceil = d.headBlock().NumberU64()
    } else if d.mode == FastSync {
        head = d.headFastBlock().NumberU64()
        ceil = d.headFastBlock().NumberU64()
    }
    if ceil >= MaxForkAncestry {
        floor = int64(ceil - MaxForkAncestry)
    }
    // Request the topmost blocks to short circuit binary ancestor lookup
    head := ceil
    if head > height {
        head = height
    }
    from := int64(head) - int64(MaxHeaderFetch) + 1
    if from < 0 {

@ -1040,11 +1073,18 @@ func (d *Downloader) findAncestor(p *peer) (uint64, error) {
    }
    // If the head fetch already found an ancestor, return
    if !common.EmptyHash(hash) {
        if int64(number) <= floor {
            glog.V(logger.Warn).Infof("%v: potential rewrite attack: #%d [%x…] <= #%d limit", p, number, hash[:4], floor)
            return 0, errInvalidAncestor
        }
        glog.V(logger.Debug).Infof("%v: common ancestor: #%d [%x…]", p, number, hash[:4])
        return number, nil
    }
    // Ancestor not found, we need to binary search over our chain
    start, end := uint64(0), head
    if floor > 0 {
        start = uint64(floor)
    }
    for start+1 < end {
        // Split our chain interval in two, and request the hash to cross check
        check := (start + end) / 2

@ -1100,6 +1140,12 @@ func (d *Downloader) findAncestor(p *peer) (uint64, error) {
            }
        }
    }
    // Ensure valid ancestry and return
    if int64(start) <= floor {
        glog.V(logger.Warn).Infof("%v: potential rewrite attack: #%d [%x…] <= #%d limit", p, start, hash[:4], floor)
        return 0, errInvalidAncestor
    }
    glog.V(logger.Debug).Infof("%v: common ancestor: #%d [%x…]", p, start, hash[:4])
    return start, nil
}
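Editor's note: the change above bounds the common-ancestor lookup so that a peer can never force a reorganisation deeper than MaxForkAncestry below the local head. A minimal standalone sketch of that bounded binary search follows (illustrative only, not the downloader's actual code; block hashes are plain strings here).

```go
package main

import (
    "errors"
    "fmt"
)

var errInvalidAncestor = errors.New("retrieved ancestor is invalid")

// findAncestor returns the highest block number on which the local and remote
// hash chains agree, never accepting an ancestor at or below floor.
func findAncestor(local, remote []string, floor int64) (uint64, error) {
    head := uint64(len(local) - 1)
    if peerHead := uint64(len(remote) - 1); peerHead < head {
        head = peerHead
    }
    start, end := uint64(0), head
    if floor > 0 {
        start = uint64(floor) // never search below the allowed reorg window
    }
    for start+1 < end {
        check := (start + end) / 2
        if local[check] == remote[check] {
            start = check
        } else {
            end = check
        }
    }
    if int64(start) <= floor {
        return 0, errInvalidAncestor // potential rewrite attack: fork is too old
    }
    return start, nil
}

func main() {
    local := []string{"g", "a1", "a2", "a3", "a4"}
    remote := []string{"g", "a1", "a2", "b3", "b4"}
    n, err := findAncestor(local, remote, -1)
    fmt.Println(n, err) // 2 <nil>: the chains diverge after block 2
}
```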
@ -43,8 +43,9 @@ var (
    genesis = core.GenesisBlockForTesting(testdb, testAddress, big.NewInt(1000000000))
)

// Reduce the block cache limit, otherwise the tests will be very heavy.
// Reduce some of the parameters to make the tester faster.
func init() {
    MaxForkAncestry = uint64(10000)
    blockCacheLimit = 1024
}

@ -52,11 +53,15 @@ func init() {
// the returned hash chain is ordered head->parent. In addition, every 3rd block
// contains a transaction and every 5th an uncle to allow testing correct block
// reassembly.
func makeChain(n int, seed byte, parent *types.Block, parentReceipts types.Receipts) ([]common.Hash, map[common.Hash]*types.Header, map[common.Hash]*types.Block, map[common.Hash]types.Receipts) {
func makeChain(n int, seed byte, parent *types.Block, parentReceipts types.Receipts, heavy bool) ([]common.Hash, map[common.Hash]*types.Header, map[common.Hash]*types.Block, map[common.Hash]types.Receipts) {
    // Generate the block chain
    blocks, receipts := core.GenerateChain(parent, testdb, n, func(i int, block *core.BlockGen) {
        block.SetCoinbase(common.Address{seed})

        // If a heavy chain is requested, delay blocks to raise difficulty
        if heavy {
            block.OffsetTime(-1)
        }
        // If the block number is multiple of 3, send a bonus transaction to the miner
        if parent == genesis && i%3 == 0 {
            tx, err := types.NewTransaction(block.TxNonce(testAddress), common.Address{seed}, big.NewInt(1000), params.TxGas, nil, nil).SignECDSA(testKey)

@ -97,15 +102,19 @@ func makeChain(n int, seed byte, parent *types.Block, parentReceipts types.Recei
// makeChainFork creates two chains of length n, such that h1[:f] and
// h2[:f] are different but have a common suffix of length n-f.
func makeChainFork(n, f int, parent *types.Block, parentReceipts types.Receipts) ([]common.Hash, []common.Hash, map[common.Hash]*types.Header, map[common.Hash]*types.Header, map[common.Hash]*types.Block, map[common.Hash]*types.Block, map[common.Hash]types.Receipts, map[common.Hash]types.Receipts) {
func makeChainFork(n, f int, parent *types.Block, parentReceipts types.Receipts, balanced bool) ([]common.Hash, []common.Hash, map[common.Hash]*types.Header, map[common.Hash]*types.Header, map[common.Hash]*types.Block, map[common.Hash]*types.Block, map[common.Hash]types.Receipts, map[common.Hash]types.Receipts) {
    // Create the common suffix
    hashes, headers, blocks, receipts := makeChain(n-f, 0, parent, parentReceipts)
    hashes, headers, blocks, receipts := makeChain(n-f, 0, parent, parentReceipts, false)

    // Create the forks
    hashes1, headers1, blocks1, receipts1 := makeChain(f, 1, blocks[hashes[0]], receipts[hashes[0]])
    // Create the forks, making the second heavier if non-balanced forks were requested
    hashes1, headers1, blocks1, receipts1 := makeChain(f, 1, blocks[hashes[0]], receipts[hashes[0]], false)
    hashes1 = append(hashes1, hashes[1:]...)

    hashes2, headers2, blocks2, receipts2 := makeChain(f, 2, blocks[hashes[0]], receipts[hashes[0]])
    heavy := false
    if !balanced {
        heavy = true
    }
    hashes2, headers2, blocks2, receipts2 := makeChain(f, 2, blocks[hashes[0]], receipts[hashes[0]], heavy)
    hashes2 = append(hashes2, hashes[1:]...)

    for hash, header := range headers {

@ -712,7 +721,7 @@ func testCanonicalSynchronisation(t *testing.T, protocol int, mode SyncMode) {
    // Create a small enough block chain to download
    targetBlocks := blockCacheLimit - 15
    hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil)
    hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil, false)

    tester := newTester()
    tester.newPeer("peer", protocol, hashes, headers, blocks, receipts)

@ -736,7 +745,7 @@ func TestThrottling64Fast(t *testing.T) { testThrottling(t, 64, FastSync) }
func testThrottling(t *testing.T, protocol int, mode SyncMode) {
    // Create a long block chain to download and the tester
    targetBlocks := 8 * blockCacheLimit
    hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil)
    hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil, false)

    tester := newTester()
    tester.newPeer("peer", protocol, hashes, headers, blocks, receipts)

@ -810,20 +819,20 @@ func testThrottling(t *testing.T, protocol int, mode SyncMode) {
// Tests that simple synchronization against a forked chain works correctly. In
// this test common ancestor lookup should *not* be short circuited, and a full
// binary search should be executed.
func TestForkedSynchronisation61(t *testing.T) { testForkedSynchronisation(t, 61, FullSync) }
func TestForkedSynchronisation62(t *testing.T) { testForkedSynchronisation(t, 62, FullSync) }
func TestForkedSynchronisation63Full(t *testing.T) { testForkedSynchronisation(t, 63, FullSync) }
func TestForkedSynchronisation63Fast(t *testing.T) { testForkedSynchronisation(t, 63, FastSync) }
func TestForkedSynchronisation64Full(t *testing.T) { testForkedSynchronisation(t, 64, FullSync) }
func TestForkedSynchronisation64Fast(t *testing.T) { testForkedSynchronisation(t, 64, FastSync) }
func TestForkedSynchronisation64Light(t *testing.T) { testForkedSynchronisation(t, 64, LightSync) }
func TestForkedSync61(t *testing.T) { testForkedSync(t, 61, FullSync) }
func TestForkedSync62(t *testing.T) { testForkedSync(t, 62, FullSync) }
func TestForkedSync63Full(t *testing.T) { testForkedSync(t, 63, FullSync) }
func TestForkedSync63Fast(t *testing.T) { testForkedSync(t, 63, FastSync) }
func TestForkedSync64Full(t *testing.T) { testForkedSync(t, 64, FullSync) }
func TestForkedSync64Fast(t *testing.T) { testForkedSync(t, 64, FastSync) }
func TestForkedSync64Light(t *testing.T) { testForkedSync(t, 64, LightSync) }

func testForkedSynchronisation(t *testing.T, protocol int, mode SyncMode) {
func testForkedSync(t *testing.T, protocol int, mode SyncMode) {
    t.Parallel()

    // Create a long enough forked chain
    common, fork := MaxHashFetch, 2*MaxHashFetch
    hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB := makeChainFork(common+fork, fork, genesis, nil)
    hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB := makeChainFork(common+fork, fork, genesis, nil, true)

    tester := newTester()
    tester.newPeer("fork A", protocol, hashesA, headersA, blocksA, receiptsA)

@ -842,6 +851,40 @@ func testForkedSynchronisation(t *testing.T, protocol int, mode SyncMode) {
    assertOwnForkedChain(t, tester, common+1, []int{common + fork + 1, common + fork + 1})
}

// Tests that synchronising against a much shorter but much heavier fork works
// correctly and is not dropped.
func TestHeavyForkedSync61(t *testing.T) { testHeavyForkedSync(t, 61, FullSync) }
func TestHeavyForkedSync62(t *testing.T) { testHeavyForkedSync(t, 62, FullSync) }
func TestHeavyForkedSync63Full(t *testing.T) { testHeavyForkedSync(t, 63, FullSync) }
func TestHeavyForkedSync63Fast(t *testing.T) { testHeavyForkedSync(t, 63, FastSync) }
func TestHeavyForkedSync64Full(t *testing.T) { testHeavyForkedSync(t, 64, FullSync) }
func TestHeavyForkedSync64Fast(t *testing.T) { testHeavyForkedSync(t, 64, FastSync) }
func TestHeavyForkedSync64Light(t *testing.T) { testHeavyForkedSync(t, 64, LightSync) }

func testHeavyForkedSync(t *testing.T, protocol int, mode SyncMode) {
    t.Parallel()

    // Create a long enough forked chain
    common, fork := MaxHashFetch, 4*MaxHashFetch
    hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB := makeChainFork(common+fork, fork, genesis, nil, false)

    tester := newTester()
    tester.newPeer("light", protocol, hashesA, headersA, blocksA, receiptsA)
    tester.newPeer("heavy", protocol, hashesB[fork/2:], headersB, blocksB, receiptsB)

    // Synchronise with the peer and make sure all blocks were retrieved
    if err := tester.sync("light", nil, mode); err != nil {
        t.Fatalf("failed to synchronise blocks: %v", err)
    }
    assertOwnChain(t, tester, common+fork+1)

    // Synchronise with the second peer and make sure that fork is pulled too
    if err := tester.sync("heavy", nil, mode); err != nil {
        t.Fatalf("failed to synchronise blocks: %v", err)
    }
    assertOwnForkedChain(t, tester, common+1, []int{common + fork + 1, common + fork/2 + 1})
}

// Tests that an inactive downloader will not accept incoming hashes and blocks.
func TestInactiveDownloader61(t *testing.T) {
    t.Parallel()

@ -856,6 +899,74 @@ func TestInactiveDownloader61(t *testing.T) {
    }
}

// Tests that chain forks are contained within a certain interval of the current
// chain head, ensuring that malicious peers cannot waste resources by feeding
// long dead chains.
func TestBoundedForkedSync61(t *testing.T) { testBoundedForkedSync(t, 61, FullSync) }
func TestBoundedForkedSync62(t *testing.T) { testBoundedForkedSync(t, 62, FullSync) }
func TestBoundedForkedSync63Full(t *testing.T) { testBoundedForkedSync(t, 63, FullSync) }
func TestBoundedForkedSync63Fast(t *testing.T) { testBoundedForkedSync(t, 63, FastSync) }
func TestBoundedForkedSync64Full(t *testing.T) { testBoundedForkedSync(t, 64, FullSync) }
func TestBoundedForkedSync64Fast(t *testing.T) { testBoundedForkedSync(t, 64, FastSync) }
func TestBoundedForkedSync64Light(t *testing.T) { testBoundedForkedSync(t, 64, LightSync) }

func testBoundedForkedSync(t *testing.T, protocol int, mode SyncMode) {
    t.Parallel()

    // Create a long enough forked chain
    common, fork := 13, int(MaxForkAncestry+17)
    hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB := makeChainFork(common+fork, fork, genesis, nil, true)

    tester := newTester()
    tester.newPeer("original", protocol, hashesA, headersA, blocksA, receiptsA)
    tester.newPeer("rewriter", protocol, hashesB, headersB, blocksB, receiptsB)

    // Synchronise with the peer and make sure all blocks were retrieved
    if err := tester.sync("original", nil, mode); err != nil {
        t.Fatalf("failed to synchronise blocks: %v", err)
    }
    assertOwnChain(t, tester, common+fork+1)

    // Synchronise with the second peer and ensure that the fork is rejected for being too old
    if err := tester.sync("rewriter", nil, mode); err != errInvalidAncestor {
        t.Fatalf("sync failure mismatch: have %v, want %v", err, errInvalidAncestor)
    }
}

// Tests that chain forks are contained within a certain interval of the current
// chain head for short but heavy forks too. These are a bit special because they
// take different ancestor lookup paths.
func TestBoundedHeavyForkedSync61(t *testing.T) { testBoundedHeavyForkedSync(t, 61, FullSync) }
func TestBoundedHeavyForkedSync62(t *testing.T) { testBoundedHeavyForkedSync(t, 62, FullSync) }
func TestBoundedHeavyForkedSync63Full(t *testing.T) { testBoundedHeavyForkedSync(t, 63, FullSync) }
func TestBoundedHeavyForkedSync63Fast(t *testing.T) { testBoundedHeavyForkedSync(t, 63, FastSync) }
func TestBoundedHeavyForkedSync64Full(t *testing.T) { testBoundedHeavyForkedSync(t, 64, FullSync) }
func TestBoundedHeavyForkedSync64Fast(t *testing.T) { testBoundedHeavyForkedSync(t, 64, FastSync) }
func TestBoundedHeavyForkedSync64Light(t *testing.T) { testBoundedHeavyForkedSync(t, 64, LightSync) }

func testBoundedHeavyForkedSync(t *testing.T, protocol int, mode SyncMode) {
    t.Parallel()

    // Create a long enough forked chain
    common, fork := 13, int(MaxForkAncestry+17)
    hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB := makeChainFork(common+fork, fork, genesis, nil, false)

    tester := newTester()
    tester.newPeer("original", protocol, hashesA, headersA, blocksA, receiptsA)
    tester.newPeer("heavy-rewriter", protocol, hashesB[MaxForkAncestry-17:], headersB, blocksB, receiptsB) // Root the fork below the ancestor limit

    // Synchronise with the peer and make sure all blocks were retrieved
    if err := tester.sync("original", nil, mode); err != nil {
        t.Fatalf("failed to synchronise blocks: %v", err)
    }
    assertOwnChain(t, tester, common+fork+1)

    // Synchronise with the second peer and ensure that the fork is rejected for being too old
    if err := tester.sync("heavy-rewriter", nil, mode); err != errInvalidAncestor {
        t.Fatalf("sync failure mismatch: have %v, want %v", err, errInvalidAncestor)
    }
}

// Tests that an inactive downloader will not accept incoming block headers and
// bodies.
func TestInactiveDownloader62(t *testing.T) {

@ -909,7 +1020,7 @@ func testCancel(t *testing.T, protocol int, mode SyncMode) {
    if targetBlocks >= MaxHeaderFetch {
        targetBlocks = MaxHeaderFetch - 15
    }
    hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil)
    hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil, false)

    tester := newTester()
    tester.newPeer("peer", protocol, hashes, headers, blocks, receipts)

@ -944,7 +1055,7 @@ func testMultiSynchronisation(t *testing.T, protocol int, mode SyncMode) {
    // Create various peers with various parts of the chain
    targetPeers := 8
    targetBlocks := targetPeers*blockCacheLimit - 15
    hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil)
    hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil, false)

    tester := newTester()
    for i := 0; i < targetPeers; i++ {

@ -972,7 +1083,7 @@ func testMultiProtoSync(t *testing.T, protocol int, mode SyncMode) {
    // Create a small enough block chain to download
    targetBlocks := blockCacheLimit - 15
    hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil)
    hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil, false)

    // Create peers of every type
    tester := newTester()

@ -1010,7 +1121,7 @@ func testEmptyShortCircuit(t *testing.T, protocol int, mode SyncMode) {
    // Create a block chain to download
    targetBlocks := 2*blockCacheLimit - 15
    hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil)
    hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil, false)

    tester := newTester()
    tester.newPeer("peer", protocol, hashes, headers, blocks, receipts)

@ -1063,7 +1174,7 @@ func testMissingHeaderAttack(t *testing.T, protocol int, mode SyncMode) {
    // Create a small enough block chain to download
    targetBlocks := blockCacheLimit - 15
    hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil)
    hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil, false)

    tester := newTester()

@ -1095,7 +1206,7 @@ func TestShiftedHeaderAttack64Light(t *testing.T) { testShiftedHeaderAttack(t, 6
func testShiftedHeaderAttack(t *testing.T, protocol int, mode SyncMode) {
    // Create a small enough block chain to download
    targetBlocks := blockCacheLimit - 15
    hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil)
    hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil, false)

    tester := newTester()

@ -1126,7 +1237,7 @@ func TestInvalidHeaderRollback64Light(t *testing.T) { testInvalidHeaderRollback(
func testInvalidHeaderRollback(t *testing.T, protocol int, mode SyncMode) {
    // Create a small enough block chain to download
    targetBlocks := 3*fsHeaderSafetyNet + fsMinFullBlocks
    hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil)
    hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil, false)

    tester := newTester()

@ -1217,7 +1328,7 @@ func testHighTDStarvationAttack(t *testing.T, protocol int, mode SyncMode) {
    t.Parallel()

    tester := newTester()
    hashes, headers, blocks, receipts := makeChain(0, 0, genesis, nil)
    hashes, headers, blocks, receipts := makeChain(0, 0, genesis, nil, false)

    tester.newPeer("attack", protocol, []common.Hash{hashes[0]}, headers, blocks, receipts)
    if err := tester.sync("attack", big.NewInt(1000000), mode); err != errStallingPeer {

@ -1247,6 +1358,7 @@ func testBlockHeaderAttackerDropping(t *testing.T, protocol int) {
    {errEmptyHashSet, true},     // No hashes were returned as a response, drop as it's a dead end
    {errEmptyHeaderSet, true},   // No headers were returned as a response, drop as it's a dead end
    {errPeersUnavailable, true}, // Nobody had the advertised blocks, drop the advertiser
    {errInvalidAncestor, true},  // Agreed upon ancestor is not acceptable, drop the chain rewriter
    {errInvalidChain, true},     // Hash chain was detected as invalid, definitely drop
    {errInvalidBlock, false},    // A bad peer was detected, but not the sync origin
    {errInvalidBody, false},     // A bad peer was detected, but not the sync origin

@ -1294,7 +1406,7 @@ func testSyncProgress(t *testing.T, protocol int, mode SyncMode) {
    // Create a small enough block chain to download
    targetBlocks := blockCacheLimit - 15
    hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil)
    hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil, false)

    // Set a sync init hook to catch progress changes
    starting := make(chan struct{})

@ -1366,7 +1478,7 @@ func testForkedSyncProgress(t *testing.T, protocol int, mode SyncMode) {
    // Create a forked chain to simulate origin revertal
    common, fork := MaxHashFetch, 2*MaxHashFetch
    hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB := makeChainFork(common+fork, fork, genesis, nil)
    hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB := makeChainFork(common+fork, fork, genesis, nil, true)

    // Set a sync init hook to catch progress changes
    starting := make(chan struct{})

@ -1441,7 +1553,7 @@ func testFailedSyncProgress(t *testing.T, protocol int, mode SyncMode) {
    // Create a small enough block chain to download
    targetBlocks := blockCacheLimit - 15
    hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil)
    hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil, false)

    // Set a sync init hook to catch progress changes
    starting := make(chan struct{})

@ -1517,7 +1629,7 @@ func testFakedSyncProgress(t *testing.T, protocol int, mode SyncMode) {
    // Create a small block chain
    targetBlocks := blockCacheLimit - 15
    hashes, headers, blocks, receipts := makeChain(targetBlocks+3, 0, genesis, nil)
    hashes, headers, blocks, receipts := makeChain(targetBlocks+3, 0, genesis, nil, false)

    // Set a sync init hook to catch progress changes
    starting := make(chan struct{})

@ -1590,7 +1702,7 @@ func TestDeliverHeadersHang64Light(t *testing.T) { testDeliverHeadersHang(t, 64,
func testDeliverHeadersHang(t *testing.T, protocol int, mode SyncMode) {
    t.Parallel()
    hashes, headers, blocks, receipts := makeChain(5, 0, genesis, nil)
    hashes, headers, blocks, receipts := makeChain(5, 0, genesis, nil, false)
    fakeHeads := []*types.Header{{}, {}, {}, {}}
    for i := 0; i < 200; i++ {
        tester := newTester()
|
||||
fs.filterMu.RLock()
|
||||
for _, filter := range fs.logFilters {
|
||||
if filter.LogCallback != nil && !filter.created.After(event.Time) {
|
||||
for _, removedLog := range ev.Logs {
|
||||
for _, removedLog := range filter.FilterLogs(ev.Logs) {
|
||||
filter.LogCallback(removedLog, true)
|
||||
}
|
||||
}
|
||||
|
@ -22,6 +22,7 @@ import (
|
||||
"math"
|
||||
"math/big"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
@ -58,7 +59,7 @@ type blockFetcherFn func([]common.Hash) error
|
||||
type ProtocolManager struct {
|
||||
networkId int
|
||||
|
||||
fastSync bool
|
||||
fastSync uint32
|
||||
txpool txPool
|
||||
blockchain *core.BlockChain
|
||||
chaindb ethdb.Database
|
||||
@ -74,36 +75,39 @@ type ProtocolManager struct {
|
||||
minedBlockSub event.Subscription
|
||||
|
||||
// channels for fetcher, syncer, txsyncLoop
|
||||
newPeerCh chan *peer
|
||||
txsyncCh chan *txsync
|
||||
quitSync chan struct{}
|
||||
newPeerCh chan *peer
|
||||
txsyncCh chan *txsync
|
||||
quitSync chan struct{}
|
||||
noMorePeers chan struct{}
|
||||
|
||||
// wait group is used for graceful shutdowns during downloading
|
||||
// and processing
|
||||
wg sync.WaitGroup
|
||||
quit bool
|
||||
wg sync.WaitGroup
|
||||
}
|
||||
|
||||
// NewProtocolManager returns a new ethereum sub protocol manager. The Ethereum sub protocol manages peers capable
|
||||
// with the ethereum network.
|
||||
func NewProtocolManager(config *core.ChainConfig, fastSync bool, networkId int, mux *event.TypeMux, txpool txPool, pow pow.PoW, blockchain *core.BlockChain, chaindb ethdb.Database) (*ProtocolManager, error) {
|
||||
// Create the protocol manager with the base fields
|
||||
manager := &ProtocolManager{
|
||||
networkId: networkId,
|
||||
eventMux: mux,
|
||||
txpool: txpool,
|
||||
blockchain: blockchain,
|
||||
chaindb: chaindb,
|
||||
peers: newPeerSet(),
|
||||
newPeerCh: make(chan *peer),
|
||||
noMorePeers: make(chan struct{}),
|
||||
txsyncCh: make(chan *txsync),
|
||||
quitSync: make(chan struct{}),
|
||||
}
|
||||
// Figure out whether to allow fast sync or not
|
||||
if fastSync && blockchain.CurrentBlock().NumberU64() > 0 {
|
||||
glog.V(logger.Info).Infof("blockchain not empty, fast sync disabled")
|
||||
fastSync = false
|
||||
}
|
||||
// Create the protocol manager with the base fields
|
||||
manager := &ProtocolManager{
|
||||
networkId: networkId,
|
||||
fastSync: fastSync,
|
||||
eventMux: mux,
|
||||
txpool: txpool,
|
||||
blockchain: blockchain,
|
||||
chaindb: chaindb,
|
||||
peers: newPeerSet(),
|
||||
newPeerCh: make(chan *peer, 1),
|
||||
txsyncCh: make(chan *txsync),
|
||||
quitSync: make(chan struct{}),
|
||||
if fastSync {
|
||||
manager.fastSync = uint32(1)
|
||||
}
|
||||
// Initiate a sub-protocol for every implemented version we can handle
|
||||
manager.SubProtocols = make([]p2p.Protocol, 0, len(ProtocolVersions))
|
||||
@ -120,8 +124,14 @@ func NewProtocolManager(config *core.ChainConfig, fastSync bool, networkId int,
|
||||
Length: ProtocolLengths[i],
|
||||
Run: func(p *p2p.Peer, rw p2p.MsgReadWriter) error {
|
||||
peer := manager.newPeer(int(version), p, rw)
|
||||
manager.newPeerCh <- peer
|
||||
return manager.handle(peer)
|
||||
select {
|
||||
case manager.newPeerCh <- peer:
|
||||
manager.wg.Add(1)
|
||||
defer manager.wg.Done()
|
||||
return manager.handle(peer)
|
||||
case <-manager.quitSync:
|
||||
return p2p.DiscQuitting
|
||||
}
|
||||
},
|
||||
NodeInfo: func() interface{} {
|
||||
return manager.NodeInfo()
|
||||
@ -187,16 +197,25 @@ func (pm *ProtocolManager) Start() {
|
||||
}
|
||||
|
||||
func (pm *ProtocolManager) Stop() {
|
||||
// Showing a log message. During download / process this could actually
|
||||
// take between 5 to 10 seconds and therefor feedback is required.
|
||||
glog.V(logger.Info).Infoln("Stopping ethereum protocol handler...")
|
||||
|
||||
pm.quit = true
|
||||
pm.txSub.Unsubscribe() // quits txBroadcastLoop
|
||||
pm.minedBlockSub.Unsubscribe() // quits blockBroadcastLoop
|
||||
close(pm.quitSync) // quits syncer, fetcher, txsyncLoop
|
||||
|
||||
// Wait for any process action
|
||||
// Quit the sync loop.
|
||||
// After this send has completed, no new peers will be accepted.
|
||||
pm.noMorePeers <- struct{}{}
|
||||
|
||||
// Quit fetcher, txsyncLoop.
|
||||
close(pm.quitSync)
|
||||
|
||||
// Disconnect existing sessions.
|
||||
// This also closes the gate for any new registrations on the peer set.
|
||||
// sessions which are already established but not added to pm.peers yet
|
||||
// will exit when they try to register.
|
||||
pm.peers.Close()
|
||||
|
||||
// Wait for all peer handler goroutines and the loops to come down.
|
||||
pm.wg.Wait()
|
||||
|
||||
glog.V(logger.Info).Infoln("Ethereum protocol handler stopped")
|
||||
@ -662,7 +681,11 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
|
||||
}
|
||||
|
||||
case msg.Code == TxMsg:
|
||||
// Transactions arrived, parse all of them and deliver to the pool
|
||||
// Transactions arrived, make sure we have a valid chain to handle them
|
||||
if atomic.LoadUint32(&pm.fastSync) == 1 {
|
||||
break
|
||||
}
|
||||
// Transactions can be processed, parse all of them and deliver to the pool
|
||||
var txs []*types.Transaction
|
||||
if err := msg.Decode(&txs); err != nil {
|
||||
return errResp(ErrDecode, "msg %v: %v", msg, err)
|
||||
|
@ -140,14 +140,14 @@ func newTestPeer(name string, version int, pm *ProtocolManager, shake bool) (*te
|
||||
// Start the peer on a new thread
|
||||
errc := make(chan error, 1)
|
||||
go func() {
|
||||
pm.newPeerCh <- peer
|
||||
errc <- pm.handle(peer)
|
||||
select {
|
||||
case pm.newPeerCh <- peer:
|
||||
errc <- pm.handle(peer)
|
||||
case <-pm.quitSync:
|
||||
errc <- p2p.DiscQuitting
|
||||
}
|
||||
}()
|
||||
tp := &testPeer{
|
||||
app: app,
|
||||
net: net,
|
||||
peer: peer,
|
||||
}
|
||||
tp := &testPeer{app: app, net: net, peer: peer}
|
||||
// Execute any implicitly requested handshakes and return
|
||||
if shake {
|
||||
td, head, genesis := pm.blockchain.Status()
|
||||
|
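Editor's note: the handler changes above establish a specific shutdown order (stop accepting peers, signal the helper loops, disconnect sessions, then wait on a WaitGroup that every accepted peer handler joined). The sketch below illustrates that ordering in isolation; it is not the eth package itself, and all names in it are illustrative.

```go
package main

import (
    "fmt"
    "sync"
    "time"
)

type manager struct {
    newPeerCh   chan int
    quitSync    chan struct{}
    noMorePeers chan struct{}
    wg          sync.WaitGroup
}

// accept mirrors the protocol Run func: a peer only joins the WaitGroup once
// the sync loop has taken it from newPeerCh; otherwise shutdown wins the race.
func (m *manager) accept(id int) {
    select {
    case m.newPeerCh <- id:
        m.wg.Add(1)
        defer m.wg.Done()
        time.Sleep(10 * time.Millisecond) // stand-in for handling the peer
    case <-m.quitSync:
    }
}

func (m *manager) syncer() {
    for {
        select {
        case id := <-m.newPeerCh:
            fmt.Println("peer accepted:", id)
        case <-m.noMorePeers:
            return
        }
    }
}

func (m *manager) stop() {
    m.noMorePeers <- struct{}{} // quit the sync loop; no new peers afterwards
    close(m.quitSync)           // release handlers still blocked in accept
    m.wg.Wait()                 // wait for running peer handlers to finish
}

func main() {
    m := &manager{newPeerCh: make(chan int), quitSync: make(chan struct{}), noMorePeers: make(chan struct{})}
    go m.syncer()
    go m.accept(1)
    time.Sleep(5 * time.Millisecond)
    m.stop()
    fmt.Println("stopped")
}
```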
21  eth/peer.go

@ -34,6 +34,7 @@ import (
)

var (
    errClosed            = errors.New("peer set is closed")
    errAlreadyRegistered = errors.New("peer is already registered")
    errNotRegistered     = errors.New("peer is not registered")
)

@ -351,8 +352,9 @@ func (p *peer) String() string {
// peerSet represents the collection of active peers currently participating in
// the Ethereum sub-protocol.
type peerSet struct {
    peers map[string]*peer
    lock  sync.RWMutex
    peers  map[string]*peer
    lock   sync.RWMutex
    closed bool
}

// newPeerSet creates a new peer set to track the active participants.

@ -368,6 +370,9 @@ func (ps *peerSet) Register(p *peer) error {
    ps.lock.Lock()
    defer ps.lock.Unlock()

    if ps.closed {
        return errClosed
    }
    if _, ok := ps.peers[p.id]; ok {
        return errAlreadyRegistered
    }

@ -450,3 +455,15 @@ func (ps *peerSet) BestPeer() *peer {
    }
    return bestPeer
}

// Close disconnects all peers.
// No new peers can be registered after Close has returned.
func (ps *peerSet) Close() {
    ps.lock.Lock()
    defer ps.lock.Unlock()

    for _, p := range ps.peers {
        p.Disconnect(p2p.DiscQuitting)
    }
    ps.closed = true
}
@ -18,6 +18,7 @@ package eth

import (
    "math/rand"
    "sync/atomic"
    "time"

    "github.com/ethereum/go-ethereum/common"

@ -148,7 +149,7 @@ func (pm *ProtocolManager) syncer() {
            // Force a sync even if not enough peers are present
            go pm.synchronise(pm.peers.BestPeer())

        case <-pm.quitSync:
        case <-pm.noMorePeers:
            return
        }
    }

@ -167,18 +168,18 @@ func (pm *ProtocolManager) synchronise(peer *peer) {
    }
    // Otherwise try to sync with the downloader
    mode := downloader.FullSync
    if pm.fastSync {
    if atomic.LoadUint32(&pm.fastSync) == 1 {
        mode = downloader.FastSync
    }
    if err := pm.downloader.Synchronise(peer.id, peer.Head(), peer.Td(), mode); err != nil {
        return
    }
    // If fast sync was enabled, and we synced up, disable it
    if pm.fastSync {
    if atomic.LoadUint32(&pm.fastSync) == 1 {
        // Disable fast sync if we indeed have something in our chain
        if pm.blockchain.CurrentBlock().NumberU64() > 0 {
            glog.V(logger.Info).Infof("fast sync complete, auto disabling")
            pm.fastSync = false
            atomic.StoreUint32(&pm.fastSync, 0)
        }
    }
}

@ -17,6 +17,7 @@
package eth

import (
    "sync/atomic"
    "testing"
    "time"

@ -29,12 +30,12 @@ import (
func TestFastSyncDisabling(t *testing.T) {
    // Create a pristine protocol manager, check that fast sync is left enabled
    pmEmpty := newTestProtocolManagerMust(t, true, 0, nil, nil)
    if !pmEmpty.fastSync {
    if atomic.LoadUint32(&pmEmpty.fastSync) == 0 {
        t.Fatalf("fast sync disabled on pristine blockchain")
    }
    // Create a full protocol manager, check that fast sync gets disabled
    pmFull := newTestProtocolManagerMust(t, true, 1024, nil, nil)
    if pmFull.fastSync {
    if atomic.LoadUint32(&pmFull.fastSync) == 1 {
        t.Fatalf("fast sync not disabled on non-empty blockchain")
    }
    // Sync up the two peers

@ -47,7 +48,7 @@ func TestFastSyncDisabling(t *testing.T) {
    pmEmpty.synchronise(pmEmpty.peers.BestPeer())

    // Check that fast sync was disabled
    if pmEmpty.fastSync {
    if atomic.LoadUint32(&pmEmpty.fastSync) == 1 {
        t.Fatalf("fast sync not disabled after successful synchronisation")
    }
}
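Editor's note: the change above turns the fastSync field from a bool into a uint32 because several goroutines (the syncer and the message handler) read and clear it concurrently, so it must be accessed through sync/atomic. A small self-contained sketch of that flag pattern, with nothing geth-specific assumed:

```go
package main

import (
    "fmt"
    "sync"
    "sync/atomic"
)

func main() {
    var fastSync uint32 = 1 // 1 = enabled, 0 = disabled

    var wg sync.WaitGroup
    for i := 0; i < 4; i++ {
        wg.Add(1)
        go func() {
            defer wg.Done()
            if atomic.LoadUint32(&fastSync) == 1 {
                // ... fast-sync specific path ...
                atomic.StoreUint32(&fastSync, 0) // disable once synced up
            }
        }()
    }
    wg.Wait()
    fmt.Println("fastSync =", atomic.LoadUint32(&fastSync))
}
```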
@ -66,6 +66,9 @@ func (mux *TypeMux) Subscribe(types ...interface{}) Subscription {
    mux.mutex.Lock()
    defer mux.mutex.Unlock()
    if mux.stopped {
        // set the status to closed so that calling Unsubscribe after this
        // call will short circuit
        sub.closed = true
        close(sub.postC)
    } else {
        if mux.subm == nil {

@ -25,6 +25,14 @@ import (

type testEvent int

func TestSubCloseUnsub(t *testing.T) {
    // the point of this test is **not** to panic
    var mux TypeMux
    mux.Stop()
    sub := mux.Subscribe(int(0))
    sub.Unsubscribe()
}

func TestSub(t *testing.T) {
    mux := new(TypeMux)
    defer mux.Stop()
@ -51,7 +51,7 @@ type HandlerT struct {
    traceFile string
}

// Verbosity sets the glog verbosity floor.
// Verbosity sets the glog verbosity ceiling.
// The verbosity of individual packages and source files
// can be raised using Vmodule.
func (*HandlerT) Verbosity(level int) {

@ -131,14 +131,14 @@ func (h *HandlerT) StopCPUProfile() error {
    return nil
}

// Trace turns on tracing for nsec seconds and writes
// GoTrace turns on tracing for nsec seconds and writes
// trace data to file.
func (h *HandlerT) Trace(file string, nsec uint) error {
    if err := h.StartTrace(file); err != nil {
func (h *HandlerT) GoTrace(file string, nsec uint) error {
    if err := h.StartGoTrace(file); err != nil {
        return err
    }
    time.Sleep(time.Duration(nsec) * time.Second)
    h.StopTrace()
    h.StopGoTrace()
    return nil
}

@ -89,7 +89,7 @@ func Setup(ctx *cli.Context) error {
    runtime.MemProfileRate = ctx.GlobalInt(memprofilerateFlag.Name)
    Handler.SetBlockProfileRate(ctx.GlobalInt(blockprofilerateFlag.Name))
    if traceFile := ctx.GlobalString(traceFlag.Name); traceFile != "" {
        if err := Handler.StartTrace(traceFile); err != nil {
        if err := Handler.StartGoTrace(traceFile); err != nil {
            return err
        }
    }

@ -114,5 +114,5 @@ func Setup(ctx *cli.Context) error {
// respective file.
func Exit() {
    Handler.StopCPUProfile()
    Handler.StopTrace()
    Handler.StopGoTrace()
}

@ -27,8 +27,8 @@ import (
    "github.com/ethereum/go-ethereum/logger/glog"
)

// StartTrace turns on tracing, writing to the given file.
func (h *HandlerT) StartTrace(file string) error {
// StartGoTrace turns on tracing, writing to the given file.
func (h *HandlerT) StartGoTrace(file string) error {
    h.mu.Lock()
    defer h.mu.Unlock()
    if h.traceW != nil {

@ -49,7 +49,7 @@ func (h *HandlerT) StartTrace(file string) error {
}

// StopTrace stops an ongoing trace.
func (h *HandlerT) StopTrace() error {
func (h *HandlerT) StopGoTrace() error {
    h.mu.Lock()
    defer h.mu.Unlock()
    trace.Stop()

@ -22,10 +22,10 @@ package debug

import "errors"

func (*HandlerT) StartTrace(string) error {
func (*HandlerT) StartGoTrace(string) error {
    return errors.New("tracing is not supported on Go < 1.5")
}

func (*HandlerT) StopTrace() error {
func (*HandlerT) StopGoTrace() error {
    return errors.New("tracing is not supported on Go < 1.5")
}
@ -18,12 +18,13 @@
package web3ext

var Modules = map[string]string{
    "txpool": TxPool_JS,
    "admin":  Admin_JS,
    "eth":    Eth_JS,
    "miner":  Miner_JS,
    "debug":  Debug_JS,
    "net":    Net_JS,
    "txpool":   TxPool_JS,
    "admin":    Admin_JS,
    "personal": Personal_JS,
    "eth":      Eth_JS,
    "miner":    Miner_JS,
    "debug":    Debug_JS,
    "net":      Net_JS,
}

const TxPool_JS = `

@ -175,6 +176,20 @@ web3._extend({
});
`

const Personal_JS = `
web3._extend({
    property: 'personal',
    methods:
    [
        new web3._extend.Method({
            name: 'importRawKey',
            call: 'personal_importRawKey',
            params: 2
        })
    ]
});
`

const Eth_JS = `
web3._extend({
    property: 'eth',

@ -351,18 +366,18 @@ web3._extend({
            params: 0
        }),
        new web3._extend.Method({
            name: 'trace',
            call: 'debug_trace',
            name: 'goTrace',
            call: 'debug_goTrace',
            params: 2
        }),
        new web3._extend.Method({
            name: 'startTrace',
            call: 'debug_startTrace',
            name: 'startGoTrace',
            call: 'debug_startGoTrace',
            params: 1
        }),
        new web3._extend.Method({
            name: 'stopTrace',
            call: 'debug_stopTrace',
            name: 'stopGoTrace',
            call: 'debug_stopGoTrace',
            params: 0
        }),
        new web3._extend.Method({
@ -3911,7 +3911,12 @@ var outputSyncingFormatter = function(result) {
    result.startingBlock = utils.toDecimal(result.startingBlock);
    result.currentBlock = utils.toDecimal(result.currentBlock);
    result.highestBlock = utils.toDecimal(result.highestBlock);

    if (result.knownStates !== undefined) {
        result.knownStates = utils.toDecimal(result.knownStates);
    }
    if (result.pulledStates !== undefined) {
        result.pulledStates = utils.toDecimal(result.pulledStates);
    }
    return result;
};
@ -235,7 +235,14 @@ func (self *JSRE) Exec(file string) error {
    if err != nil {
        return err
    }
    self.Do(func(vm *otto.Otto) { _, err = vm.Run(code) })
    var script *otto.Script
    self.Do(func(vm *otto.Otto) {
        script, err = vm.Compile(file, code)
        if err != nil {
            return
        }
        _, err = vm.Run(script)
    })
    return err
}
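Editor's note: the JSRE change above pre-compiles the script with its file name before running it, which makes otto's error messages and stack traces reference the originating file instead of anonymous source. A hedged, standalone sketch of that compile-then-run pattern (the file name and script are illustrative):

```go
package main

import (
    "fmt"

    "github.com/robertkrimen/otto"
)

func main() {
    vm := otto.New()

    // Compile associates "example.js" with the code for later error reporting.
    script, err := vm.Compile("example.js", `var x = 6 * 7; x;`)
    if err != nil {
        panic(err)
    }
    value, err := vm.Run(script)
    if err != nil {
        panic(err)
    }
    fmt.Println(value.String()) // 42
}
```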
@ -94,10 +94,13 @@ type worker struct {
    mu sync.Mutex

    // update loop
    mux    *event.TypeMux
    events event.Subscription
    wg     sync.WaitGroup

    agents map[Agent]struct{}
    recv   chan *Result
    mux    *event.TypeMux
    quit   chan struct{}
    pow    pow.PoW

    eth core.Backend

@ -138,13 +141,13 @@ func newWorker(config *core.ChainConfig, coinbase common.Address, eth core.Backe
        possibleUncles: make(map[common.Hash]*types.Block),
        coinbase:       coinbase,
        txQueue:        make(map[common.Hash]*types.Transaction),
        quit:           make(chan struct{}),
        agents:         make(map[Agent]struct{}),
        fullValidation: false,
    }
    worker.events = worker.mux.Subscribe(core.ChainHeadEvent{}, core.ChainSideEvent{}, core.TxPreEvent{})
    go worker.update()
    go worker.wait()

    go worker.wait()
    worker.commitNewWork()

    return worker

@ -184,9 +187,10 @@ func (self *worker) start() {
}

func (self *worker) stop() {
    self.wg.Wait()

    self.mu.Lock()
    defer self.mu.Unlock()

    if atomic.LoadInt32(&self.mining) == 1 {
        // Stop all agents.
        for agent := range self.agents {

@ -217,36 +221,22 @@ func (self *worker) unregister(agent Agent) {
}

func (self *worker) update() {
    eventSub := self.mux.Subscribe(core.ChainHeadEvent{}, core.ChainSideEvent{}, core.TxPreEvent{})
    defer eventSub.Unsubscribe()

    eventCh := eventSub.Chan()
    for {
        select {
        case event, ok := <-eventCh:
            if !ok {
                // Event subscription closed, set the channel to nil to stop spinning
                eventCh = nil
                continue
    for event := range self.events.Chan() {
        // A real event arrived, process interesting content
        switch ev := event.Data.(type) {
        case core.ChainHeadEvent:
            self.commitNewWork()
        case core.ChainSideEvent:
            self.uncleMu.Lock()
            self.possibleUncles[ev.Block.Hash()] = ev.Block
            self.uncleMu.Unlock()
        case core.TxPreEvent:
            // Apply transaction to the pending state if we're not mining
            if atomic.LoadInt32(&self.mining) == 0 {
                self.currentMu.Lock()
                self.current.commitTransactions(self.mux, types.Transactions{ev.Tx}, self.gasPrice, self.chain)
                self.currentMu.Unlock()
            }
            // A real event arrived, process interesting content
            switch ev := event.Data.(type) {
            case core.ChainHeadEvent:
                self.commitNewWork()
            case core.ChainSideEvent:
                self.uncleMu.Lock()
                self.possibleUncles[ev.Block.Hash()] = ev.Block
                self.uncleMu.Unlock()
            case core.TxPreEvent:
                // Apply transaction to the pending state if we're not mining
                if atomic.LoadInt32(&self.mining) == 0 {
                    self.currentMu.Lock()
                    self.current.commitTransactions(self.mux, types.Transactions{ev.Tx}, self.gasPrice, self.chain)
                    self.currentMu.Unlock()
                }
            }
        case <-self.quit:
            return
        }
    }
}
12  node/api.go

@ -68,7 +68,11 @@ func (api *PrivateAdminAPI) StartRPC(host *string, port *rpc.HexNumber, cors *st
    }

    if host == nil {
        host = &api.node.httpHost
        h := common.DefaultHTTPHost
        if api.node.httpHost != "" {
            h = api.node.httpHost
        }
        host = &h
    }
    if port == nil {
        port = rpc.NewHexNumber(api.node.httpPort)

@ -113,7 +117,11 @@ func (api *PrivateAdminAPI) StartWS(host *string, port *rpc.HexNumber, allowedOr
    }

    if host == nil {
        host = &api.node.wsHost
        h := common.DefaultWSHost
        if api.node.wsHost != "" {
            h = api.node.wsHost
        }
        host = &h
    }
    if port == nil {
        port = rpc.NewHexNumber(api.node.wsPort)

@ -311,7 +311,7 @@ func (n *Node) startIPC(apis []rpc.API) error {
                glog.V(logger.Error).Infof("IPC accept failed: %v", err)
                continue
            }
            go handler.ServeCodec(rpc.NewJSONCodec(conn), rpc.OptionMethodInvocation | rpc.OptionSubscriptions)
            go handler.ServeCodec(rpc.NewJSONCodec(conn), rpc.OptionMethodInvocation|rpc.OptionSubscriptions)
        }
    }()
    // All listeners booted successfully

@ -68,7 +68,7 @@ type ServiceConstructor func(ctx *ServiceContext) (Service, error)
// - Restart logic is not required as the node will create a fresh instance
//   every time a service is started.
type Service interface {
    // Protocol retrieves the P2P protocols the service wishes to start.
    // Protocols retrieves the P2P protocols the service wishes to start.
    Protocols() []p2p.Protocol

    // APIs retrieves the list of RPC descriptors the service provides
@ -25,6 +25,7 @@ package discover

import (
    "crypto/rand"
    "encoding/binary"
    "errors"
    "fmt"
    "net"
    "sort"

@ -457,6 +458,9 @@ func (tab *Table) bondall(nodes []*Node) (result []*Node) {
// If pinged is true, the remote node has just pinged us and one half
// of the process can be skipped.
func (tab *Table) bond(pinged bool, id NodeID, addr *net.UDPAddr, tcpPort uint16) (*Node, error) {
    if id == tab.self.ID {
        return nil, errors.New("is self")
    }
    // Retrieve a previously known node and any recent findnode failures
    node, fails := tab.db.node(id), 0
    if node != nil {
@ -398,12 +398,11 @@ type dialer interface {
func (srv *Server) run(dialstate dialer) {
    defer srv.loopWG.Done()
    var (
        peers   = make(map[discover.NodeID]*Peer)
        trusted = make(map[discover.NodeID]bool, len(srv.TrustedNodes))

        tasks        []task
        pendingTasks []task
        peers        = make(map[discover.NodeID]*Peer)
        trusted      = make(map[discover.NodeID]bool, len(srv.TrustedNodes))
        taskdone     = make(chan task, maxActiveDialTasks)
        runningTasks []task
        queuedTasks  []task // tasks that can't run yet
    )
    // Put trusted nodes into a map to speed up checks.
    // Trusted peers are loaded on startup and cannot be

@ -412,39 +411,39 @@ func (srv *Server) run(dialstate dialer) {
        trusted[n.ID] = true
    }

    // Some task list helpers.
    // removes t from runningTasks
    delTask := func(t task) {
        for i := range tasks {
            if tasks[i] == t {
                tasks = append(tasks[:i], tasks[i+1:]...)
        for i := range runningTasks {
            if runningTasks[i] == t {
                runningTasks = append(runningTasks[:i], runningTasks[i+1:]...)
                break
            }
        }
    }
    scheduleTasks := func(new []task) {
        pt := append(pendingTasks, new...)
        start := maxActiveDialTasks - len(tasks)
        if len(pt) < start {
            start = len(pt)
    // starts until max number of active tasks is satisfied
    startTasks := func(ts []task) (rest []task) {
        i := 0
        for ; len(runningTasks) < maxActiveDialTasks && i < len(ts); i++ {
            t := ts[i]
            glog.V(logger.Detail).Infoln("new task:", t)
            go func() { t.Do(srv); taskdone <- t }()
            runningTasks = append(runningTasks, t)
        }
        if start > 0 {
            tasks = append(tasks, pt[:start]...)
            for _, t := range pt[:start] {
                t := t
                glog.V(logger.Detail).Infoln("new task:", t)
                go func() { t.Do(srv); taskdone <- t }()
            }
            copy(pt, pt[start:])
            pendingTasks = pt[:len(pt)-start]
        return ts[i:]
    }
    scheduleTasks := func() {
        // Start from queue first.
        queuedTasks = append(queuedTasks[:0], startTasks(queuedTasks)...)
        // Query dialer for new tasks and start as many as possible now.
        if len(runningTasks) < maxActiveDialTasks {
            nt := dialstate.newTasks(len(runningTasks)+len(queuedTasks), peers, time.Now())
            queuedTasks = append(queuedTasks, startTasks(nt)...)
        }
    }

running:
    for {
        // Query the dialer for new tasks and launch them.
        now := time.Now()
        nt := dialstate.newTasks(len(pendingTasks)+len(tasks), peers, now)
        scheduleTasks(nt)
        scheduleTasks()

        select {
        case <-srv.quit:

@ -466,7 +465,7 @@ running:
            // can update its state and remove it from the active
            // tasks list.
            glog.V(logger.Detail).Infoln("<-taskdone:", t)
            dialstate.taskDone(t, now)
            dialstate.taskDone(t, time.Now())
            delTask(t)
        case c := <-srv.posthandshake:
            // A connection has passed the encryption handshake so

@ -513,7 +512,7 @@ running:
    // Wait for peers to shut down. Pending connections and tasks are
    // not handled here and will terminate soon-ish because srv.quit
    // is closed.
    glog.V(logger.Detail).Infof("ignoring %d pending tasks at spindown", len(tasks))
    glog.V(logger.Detail).Infof("ignoring %d pending tasks at spindown", len(runningTasks))
    for len(peers) > 0 {
        p := <-srv.delpeer
        glog.V(logger.Detail).Infoln("<-delpeer (spindown):", p)
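Editor's note: the server change above makes dial-task scheduling lossless: tasks that cannot start because the active limit is reached are queued rather than dropped, and the queue is drained before new tasks are requested. A compact standalone sketch of that policy (illustrative only, task strings stand in for real dial tasks):

```go
package main

import "fmt"

const maxActive = 2

type scheduler struct {
    running int
    queued  []string
}

// startTasks launches as many of ts as the active limit allows and returns the rest.
func (s *scheduler) startTasks(ts []string) (rest []string) {
    i := 0
    for ; s.running < maxActive && i < len(ts); i++ {
        fmt.Println("start:", ts[i])
        s.running++
    }
    return ts[i:]
}

// schedule drains the queue first, then admits new tasks if there is still room.
func (s *scheduler) schedule(newTasks []string) {
    s.queued = append(s.queued[:0], s.startTasks(s.queued)...)
    if s.running < maxActive {
        s.queued = append(s.queued, s.startTasks(newTasks)...)
    } else {
        s.queued = append(s.queued, newTasks...)
    }
}

func main() {
    s := &scheduler{}
    s.schedule([]string{"dial-a", "dial-b", "dial-c"}) // starts a and b, queues c
    s.running--                                        // pretend one task finished
    s.schedule(nil)                                    // now starts c from the queue
    fmt.Println("still queued:", s.queued)
}
```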
@ -235,6 +235,56 @@ func TestServerTaskScheduling(t *testing.T) {
    }
}

// This test checks that Server doesn't drop tasks,
// even if newTasks returns more than the maximum number of tasks.
func TestServerManyTasks(t *testing.T) {
    alltasks := make([]task, 300)
    for i := range alltasks {
        alltasks[i] = &testTask{index: i}
    }

    var (
        srv        = &Server{quit: make(chan struct{}), ntab: fakeTable{}, running: true}
        done       = make(chan *testTask)
        start, end = 0, 0
    )
    defer srv.Stop()
    srv.loopWG.Add(1)
    go srv.run(taskgen{
        newFunc: func(running int, peers map[discover.NodeID]*Peer) []task {
            start, end = end, end+maxActiveDialTasks+10
            if end > len(alltasks) {
                end = len(alltasks)
            }
            return alltasks[start:end]
        },
        doneFunc: func(tt task) {
            done <- tt.(*testTask)
        },
    })

    doneset := make(map[int]bool)
    timeout := time.After(2 * time.Second)
    for len(doneset) < len(alltasks) {
        select {
        case tt := <-done:
            if doneset[tt.index] {
                t.Errorf("task %d got done more than once", tt.index)
            } else {
                doneset[tt.index] = true
            }
        case <-timeout:
            t.Errorf("%d of %d tasks got done within 2s", len(doneset), len(alltasks))
            for i := 0; i < len(alltasks); i++ {
                if !doneset[i] {
                    t.Logf("task %d not done", i)
                }
            }
            return
        }
    }
}

type taskgen struct {
    newFunc  func(running int, peers map[discover.NodeID]*Peer) []task
    doneFunc func(task)
432	release/contract.go	Normal file
File diff suppressed because one or more lines are too long

249	release/contract.sol	Normal file
@@ -0,0 +1,249 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// ReleaseOracle is an Ethereum contract to store the current and previous
// versions of the go-ethereum implementation. Its goal is to allow Geth to
// check for new releases automatically without the need to consult a central
// repository.
//
// The contract takes a vote based approach on both assigning authorised signers
// as well as signing off on new Geth releases.
//
// Note, when a signer is demoted, the currently pending release is auto-nuked.
// The reason is to prevent surprises where a demotion actually tilts the votes
// in favor of one voter party and pushing out a new release as a consequence of
// a simple demotion.
contract ReleaseOracle {
  // Votes is an internal data structure to count votes on a specific proposal
  struct Votes {
    address[] pass; // List of signers voting to pass a proposal
    address[] fail; // List of signers voting to fail a proposal
  }

  // Version is the version details of a particular Geth release
  struct Version {
    uint32  major;  // Major version component of the release
    uint32  minor;  // Minor version component of the release
    uint32  patch;  // Patch version component of the release
    bytes20 commit; // Git SHA1 commit hash of the release

    uint64 time;  // Timestamp of the release approval
    Votes  votes; // Votes that passed this release
  }

  // Oracle authorization details
  mapping(address => bool) authorised; // Set of accounts allowed to vote on updating the contract
  address[]                voters;     // List of addresses currently accepted as signers

  // Various proposals being voted on
  mapping(address => Votes) authProps; // Currently running user authorization proposals
  address[]                 authPend;  // List of addresses being voted on (map indexes)

  Version   verProp;  // Currently proposed release being voted on
  Version[] releases; // All the positively voted releases

  // isSigner is a modifier to authorize contract transactions.
  modifier isSigner() {
    if (authorised[msg.sender]) {
      _
    }
  }

  // Constructor to assign the initial set of signers.
  function ReleaseOracle(address[] signers) {
    // If no signers were specified, assign the creator as the sole signer
    if (signers.length == 0) {
      authorised[msg.sender] = true;
      voters.push(msg.sender);
      return;
    }
    // Otherwise assign the individual signers one by one
    for (uint i = 0; i < signers.length; i++) {
      authorised[signers[i]] = true;
      voters.push(signers[i]);
    }
  }

  // signers is an accessor method to retrieve all the signers (public accessor
  // generates an indexed one, not a retrieve-all version).
  function signers() constant returns(address[]) {
    return voters;
  }

  // authProposals retrieves the list of addresses that authorization proposals
  // are currently being voted on.
  function authProposals() constant returns(address[]) {
    return authPend;
  }

  // authVotes retrieves the current authorization votes for a particular user
  // to promote him into the list of signers, or demote him from there.
  function authVotes(address user) constant returns(address[] promote, address[] demote) {
    return (authProps[user].pass, authProps[user].fail);
  }

  // currentVersion retrieves the semantic version, commit hash and release time
  // of the currently voted active release.
  function currentVersion() constant returns (uint32 major, uint32 minor, uint32 patch, bytes20 commit, uint time) {
    if (releases.length == 0) {
      return (0, 0, 0, 0, 0);
    }
    var release = releases[releases.length - 1];

    return (release.major, release.minor, release.patch, release.commit, release.time);
  }

  // proposedVersion retrieves the semantic version, commit hash and the current
  // votes for the next proposed release.
  function proposedVersion() constant returns (uint32 major, uint32 minor, uint32 patch, bytes20 commit, address[] pass, address[] fail) {
    return (verProp.major, verProp.minor, verProp.patch, verProp.commit, verProp.votes.pass, verProp.votes.fail);
  }

  // promote pitches in on a voting campaign to promote a new user to a signer
  // position.
  function promote(address user) {
    updateSigner(user, true);
  }

  // demote pitches in on a voting campaign to demote an authorised user from
  // its signer position.
  function demote(address user) {
    updateSigner(user, false);
  }

  // release votes for a particular version to be included as the next release.
  function release(uint32 major, uint32 minor, uint32 patch, bytes20 commit) {
    updateRelease(major, minor, patch, commit, true);
  }

  // nuke votes for the currently proposed version to not be included as the next
  // release. Nuking doesn't require a specific version number for simplicity.
  function nuke() {
    updateRelease(0, 0, 0, 0, false);
  }

  // updateSigner marks a vote for changing the status of an Ethereum user, either
  // for or against the user being an authorised signer.
  function updateSigner(address user, bool authorize) internal isSigner {
    // Gather the current votes and ensure we don't double vote
    Votes votes = authProps[user];
    for (uint i = 0; i < votes.pass.length; i++) {
      if (votes.pass[i] == msg.sender) {
        return;
      }
    }
    for (i = 0; i < votes.fail.length; i++) {
      if (votes.fail[i] == msg.sender) {
        return;
      }
    }
    // If no authorization proposal is open, add the user to the index for later lookups
    if (votes.pass.length == 0 && votes.fail.length == 0) {
      authPend.push(user);
    }
    // Cast the vote and return if the proposal cannot be resolved yet
    if (authorize) {
      votes.pass.push(msg.sender);
      if (votes.pass.length <= voters.length / 2) {
        return;
      }
    } else {
      votes.fail.push(msg.sender);
      if (votes.fail.length <= voters.length / 2) {
        return;
      }
    }
    // Proposal resolved in our favor, execute whatever we voted on
    if (authorize && !authorised[user]) {
      authorised[user] = true;
      voters.push(user);
    } else if (!authorize && authorised[user]) {
      authorised[user] = false;

      for (i = 0; i < voters.length; i++) {
        if (voters[i] == user) {
          voters[i] = voters[voters.length - 1];
          voters.length--;

          delete verProp; // Nuke any version proposal (no surprise releases!)
          break;
        }
      }
    }
    // Finally delete the resolved proposal, index and garbage collect
    delete authProps[user];

    for (i = 0; i < authPend.length; i++) {
      if (authPend[i] == user) {
        authPend[i] = authPend[authPend.length - 1];
        authPend.length--;
        break;
      }
    }
  }

  // updateRelease votes for a particular version to be included as the next release,
  // or for the currently proposed release to be nuked out.
  function updateRelease(uint32 major, uint32 minor, uint32 patch, bytes20 commit, bool release) internal isSigner {
    // Skip nuke votes if no proposal is pending
    if (!release && verProp.votes.pass.length == 0) {
      return;
    }
    // Mark a new release if no proposal is pending
    if (verProp.votes.pass.length == 0) {
      verProp.major  = major;
      verProp.minor  = minor;
      verProp.patch  = patch;
      verProp.commit = commit;
    }
    // Make sure positive votes match the current proposal
    if (release && (verProp.major != major || verProp.minor != minor || verProp.patch != patch || verProp.commit != commit)) {
      return;
    }
    // Gather the current votes and ensure we don't double vote
    Votes votes = verProp.votes;
    for (uint i = 0; i < votes.pass.length; i++) {
      if (votes.pass[i] == msg.sender) {
        return;
      }
    }
    for (i = 0; i < votes.fail.length; i++) {
      if (votes.fail[i] == msg.sender) {
        return;
      }
    }
    // Cast the vote and return if the proposal cannot be resolved yet
    if (release) {
      votes.pass.push(msg.sender);
      if (votes.pass.length <= voters.length / 2) {
        return;
      }
    } else {
      votes.fail.push(msg.sender);
      if (votes.fail.length <= voters.length / 2) {
        return;
      }
    }
    // Proposal resolved in our favor, execute whatever we voted on
    if (release) {
      verProp.time = uint64(now);
      releases.push(verProp);
      delete verProp;
    } else {
      delete verProp;
    }
  }
}
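Both updateSigner and updateRelease gate resolution on the same test: a proposal stays pending while matching votes are at most half of the signer set, so it needs floor(n/2)+1 votes to resolve. A small illustrative Go sketch of that threshold arithmetic (not part of the contract itself):

```go
package main

import "fmt"

// passes reports whether a proposal with the given number of matching votes
// resolves under the contract's rule: it stays pending while votes <= voters/2
// (integer division), i.e. it needs floor(voters/2)+1 votes to go through.
func passes(votes, voters int) bool {
	return votes > voters/2
}

func main() {
	for _, voters := range []int{1, 2, 3, 4, 5, 6} {
		needed := voters/2 + 1
		fmt.Printf("%d signers: %d votes needed (passes(%d, %d) = %v)\n",
			voters, needed, needed, voters, passes(needed, voters))
	}
}
```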
374	release/contract_test.go	Normal file
@@ -0,0 +1,374 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package release

import (
	"crypto/ecdsa"
	"math/big"
	"testing"

	"github.com/ethereum/go-ethereum/accounts/abi/bind"
	"github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/crypto"
)

// setupReleaseTest creates a blockchain simulator and deploys a version oracle
// contract for testing.
func setupReleaseTest(t *testing.T, prefund ...*ecdsa.PrivateKey) (*ecdsa.PrivateKey, *ReleaseOracle, *backends.SimulatedBackend) {
	// Generate a new random account and a funded simulator
	key, _ := crypto.GenerateKey()
	auth := bind.NewKeyedTransactor(key)

	accounts := []core.GenesisAccount{{Address: auth.From, Balance: big.NewInt(10000000000)}}
	for _, key := range prefund {
		accounts = append(accounts, core.GenesisAccount{Address: crypto.PubkeyToAddress(key.PublicKey), Balance: big.NewInt(10000000000)})
	}
	sim := backends.NewSimulatedBackend(accounts...)

	// Deploy a version oracle contract, commit and return
	_, _, oracle, err := DeployReleaseOracle(auth, sim, []common.Address{auth.From})
	if err != nil {
		t.Fatalf("Failed to deploy version contract: %v", err)
	}
	sim.Commit()

	return key, oracle, sim
}

// Tests that the version contract can be deployed and the creator is assigned
// the sole authorized signer.
func TestContractCreation(t *testing.T) {
	key, oracle, _ := setupReleaseTest(t)

	owner := crypto.PubkeyToAddress(key.PublicKey)
	signers, err := oracle.Signers(nil)
	if err != nil {
		t.Fatalf("Failed to retrieve list of signers: %v", err)
	}
	if len(signers) != 1 || signers[0] != owner {
		t.Fatalf("Initial signer mismatch: have %v, want %v", signers, owner)
	}
}

// Tests that subsequent signers can be promoted, each requiring half plus one
// votes for it to pass through.
func TestSignerPromotion(t *testing.T) {
	// Prefund a few accounts to authorize with and create the oracle
	keys := make([]*ecdsa.PrivateKey, 5)
	for i := 0; i < len(keys); i++ {
		keys[i], _ = crypto.GenerateKey()
	}
	key, oracle, sim := setupReleaseTest(t, keys...)

	// Gradually promote the keys, until all are authorized
	keys = append([]*ecdsa.PrivateKey{key}, keys...)
	for i := 1; i < len(keys); i++ {
		// Check that no votes are accepted from the not yet authed user
		if _, err := oracle.Promote(bind.NewKeyedTransactor(keys[i]), common.Address{}); err != nil {
			t.Fatalf("Iter #%d: failed invalid promotion attempt: %v", i, err)
		}
		sim.Commit()

		pend, err := oracle.AuthProposals(nil)
		if err != nil {
			t.Fatalf("Iter #%d: failed to retrieve active proposals: %v", i, err)
		}
		if len(pend) != 0 {
			t.Fatalf("Iter #%d: proposal count mismatch: have %d, want 0", i, len(pend))
		}
		// Promote with half - 1 voters and check that the user's not yet authorized
		for j := 0; j < i/2; j++ {
			if _, err = oracle.Promote(bind.NewKeyedTransactor(keys[j]), crypto.PubkeyToAddress(keys[i].PublicKey)); err != nil {
				t.Fatalf("Iter #%d: failed valid promotion attempt: %v", i, err)
			}
		}
		sim.Commit()

		signers, err := oracle.Signers(nil)
		if err != nil {
			t.Fatalf("Iter #%d: failed to retrieve list of signers: %v", i, err)
		}
		if len(signers) != i {
			t.Fatalf("Iter #%d: signer count mismatch: have %v, want %v", i, len(signers), i)
		}
		// Promote with the last one needed to pass the promotion
		if _, err = oracle.Promote(bind.NewKeyedTransactor(keys[i/2]), crypto.PubkeyToAddress(keys[i].PublicKey)); err != nil {
			t.Fatalf("Iter #%d: failed valid promotion completion attempt: %v", i, err)
		}
		sim.Commit()

		signers, err = oracle.Signers(nil)
		if err != nil {
			t.Fatalf("Iter #%d: failed to retrieve list of signers: %v", i, err)
		}
		if len(signers) != i+1 {
			t.Fatalf("Iter #%d: signer count mismatch: have %v, want %v", i, len(signers), i+1)
		}
	}
}

// Tests that subsequent signers can be demoted, each requiring half plus one
// votes for it to pass through.
func TestSignerDemotion(t *testing.T) {
	// Prefund a few accounts to authorize with and create the oracle
	keys := make([]*ecdsa.PrivateKey, 5)
	for i := 0; i < len(keys); i++ {
		keys[i], _ = crypto.GenerateKey()
	}
	key, oracle, sim := setupReleaseTest(t, keys...)

	// Authorize all the keys as valid signers and verify cardinality
	keys = append([]*ecdsa.PrivateKey{key}, keys...)
	for i := 1; i < len(keys); i++ {
		for j := 0; j <= i/2; j++ {
			if _, err := oracle.Promote(bind.NewKeyedTransactor(keys[j]), crypto.PubkeyToAddress(keys[i].PublicKey)); err != nil {
				t.Fatalf("Iter #%d: failed valid promotion attempt: %v", i, err)
			}
		}
		sim.Commit()
	}
	signers, err := oracle.Signers(nil)
	if err != nil {
		t.Fatalf("Failed to retrieve list of signers: %v", err)
	}
	if len(signers) != len(keys) {
		t.Fatalf("Signer count mismatch: have %v, want %v", len(signers), len(keys))
	}
	// Gradually demote users until we run out of signers
	for i := len(keys) - 1; i >= 0; i-- {
		// Demote with half - 1 voters and check that the user's not yet dropped
		for j := 0; j < (i+1)/2; j++ {
			if _, err = oracle.Demote(bind.NewKeyedTransactor(keys[j]), crypto.PubkeyToAddress(keys[i].PublicKey)); err != nil {
				t.Fatalf("Iter #%d: failed valid demotion attempt: %v", len(keys)-i, err)
			}
		}
		sim.Commit()

		signers, err := oracle.Signers(nil)
		if err != nil {
			t.Fatalf("Iter #%d: failed to retrieve list of signers: %v", len(keys)-i, err)
		}
		if len(signers) != i+1 {
			t.Fatalf("Iter #%d: signer count mismatch: have %v, want %v", len(keys)-i, len(signers), i+1)
		}
		// Demote with the last one needed to pass the demotion
		if _, err = oracle.Demote(bind.NewKeyedTransactor(keys[(i+1)/2]), crypto.PubkeyToAddress(keys[i].PublicKey)); err != nil {
			t.Fatalf("Iter #%d: failed valid demotion completion attempt: %v", i, err)
		}
		sim.Commit()

		signers, err = oracle.Signers(nil)
		if err != nil {
			t.Fatalf("Iter #%d: failed to retrieve list of signers: %v", len(keys)-i, err)
		}
		if len(signers) != i {
			t.Fatalf("Iter #%d: signer count mismatch: have %v, want %v", len(keys)-i, len(signers), i)
		}
		// Check that no votes are accepted from the already demoted users
		if _, err = oracle.Promote(bind.NewKeyedTransactor(keys[i]), common.Address{}); err != nil {
			t.Fatalf("Iter #%d: failed invalid promotion attempt: %v", i, err)
		}
		sim.Commit()

		pend, err := oracle.AuthProposals(nil)
		if err != nil {
			t.Fatalf("Iter #%d: failed to retrieve active proposals: %v", i, err)
		}
		if len(pend) != 0 {
			t.Fatalf("Iter #%d: proposal count mismatch: have %d, want 0", i, len(pend))
		}
	}
}

// Tests that new versions can be released, honouring both voting rights as well
// as the minimum required vote count.
func TestVersionRelease(t *testing.T) {
	// Prefund a few accounts to authorize with and create the oracle
	keys := make([]*ecdsa.PrivateKey, 5)
	for i := 0; i < len(keys); i++ {
		keys[i], _ = crypto.GenerateKey()
	}
	key, oracle, sim := setupReleaseTest(t, keys...)

	// Track the "current release"
	var (
		verMajor  = uint32(0)
		verMinor  = uint32(0)
		verPatch  = uint32(0)
		verCommit = [20]byte{}
	)
	// Gradually push releases, always requiring more signers than previously
	keys = append([]*ecdsa.PrivateKey{key}, keys...)
	for i := 1; i < len(keys); i++ {
		// Check that no votes are accepted from the not yet authed user
		if _, err := oracle.Release(bind.NewKeyedTransactor(keys[i]), 0, 0, 0, [20]byte{0}); err != nil {
			t.Fatalf("Iter #%d: failed invalid release attempt: %v", i, err)
		}
		sim.Commit()

		prop, err := oracle.ProposedVersion(nil)
		if err != nil {
			t.Fatalf("Iter #%d: failed to retrieve active proposal: %v", i, err)
		}
		if len(prop.Pass) != 0 {
			t.Fatalf("Iter #%d: proposal vote count mismatch: have %d, want 0", i, len(prop.Pass))
		}
		// Authorize the user to make releases
		for j := 0; j <= i/2; j++ {
			if _, err = oracle.Promote(bind.NewKeyedTransactor(keys[j]), crypto.PubkeyToAddress(keys[i].PublicKey)); err != nil {
				t.Fatalf("Iter #%d: failed valid promotion attempt: %v", i, err)
			}
		}
		sim.Commit()

		// Propose release with half voters and check that the release does not yet go through
		for j := 0; j < (i+1)/2; j++ {
			if _, err = oracle.Release(bind.NewKeyedTransactor(keys[j]), uint32(i), uint32(i+1), uint32(i+2), [20]byte{byte(i + 3)}); err != nil {
				t.Fatalf("Iter #%d: failed valid release attempt: %v", i, err)
			}
		}
		sim.Commit()

		ver, err := oracle.CurrentVersion(nil)
		if err != nil {
			t.Fatalf("Iter #%d: failed to retrieve current version: %v", i, err)
		}
		if ver.Major != verMajor || ver.Minor != verMinor || ver.Patch != verPatch || ver.Commit != verCommit {
			t.Fatalf("Iter #%d: version mismatch: have %d.%d.%d-%x, want %d.%d.%d-%x", i, ver.Major, ver.Minor, ver.Patch, ver.Commit, verMajor, verMinor, verPatch, verCommit)
		}

		// Pass the release and check that it became the next version
		verMajor, verMinor, verPatch, verCommit = uint32(i), uint32(i+1), uint32(i+2), [20]byte{byte(i + 3)}
		if _, err = oracle.Release(bind.NewKeyedTransactor(keys[(i+1)/2]), uint32(i), uint32(i+1), uint32(i+2), [20]byte{byte(i + 3)}); err != nil {
			t.Fatalf("Iter #%d: failed valid release completion attempt: %v", i, err)
		}
		sim.Commit()

		ver, err = oracle.CurrentVersion(nil)
		if err != nil {
			t.Fatalf("Iter #%d: failed to retrieve current version: %v", i, err)
		}
		if ver.Major != verMajor || ver.Minor != verMinor || ver.Patch != verPatch || ver.Commit != verCommit {
			t.Fatalf("Iter #%d: version mismatch: have %d.%d.%d-%x, want %d.%d.%d-%x", i, ver.Major, ver.Minor, ver.Patch, ver.Commit, verMajor, verMinor, verPatch, verCommit)
		}
	}
}

// Tests that proposed versions can be nuked out of existence.
func TestVersionNuking(t *testing.T) {
	// Prefund a few accounts to authorize with and create the oracle
	keys := make([]*ecdsa.PrivateKey, 9)
	for i := 0; i < len(keys); i++ {
		keys[i], _ = crypto.GenerateKey()
	}
	key, oracle, sim := setupReleaseTest(t, keys...)

	// Authorize all the keys as valid signers
	keys = append([]*ecdsa.PrivateKey{key}, keys...)
	for i := 1; i < len(keys); i++ {
		for j := 0; j <= i/2; j++ {
			if _, err := oracle.Promote(bind.NewKeyedTransactor(keys[j]), crypto.PubkeyToAddress(keys[i].PublicKey)); err != nil {
				t.Fatalf("Iter #%d: failed valid promotion attempt: %v", i, err)
			}
		}
		sim.Commit()
	}
	// Propose releases with more and more keys, always retaining enough users to nuke the proposals
	for i := 1; i < (len(keys)+1)/2; i++ {
		// Propose release with an initial set of signers
		for j := 0; j < i; j++ {
			if _, err := oracle.Release(bind.NewKeyedTransactor(keys[j]), uint32(i), uint32(i+1), uint32(i+2), [20]byte{byte(i + 3)}); err != nil {
				t.Fatalf("Iter #%d: failed valid proposal attempt: %v", i, err)
			}
		}
		sim.Commit()

		prop, err := oracle.ProposedVersion(nil)
		if err != nil {
			t.Fatalf("Iter #%d: failed to retrieve active proposal: %v", i, err)
		}
		if len(prop.Pass) != i {
			t.Fatalf("Iter #%d: proposal vote count mismatch: have %d, want %d", i, len(prop.Pass), i)
		}
		// Nuke the release with half+1 voters
		for j := i; j <= i+(len(keys)+1)/2; j++ {
			if _, err := oracle.Nuke(bind.NewKeyedTransactor(keys[j])); err != nil {
				t.Fatalf("Iter #%d: failed valid nuke attempt: %v", i, err)
			}
		}
		sim.Commit()

		prop, err = oracle.ProposedVersion(nil)
		if err != nil {
			t.Fatalf("Iter #%d: failed to retrieve active proposal: %v", i, err)
		}
		if len(prop.Pass) != 0 || len(prop.Fail) != 0 {
			t.Fatalf("Iter #%d: proposal vote count mismatch: have %d/%d pass/fail, want 0/0", i, len(prop.Pass), len(prop.Fail))
		}
	}
}

// Tests that demoting a signer will auto-nuke the currently pending release.
func TestVersionAutoNuke(t *testing.T) {
	// Prefund a few accounts to authorize with and create the oracle
	keys := make([]*ecdsa.PrivateKey, 5)
	for i := 0; i < len(keys); i++ {
		keys[i], _ = crypto.GenerateKey()
	}
	key, oracle, sim := setupReleaseTest(t, keys...)

	// Authorize all the keys as valid signers
	keys = append([]*ecdsa.PrivateKey{key}, keys...)
	for i := 1; i < len(keys); i++ {
		for j := 0; j <= i/2; j++ {
			if _, err := oracle.Promote(bind.NewKeyedTransactor(keys[j]), crypto.PubkeyToAddress(keys[i].PublicKey)); err != nil {
				t.Fatalf("Iter #%d: failed valid promotion attempt: %v", i, err)
			}
		}
		sim.Commit()
	}
	// Make a release proposal and check its existence
	if _, err := oracle.Release(bind.NewKeyedTransactor(keys[0]), 1, 2, 3, [20]byte{4}); err != nil {
		t.Fatalf("Failed valid proposal attempt: %v", err)
	}
	sim.Commit()

	prop, err := oracle.ProposedVersion(nil)
	if err != nil {
		t.Fatalf("Failed to retrieve active proposal: %v", err)
	}
	if len(prop.Pass) != 1 {
		t.Fatalf("Proposal vote count mismatch: have %d, want 1", len(prop.Pass))
	}
	// Demote a signer and check release proposal deletion
	for i := 0; i <= len(keys)/2; i++ {
		if _, err := oracle.Demote(bind.NewKeyedTransactor(keys[i]), crypto.PubkeyToAddress(keys[len(keys)-1].PublicKey)); err != nil {
			t.Fatalf("Iter #%d: failed valid demotion attempt: %v", i, err)
		}
	}
	sim.Commit()

	prop, err = oracle.ProposedVersion(nil)
	if err != nil {
		t.Fatalf("Failed to retrieve active proposal: %v", err)
	}
	if len(prop.Pass) != 0 {
		t.Fatalf("Proposal vote count mismatch: have %d, want 0", len(prop.Pass))
	}
}
19	release/generator.go	Normal file
@@ -0,0 +1,19 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

//go:generate abigen --sol ./contract.sol --pkg release --out ./contract.go

package release
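The go:generate directive above is what produces the suppressed release/contract.go file: abigen compiles contract.sol into Go bindings such as ReleaseOracle and DeployReleaseOracle, whose typed call and transact methods are used by the tests. A hedged sketch of exercising those bindings against the simulated backend, mirroring the calls in contract_test.go (the external import of the release package is an assumption of this sketch):

```go
package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/accounts/abi/bind"
	"github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/release"
)

func main() {
	// Fund a single account on an in-memory simulated chain.
	key, _ := crypto.GenerateKey()
	auth := bind.NewKeyedTransactor(key)
	sim := backends.NewSimulatedBackend(core.GenesisAccount{Address: auth.From, Balance: big.NewInt(10000000000)})

	// Deploy the oracle; an empty signer list makes the deployer the sole signer.
	_, _, oracle, err := release.DeployReleaseOracle(auth, sim, nil)
	if err != nil {
		panic(err)
	}
	sim.Commit()

	signers, err := oracle.Signers(nil)
	if err != nil {
		panic(err)
	}
	fmt.Println("authorised signers:", signers)
}
```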
147	release/release.go	Normal file
@@ -0,0 +1,147 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// Package release contains the node service that tracks client releases.
package release

import (
	"fmt"
	"strings"
	"time"

	"github.com/ethereum/go-ethereum/accounts/abi/bind"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/eth"
	"github.com/ethereum/go-ethereum/logger"
	"github.com/ethereum/go-ethereum/logger/glog"
	"github.com/ethereum/go-ethereum/node"
	"github.com/ethereum/go-ethereum/p2p"
	"github.com/ethereum/go-ethereum/rpc"
)

// Interval to check for new releases
const releaseRecheckInterval = time.Hour

// Config contains the configurations of the release service.
type Config struct {
	Oracle common.Address // Ethereum address of the release oracle
	Major  uint32         // Major version component of the release
	Minor  uint32         // Minor version component of the release
	Patch  uint32         // Patch version component of the release
	Commit [20]byte       // Git SHA1 commit hash of the release
}

// ReleaseService is a node service that periodically checks the blockchain for
// newly released versions of the client being run and issues a warning to the
// user about it.
type ReleaseService struct {
	config Config          // Current version to check releases against
	oracle *ReleaseOracle  // Native binding to the release oracle contract
	quit   chan chan error // Quit channel to terminate the version checker
}

// NewReleaseService creates a new service to periodically check for new client
// releases and notify the user of such.
func NewReleaseService(ctx *node.ServiceContext, config Config) (node.Service, error) {
	// Retrieve the Ethereum service dependency to access the blockchain
	var ethereum *eth.Ethereum
	if err := ctx.Service(&ethereum); err != nil {
		return nil, err
	}
	// Construct the release service
	contract, err := NewReleaseOracle(config.Oracle, eth.NewContractBackend(ethereum))
	if err != nil {
		return nil, err
	}
	return &ReleaseService{
		config: config,
		oracle: contract,
		quit:   make(chan chan error),
	}, nil
}

// Protocols returns an empty list of P2P protocols as the release service does
// not have a networking component.
func (r *ReleaseService) Protocols() []p2p.Protocol { return nil }

// APIs returns an empty list of RPC descriptors as the release service does not
// expose any functionality to the outside world.
func (r *ReleaseService) APIs() []rpc.API { return nil }

// Start spawns the periodic version checker goroutine
func (r *ReleaseService) Start(server *p2p.Server) error {
	go r.checker()
	return nil
}

// Stop terminates all goroutines belonging to the service, blocking until they
// are all terminated.
func (r *ReleaseService) Stop() error {
	errc := make(chan error)
	r.quit <- errc
	return <-errc
}

// checker runs indefinitely in the background, periodically checking for new
// client releases.
func (r *ReleaseService) checker() {
	// Set up the timers to periodically check for releases
	timer := time.NewTimer(0) // Immediately fire a version check
	defer timer.Stop()

	for {
		select {
		// If the time arrived, check for a new release
		case <-timer.C:
			// Reschedule the timer before continuing
			timer.Reset(releaseRecheckInterval)

			// Retrieve the current version, and handle missing contracts gracefully
			version, err := r.oracle.CurrentVersion(nil)
			if err != nil {
				if err == bind.ErrNoCode {
					glog.V(logger.Debug).Infof("Release oracle not found at %x", r.config.Oracle)
					continue
				}
				glog.V(logger.Error).Infof("Failed to retrieve current release: %v", err)
				continue
			}
			// Version was successfully retrieved, notify if newer than ours
			if version.Major > r.config.Major ||
				(version.Major == r.config.Major && version.Minor > r.config.Minor) ||
				(version.Major == r.config.Major && version.Minor == r.config.Minor && version.Patch > r.config.Patch) {

				warning := fmt.Sprintf("Client v%d.%d.%d-%x seems older than the latest upstream release v%d.%d.%d-%x",
					r.config.Major, r.config.Minor, r.config.Patch, r.config.Commit[:4], version.Major, version.Minor, version.Patch, version.Commit[:4])
				howtofix := fmt.Sprintf("Please check https://github.com/ethereum/go-ethereum/releases for new releases")
				separator := strings.Repeat("-", len(warning))

				glog.V(logger.Warn).Info(separator)
				glog.V(logger.Warn).Info(warning)
				glog.V(logger.Warn).Info(howtofix)
				glog.V(logger.Warn).Info(separator)
			} else {
				glog.V(logger.Debug).Infof("Client v%d.%d.%d-%x seems up to date with upstream v%d.%d.%d-%x",
					r.config.Major, r.config.Minor, r.config.Patch, r.config.Commit[:4], version.Major, version.Minor, version.Patch, version.Commit[:4])
			}

		// If termination was requested, return
		case errc := <-r.quit:
			errc <- nil
			return
		}
	}
}
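The notification check in checker is a plain major/minor/patch precedence comparison. A small, self-contained restatement of that rule, separate from the service itself:

```go
package main

import "fmt"

// newer reports whether the on-chain version is strictly newer than the
// running client, using the same major/minor/patch precedence as checker.
func newer(oMajor, oMinor, oPatch, cMajor, cMinor, cPatch uint32) bool {
	switch {
	case oMajor != cMajor:
		return oMajor > cMajor
	case oMinor != cMinor:
		return oMinor > cMinor
	default:
		return oPatch > cPatch
	}
}

func main() {
	fmt.Println(newer(1, 5, 0, 1, 4, 7)) // true:  1.5.0 is newer than 1.4.7
	fmt.Println(newer(1, 4, 7, 1, 4, 7)) // false: same version
	fmt.Println(newer(1, 4, 6, 1, 4, 7)) // false: older patch
}
```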
@@ -61,22 +61,22 @@ func wsHandshakeValidator(allowedOrigins []string) func(*websocket.Config, *http.Request) error {
			allowAllOrigins = true
		}
		if origin != "" {
			origins.Add(origin)
			origins.Add(strings.ToLower(origin))
		}
	}

	// allow localhost if no allowedOrigins are specified
	// allow localhost if no allowedOrigins are specified.
	if len(origins.List()) == 0 {
		origins.Add("http://localhost")
		if hostname, err := os.Hostname(); err == nil {
			origins.Add("http://" + hostname)
			origins.Add("http://" + strings.ToLower(hostname))
		}
	}

	glog.V(logger.Debug).Infof("Allowed origin(s) for WS RPC interface %v\n", origins.List())

	f := func(cfg *websocket.Config, req *http.Request) error {
		origin := req.Header.Get("Origin")
		origin := strings.ToLower(req.Header.Get("Origin"))
		if allowAllOrigins || origins.Has(origin) {
			return nil
		}
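The change above lower-cases origins on both sides of the comparison, so Origin headers match the whitelist case-insensitively. A minimal sketch of the same idea, using a plain map in place of the set type from the diff:

```go
package main

import (
	"fmt"
	"strings"
)

// originAllowed builds a lower-cased whitelist once and compares incoming
// Origin headers against it after lower-casing them as well.
func originAllowed(allowed []string, origin string) bool {
	set := make(map[string]bool, len(allowed))
	for _, o := range allowed {
		set[strings.ToLower(o)] = true
	}
	return set[strings.ToLower(origin)]
}

func main() {
	allowed := []string{"http://localhost", "https://MyDapp.Example"}
	fmt.Println(originAllowed(allowed, "HTTP://LOCALHOST"))       // true
	fmt.Println(originAllowed(allowed, "https://mydapp.example")) // true
	fmt.Println(originAllowed(allowed, "https://evil.example"))   // false
}
```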
|
Reference in New Issue
Block a user