Compare commits
116 Commits
SHA1 | Author | Date | |
---|---|---|---|
0f036f6209 | |||
71a89b7c75 | |||
ecb8e23e88 | |||
058c5fe960 | |||
a29bdf547c | |||
44b912ec64 | |||
3d69970c15 | |||
8b90a49f3d | |||
c046126c87 | |||
cd134178f7 | |||
4918c820c6 | |||
5904d58a96 | |||
7c90a2e42e | |||
c39de61a0a | |||
af53767e16 | |||
7632acf6b4 | |||
9ccb70da7b | |||
8fefee7132 | |||
7a4073a758 | |||
ab522d3bc7 | |||
c45c424073 | |||
8d2775e3d7 | |||
170036289b | |||
8ebbd9b7c7 | |||
7df36e5ec1 | |||
5fb29fd45f | |||
90beb6112e | |||
efa2b3da7e | |||
e3b3c298df | |||
3752507a11 | |||
a269a713d6 | |||
27df30f30f | |||
311f5a0ed1 | |||
68ae6b52e9 | |||
1776c717bf | |||
0f6e3e873a | |||
7a7a5acc9f | |||
66d74dfb75 | |||
b950a2977c | |||
8ea3c88e44 | |||
94ad694a26 | |||
4c6953606e | |||
fc0638f9d8 | |||
4b11f207cb | |||
7e5c49cafa | |||
efcfa2209b | |||
aa18aad7ad | |||
594328c112 | |||
2e6b9c141b | |||
b38cea6654 | |||
dd083aa34e | |||
f213a9d8e8 | |||
6826b2040f | |||
2a3657d8d9 | |||
566af6ef92 | |||
290e851f57 | |||
8f96d66241 | |||
5782164a35 | |||
27f657478f | |||
756b62988c | |||
f61e203c10 | |||
56ed6152a1 | |||
dc7f202ecd | |||
1d42061e2c | |||
b6135a72dd | |||
7d59c5c58d | |||
8aa4597c9e | |||
57ba1824ac | |||
c89f4352d0 | |||
6a00a3ade1 | |||
f821b0188a | |||
d79f2f2656 | |||
130bccc763 | |||
ae9ed5c420 | |||
a1c201a5ac | |||
844e911129 | |||
4b9de75623 | |||
2d7d7ef2fe | |||
14d5033c9d | |||
d52a693f80 | |||
258cc73ea9 | |||
79b7b5eeaa | |||
b4fbcd5060 | |||
488528e9e4 | |||
8110671960 | |||
32bb280179 | |||
4536b993ff | |||
586eddfd09 | |||
d46da273c6 | |||
ecd7199c43 | |||
1c20313a6a | |||
cfa999f006 | |||
c74a575725 | |||
9672a62a38 | |||
572da73d4d | |||
0f722df2d9 | |||
1b77d5090d | |||
e62c2aeb1b | |||
4880868c88 | |||
c3d5250473 | |||
e0dc45fce2 | |||
48cc36ce83 | |||
123aa659e4 | |||
cdcbb2f160 | |||
db62979514 | |||
5137c04ccf | |||
a20d3fc362 | |||
3d6d828caf | |||
6a543607ef | |||
c1a4dcfc87 | |||
70b8b54cd2 | |||
c88c89fd9e | |||
b06f44ecc2 | |||
87ae0df476 | |||
5127ec10cb | |||
18580e152c |
.github/CONTRIBUTING.md (2 changes, vendored)
@@ -8,7 +8,7 @@ and help.
 ## Contributing
 
 If you'd like to contribute to go-ethereum please fork, fix, commit and
-send a pull request. Commits who do not comply with the coding standards
+send a pull request. Commits which do not comply with the coding standards
 are ignored (use gofmt!). If you send pull requests make absolute sure that you
 commit on the `develop` branch and that you do not merge to master.
 Commits that are directly based on master are simply ignored.
.travis.yml

@@ -1,10 +1,12 @@
 language: go
 go:
   - 1.4.2
+  - 1.5.4
+  - 1.6.2
 install:
   # - go get code.google.com/p/go.tools/cmd/goimports
   # - go get github.com/golang/lint/golint
   # - go get golang.org/x/tools/cmd/vet
   - go get golang.org/x/tools/cmd/cover
 before_script:
   # - gofmt -l -w .
@@ -24,6 +26,6 @@ notifications:
   webhooks:
     urls:
       - https://webhooks.gitter.im/e/e09ccdce1048c5e03445
     on_success: change
     on_failure: always
     on_start: false
Makefile (6 changes)

@@ -13,7 +13,7 @@ GOBIN = build/bin
 GO ?= latest
 
 geth:
-    build/env.sh go install -v $(shell build/flags.sh) ./cmd/geth
+    build/env.sh go build -i -v $(shell build/flags.sh) -o $(GOBIN)/geth ./cmd/geth
     @echo "Done building."
     @echo "Run \"$(GOBIN)/geth\" to launch geth."
 
@@ -103,7 +103,9 @@ evm:
     @echo "Run \"$(GOBIN)/evm to start the evm."
 
 all:
-    build/env.sh go install -v $(shell build/flags.sh) ./...
+    for cmd in `ls ./cmd/`; do \
+        build/env.sh go build -i -v $(shell build/flags.sh) -o $(GOBIN)/$$cmd ./cmd/$$cmd; \
+    done
 
 test: all
     build/env.sh go test ./...
README.md (75 changes)

@@ -9,7 +9,7 @@ master | [](https://godoc.org/github.com/ethereum/go-ethereum)
 )](https://godoc.org/github.com/ethereum/go-ethereum)
 [](https://gitter.im/ethereum/go-ethereum?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge)
 
 ## Automated development builds
@@ -36,38 +36,55 @@ Once the dependencies are installed, run
 
     make geth
 
 or, to build the full suite of utilities:
 
     make all
 
 ## Executables
 
-Go Ethereum comes with several wrappers/executables found in
-[the `cmd` directory](https://github.com/ethereum/go-ethereum/tree/develop/cmd):
+The go-ethereum project comes with several wrappers/executables found in the `cmd` directory.
 
-Command | |
-----------|---------|
-`geth` | Ethereum CLI (ethereum command line interface client) |
-`bootnode` | runs a bootstrap node for the Discovery Protocol |
-`ethtest` | test tool which runs with the [tests](https://github.com/ethereum/tests) suite: `/path/to/test.json > ethtest --test BlockTests --stdin`.
-`evm` | is a generic Ethereum Virtual Machine: `evm -code 60ff60ff -gas 10000 -price 0 -dump`. See `-h` for a detailed description. |
-`disasm` | disassembles EVM code: `echo "6001" | disasm` |
-`rlpdump` | prints RLP structures |
-
-## Command line options
-
-`geth` can be configured via command line options, environment variables and config files.
-
-To get the options available:
-
-    geth help
-
-For further details on options, see the [wiki](https://github.com/ethereum/go-ethereum/wiki/Command-Line-Options)
+| Command | Description |
+|:----------:|-------------|
+| **`geth`** | Our main Ethereum CLI client. It is the entry point into the Ethereum network (main-, test- or private net), capable of running as a full node (default) archive node (retaining all historical state) or a light node (retrieving data live). It can be used by other processes as an gateway into the Ethereum network via JSON RPC endpoints exposed on top of HTTP, WebSocket and/or IPC transports. Please see our [Command Line Options](https://github.com/ethereum/go-ethereum/wiki/Command-Line-Options) wiki page for details. |
+| `abigen` | Source code generator to convert Ethereum contract definitions into easy to use, compile-time type-safe Go packages. It operates on plain [Ethereum contract ABIs](https://github.com/ethereum/wiki/wiki/Ethereum-Contract-ABI) with expanded functionality if the contract bytecode is also available. However it also accepts Solidity source files, making development much more streamlined. Please see our [Native DApps](https://github.com/ethereum/go-ethereum/wiki/Native-DApps:-Go-bindings-to-Ethereum-contracts) wiki page for details. |
+| `bootnode` | Stripped down version of our Ethereum client implementation that only takes part in the network node discovery protocol, but does not run any of the higher level application protocols. It can be used as a lightweight bootstrap node to aid in finding peers in private networks. |
+| `disasm` | Bytecode disassembler to convert EVM (Ethereum Virtual Machine) bytecode into more user friendly assembly-like opcodes (e.g. `echo "6001" | disasm`). For details on the individual opcodes, please see pages 22-30 of the [Ethereum Yellow Paper](http://gavwood.com/paper.pdf). |
+| `evm` | Developer utility version of the EVM (Ethereum Virtual Machine) that is capable of running bytecode snippets within a configurable environment and execution mode. Its purpose is to allow insolated, fine graned debugging of EVM opcodes (e.g. `evm --code 60ff60ff --debug`). |
+| `gethrpctest` | Developer utility tool to support our [ethereum/rpc-test](https://github.com/ethereum/rpc-tests) test suite which validates baseline conformity to the [Ethereum JSON RPC](https://github.com/ethereum/wiki/wiki/JSON-RPC) specs. Please see the [test suite's readme](https://github.com/ethereum/rpc-tests/blob/master/README.md) for details. |
+| `rlpdump` | Developer utility tool to convert binary RLP ([Recursive Length Prefix](https://github.com/ethereum/wiki/wiki/RLP)) dumps (data encoding used by the Ethereum protocol both network as well as consensus wise) to user friendlier hierarchical representation (e.g. `rlpdump --hex CE0183FFFFFFC4C304050583616263`). |
 
 ## Contribution
 
-If you'd like to contribute to go-ethereum please fork, fix, commit and
-send a pull request. Commits who do not comply with the coding standards
-are ignored (use gofmt!). If you send pull requests make absolute sure that you
-commit on the `develop` branch and that you do not merge to master.
-Commits that are directly based on master are simply ignored.
+Thank you for considering to help out with the source code! We welcome contributions from
+anyone on the internet, and are grateful for even the smallest of fixes!
+
+If you'd like to contribute to go-ethereum, please fork, fix, commit and send a pull request
+for the maintainers to review and merge into the main code base. If you wish to submit more
+complex changes though, please check up with the core devs first on [our gitter channel](https://gitter.im/ethereum/go-ethereum)
+to ensure those changes are in line with the general philosophy of the project and/or get some
+early feedback which can make both your efforts much lighter as well as our review and merge
+procedures quick and simple.
+
+Please make sure your contributions adhere to our coding guidelines:
+
+ * Code must adhere to the official Go [formatting](https://golang.org/doc/effective_go.html#formatting) guidelines (i.e. uses [gofmt](https://golang.org/cmd/gofmt/)).
+ * Code must be documented adhering to the official Go [commentary](https://golang.org/doc/effective_go.html#commentary) guidelines.
+ * Pull requests need to be based on and opened against the `develop` branch.
+ * Commit messages should be prefixed with the package(s) they modify.
+   * E.g. "eth, rpc: make trace configs optional"
+
+Please see the [Developers' Guide](https://github.com/ethereum/go-ethereum/wiki/Developers'-Guide)
+for more details on configuring your environment, managing project dependencies and testing procedures.
+
+## License
+
+The go-ethereum library (i.e. all code outside of the `cmd` directory) is licensed under the
+[GNU Lesser General Public License v3.0](http://www.gnu.org/licenses/lgpl-3.0.en.html), also
+included in our repository in the `COPYING.LESSER` file.
+
+The go-ethereum binaries (i.e. all code inside of the `cmd` directory) is licensed under the
+[GNU General Public License v3.0](http://www.gnu.org/licenses/gpl-3.0.en.html), also included
+in our repository in the `COPYING` file.
-
-See [Developers' Guide](https://github.com/ethereum/go-ethereum/wiki/Developers'-Guide)
-for more details on configuring your environment, testing, and
-dependency management.
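The new `geth` row above stresses its role as a JSON RPC gateway over HTTP, WebSocket and IPC. As a hedged illustration of what another process talking to that endpoint looks like, here is a minimal HTTP call to `eth_getCode`, the same method the `rpcBackend` later in this diff relies on (the `localhost:8545` address and an enabled HTTP RPC server are assumptions, not part of this changeset):

```go
// Minimal JSON RPC call to a running geth node; hypothetical example, not part
// of the diff. Assumes the HTTP RPC server is enabled and listening on :8545.
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io/ioutil"
	"net/http"
)

func main() {
	// Ask the node for the code stored at an address, much like the
	// rpcBackend.HasCode helper further down in this diff does over its own transport.
	reqBody, _ := json.Marshal(map[string]interface{}{
		"jsonrpc": "2.0",
		"id":      1,
		"method":  "eth_getCode",
		"params":  []interface{}{"0x0000000000000000000000000000000000000001", "latest"},
	})
	resp, err := http.Post("http://localhost:8545", "application/json", bytes.NewReader(reqBody))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, _ := ioutil.ReadAll(resp.Body)
	fmt.Println(string(body)) // e.g. {"jsonrpc":"2.0","id":1,"result":"0x"} for an empty account
}
```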
accounts/abi/abi.go

@@ -48,42 +48,6 @@ func JSON(reader io.Reader) (ABI, error) {
     return abi, nil
 }
 
-// tests, tests whether the given input would result in a successful
-// call. Checks argument list count and matches input to `input`.
-func (abi ABI) pack(method Method, args ...interface{}) ([]byte, error) {
-    // variable input is the output appended at the end of packed
-    // output. This is used for strings and bytes types input.
-    var variableInput []byte
-
-    var ret []byte
-    for i, a := range args {
-        input := method.Inputs[i]
-        // pack the input
-        packed, err := input.Type.pack(a)
-        if err != nil {
-            return nil, fmt.Errorf("`%s` %v", method.Name, err)
-        }
-
-        // check for a slice type (string, bytes, slice)
-        if input.Type.T == StringTy || input.Type.T == BytesTy || input.Type.IsSlice {
-            // calculate the offset
-            offset := len(method.Inputs)*32 + len(variableInput)
-            // set the offset
-            ret = append(ret, packNum(reflect.ValueOf(offset), UintTy)...)
-            // Append the packed output to the variable input. The variable input
-            // will be appended at the end of the input.
-            variableInput = append(variableInput, packed...)
-        } else {
-            // append the packed value to the input
-            ret = append(ret, packed...)
-        }
-    }
-    // append the variable input at the end of the packed input
-    ret = append(ret, variableInput...)
-
-    return ret, nil
-}
-
 // Pack the given method name to conform the ABI. Method call's data
 // will consist of method_id, args0, arg1, ... argN. Method id consists
 // of 4 bytes and arguments are all 32 bytes.
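The `pack` helper removed above is where the head/tail layout for dynamic inputs lives: fixed-size values go straight into the head, while strings, bytes and slices get a 32-byte offset in the head and their length plus elements appended to the tail. A small illustrative sketch of that layout through the exported `Pack` API (hypothetical program, not part of the diff; the ABI definition mirrors the `slice` entry of the `jsondata2` fixture used in the tests below):

```go
// Hypothetical illustration of the head/tail encoding produced by Pack.
package main

import (
	"fmt"
	"strings"

	"github.com/ethereum/go-ethereum/accounts/abi"
)

func main() {
	def := `[{ "type" : "function", "name" : "slice", "constant" : false, "inputs" : [ { "name" : "inputs", "type" : "uint32[2]" } ] }]`
	parsed, err := abi.JSON(strings.NewReader(def))
	if err != nil {
		panic(err)
	}
	packed, err := parsed.Pack("slice", []uint32{1, 2})
	if err != nil {
		panic(err)
	}
	// 4-byte method id, followed by 32-byte words: head first, then the tail.
	fmt.Printf("method id: %x\n", packed[:4])
	for i := 4; i < len(packed); i += 32 {
		fmt.Printf("word %d: %x\n", (i-4)/32, packed[i:i+32])
	}
	// Expected (per TestMethodPack below): word 0 = offset 32, word 1 = length 2,
	// words 2-3 = the left-padded elements 1 and 2.
}
```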
@@ -102,11 +66,7 @@ func (abi ABI) Pack(name string, args ...interface{}) ([]byte, error) {
         }
         method = m
     }
-    // Make sure arguments match up and pack them
-    if len(args) != len(method.Inputs) {
-        return nil, fmt.Errorf("argument count mismatch: %d for %d", len(args), len(method.Inputs))
-    }
-    arguments, err := abi.pack(method, args...)
+    arguments, err := method.pack(method, args...)
     if err != nil {
         return nil, err
     }
@@ -126,18 +86,21 @@ func toGoSlice(i int, t Argument, output []byte) (interface{}, error) {
     if index+32 > len(output) {
         return nil, fmt.Errorf("abi: cannot marshal in to go slice: insufficient size output %d require %d", len(output), index+32)
     }
+    elem := t.Type.Elem
+
     // first we need to create a slice of the type
     var refSlice reflect.Value
-    switch t.Type.T {
+    switch elem.T {
     case IntTy, UintTy, BoolTy: // int, uint, bool can all be of type big int.
         refSlice = reflect.ValueOf([]*big.Int(nil))
     case AddressTy: // address must be of slice Address
         refSlice = reflect.ValueOf([]common.Address(nil))
     case HashTy: // hash must be of slice hash
         refSlice = reflect.ValueOf([]common.Hash(nil))
     case FixedBytesTy:
         refSlice = reflect.ValueOf([]byte(nil))
     default: // no other types are supported
-        return nil, fmt.Errorf("abi: unsupported slice type %v", t.Type.T)
+        return nil, fmt.Errorf("abi: unsupported slice type %v", elem.T)
     }
     // get the offset which determines the start of this array ...
     offset := int(common.BytesToBig(output[index : index+32]).Uint64())
@@ -164,7 +127,7 @@ func toGoSlice(i int, t Argument, output []byte) (interface{}, error) {
         )
 
         // set inter to the correct type (cast)
-        switch t.Type.T {
+        switch elem.T {
         case IntTy, UintTy:
             inter = common.BytesToBig(returnOutput)
         case BoolTy:
@@ -186,7 +149,7 @@ func toGoSlice(i int, t Argument, output []byte) (interface{}, error) {
 // argument in T.
 func toGoType(i int, t Argument, output []byte) (interface{}, error) {
     // we need to treat slices differently
-    if t.Type.IsSlice {
+    if (t.Type.IsSlice || t.Type.IsArray) && t.Type.T != BytesTy && t.Type.T != StringTy && t.Type.T != FixedBytesTy {
         return toGoSlice(i, t, output)
     }
 
@@ -217,12 +180,33 @@ func toGoType(i int, t Argument, output []byte) (interface{}, error) {
         returnOutput = output[index : index+32]
     }
 
-    // cast bytes to abi return type
+    // convert the bytes to whatever is specified by the ABI.
     switch t.Type.T {
-    case IntTy:
-        return common.BytesToBig(returnOutput), nil
-    case UintTy:
-        return common.BytesToBig(returnOutput), nil
+    case IntTy, UintTy:
+        bigNum := common.BytesToBig(returnOutput)
+
+        // If the type is a integer convert to the integer type
+        // specified by the ABI.
+        switch t.Type.Kind {
+        case reflect.Uint8:
+            return uint8(bigNum.Uint64()), nil
+        case reflect.Uint16:
+            return uint16(bigNum.Uint64()), nil
+        case reflect.Uint32:
+            return uint32(bigNum.Uint64()), nil
+        case reflect.Uint64:
+            return uint64(bigNum.Uint64()), nil
+        case reflect.Int8:
+            return int8(bigNum.Int64()), nil
+        case reflect.Int16:
+            return int16(bigNum.Int64()), nil
+        case reflect.Int32:
+            return int32(bigNum.Int64()), nil
+        case reflect.Int64:
+            return int64(bigNum.Int64()), nil
+        case reflect.Ptr:
+            return bigNum, nil
+        }
     case BoolTy:
         return common.BytesToBig(returnOutput).Uint64() > 0, nil
     case AddressTy:
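The rewritten integer case above is what lets `Unpack` return a sized Go integer whose width is taken from the ABI type, falling back to `*big.Int` only for pointer kinds. A hypothetical usage sketch, mirroring the `uint32` cases of `TestSimpleMethodUnpack` below:

```go
// Hypothetical usage sketch of unpacking a sized integer return value.
package main

import (
	"fmt"
	"strings"

	"github.com/ethereum/go-ethereum/accounts/abi"
	"github.com/ethereum/go-ethereum/common"
)

func main() {
	def := `[{ "name" : "method", "outputs": [ { "type": "uint32" } ] }]`
	parsed, err := abi.JSON(strings.NewReader(def))
	if err != nil {
		panic(err)
	}
	// A single 32-byte big-endian word holding the value 1, as the EVM returns it.
	ret := common.LeftPadBytes([]byte{1}, 32)

	var out uint32
	if err := parsed.Unpack(&out, "method", ret); err != nil {
		panic(err)
	}
	fmt.Println(out) // 1 -- a uint32, not a *big.Int, because the ABI type is uint32
}
```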
@@ -254,8 +238,16 @@ func (abi ABI) Unpack(v interface{}, name string, output []byte) error {
         return fmt.Errorf("abi: unmarshalling empty output")
     }
 
-    value := reflect.ValueOf(v).Elem()
-    typ := value.Type()
+    // make sure the passed value is a pointer
+    valueOf := reflect.ValueOf(v)
+    if reflect.Ptr != valueOf.Kind() {
+        return fmt.Errorf("abi: Unpack(non-pointer %T)", v)
+    }
+
+    var (
+        value = valueOf.Elem()
+        typ   = value.Type()
+    )
 
     if len(method.Outputs) > 1 {
         switch value.Kind() {
@@ -284,6 +276,25 @@ func (abi ABI) Unpack(v interface{}, name string, output []byte) error {
             return fmt.Errorf("abi: cannot marshal tuple in to slice %T (only []interface{} is supported)", v)
         }
 
+        // if the slice already contains values, set those instead of the interface slice itself.
+        if value.Len() > 0 {
+            if len(method.Outputs) > value.Len() {
+                return fmt.Errorf("abi: cannot marshal in to slices of unequal size (require: %v, got: %v)", len(method.Outputs), value.Len())
+            }
+
+            for i := 0; i < len(method.Outputs); i++ {
+                marshalledValue, err := toGoType(i, method.Outputs[i], output)
+                if err != nil {
+                    return err
+                }
+                reflectValue := reflect.ValueOf(marshalledValue)
+                if err := set(value.Index(i).Elem(), reflectValue, method.Outputs[i]); err != nil {
+                    return err
+                }
+            }
+            return nil
+        }
+
         // create a new slice and start appending the unmarshalled
         // values to the new interface slice.
         z := reflect.MakeSlice(typ, 0, len(method.Outputs))
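Two behaviours are added here: `Unpack` now rejects non-pointer destinations outright, and a pre-populated `[]interface{}` has its elements filled in place instead of being replaced. A hypothetical sketch of both, patterned on `TestUnpackSetInterfaceSlice` further down:

```go
// Hypothetical usage sketch of the new Unpack rules.
package main

import (
	"fmt"
	"strings"

	"github.com/ethereum/go-ethereum/accounts/abi"
	"github.com/ethereum/go-ethereum/common"
)

func main() {
	def := `[{"type":"function", "name":"ints", "outputs":[{"type":"uint8"}, {"type":"uint8"}]}]`
	parsed, err := abi.JSON(strings.NewReader(def))
	if err != nil {
		panic(err)
	}
	ret := append(common.LeftPadBytes([]byte{1}, 32), common.LeftPadBytes([]byte{2}, 32)...)

	// Unpack must be given a pointer; passing the slice by value now fails fast.
	a, b := new(uint8), new(uint8)
	out := []interface{}{a, b}
	if err := parsed.Unpack(out, "ints", ret); err != nil {
		fmt.Println(err) // abi: Unpack(non-pointer ...)
	}
	// With a pointer and one destination per output, the values land in a and b.
	if err := parsed.Unpack(&out, "ints", ret); err != nil {
		panic(err)
	}
	fmt.Println(*a, *b) // 1 2
}
```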
@@ -312,32 +323,6 @@ func (abi ABI) Unpack(v interface{}, name string, output []byte) error {
     return nil
 }
 
-// set attempts to assign src to dst by either setting, copying or otherwise.
-//
-// set is a bit more lenient when it comes to assignment and doesn't force an as
-// strict ruleset as bare `reflect` does.
-func set(dst, src reflect.Value, output Argument) error {
-    dstType := dst.Type()
-    srcType := src.Type()
-
-    switch {
-    case dstType.AssignableTo(src.Type()):
-        dst.Set(src)
-    case dstType.Kind() == reflect.Array && srcType.Kind() == reflect.Slice:
-        if !dstType.Elem().AssignableTo(r_byte) {
-            return fmt.Errorf("abi: cannot unmarshal %v in to array of elem %v", src.Type(), dstType.Elem())
-        }
-
-        if dst.Len() < output.Type.Size {
-            return fmt.Errorf("abi: cannot unmarshal src (len=%d) in to dst (len=%d)", output.Type.Size, dst.Len())
-        }
-        reflect.Copy(dst, src)
-    default:
-        return fmt.Errorf("abi: cannot unmarshal %v in to %v", src.Type(), dst.Type())
-    }
-    return nil
-}
-
 func (abi *ABI) UnmarshalJSON(data []byte) error {
     var fields []struct {
         Type string
@ -29,66 +29,422 @@ import (
|
||||
"github.com/ethereum/go-ethereum/crypto"
|
||||
)
|
||||
|
||||
// formatSilceOutput add padding to the value and adds a size
|
||||
func formatSliceOutput(v ...[]byte) []byte {
|
||||
off := common.LeftPadBytes(big.NewInt(int64(len(v))).Bytes(), 32)
|
||||
output := append(off, make([]byte, 0, len(v)*32)...)
|
||||
|
||||
for _, value := range v {
|
||||
output = append(output, common.LeftPadBytes(value, 32)...)
|
||||
}
|
||||
return output
|
||||
}
|
||||
|
||||
// quick helper padding
|
||||
func pad(input []byte, size int, left bool) []byte {
|
||||
if left {
|
||||
return common.LeftPadBytes(input, size)
|
||||
}
|
||||
return common.RightPadBytes(input, size)
|
||||
}
|
||||
|
||||
func TestTypeCheck(t *testing.T) {
|
||||
for i, test := range []struct {
|
||||
typ string
|
||||
input interface{}
|
||||
err string
|
||||
}{
|
||||
{"uint", big.NewInt(1), ""},
|
||||
{"int", big.NewInt(1), ""},
|
||||
{"uint30", big.NewInt(1), ""},
|
||||
{"uint30", uint8(1), "abi: cannot use uint8 as type ptr as argument"},
|
||||
{"uint16", uint16(1), ""},
|
||||
{"uint16", uint8(1), "abi: cannot use uint8 as type uint16 as argument"},
|
||||
{"uint16[]", []uint16{1, 2, 3}, ""},
|
||||
{"uint16[]", [3]uint16{1, 2, 3}, ""},
|
||||
{"uint16[]", []uint32{1, 2, 3}, "abi: cannot use []uint32 as type []uint16 as argument"},
|
||||
{"uint16[3]", [3]uint32{1, 2, 3}, "abi: cannot use [3]uint32 as type [3]uint16 as argument"},
|
||||
{"uint16[3]", [4]uint16{1, 2, 3}, "abi: cannot use [4]uint16 as type [3]uint16 as argument"},
|
||||
{"uint16[3]", []uint16{1, 2, 3}, ""},
|
||||
{"uint16[3]", []uint16{1, 2, 3, 4}, "abi: cannot use [4]uint16 as type [3]uint16 as argument"},
|
||||
{"address[]", []common.Address{common.Address{1}}, ""},
|
||||
{"address[1]", []common.Address{common.Address{1}}, ""},
|
||||
{"address[1]", [1]common.Address{common.Address{1}}, ""},
|
||||
{"address[2]", [1]common.Address{common.Address{1}}, "abi: cannot use [1]array as type [2]array as argument"},
|
||||
{"bytes32", [32]byte{}, ""},
|
||||
{"bytes32", [33]byte{}, "abi: cannot use [33]uint8 as type [32]uint8 as argument"},
|
||||
{"bytes32", common.Hash{1}, ""},
|
||||
{"bytes31", [31]byte{}, ""},
|
||||
{"bytes31", [32]byte{}, "abi: cannot use [32]uint8 as type [31]uint8 as argument"},
|
||||
{"bytes", []byte{0, 1}, ""},
|
||||
{"bytes", [2]byte{0, 1}, ""},
|
||||
{"bytes", common.Hash{1}, ""},
|
||||
{"string", "hello world", ""},
|
||||
{"bytes32[]", [][32]byte{[32]byte{}}, ""},
|
||||
} {
|
||||
typ, err := NewType(test.typ)
|
||||
if err != nil {
|
||||
t.Fatal("unexpected parse error:", err)
|
||||
}
|
||||
|
||||
err = typeCheck(typ, reflect.ValueOf(test.input))
|
||||
if err != nil && len(test.err) == 0 {
|
||||
t.Errorf("%d failed. Expected no err but got: %v", i, err)
|
||||
continue
|
||||
}
|
||||
if err == nil && len(test.err) != 0 {
|
||||
t.Errorf("%d failed. Expected err: %v but got none", i, test.err)
|
||||
continue
|
||||
}
|
||||
|
||||
if err != nil && len(test.err) != 0 && err.Error() != test.err {
|
||||
t.Errorf("%d failed. Expected err: '%v' got err: '%v'", i, test.err, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestSimpleMethodUnpack(t *testing.T) {
|
||||
for i, test := range []struct {
|
||||
def string // definition of the **output** ABI params
|
||||
marshalledOutput []byte // evm return data
|
||||
expectedOut interface{} // the expected output
|
||||
outVar string // the output variable (e.g. uint32, *big.Int, etc)
|
||||
err string // empty or error if expected
|
||||
}{
|
||||
{
|
||||
`[ { "type": "uint32" } ]`,
|
||||
pad([]byte{1}, 32, true),
|
||||
uint32(1),
|
||||
"uint32",
|
||||
"",
|
||||
},
|
||||
{
|
||||
`[ { "type": "uint32" } ]`,
|
||||
pad([]byte{1}, 32, true),
|
||||
nil,
|
||||
"uint16",
|
||||
"abi: cannot unmarshal uint32 in to uint16",
|
||||
},
|
||||
{
|
||||
`[ { "type": "uint17" } ]`,
|
||||
pad([]byte{1}, 32, true),
|
||||
nil,
|
||||
"uint16",
|
||||
"abi: cannot unmarshal *big.Int in to uint16",
|
||||
},
|
||||
{
|
||||
`[ { "type": "uint17" } ]`,
|
||||
pad([]byte{1}, 32, true),
|
||||
big.NewInt(1),
|
||||
"*big.Int",
|
||||
"",
|
||||
},
|
||||
|
||||
{
|
||||
`[ { "type": "int32" } ]`,
|
||||
pad([]byte{1}, 32, true),
|
||||
int32(1),
|
||||
"int32",
|
||||
"",
|
||||
},
|
||||
{
|
||||
`[ { "type": "int32" } ]`,
|
||||
pad([]byte{1}, 32, true),
|
||||
nil,
|
||||
"int16",
|
||||
"abi: cannot unmarshal int32 in to int16",
|
||||
},
|
||||
{
|
||||
`[ { "type": "int17" } ]`,
|
||||
pad([]byte{1}, 32, true),
|
||||
nil,
|
||||
"int16",
|
||||
"abi: cannot unmarshal *big.Int in to int16",
|
||||
},
|
||||
{
|
||||
`[ { "type": "int17" } ]`,
|
||||
pad([]byte{1}, 32, true),
|
||||
big.NewInt(1),
|
||||
"*big.Int",
|
||||
"",
|
||||
},
|
||||
|
||||
{
|
||||
`[ { "type": "address" } ]`,
|
||||
pad(pad([]byte{1}, 20, false), 32, true),
|
||||
common.Address{1},
|
||||
"address",
|
||||
"",
|
||||
},
|
||||
{
|
||||
`[ { "type": "bytes32" } ]`,
|
||||
pad([]byte{1}, 32, false),
|
||||
pad([]byte{1}, 32, false),
|
||||
"bytes",
|
||||
"",
|
||||
},
|
||||
{
|
||||
`[ { "type": "bytes32" } ]`,
|
||||
pad([]byte{1}, 32, false),
|
||||
pad([]byte{1}, 32, false),
|
||||
"hash",
|
||||
"",
|
||||
},
|
||||
{
|
||||
`[ { "type": "bytes32" } ]`,
|
||||
pad([]byte{1}, 32, false),
|
||||
pad([]byte{1}, 32, false),
|
||||
"interface",
|
||||
"",
|
||||
},
|
||||
} {
|
||||
abiDefinition := fmt.Sprintf(`[{ "name" : "method", "outputs": %s}]`, test.def)
|
||||
abi, err := JSON(strings.NewReader(abiDefinition))
|
||||
if err != nil {
|
||||
t.Errorf("%d failed. %v", i, err)
|
||||
continue
|
||||
}
|
||||
|
||||
var outvar interface{}
|
||||
switch test.outVar {
|
||||
case "uint8":
|
||||
var v uint8
|
||||
err = abi.Unpack(&v, "method", test.marshalledOutput)
|
||||
outvar = v
|
||||
case "uint16":
|
||||
var v uint16
|
||||
err = abi.Unpack(&v, "method", test.marshalledOutput)
|
||||
outvar = v
|
||||
case "uint32":
|
||||
var v uint32
|
||||
err = abi.Unpack(&v, "method", test.marshalledOutput)
|
||||
outvar = v
|
||||
case "uint64":
|
||||
var v uint64
|
||||
err = abi.Unpack(&v, "method", test.marshalledOutput)
|
||||
outvar = v
|
||||
case "int8":
|
||||
var v int8
|
||||
err = abi.Unpack(&v, "method", test.marshalledOutput)
|
||||
outvar = v
|
||||
case "int16":
|
||||
var v int16
|
||||
err = abi.Unpack(&v, "method", test.marshalledOutput)
|
||||
outvar = v
|
||||
case "int32":
|
||||
var v int32
|
||||
err = abi.Unpack(&v, "method", test.marshalledOutput)
|
||||
outvar = v
|
||||
case "int64":
|
||||
var v int64
|
||||
err = abi.Unpack(&v, "method", test.marshalledOutput)
|
||||
outvar = v
|
||||
case "*big.Int":
|
||||
var v *big.Int
|
||||
err = abi.Unpack(&v, "method", test.marshalledOutput)
|
||||
outvar = v
|
||||
case "address":
|
||||
var v common.Address
|
||||
err = abi.Unpack(&v, "method", test.marshalledOutput)
|
||||
outvar = v
|
||||
case "bytes":
|
||||
var v []byte
|
||||
err = abi.Unpack(&v, "method", test.marshalledOutput)
|
||||
outvar = v
|
||||
case "hash":
|
||||
var v common.Hash
|
||||
err = abi.Unpack(&v, "method", test.marshalledOutput)
|
||||
outvar = v
|
||||
case "interface":
|
||||
err = abi.Unpack(&outvar, "method", test.marshalledOutput)
|
||||
default:
|
||||
t.Errorf("unsupported type '%v' please add it to the switch statement in this test", test.outVar)
|
||||
continue
|
||||
}
|
||||
|
||||
if err != nil && len(test.err) == 0 {
|
||||
t.Errorf("%d failed. Expected no err but got: %v", i, err)
|
||||
continue
|
||||
}
|
||||
if err == nil && len(test.err) != 0 {
|
||||
t.Errorf("%d failed. Expected err: %v but got none", i, test.err)
|
||||
continue
|
||||
}
|
||||
if err != nil && len(test.err) != 0 && err.Error() != test.err {
|
||||
t.Errorf("%d failed. Expected err: '%v' got err: '%v'", i, test.err, err)
|
||||
continue
|
||||
}
|
||||
|
||||
if err == nil {
|
||||
// bit of an ugly hack for hash type but I don't feel like finding a proper solution
|
||||
if test.outVar == "hash" {
|
||||
tmp := outvar.(common.Hash) // without assignment it's unaddressable
|
||||
outvar = tmp[:]
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(test.expectedOut, outvar) {
|
||||
t.Errorf("%d failed. Output error: expected %v, got %v", i, test.expectedOut, outvar)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestUnpackSetInterfaceSlice(t *testing.T) {
|
||||
var (
|
||||
var1 = new(uint8)
|
||||
var2 = new(uint8)
|
||||
)
|
||||
out := []interface{}{var1, var2}
|
||||
abi, err := JSON(strings.NewReader(`[{"type":"function", "name":"ints", "outputs":[{"type":"uint8"}, {"type":"uint8"}]}]`))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
marshalledReturn := append(pad([]byte{1}, 32, true), pad([]byte{2}, 32, true)...)
|
||||
err = abi.Unpack(&out, "ints", marshalledReturn)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if *var1 != 1 {
|
||||
t.Error("expected var1 to be 1, got", *var1)
|
||||
}
|
||||
if *var2 != 2 {
|
||||
t.Error("expected var2 to be 2, got", *var2)
|
||||
}
|
||||
|
||||
out = []interface{}{var1}
|
||||
err = abi.Unpack(&out, "ints", marshalledReturn)
|
||||
|
||||
expErr := "abi: cannot marshal in to slices of unequal size (require: 2, got: 1)"
|
||||
if err == nil || err.Error() != expErr {
|
||||
t.Error("expected err:", expErr, "Got:", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestPack(t *testing.T) {
|
||||
for i, test := range []struct {
|
||||
typ string
|
||||
|
||||
input interface{}
|
||||
output []byte
|
||||
}{
|
||||
{"uint16", uint16(2), pad([]byte{2}, 32, true)},
|
||||
{"uint16[]", []uint16{1, 2}, formatSliceOutput([]byte{1}, []byte{2})},
|
||||
{"bytes20", [20]byte{1}, pad([]byte{1}, 32, false)},
|
||||
{"uint256[]", []*big.Int{big.NewInt(1), big.NewInt(2)}, formatSliceOutput([]byte{1}, []byte{2})},
|
||||
{"address[]", []common.Address{common.Address{1}, common.Address{2}}, formatSliceOutput(pad([]byte{1}, 20, false), pad([]byte{2}, 20, false))},
|
||||
{"bytes32[]", []common.Hash{common.Hash{1}, common.Hash{2}}, formatSliceOutput(pad([]byte{1}, 32, false), pad([]byte{2}, 32, false))},
|
||||
} {
|
||||
typ, err := NewType(test.typ)
|
||||
if err != nil {
|
||||
t.Fatal("unexpected parse error:", err)
|
||||
}
|
||||
|
||||
output, err := typ.pack(reflect.ValueOf(test.input))
|
||||
if err != nil {
|
||||
t.Fatal("unexpected pack error:", err)
|
||||
}
|
||||
|
||||
if !bytes.Equal(output, test.output) {
|
||||
t.Errorf("%d failed. Expected bytes: '%x' Got: '%x'", i, test.output, output)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestMethodPack(t *testing.T) {
|
||||
abi, err := JSON(strings.NewReader(jsondata2))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
sig := abi.Methods["slice"].Id()
|
||||
sig = append(sig, common.LeftPadBytes([]byte{32}, 32)...)
|
||||
sig = append(sig, common.LeftPadBytes([]byte{2}, 32)...)
|
||||
sig = append(sig, common.LeftPadBytes([]byte{1}, 32)...)
|
||||
sig = append(sig, common.LeftPadBytes([]byte{2}, 32)...)
|
||||
|
||||
packed, err := abi.Pack("slice", []uint32{1, 2})
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if !bytes.Equal(packed, sig) {
|
||||
t.Errorf("expected %x got %x", sig, packed)
|
||||
}
|
||||
|
||||
var addrA, addrB = common.Address{1}, common.Address{2}
|
||||
sig = abi.Methods["sliceAddress"].Id()
|
||||
sig = append(sig, common.LeftPadBytes([]byte{32}, 32)...)
|
||||
sig = append(sig, common.LeftPadBytes([]byte{2}, 32)...)
|
||||
sig = append(sig, common.LeftPadBytes(addrA[:], 32)...)
|
||||
sig = append(sig, common.LeftPadBytes(addrB[:], 32)...)
|
||||
|
||||
packed, err = abi.Pack("sliceAddress", []common.Address{addrA, addrB})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !bytes.Equal(packed, sig) {
|
||||
t.Errorf("expected %x got %x", sig, packed)
|
||||
}
|
||||
|
||||
var addrC, addrD = common.Address{3}, common.Address{4}
|
||||
sig = abi.Methods["sliceMultiAddress"].Id()
|
||||
sig = append(sig, common.LeftPadBytes([]byte{64}, 32)...)
|
||||
sig = append(sig, common.LeftPadBytes([]byte{160}, 32)...)
|
||||
sig = append(sig, common.LeftPadBytes([]byte{2}, 32)...)
|
||||
sig = append(sig, common.LeftPadBytes(addrA[:], 32)...)
|
||||
sig = append(sig, common.LeftPadBytes(addrB[:], 32)...)
|
||||
sig = append(sig, common.LeftPadBytes([]byte{2}, 32)...)
|
||||
sig = append(sig, common.LeftPadBytes(addrC[:], 32)...)
|
||||
sig = append(sig, common.LeftPadBytes(addrD[:], 32)...)
|
||||
|
||||
packed, err = abi.Pack("sliceMultiAddress", []common.Address{addrA, addrB}, []common.Address{addrC, addrD})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !bytes.Equal(packed, sig) {
|
||||
t.Errorf("expected %x got %x", sig, packed)
|
||||
}
|
||||
|
||||
sig = abi.Methods["slice256"].Id()
|
||||
sig = append(sig, common.LeftPadBytes([]byte{32}, 32)...)
|
||||
sig = append(sig, common.LeftPadBytes([]byte{2}, 32)...)
|
||||
sig = append(sig, common.LeftPadBytes([]byte{1}, 32)...)
|
||||
sig = append(sig, common.LeftPadBytes([]byte{2}, 32)...)
|
||||
|
||||
packed, err = abi.Pack("slice256", []*big.Int{big.NewInt(1), big.NewInt(2)})
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if !bytes.Equal(packed, sig) {
|
||||
t.Errorf("expected %x got %x", sig, packed)
|
||||
}
|
||||
}
|
||||
|
||||
const jsondata = `
|
||||
[
|
||||
{ "type" : "function", "name" : "balance", "const" : true },
|
||||
{ "type" : "function", "name" : "send", "const" : false, "inputs" : [ { "name" : "amount", "type" : "uint256" } ] }
|
||||
{ "type" : "function", "name" : "balance", "constant" : true },
|
||||
{ "type" : "function", "name" : "send", "constant" : false, "inputs" : [ { "name" : "amount", "type" : "uint256" } ] }
|
||||
]`
|
||||
|
||||
const jsondata2 = `
|
||||
[
|
||||
{ "type" : "function", "name" : "balance", "const" : true },
|
||||
{ "type" : "function", "name" : "send", "const" : false, "inputs" : [ { "name" : "amount", "type" : "uint256" } ] },
|
||||
{ "type" : "function", "name" : "test", "const" : false, "inputs" : [ { "name" : "number", "type" : "uint32" } ] },
|
||||
{ "type" : "function", "name" : "string", "const" : false, "inputs" : [ { "name" : "inputs", "type" : "string" } ] },
|
||||
{ "type" : "function", "name" : "bool", "const" : false, "inputs" : [ { "name" : "inputs", "type" : "bool" } ] },
|
||||
{ "type" : "function", "name" : "address", "const" : false, "inputs" : [ { "name" : "inputs", "type" : "address" } ] },
|
||||
{ "type" : "function", "name" : "string32", "const" : false, "inputs" : [ { "name" : "inputs", "type" : "string32" } ] },
|
||||
{ "type" : "function", "name" : "uint64[2]", "const" : false, "inputs" : [ { "name" : "inputs", "type" : "uint64[2]" } ] },
|
||||
{ "type" : "function", "name" : "uint64[]", "const" : false, "inputs" : [ { "name" : "inputs", "type" : "uint64[]" } ] },
|
||||
{ "type" : "function", "name" : "foo", "const" : false, "inputs" : [ { "name" : "inputs", "type" : "uint32" } ] },
|
||||
{ "type" : "function", "name" : "bar", "const" : false, "inputs" : [ { "name" : "inputs", "type" : "uint32" }, { "name" : "string", "type" : "uint16" } ] },
|
||||
{ "type" : "function", "name" : "slice", "const" : false, "inputs" : [ { "name" : "inputs", "type" : "uint32[2]" } ] },
|
||||
{ "type" : "function", "name" : "slice256", "const" : false, "inputs" : [ { "name" : "inputs", "type" : "uint256[2]" } ] },
|
||||
{ "type" : "function", "name" : "sliceAddress", "const" : false, "inputs" : [ { "name" : "inputs", "type" : "address[]" } ] },
|
||||
{ "type" : "function", "name" : "sliceMultiAddress", "const" : false, "inputs" : [ { "name" : "a", "type" : "address[]" }, { "name" : "b", "type" : "address[]" } ] }
|
||||
{ "type" : "function", "name" : "balance", "constant" : true },
|
||||
{ "type" : "function", "name" : "send", "constant" : false, "inputs" : [ { "name" : "amount", "type" : "uint256" } ] },
|
||||
{ "type" : "function", "name" : "test", "constant" : false, "inputs" : [ { "name" : "number", "type" : "uint32" } ] },
|
||||
{ "type" : "function", "name" : "string", "constant" : false, "inputs" : [ { "name" : "inputs", "type" : "string" } ] },
|
||||
{ "type" : "function", "name" : "bool", "constant" : false, "inputs" : [ { "name" : "inputs", "type" : "bool" } ] },
|
||||
{ "type" : "function", "name" : "address", "constant" : false, "inputs" : [ { "name" : "inputs", "type" : "address" } ] },
|
||||
{ "type" : "function", "name" : "uint64[2]", "constant" : false, "inputs" : [ { "name" : "inputs", "type" : "uint64[2]" } ] },
|
||||
{ "type" : "function", "name" : "uint64[]", "constant" : false, "inputs" : [ { "name" : "inputs", "type" : "uint64[]" } ] },
|
||||
{ "type" : "function", "name" : "foo", "constant" : false, "inputs" : [ { "name" : "inputs", "type" : "uint32" } ] },
|
||||
{ "type" : "function", "name" : "bar", "constant" : false, "inputs" : [ { "name" : "inputs", "type" : "uint32" }, { "name" : "string", "type" : "uint16" } ] },
|
||||
{ "type" : "function", "name" : "slice", "constant" : false, "inputs" : [ { "name" : "inputs", "type" : "uint32[2]" } ] },
|
||||
{ "type" : "function", "name" : "slice256", "constant" : false, "inputs" : [ { "name" : "inputs", "type" : "uint256[2]" } ] },
|
||||
{ "type" : "function", "name" : "sliceAddress", "constant" : false, "inputs" : [ { "name" : "inputs", "type" : "address[]" } ] },
|
||||
{ "type" : "function", "name" : "sliceMultiAddress", "constant" : false, "inputs" : [ { "name" : "a", "type" : "address[]" }, { "name" : "b", "type" : "address[]" } ] }
|
||||
]`
|
||||
|
||||
func TestType(t *testing.T) {
|
||||
typ, err := NewType("uint32")
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
if typ.Kind != reflect.Uint {
|
||||
t.Error("expected uint32 to have kind Ptr")
|
||||
}
|
||||
|
||||
typ, err = NewType("uint32[]")
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
if !typ.IsSlice {
|
||||
t.Error("expected uint32[] to be slice")
|
||||
}
|
||||
if typ.Type != ubig_t {
|
||||
t.Error("expcted uith32[] to have type uint64")
|
||||
}
|
||||
|
||||
typ, err = NewType("uint32[2]")
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
if !typ.IsSlice {
|
||||
t.Error("expected uint32[2] to be slice")
|
||||
}
|
||||
if typ.Type != ubig_t {
|
||||
t.Error("expcted uith32[2] to have type uint64")
|
||||
}
|
||||
if typ.SliceSize != 2 {
|
||||
t.Error("expected uint32[2] to have a size of 2")
|
||||
}
|
||||
}
|
||||
|
||||
func TestReader(t *testing.T) {
|
||||
Uint256, _ := NewType("uint256")
|
||||
exp := ABI{
|
||||
@ -164,21 +520,6 @@ func TestTestString(t *testing.T) {
|
||||
if _, err := abi.Pack("string", "hello world"); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
str10 := string(make([]byte, 10))
|
||||
if _, err := abi.Pack("string32", str10); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
str32 := string(make([]byte, 32))
|
||||
if _, err := abi.Pack("string32", str32); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
str33 := string(make([]byte, 33))
|
||||
if _, err := abi.Pack("string32", str33); err == nil {
|
||||
t.Error("expected str33 to throw out of bound error")
|
||||
}
|
||||
}
|
||||
|
||||
func TestTestBool(t *testing.T) {
|
||||
@ -210,26 +551,10 @@ func TestTestSlice(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestImplicitTypeCasts(t *testing.T) {
|
||||
abi, err := JSON(strings.NewReader(jsondata2))
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
t.FailNow()
|
||||
}
|
||||
|
||||
slice := make([]uint8, 2)
|
||||
_, err = abi.Pack("uint64[2]", slice)
|
||||
expStr := "`uint64[2]` abi: cannot use type uint8 as type uint64"
|
||||
if err.Error() != expStr {
|
||||
t.Errorf("expected %v, got %v", expStr, err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestMethodSignature(t *testing.T) {
|
||||
String, _ := NewType("string")
|
||||
String32, _ := NewType("string32")
|
||||
m := Method{"foo", false, []Argument{Argument{"bar", String32, false}, Argument{"baz", String, false}}, nil}
|
||||
exp := "foo(string32,string)"
|
||||
m := Method{"foo", false, []Argument{Argument{"bar", String, false}, Argument{"baz", String, false}}, nil}
|
||||
exp := "foo(string,string)"
|
||||
if m.Sig() != exp {
|
||||
t.Error("signature mismatch", exp, "!=", m.Sig())
|
||||
}
|
||||
@ -247,28 +572,6 @@ func TestMethodSignature(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestPack(t *testing.T) {
|
||||
abi, err := JSON(strings.NewReader(jsondata2))
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
t.FailNow()
|
||||
}
|
||||
|
||||
sig := crypto.Keccak256([]byte("foo(uint32)"))[:4]
|
||||
sig = append(sig, make([]byte, 32)...)
|
||||
sig[35] = 10
|
||||
|
||||
packed, err := abi.Pack("foo", uint32(10))
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
t.FailNow()
|
||||
}
|
||||
|
||||
if !bytes.Equal(packed, sig) {
|
||||
t.Errorf("expected %x got %x", sig, packed)
|
||||
}
|
||||
}
|
||||
|
||||
func TestMultiPack(t *testing.T) {
|
||||
abi, err := JSON(strings.NewReader(jsondata2))
|
||||
if err != nil {
|
||||
@ -292,77 +595,6 @@ func TestMultiPack(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestPackSlice(t *testing.T) {
|
||||
abi, err := JSON(strings.NewReader(jsondata2))
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
t.FailNow()
|
||||
}
|
||||
|
||||
sig := crypto.Keccak256([]byte("slice(uint32[2])"))[:4]
|
||||
sig = append(sig, common.LeftPadBytes([]byte{32}, 32)...)
|
||||
sig = append(sig, common.LeftPadBytes([]byte{2}, 32)...)
|
||||
sig = append(sig, common.LeftPadBytes([]byte{1}, 32)...)
|
||||
sig = append(sig, common.LeftPadBytes([]byte{2}, 32)...)
|
||||
|
||||
packed, err := abi.Pack("slice", []uint32{1, 2})
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if !bytes.Equal(packed, sig) {
|
||||
t.Errorf("expected %x got %x", sig, packed)
|
||||
}
|
||||
|
||||
var addrA, addrB = common.Address{1}, common.Address{2}
|
||||
sig = abi.Methods["sliceAddress"].Id()
|
||||
sig = append(sig, common.LeftPadBytes([]byte{32}, 32)...)
|
||||
sig = append(sig, common.LeftPadBytes([]byte{2}, 32)...)
|
||||
sig = append(sig, common.LeftPadBytes(addrA[:], 32)...)
|
||||
sig = append(sig, common.LeftPadBytes(addrB[:], 32)...)
|
||||
|
||||
packed, err = abi.Pack("sliceAddress", []common.Address{addrA, addrB})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !bytes.Equal(packed, sig) {
|
||||
t.Errorf("expected %x got %x", sig, packed)
|
||||
}
|
||||
|
||||
var addrC, addrD = common.Address{3}, common.Address{4}
|
||||
sig = abi.Methods["sliceMultiAddress"].Id()
|
||||
sig = append(sig, common.LeftPadBytes([]byte{64}, 32)...)
|
||||
sig = append(sig, common.LeftPadBytes([]byte{160}, 32)...)
|
||||
sig = append(sig, common.LeftPadBytes([]byte{2}, 32)...)
|
||||
sig = append(sig, common.LeftPadBytes(addrA[:], 32)...)
|
||||
sig = append(sig, common.LeftPadBytes(addrB[:], 32)...)
|
||||
sig = append(sig, common.LeftPadBytes([]byte{2}, 32)...)
|
||||
sig = append(sig, common.LeftPadBytes(addrC[:], 32)...)
|
||||
sig = append(sig, common.LeftPadBytes(addrD[:], 32)...)
|
||||
|
||||
packed, err = abi.Pack("sliceMultiAddress", []common.Address{addrA, addrB}, []common.Address{addrC, addrD})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !bytes.Equal(packed, sig) {
|
||||
t.Errorf("expected %x got %x", sig, packed)
|
||||
}
|
||||
|
||||
sig = crypto.Keccak256([]byte("slice256(uint256[2])"))[:4]
|
||||
sig = append(sig, common.LeftPadBytes([]byte{32}, 32)...)
|
||||
sig = append(sig, common.LeftPadBytes([]byte{2}, 32)...)
|
||||
sig = append(sig, common.LeftPadBytes([]byte{1}, 32)...)
|
||||
sig = append(sig, common.LeftPadBytes([]byte{2}, 32)...)
|
||||
|
||||
packed, err = abi.Pack("slice256", []*big.Int{big.NewInt(1), big.NewInt(2)})
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if !bytes.Equal(packed, sig) {
|
||||
t.Errorf("expected %x got %x", sig, packed)
|
||||
}
|
||||
}
|
||||
func ExampleJSON() {
|
||||
const definition = `[{"constant":true,"inputs":[{"name":"","type":"address"}],"name":"isBar","outputs":[{"name":"","type":"bool"}],"type":"function"}]`
|
||||
|
||||
@ -382,9 +614,9 @@ func ExampleJSON() {
|
||||
|
||||
func TestInputVariableInputLength(t *testing.T) {
|
||||
const definition = `[
|
||||
{ "type" : "function", "name" : "strOne", "const" : true, "inputs" : [ { "name" : "str", "type" : "string" } ] },
|
||||
{ "type" : "function", "name" : "bytesOne", "const" : true, "inputs" : [ { "name" : "str", "type" : "bytes" } ] },
|
||||
{ "type" : "function", "name" : "strTwo", "const" : true, "inputs" : [ { "name" : "str", "type" : "string" }, { "name" : "str1", "type" : "string" } ] }
|
||||
{ "type" : "function", "name" : "strOne", "constant" : true, "inputs" : [ { "name" : "str", "type" : "string" } ] },
|
||||
{ "type" : "function", "name" : "bytesOne", "constant" : true, "inputs" : [ { "name" : "str", "type" : "bytes" } ] },
|
||||
{ "type" : "function", "name" : "strTwo", "constant" : true, "inputs" : [ { "name" : "str", "type" : "string" }, { "name" : "str1", "type" : "string" } ] }
|
||||
]`
|
||||
|
||||
abi, err := JSON(strings.NewReader(definition))
|
||||
@ -546,7 +778,7 @@ func TestBareEvents(t *testing.T) {
|
||||
|
||||
func TestMultiReturnWithStruct(t *testing.T) {
|
||||
const definition = `[
|
||||
{ "name" : "multi", "const" : false, "outputs": [ { "name": "Int", "type": "uint256" }, { "name": "String", "type": "string" } ] }]`
|
||||
{ "name" : "multi", "constant" : false, "outputs": [ { "name": "Int", "type": "uint256" }, { "name": "String", "type": "string" } ] }]`
|
||||
|
||||
abi, err := JSON(strings.NewReader(definition))
|
||||
if err != nil {
|
||||
@ -599,7 +831,7 @@ func TestMultiReturnWithStruct(t *testing.T) {
|
||||
|
||||
func TestMultiReturnWithSlice(t *testing.T) {
|
||||
const definition = `[
|
||||
{ "name" : "multi", "const" : false, "outputs": [ { "name": "Int", "type": "uint256" }, { "name": "String", "type": "string" } ] }]`
|
||||
{ "name" : "multi", "constant" : false, "outputs": [ { "name": "Int", "type": "uint256" }, { "name": "String", "type": "string" } ] }]`
|
||||
|
||||
abi, err := JSON(strings.NewReader(definition))
|
||||
if err != nil {
|
||||
@ -635,8 +867,8 @@ func TestMultiReturnWithSlice(t *testing.T) {
|
||||
|
||||
func TestMarshalArrays(t *testing.T) {
|
||||
const definition = `[
|
||||
{ "name" : "bytes32", "const" : false, "outputs": [ { "type": "bytes32" } ] },
|
||||
{ "name" : "bytes10", "const" : false, "outputs": [ { "type": "bytes10" } ] }
|
||||
{ "name" : "bytes32", "constant" : false, "outputs": [ { "type": "bytes32" } ] },
|
||||
{ "name" : "bytes10", "constant" : false, "outputs": [ { "type": "bytes10" } ] }
|
||||
]`
|
||||
|
||||
abi, err := JSON(strings.NewReader(definition))
|
||||
@ -694,14 +926,14 @@ func TestMarshalArrays(t *testing.T) {
|
||||
|
||||
func TestUnmarshal(t *testing.T) {
|
||||
const definition = `[
|
||||
{ "name" : "int", "const" : false, "outputs": [ { "type": "uint256" } ] },
|
||||
{ "name" : "bool", "const" : false, "outputs": [ { "type": "bool" } ] },
|
||||
{ "name" : "bytes", "const" : false, "outputs": [ { "type": "bytes" } ] },
|
||||
{ "name" : "fixed", "const" : false, "outputs": [ { "type": "bytes32" } ] },
|
||||
{ "name" : "multi", "const" : false, "outputs": [ { "type": "bytes" }, { "type": "bytes" } ] },
|
||||
{ "name" : "addressSliceSingle", "const" : false, "outputs": [ { "type": "address[]" } ] },
|
||||
{ "name" : "addressSliceDouble", "const" : false, "outputs": [ { "name": "a", "type": "address[]" }, { "name": "b", "type": "address[]" } ] },
|
||||
{ "name" : "mixedBytes", "const" : true, "outputs": [ { "name": "a", "type": "bytes" }, { "name": "b", "type": "bytes32" } ] }]`
|
||||
{ "name" : "int", "constant" : false, "outputs": [ { "type": "uint256" } ] },
|
||||
{ "name" : "bool", "constant" : false, "outputs": [ { "type": "bool" } ] },
|
||||
{ "name" : "bytes", "constant" : false, "outputs": [ { "type": "bytes" } ] },
|
||||
{ "name" : "fixed", "constant" : false, "outputs": [ { "type": "bytes32" } ] },
|
||||
{ "name" : "multi", "constant" : false, "outputs": [ { "type": "bytes" }, { "type": "bytes" } ] },
|
||||
{ "name" : "addressSliceSingle", "constant" : false, "outputs": [ { "type": "address[]" } ] },
|
||||
{ "name" : "addressSliceDouble", "constant" : false, "outputs": [ { "name": "a", "type": "address[]" }, { "name": "b", "type": "address[]" } ] },
|
||||
{ "name" : "mixedBytes", "constant" : true, "outputs": [ { "name": "a", "type": "bytes" }, { "name": "b", "type": "bytes32" } ] }]`
|
||||
|
||||
abi, err := JSON(strings.NewReader(definition))
|
||||
if err != nil {
|
||||
|
accounts/abi/bind/backend.go

@@ -17,15 +17,26 @@
 package bind
 
 import (
+    "errors"
     "math/big"
 
     "github.com/ethereum/go-ethereum/common"
     "github.com/ethereum/go-ethereum/core/types"
 )
 
+// ErrNoCode is returned by call and transact operations for which the requested
+// recipient contract to operate on does not exist in the state db or does not
+// have any code associated with it (i.e. suicided).
+var ErrNoCode = errors.New("no contract code at given address")
+
 // ContractCaller defines the methods needed to allow operating with contract on a read
 // only basis.
 type ContractCaller interface {
+    // HasCode checks if the contract at the given address has any code associated
+    // with it or not. This is needed to differentiate between contract internal
+    // errors and the local chain being out of sync.
+    HasCode(contract common.Address, pending bool) (bool, error)
+
     // ContractCall executes an Ethereum contract call with the specified data as
     // the input. The pending flag requests execution against the pending block, not
     // the stable head of the chain.
@@ -37,7 +48,51 @@ type ContractCaller interface {
 // used when the user does not provide some needed values, but rather leaves it up
 // to the transactor to decide.
 type ContractTransactor interface {
-    // Nonce retrieves the current pending nonce associated with an account.
+    // PendingAccountNonce retrieves the current pending nonce associated with an
+    // account.
     PendingAccountNonce(account common.Address) (uint64, error)
 
     // SuggestGasPrice retrieves the currently suggested gas price to allow a timely
+    // execution of a transaction.
+    SuggestGasPrice() (*big.Int, error)
+
+    // HasCode checks if the contract at the given address has any code associated
+    // with it or not. This is needed to differentiate between contract internal
+    // errors and the local chain being out of sync.
+    HasCode(contract common.Address, pending bool) (bool, error)
+
+    // EstimateGasLimit tries to estimate the gas needed to execute a specific
+    // transaction based on the current pending state of the backend blockchain.
+    // There is no guarantee that this is the true gas limit requirement as other
+    // transactions may be added or removed by miners, but it should provide a basis
+    // for setting a reasonable default.
+    EstimateGasLimit(sender common.Address, contract *common.Address, value *big.Int, data []byte) (*big.Int, error)
+
+    // SendTransaction injects the transaction into the pending pool for execution.
+    SendTransaction(tx *types.Transaction) error
+}
+
+// ContractBackend defines the methods needed to allow operating with contract
+// on a read-write basis.
+//
+// This interface is essentially the union of ContractCaller and ContractTransactor
+// but due to a bug in the Go compiler (https://github.com/golang/go/issues/6977),
+// we cannot simply list it as the two interfaces. The other solution is to add a
+// third interface containing the common methods, but that convolutes the user API
+// as it introduces yet another parameter to require for initialization.
+type ContractBackend interface {
+    // HasCode checks if the contract at the given address has any code associated
+    // with it or not. This is needed to differentiate between contract internal
+    // errors and the local chain being out of sync.
+    HasCode(contract common.Address, pending bool) (bool, error)
+
+    // ContractCall executes an Ethereum contract call with the specified data as
+    // the input. The pending flag requests execution against the pending block, not
+    // the stable head of the chain.
+    ContractCall(contract common.Address, data []byte, pending bool) ([]byte, error)
+
+    // PendingAccountNonce retrieves the current pending nonce associated with an
+    // account.
+    PendingAccountNonce(account common.Address) (uint64, error)
+
+    // SuggestGasPrice retrieves the currently suggested gas price to allow a timely
@@ -52,12 +107,5 @@ type ContractTransactor interface {
     EstimateGasLimit(sender common.Address, contract *common.Address, value *big.Int, data []byte) (*big.Int, error)
 
     // SendTransaction injects the transaction into the pending pool for execution.
-    SendTransaction(*types.Transaction) error
-}
-
-// ContractBackend defines the methods needed to allow operating with contract
-// on a read-write basis.
-type ContractBackend interface {
-    ContractCaller
-    ContractTransactor
+    SendTransaction(tx *types.Transaction) error
 }
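With `HasCode` added to the caller side, a read-only consumer is expected to verify that code exists before issuing the call and to surface `ErrNoCode` when it does not, which is what `BoundContract.Call` does later in this diff. A hedged sketch of that pattern (the helper name and import paths are illustrative assumptions, not part of the changeset):

```go
// Package example sketches the intended use of the ContractCaller interface.
package example

import (
	"github.com/ethereum/go-ethereum/accounts/abi/bind"
	"github.com/ethereum/go-ethereum/common"
)

// callIfDeployed is a hypothetical helper: verify code exists, then call.
func callIfDeployed(caller bind.ContractCaller, contract common.Address, input []byte, pending bool) ([]byte, error) {
	deployed, err := caller.HasCode(contract, pending)
	if err != nil {
		return nil, err // local node problem (e.g. still syncing)
	}
	if !deployed {
		return nil, bind.ErrNoCode // nothing at that address, don't bother calling
	}
	return caller.ContractCall(contract, input, pending)
}
```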
@@ -38,6 +38,7 @@ func (*nilBackend) ContractCall(common.Address, []byte, bool) ([]byte, error) {
 func (*nilBackend) EstimateGasLimit(common.Address, *common.Address, *big.Int, []byte) (*big.Int, error) {
     panic("not implemented")
 }
+func (*nilBackend) HasCode(common.Address, bool) (bool, error) { panic("not implemented") }
 func (*nilBackend) SuggestGasPrice() (*big.Int, error) { panic("not implemented") }
 func (*nilBackend) PendingAccountNonce(common.Address) (uint64, error) { panic("not implemented") }
 func (*nilBackend) SendTransaction(*types.Transaction) error { panic("not implemented") }
@@ -66,10 +66,16 @@ type request struct {
 type response struct {
     JSONRPC string          `json:"jsonrpc"` // Version of the JSON RPC protocol, always set to 2.0
     ID      int             `json:"id"`      // Auto incrementing ID number for this request
-    Error   json.RawMessage `json:"error"`   // Any error returned by the remote side
+    Error   *failure        `json:"error"`   // Any error returned by the remote side
     Result  json.RawMessage `json:"result"`  // Whatever the remote side sends us in reply
 }
 
+// failure is a JSON RPC response error field sent back from the API server.
+type failure struct {
+    Code    int    `json:"code"`    // JSON RPC error code associated with the failure
+    Message string `json:"message"` // Specific error message of the failure
+}
+
 // request forwards an API request to the RPC server, and parses the response.
 //
 // This is currently painfully non-concurrent, but it will have to do until we
@@ -96,12 +102,35 @@ func (b *rpcBackend) request(method string, params []interface{}) (json.RawMessa
     if err := b.client.Recv(res); err != nil {
         return nil, err
     }
-    if len(res.Error) > 0 {
-        return nil, fmt.Errorf("remote error: %s", string(res.Error))
+    if res.Error != nil {
+        if res.Error.Message == bind.ErrNoCode.Error() {
+            return nil, bind.ErrNoCode
+        }
+        return nil, fmt.Errorf("remote error: %s", res.Error.Message)
     }
     return res.Result, nil
 }
 
+// HasCode implements ContractVerifier.HasCode by retrieving any code associated
+// with the contract from the remote node, and checking its size.
+func (b *rpcBackend) HasCode(contract common.Address, pending bool) (bool, error) {
+    // Execute the RPC code retrieval
+    block := "latest"
+    if pending {
+        block = "pending"
+    }
+    res, err := b.request("eth_getCode", []interface{}{contract.Hex(), block})
+    if err != nil {
+        return false, err
+    }
+    var hex string
+    if err := json.Unmarshal(res, &hex); err != nil {
+        return false, err
+    }
+    // Convert the response back to a Go byte slice and return
+    return len(common.FromHex(hex)) > 0, nil
+}
+
 // ContractCall implements ContractCaller.ContractCall, delegating the execution of
 // a contract call to the remote node, returning the reply to for local processing.
 func (b *rpcBackend) ContractCall(contract common.Address, data []byte, pending bool) ([]byte, error) {
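Switching the `Error` field from raw JSON to the structured `failure` type is what makes it possible to recognise the server-side "no contract code" message and hand back the `bind.ErrNoCode` sentinel. A stand-alone, hypothetical sketch of that decoding step (the structs are redeclared locally because the real ones are unexported, and the error code value is made up):

```go
// Hypothetical sketch of decoding a JSON RPC error the way the updated
// rpcBackend does; the types mirror the unexported response/failure structs.
package main

import (
	"encoding/json"
	"errors"
	"fmt"
)

type failure struct {
	Code    int    `json:"code"`
	Message string `json:"message"`
}

type response struct {
	JSONRPC string          `json:"jsonrpc"`
	ID      int             `json:"id"`
	Error   *failure        `json:"error"`
	Result  json.RawMessage `json:"result"`
}

var errNoCode = errors.New("no contract code at given address") // same text as bind.ErrNoCode

func main() {
	raw := `{"jsonrpc":"2.0","id":1,"error":{"code":-32000,"message":"no contract code at given address"}}`

	var res response
	if err := json.Unmarshal([]byte(raw), &res); err != nil {
		panic(err)
	}
	if res.Error != nil {
		if res.Error.Message == errNoCode.Error() {
			fmt.Println("mapped to ErrNoCode") // sentinel error callers can test for
			return
		}
		fmt.Println("remote error:", res.Error.Message)
	}
}
```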
@ -78,6 +78,16 @@ func (b *SimulatedBackend) Rollback() {
|
||||
b.pendingState, _ = state.New(b.pendingBlock.Root(), b.database)
|
||||
}
|
||||
|
||||
// HasCode implements ContractVerifier.HasCode, checking whether there is any
|
||||
// code associated with a certain account in the blockchain.
|
||||
func (b *SimulatedBackend) HasCode(contract common.Address, pending bool) (bool, error) {
|
||||
if pending {
|
||||
return len(b.pendingState.GetCode(contract)) > 0, nil
|
||||
}
|
||||
statedb, _ := b.blockchain.State()
|
||||
return len(statedb.GetCode(contract)) > 0, nil
|
||||
}
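A small usage sketch for the new simulator method (the constructor call is the same one that appears elsewhere in this diff): with nothing deployed, HasCode reports false for both the latest and the pending state.

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
	"github.com/ethereum/go-ethereum/common"
)

func main() {
	sim := backends.NewSimulatedBackend()

	for _, pending := range []bool{false, true} {
		ok, err := sim.HasCode(common.Address{}, pending)
		if err != nil {
			panic(err)
		}
		fmt.Printf("pending=%v -> code present: %v\n", pending, ok)
	}
}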
|
||||
|
||||
// ContractCall implements ContractCaller.ContractCall, executing the specified
|
||||
// contract with the given input data.
|
||||
func (b *SimulatedBackend) ContractCall(contract common.Address, data []byte, pending bool) ([]byte, error) {
|
||||
@ -92,6 +102,10 @@ func (b *SimulatedBackend) ContractCall(contract common.Address, data []byte, pe
|
||||
block = b.blockchain.CurrentBlock()
|
||||
statedb, _ = b.blockchain.State()
|
||||
}
|
||||
// If there's no code to interact with, respond with an appropriate error
|
||||
if code := statedb.GetCode(contract); len(code) == 0 {
|
||||
return nil, bind.ErrNoCode
|
||||
}
|
||||
// Set infinite balance to the fake caller account
|
||||
from := statedb.GetOrNewStateObject(common.Address{})
|
||||
from.SetBalance(common.MaxBig)
|
||||
@ -134,7 +148,12 @@ func (b *SimulatedBackend) EstimateGasLimit(sender common.Address, contract *com
|
||||
block = b.pendingBlock
|
||||
statedb = b.pendingState.Copy()
|
||||
)
|
||||
|
||||
// If there's no code to interact with, respond with an appropriate error
|
||||
if contract != nil {
|
||||
if code := statedb.GetCode(*contract); len(code) == 0 {
|
||||
return nil, bind.ErrNoCode
|
||||
}
|
||||
}
|
||||
// Set infinite balance to the fake caller account
|
||||
from := statedb.GetOrNewStateObject(sender)
|
||||
from.SetBalance(common.MaxBig)
|
||||
|
@ -20,6 +20,7 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"math/big"
|
||||
"sync/atomic"
|
||||
|
||||
"github.com/ethereum/go-ethereum/accounts/abi"
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
@ -56,6 +57,9 @@ type BoundContract struct {
|
||||
abi abi.ABI // Reflect based ABI to access the correct Ethereum methods
|
||||
caller ContractCaller // Read interface to interact with the blockchain
|
||||
transactor ContractTransactor // Write interface to interact with the blockchain
|
||||
|
||||
latestHasCode uint32 // Cached verification that the latest state contains code for this contract
|
||||
pendingHasCode uint32 // Cached verification that the pending state contains code for this contract
|
||||
}
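The two new uint32 fields implement a check-once cache: the expensive code-existence lookup runs at most once per state and the result is remembered with sync/atomic. A generic sketch of that pattern (illustrative only, not the geth code itself):

package main

import (
	"fmt"
	"sync/atomic"
)

type verifier struct {
	verified uint32 // 0 = unknown, 1 = verification already passed
}

func (v *verifier) ensure(check func() (bool, error)) error {
	if atomic.LoadUint32(&v.verified) == 1 {
		return nil // already proven, skip the expensive check
	}
	ok, err := check()
	if err != nil {
		return err
	}
	if !ok {
		return fmt.Errorf("verification failed")
	}
	atomic.StoreUint32(&v.verified, 1)
	return nil
}

func main() {
	v := new(verifier)
	calls := 0
	check := func() (bool, error) { calls++; return true, nil }

	v.ensure(check)
	v.ensure(check)
	fmt.Println("expensive check ran", calls, "time(s)") // 1
}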
|
||||
|
||||
// NewBoundContract creates a low level contract interface through which calls
|
||||
@ -96,6 +100,19 @@ func (c *BoundContract) Call(opts *CallOpts, result interface{}, method string,
|
||||
if opts == nil {
|
||||
opts = new(CallOpts)
|
||||
}
|
||||
// Make sure we have a contract to operate on, and bail out otherwise
|
||||
if (opts.Pending && atomic.LoadUint32(&c.pendingHasCode) == 0) || (!opts.Pending && atomic.LoadUint32(&c.latestHasCode) == 0) {
|
||||
if code, err := c.caller.HasCode(c.address, opts.Pending); err != nil {
|
||||
return err
|
||||
} else if !code {
|
||||
return ErrNoCode
|
||||
}
|
||||
if opts.Pending {
|
||||
atomic.StoreUint32(&c.pendingHasCode, 1)
|
||||
} else {
|
||||
atomic.StoreUint32(&c.latestHasCode, 1)
|
||||
}
|
||||
}
|
||||
// Pack the input, call and unpack the results
|
||||
input, err := c.abi.Pack(method, params...)
|
||||
if err != nil {
|
||||
@ -153,6 +170,16 @@ func (c *BoundContract) transact(opts *TransactOpts, contract *common.Address, i
|
||||
}
|
||||
gasLimit := opts.GasLimit
|
||||
if gasLimit == nil {
|
||||
// Gas estimation cannot succeed without code for method invocations
|
||||
if contract != nil && atomic.LoadUint32(&c.pendingHasCode) == 0 {
|
||||
if code, err := c.transactor.HasCode(c.address, true); err != nil {
|
||||
return nil, err
|
||||
} else if !code {
|
||||
return nil, ErrNoCode
|
||||
}
|
||||
atomic.StoreUint32(&c.pendingHasCode, 1)
|
||||
}
|
||||
// If the contract surely has code (or code is not needed), estimate the transaction
|
||||
gasLimit, err = c.transactor.EstimateGasLimit(opts.From, contract, value, input)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to exstimate gas needed: %v", err)
|
||||
|
@ -194,12 +194,44 @@ var bindTests = []struct {
|
||||
}
|
||||
`,
|
||||
},
|
||||
// Tests that plain values can be properly returned and deserialized
|
||||
{
|
||||
`Getter`,
|
||||
`
|
||||
contract Getter {
|
||||
function getter() constant returns (string, int, bytes32) {
|
||||
return ("Hi", 1, sha3(""));
|
||||
}
|
||||
}
|
||||
`,
|
||||
`606060405260dc8060106000396000f3606060405260e060020a6000350463993a04b78114601a575b005b600060605260c0604052600260809081527f486900000000000000000000000000000000000000000000000000000000000060a05260017fc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a47060e0829052610100819052606060c0908152600261012081905281906101409060a09080838184600060046012f1505081517fffff000000000000000000000000000000000000000000000000000000000000169091525050604051610160819003945092505050f3`,
|
||||
`[{"constant":true,"inputs":[],"name":"getter","outputs":[{"name":"","type":"string"},{"name":"","type":"int256"},{"name":"","type":"bytes32"}],"type":"function"}]`,
|
||||
`
|
||||
// Generate a new random account and a funded simulator
|
||||
key, _ := crypto.GenerateKey()
|
||||
auth := bind.NewKeyedTransactor(key)
|
||||
sim := backends.NewSimulatedBackend(core.GenesisAccount{Address: auth.From, Balance: big.NewInt(10000000000)})
|
||||
|
||||
// Deploy a tuple tester contract and execute a structured call on it
|
||||
_, _, getter, err := DeployGetter(auth, sim)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to deploy getter contract: %v", err)
|
||||
}
|
||||
sim.Commit()
|
||||
|
||||
if str, num, _, err := getter.Getter(nil); err != nil {
|
||||
t.Fatalf("Failed to call anonymous field retriever: %v", err)
|
||||
} else if str != "Hi" || num.Cmp(big.NewInt(1)) != 0 {
|
||||
t.Fatalf("Retrieved value mismatch: have %v/%v, want %v/%v", str, num, "Hi", 1)
|
||||
}
|
||||
`,
|
||||
},
|
||||
// Tests that tuples can be properly returned and deserialized
|
||||
{
|
||||
`Tupler`,
|
||||
`
|
||||
contract Tupler {
|
||||
function tuple() returns (string a, int b, bytes32 c) {
|
||||
function tuple() constant returns (string a, int b, bytes32 c) {
|
||||
return ("Hi", 1, sha3(""));
|
||||
}
|
||||
}
|
||||
@ -219,8 +251,10 @@ var bindTests = []struct {
|
||||
}
|
||||
sim.Commit()
|
||||
|
||||
if _, err := tupler.Tuple(nil); err != nil {
|
||||
if res, err := tupler.Tuple(nil); err != nil {
|
||||
t.Fatalf("Failed to call structure retriever: %v", err)
|
||||
} else if res.A != "Hi" || res.B.Cmp(big.NewInt(1)) != 0 {
|
||||
t.Fatalf("Retrieved value mismatch: have %v/%v, want %v/%v", res.A, res.B, "Hi", 1)
|
||||
}
|
||||
`,
|
||||
},
|
||||
@ -303,6 +337,34 @@ var bindTests = []struct {
|
||||
}
|
||||
`,
|
||||
},
|
||||
// Tests that non-existent contracts are reported as such (simulator test only)
|
||||
{
|
||||
`NonExistent`,
|
||||
`
|
||||
contract NonExistent {
|
||||
function String() constant returns(string) {
|
||||
return "I don't exist";
|
||||
}
|
||||
}
|
||||
`,
|
||||
`6060604052609f8060106000396000f3606060405260e060020a6000350463f97a60058114601a575b005b600060605260c0604052600d60809081527f4920646f6e27742065786973740000000000000000000000000000000000000060a052602060c0908152600d60e081905281906101009060a09080838184600060046012f15050815172ffffffffffffffffffffffffffffffffffffff1916909152505060405161012081900392509050f3`,
|
||||
`[{"constant":true,"inputs":[],"name":"String","outputs":[{"name":"","type":"string"}],"type":"function"}]`,
|
||||
`
|
||||
// Create a simulator and wrap a non-deployed contract
|
||||
sim := backends.NewSimulatedBackend()
|
||||
|
||||
nonexistent, err := NewNonExistent(common.Address{}, sim)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to access non-existent contract: %v", err)
|
||||
}
|
||||
// Ensure that contract calls fail with the appropriate error
|
||||
if res, err := nonexistent.String(nil); err == nil {
|
||||
t.Fatalf("Call succeeded on non-existent contract: %v", res)
|
||||
} else if (err != bind.ErrNoCode) {
|
||||
t.Fatalf("Error mismatch: have %v, want %v", err, bind.ErrNoCode)
|
||||
}
|
||||
`,
|
||||
},
|
||||
}
|
||||
|
||||
// Tests that packages generated by the binder can be successfully compiled and
|
||||
|
@ -211,7 +211,7 @@ package {{.Package}}
|
||||
{{range $i, $_ := .Normalized.Outputs}}ret{{$i}} = new({{bindtype .Type}})
|
||||
{{end}}
|
||||
){{end}}
|
||||
out := {{if .Structured}}ret{{else}}{{if eq (len .Normalized.Outputs) 1}}ret0{{else}}[]interface{}{
|
||||
out := {{if .Structured}}ret{{else}}{{if eq (len .Normalized.Outputs) 1}}ret0{{else}}&[]interface{}{
|
||||
{{range $i, $_ := .Normalized.Outputs}}ret{{$i}},
|
||||
{{end}}
|
||||
}{{end}}{{end}}
|
||||
|
79
accounts/abi/error.go
Normal file
@ -0,0 +1,79 @@
|
||||
// Copyright 2016 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package abi
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
)
|
||||
|
||||
// formatSliceString formats the reflection kind with the given slice size
|
||||
// and returns a formatted string representation.
|
||||
func formatSliceString(kind reflect.Kind, sliceSize int) string {
|
||||
if sliceSize == -1 {
|
||||
return fmt.Sprintf("[]%v", kind)
|
||||
}
|
||||
return fmt.Sprintf("[%d]%v", sliceSize, kind)
|
||||
}
|
||||
|
||||
// sliceTypeCheck checks that the given slice can by assigned to the reflection
|
||||
// type in t.
|
||||
func sliceTypeCheck(t Type, val reflect.Value) error {
|
||||
if val.Kind() != reflect.Slice && val.Kind() != reflect.Array {
|
||||
return typeErr(formatSliceString(t.Kind, t.SliceSize), val.Type())
|
||||
}
|
||||
if t.IsArray && val.Len() != t.SliceSize {
|
||||
return typeErr(formatSliceString(t.Elem.Kind, t.SliceSize), formatSliceString(val.Type().Elem().Kind(), val.Len()))
|
||||
}
|
||||
|
||||
if t.Elem.IsSlice {
|
||||
if val.Len() > 0 {
|
||||
return sliceTypeCheck(*t.Elem, val.Index(0))
|
||||
}
|
||||
} else if t.Elem.IsArray {
|
||||
return sliceTypeCheck(*t.Elem, val.Index(0))
|
||||
}
|
||||
|
||||
if elemKind := val.Type().Elem().Kind(); elemKind != t.Elem.Kind {
|
||||
return typeErr(formatSliceString(t.Elem.Kind, t.SliceSize), val.Type())
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// typeCheck checks that the given reflection value can be assigned to the reflection
|
||||
// type in t.
|
||||
func typeCheck(t Type, value reflect.Value) error {
|
||||
if t.IsSlice || t.IsArray {
|
||||
return sliceTypeCheck(t, value)
|
||||
}
|
||||
|
||||
// Check base type validity. Element types will be checked later on.
|
||||
if t.Kind != value.Kind() {
|
||||
return typeErr(t.Kind, value.Kind())
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// varErr returns a formatted error.
|
||||
func varErr(expected, got reflect.Kind) error {
|
||||
return typeErr(expected, got)
|
||||
}
|
||||
|
||||
// typeErr returns a formatted type casting error.
|
||||
func typeErr(expected, got interface{}) error {
|
||||
return fmt.Errorf("abi: cannot use %v as type %v as argument", got, expected)
|
||||
}
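For reference, the sketch below mirrors the formatting logic of formatSliceString and typeErr outside the package, just to show the shape of the error a caller would see (self-contained, does not import the abi package):

package main

import (
	"fmt"
	"reflect"
)

func formatSliceString(kind reflect.Kind, sliceSize int) string {
	if sliceSize == -1 {
		return fmt.Sprintf("[]%v", kind)
	}
	return fmt.Sprintf("[%d]%v", sliceSize, kind)
}

func typeErr(expected, got interface{}) error {
	return fmt.Errorf("abi: cannot use %v as type %v as argument", got, expected)
}

func main() {
	// E.g. passing a []string where a uint[3] is expected.
	fmt.Println(typeErr(formatSliceString(reflect.Uint, 3), reflect.TypeOf([]string{})))
	// Output: abi: cannot use []string as type [3]uint as argument
}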
|
@ -18,6 +18,7 @@ package abi
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strings"
|
||||
|
||||
"github.com/ethereum/go-ethereum/crypto"
|
||||
@ -38,6 +39,44 @@ type Method struct {
|
||||
Outputs []Argument
|
||||
}
|
||||
|
||||
func (m Method) pack(method Method, args ...interface{}) ([]byte, error) {
|
||||
// Make sure arguments match up and pack them
|
||||
if len(args) != len(method.Inputs) {
|
||||
return nil, fmt.Errorf("argument count mismatch: %d for %d", len(args), len(method.Inputs))
|
||||
}
|
||||
// variable input is the output appended at the end of packed
|
||||
// output. This is used for strings and bytes types input.
|
||||
var variableInput []byte
|
||||
|
||||
var ret []byte
|
||||
for i, a := range args {
|
||||
input := method.Inputs[i]
|
||||
// pack the input
|
||||
packed, err := input.Type.pack(reflect.ValueOf(a))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("`%s` %v", method.Name, err)
|
||||
}
|
||||
|
||||
// check for a slice type (string, bytes, slice)
|
||||
if input.Type.requiresLengthPrefix() {
|
||||
// calculate the offset
|
||||
offset := len(method.Inputs)*32 + len(variableInput)
|
||||
// set the offset
|
||||
ret = append(ret, packNum(reflect.ValueOf(offset), UintTy)...)
|
||||
// Append the packed output to the variable input. The variable input
|
||||
// will be appended at the end of the input.
|
||||
variableInput = append(variableInput, packed...)
|
||||
} else {
|
||||
// append the packed value to the input
|
||||
ret = append(ret, packed...)
|
||||
}
|
||||
}
|
||||
// append the variable input at the end of the packed input
|
||||
ret = append(ret, variableInput...)
|
||||
|
||||
return ret, nil
|
||||
}
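The offset written for a dynamic argument is the head-section size plus the dynamic data already emitted. A tiny worked sketch of that arithmetic for a hypothetical f(uint256 x, string s):

package main

import "fmt"

func main() {
	numInputs := 2          // head words: one 32-byte slot per input
	variableInputSoFar := 0 // no dynamic data packed before the string

	offset := numInputs*32 + variableInputSoFar
	fmt.Println(offset) // 64: the string's length-prefixed data starts after both head words
}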
|
||||
|
||||
// Sig returns the methods string signature according to the ABI spec.
|
||||
//
|
||||
// Example
|
||||
|
@ -24,8 +24,8 @@ import (
|
||||
)
|
||||
|
||||
var (
|
||||
big_t = reflect.TypeOf(&big.Int{})
|
||||
ubig_t = reflect.TypeOf(&big.Int{})
|
||||
big_t = reflect.TypeOf(big.Int{})
|
||||
ubig_t = reflect.TypeOf(big.Int{})
|
||||
byte_t = reflect.TypeOf(byte(0))
|
||||
byte_ts = reflect.TypeOf([]byte(nil))
|
||||
uint_t = reflect.TypeOf(uint(0))
|
||||
|
65
accounts/abi/packing.go
Normal file
@ -0,0 +1,65 @@
|
||||
// Copyright 2016 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package abi
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
)
|
||||
|
||||
// packBytesSlice packs the given bytes as [L, V], the canonical representation
// of a bytes slice
|
||||
func packBytesSlice(bytes []byte, l int) []byte {
|
||||
len := packNum(reflect.ValueOf(l), UintTy)
|
||||
return append(len, common.RightPadBytes(bytes, (l+31)/32*32)...)
|
||||
}
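A worked sketch of the [L, V] layout this helper produces, built only from the common helpers already used in this diff (packNum itself is internal, so the length word is padded by hand here): a 32-byte length word followed by the payload right-padded to a 32-byte boundary.

package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/common"
)

func main() {
	payload := []byte("Hi")

	length := common.LeftPadBytes(big.NewInt(int64(len(payload))).Bytes(), 32)
	value := common.RightPadBytes(payload, (len(payload)+31)/32*32)

	packed := append(length, value...)
	fmt.Printf("L: %x\n", packed[:32]) // ...0002
	fmt.Printf("V: %x\n", packed[32:]) // 4869 followed by 30 zero bytes
}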
|
||||
|
||||
// packElement packs the given reflect value according to the abi specification in
|
||||
// t.
|
||||
func packElement(t Type, reflectValue reflect.Value) []byte {
|
||||
switch t.T {
|
||||
case IntTy, UintTy:
|
||||
return packNum(reflectValue, t.T)
|
||||
case StringTy:
|
||||
return packBytesSlice([]byte(reflectValue.String()), reflectValue.Len())
|
||||
case AddressTy:
|
||||
if reflectValue.Kind() == reflect.Array {
|
||||
reflectValue = mustArrayToByteSlice(reflectValue)
|
||||
}
|
||||
|
||||
return common.LeftPadBytes(reflectValue.Bytes(), 32)
|
||||
case BoolTy:
|
||||
if reflectValue.Bool() {
|
||||
return common.LeftPadBytes(common.Big1.Bytes(), 32)
|
||||
} else {
|
||||
return common.LeftPadBytes(common.Big0.Bytes(), 32)
|
||||
}
|
||||
case BytesTy:
|
||||
if reflectValue.Kind() == reflect.Array {
|
||||
reflectValue = mustArrayToByteSlice(reflectValue)
|
||||
}
|
||||
return packBytesSlice(reflectValue.Bytes(), reflectValue.Len())
|
||||
case FixedBytesTy:
|
||||
if reflectValue.Kind() == reflect.Array {
|
||||
reflectValue = mustArrayToByteSlice(reflectValue)
|
||||
}
|
||||
|
||||
return common.RightPadBytes(reflectValue.Bytes(), 32)
|
||||
}
|
||||
panic("abi: fatal error")
|
||||
}
|
97
accounts/abi/reflect.go
Normal file
@ -0,0 +1,97 @@
|
||||
// Copyright 2016 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package abi
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
)
|
||||
|
||||
// indirect recursively dereferences the value until it either gets the value
|
||||
// or finds a big.Int
|
||||
func indirect(v reflect.Value) reflect.Value {
|
||||
if v.Kind() == reflect.Ptr && v.Elem().Type() != big_t {
|
||||
return indirect(v.Elem())
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
// reflectIntKind returns the reflect.Kind to use for an integer of the given
// size and signedness.
|
||||
func reflectIntKind(unsigned bool, size int) reflect.Kind {
|
||||
switch size {
|
||||
case 8:
|
||||
if unsigned {
|
||||
return reflect.Uint8
|
||||
}
|
||||
return reflect.Int8
|
||||
case 16:
|
||||
if unsigned {
|
||||
return reflect.Uint16
|
||||
}
|
||||
return reflect.Int16
|
||||
case 32:
|
||||
if unsigned {
|
||||
return reflect.Uint32
|
||||
}
|
||||
return reflect.Int32
|
||||
case 64:
|
||||
if unsigned {
|
||||
return reflect.Uint64
|
||||
}
|
||||
return reflect.Int64
|
||||
}
|
||||
return reflect.Ptr
|
||||
}
|
||||
|
||||
// mustArrayToByteSlice creates a new byte slice with the exact same size as value
|
||||
// and copies the bytes in value to the new slice.
|
||||
func mustArrayToByteSlice(value reflect.Value) reflect.Value {
|
||||
slice := reflect.MakeSlice(reflect.TypeOf([]byte{}), value.Len(), value.Len())
|
||||
reflect.Copy(slice, value)
|
||||
return slice
|
||||
}
|
||||
|
||||
// set attempts to assign src to dst by either setting, copying or otherwise.
|
||||
//
|
||||
// set is a bit more lenient when it comes to assignment and doesn't force as
// strict a ruleset as bare `reflect` does.
|
||||
func set(dst, src reflect.Value, output Argument) error {
|
||||
dstType := dst.Type()
|
||||
srcType := src.Type()
|
||||
|
||||
switch {
|
||||
case dstType.AssignableTo(src.Type()):
|
||||
dst.Set(src)
|
||||
case dstType.Kind() == reflect.Array && srcType.Kind() == reflect.Slice:
|
||||
if !dstType.Elem().AssignableTo(r_byte) {
|
||||
return fmt.Errorf("abi: cannot unmarshal %v in to array of elem %v", src.Type(), dstType.Elem())
|
||||
}
|
||||
|
||||
if dst.Len() < output.Type.SliceSize {
|
||||
return fmt.Errorf("abi: cannot unmarshal src (len=%d) in to dst (len=%d)", output.Type.SliceSize, dst.Len())
|
||||
}
|
||||
reflect.Copy(dst, src)
|
||||
case dstType.Kind() == reflect.Interface:
|
||||
dst.Set(src)
|
||||
case dstType.Kind() == reflect.Ptr:
|
||||
return set(dst.Elem(), src, output)
|
||||
default:
|
||||
return fmt.Errorf("abi: cannot unmarshal %v in to %v", src.Type(), dst.Type())
|
||||
}
|
||||
return nil
|
||||
}
|
@ -21,8 +21,6 @@ import (
|
||||
"reflect"
|
||||
"regexp"
|
||||
"strconv"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
)
|
||||
|
||||
const (
|
||||
@ -40,53 +38,60 @@ const (
|
||||
|
||||
// Type is the reflection of the supported argument type
|
||||
type Type struct {
|
||||
IsSlice bool
|
||||
SliceSize int
|
||||
IsSlice, IsArray bool
|
||||
SliceSize int
|
||||
|
||||
Elem *Type
|
||||
|
||||
Kind reflect.Kind
|
||||
Type reflect.Type
|
||||
Size int
|
||||
T byte // Our own type checking
|
||||
|
||||
Kind reflect.Kind
|
||||
Type reflect.Type
|
||||
Size int
|
||||
T byte // Our own type checking
|
||||
stringKind string // holds the unparsed string for deriving signatures
|
||||
}
|
||||
|
||||
var (
|
||||
// fullTypeRegex parses the abi types
|
||||
//
|
||||
// Types can be in the format of:
|
||||
//
|
||||
// Input = Type [ "[" [ Number ] "]" ] Name .
|
||||
// Type = [ "u" ] "int" [ Number ] .
|
||||
//
|
||||
// Examples:
|
||||
//
|
||||
// string int uint real
|
||||
// string32 int8 uint8 uint[]
|
||||
// address int256 uint256 real[2]
|
||||
fullTypeRegex = regexp.MustCompile("([a-zA-Z0-9]+)(\\[([0-9]*)?\\])?")
|
||||
typeRegex = regexp.MustCompile("([a-zA-Z]+)([0-9]*)?")
|
||||
// typeRegex parses the abi sub types
|
||||
typeRegex = regexp.MustCompile("([a-zA-Z]+)([0-9]*)?")
|
||||
)
|
||||
|
||||
// NewType returns a fully parsed Type given by the input string or an error if it can't be parsed.
|
||||
//
|
||||
// Strings can be in the format of:
|
||||
//
|
||||
// Input = Type [ "[" [ Number ] "]" ] Name .
|
||||
// Type = [ "u" ] "int" [ Number ] .
|
||||
//
|
||||
// Examples:
|
||||
//
|
||||
// string int uint real
|
||||
// string32 int8 uint8 uint[]
|
||||
// address int256 uint256 real[2]
|
||||
// NewType creates a new reflection type of abi type given in t.
|
||||
func NewType(t string) (typ Type, err error) {
|
||||
// 1. full string 2. type 3. (opt.) is slice 4. (opt.) size
|
||||
// parse the full representation of the abi-type definition; including:
|
||||
// * full string
|
||||
// * type
|
||||
// * is slice
|
||||
// * slice size
|
||||
res := fullTypeRegex.FindAllStringSubmatch(t, -1)[0]
|
||||
|
||||
// check if type is slice and parse type.
|
||||
switch {
|
||||
case res[3] != "":
|
||||
// err is ignored. Already checked for number through the regexp
|
||||
typ.SliceSize, _ = strconv.Atoi(res[3])
|
||||
typ.IsSlice = true
|
||||
typ.IsArray = true
|
||||
case res[2] != "":
|
||||
typ.IsSlice, typ.SliceSize = true, -1
|
||||
case res[0] == "":
|
||||
return Type{}, fmt.Errorf("abi: type parse error: %s", t)
|
||||
}
|
||||
if typ.IsArray || typ.IsSlice {
|
||||
sliceType, err := NewType(res[1])
|
||||
if err != nil {
|
||||
return Type{}, err
|
||||
}
|
||||
typ.Elem = &sliceType
|
||||
typ.stringKind = sliceType.stringKind + t[len(res[1]):]
|
||||
return typ, nil
|
||||
}
|
||||
|
||||
// parse the type and size of the abi-type.
|
||||
parsedType := typeRegex.FindAllStringSubmatch(res[1], -1)[0]
|
||||
@ -106,24 +111,24 @@ func NewType(t string) (typ Type, err error) {
|
||||
varSize = 256
|
||||
t += "256"
|
||||
}
|
||||
typ.stringKind = t
|
||||
|
||||
switch varType {
|
||||
case "int":
|
||||
typ.Kind = reflect.Int
|
||||
typ.Kind = reflectIntKind(false, varSize)
|
||||
typ.Type = big_t
|
||||
typ.Size = varSize
|
||||
typ.T = IntTy
|
||||
case "uint":
|
||||
typ.Kind = reflect.Uint
|
||||
typ.Kind = reflectIntKind(true, varSize)
|
||||
typ.Type = ubig_t
|
||||
typ.Size = varSize
|
||||
typ.T = UintTy
|
||||
case "bool":
|
||||
typ.Kind = reflect.Bool
|
||||
typ.T = BoolTy
|
||||
case "real": // TODO
|
||||
typ.Kind = reflect.Invalid
|
||||
case "address":
|
||||
typ.Kind = reflect.Array
|
||||
typ.Type = address_t
|
||||
typ.Size = 20
|
||||
typ.T = AddressTy
|
||||
@ -131,123 +136,55 @@ func NewType(t string) (typ Type, err error) {
|
||||
typ.Kind = reflect.String
|
||||
typ.Size = -1
|
||||
typ.T = StringTy
|
||||
if varSize > 0 {
|
||||
typ.Size = 32
|
||||
}
|
||||
case "hash":
|
||||
typ.Kind = reflect.Array
|
||||
typ.Size = 32
|
||||
typ.Type = hash_t
|
||||
typ.T = HashTy
|
||||
case "bytes":
|
||||
typ.Kind = reflect.Array
|
||||
typ.Type = byte_ts
|
||||
typ.Size = varSize
|
||||
sliceType, _ := NewType("uint8")
|
||||
typ.Elem = &sliceType
|
||||
if varSize == 0 {
|
||||
typ.IsSlice = true
|
||||
typ.T = BytesTy
|
||||
typ.SliceSize = -1
|
||||
} else {
|
||||
typ.IsArray = true
|
||||
typ.T = FixedBytesTy
|
||||
typ.SliceSize = varSize
|
||||
}
|
||||
default:
|
||||
return Type{}, fmt.Errorf("unsupported arg type: %s", t)
|
||||
}
|
||||
typ.stringKind = t
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// String implements Stringer
|
||||
func (t Type) String() (out string) {
|
||||
return t.stringKind
|
||||
}
|
||||
|
||||
// packBytesSlice packs the given bytes as [L, V] as the canonical representation
|
||||
// bytes slice
|
||||
func packBytesSlice(bytes []byte, l int) []byte {
|
||||
len := packNum(reflect.ValueOf(l), UintTy)
|
||||
return append(len, common.RightPadBytes(bytes, (l+31)/32*32)...)
|
||||
}
|
||||
func (t Type) pack(v reflect.Value) ([]byte, error) {
|
||||
// dereference pointer first if it's a pointer
|
||||
v = indirect(v)
|
||||
|
||||
// Tests the given input parameter `v` and checks whether it matches certain
// criteria:
// * Big integers are checked for ptr types and whether the given value is
//   assignable
// * Integers are checked for size
// * Strings, addresses and bytes are checked for type and size
|
||||
func (t Type) pack(v interface{}) ([]byte, error) {
|
||||
value := reflect.ValueOf(v)
|
||||
switch kind := value.Kind(); kind {
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
|
||||
// check input is unsigned
|
||||
if t.Type != ubig_t {
|
||||
return nil, fmt.Errorf("abi: type mismatch: %s for %T", t.Type, v)
|
||||
}
|
||||
|
||||
// no implicit type casting
|
||||
if int(value.Type().Size()*8) != t.Size {
|
||||
return nil, fmt.Errorf("abi: cannot use type %T as type uint%d", v, t.Size)
|
||||
}
|
||||
|
||||
return packNum(value, t.T), nil
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
if t.Type != ubig_t {
|
||||
return nil, fmt.Errorf("type mismatch: %s for %T", t.Type, v)
|
||||
}
|
||||
|
||||
// no implicit type casting
|
||||
if int(value.Type().Size()*8) != t.Size {
|
||||
return nil, fmt.Errorf("abi: cannot use type %T as type uint%d", v, t.Size)
|
||||
}
|
||||
return packNum(value, t.T), nil
|
||||
case reflect.Ptr:
|
||||
// If the value is a ptr do a assign check (only used by
|
||||
// big.Int for now)
|
||||
if t.Type == ubig_t && value.Type() != ubig_t {
|
||||
return nil, fmt.Errorf("type mismatch: %s for %T", t.Type, v)
|
||||
}
|
||||
return packNum(value, t.T), nil
|
||||
case reflect.String:
|
||||
if t.Size > -1 && value.Len() > t.Size {
|
||||
return nil, fmt.Errorf("%v out of bound. %d for %d", value.Kind(), value.Len(), t.Size)
|
||||
}
|
||||
|
||||
return packBytesSlice([]byte(value.String()), value.Len()), nil
|
||||
case reflect.Slice:
|
||||
// Byte slice is a special case, it gets treated as a single value
|
||||
if t.T == BytesTy {
|
||||
return packBytesSlice(value.Bytes(), value.Len()), nil
|
||||
}
|
||||
|
||||
if t.SliceSize > -1 && value.Len() > t.SliceSize {
|
||||
return nil, fmt.Errorf("%v out of bound. %d for %d", value.Kind(), value.Len(), t.Size)
|
||||
}
|
||||
|
||||
// Signed / Unsigned check
|
||||
if value.Type() == big_t && (t.T != IntTy && isSigned(value)) || (t.T == UintTy && isSigned(value)) {
|
||||
return nil, fmt.Errorf("slice of incompatible types.")
|
||||
}
|
||||
if err := typeCheck(t, v); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if (t.IsSlice || t.IsArray) && t.T != BytesTy && t.T != FixedBytesTy {
|
||||
var packed []byte
|
||||
for i := 0; i < value.Len(); i++ {
|
||||
val, err := t.pack(value.Index(i).Interface())
|
||||
for i := 0; i < v.Len(); i++ {
|
||||
val, err := t.Elem.pack(v.Index(i))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
packed = append(packed, val...)
|
||||
}
|
||||
return packBytesSlice(packed, value.Len()), nil
|
||||
case reflect.Bool:
|
||||
if value.Bool() {
|
||||
return common.LeftPadBytes(common.Big1.Bytes(), 32), nil
|
||||
} else {
|
||||
return common.LeftPadBytes(common.Big0.Bytes(), 32), nil
|
||||
}
|
||||
case reflect.Array:
|
||||
if v, ok := value.Interface().(common.Address); ok {
|
||||
return common.LeftPadBytes(v[:], 32), nil
|
||||
} else if v, ok := value.Interface().(common.Hash); ok {
|
||||
return v[:], nil
|
||||
}
|
||||
return packBytesSlice(packed, v.Len()), nil
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("ABI: bad input given %v", value.Kind())
|
||||
return packElement(t, v), nil
|
||||
}
|
||||
|
||||
// requiresLengthPrefix returns whether the type requires any sort of length
|
||||
// prefixing.
|
||||
func (t Type) requiresLengthPrefix() bool {
|
||||
return t.T != FixedBytesTy && (t.T == StringTy || t.T == BytesTy || t.IsSlice || t.IsArray)
|
||||
}
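Since NewType is exported, the slice/array flags that drive this decision can be inspected directly. A hedged usage sketch (the type strings are chosen purely for illustration):

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/accounts/abi"
)

func main() {
	for _, s := range []string{"uint256", "bytes", "bytes32", "uint8[3]", "address[]"} {
		t, err := abi.NewType(s)
		if err != nil {
			panic(err)
		}
		fmt.Printf("%-10s slice=%-5v array=%-5v size=%d\n", s, t.IsSlice, t.IsArray, t.SliceSize)
	}
}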
|
||||
|
@ -147,9 +147,21 @@ func (am *Manager) Sign(addr common.Address, hash []byte) (signature []byte, err
|
||||
return crypto.Sign(hash, unlockedKey.PrivateKey)
|
||||
}
|
||||
|
||||
// SignWithPassphrase signs hash if the private key matching the given address can be
|
||||
// decrypted with the given passphrase.
|
||||
func (am *Manager) SignWithPassphrase(addr common.Address, passphrase string, hash []byte) (signature []byte, err error) {
|
||||
_, key, err := am.getDecryptedKey(Account{Address: addr}, passphrase)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
defer zeroKey(key.PrivateKey)
|
||||
return crypto.Sign(hash, key.PrivateKey)
|
||||
}
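A hedged usage sketch for the new helper: sign a hash with an ad-hoc decrypted key instead of unlocking the account first. Only the call pattern is shown; obtaining the *accounts.Manager, the address and the hash is assumed to happen elsewhere.

package signdemo

import (
	"fmt"
	"log"

	"github.com/ethereum/go-ethereum/accounts"
	"github.com/ethereum/go-ethereum/common"
)

// signOnce signs hash with the key of addr, decrypted on the spot with the
// passphrase. Unlike Unlock/TimedUnlock, the key is zeroed right after use,
// so the account stays locked.
func signOnce(am *accounts.Manager, addr common.Address, passphrase string, hash []byte) {
	sig, err := am.SignWithPassphrase(addr, passphrase, hash)
	if err != nil {
		log.Fatalf("one-off signing failed: %v", err)
	}
	fmt.Printf("signature: %x\n", sig)
}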
|
||||
|
||||
// Unlock unlocks the given account indefinitely.
|
||||
func (am *Manager) Unlock(a Account, keyAuth string) error {
|
||||
return am.TimedUnlock(a, keyAuth, 0)
|
||||
func (am *Manager) Unlock(a Account, passphrase string) error {
|
||||
return am.TimedUnlock(a, passphrase, 0)
|
||||
}
|
||||
|
||||
// Lock removes the private key with the given address from memory.
|
||||
@ -284,7 +296,12 @@ func (am *Manager) Import(keyJSON []byte, passphrase, newPassphrase string) (Acc
|
||||
|
||||
// ImportECDSA stores the given key into the key directory, encrypting it with the passphrase.
|
||||
func (am *Manager) ImportECDSA(priv *ecdsa.PrivateKey, passphrase string) (Account, error) {
|
||||
return am.importKey(newKeyFromECDSA(priv), passphrase)
|
||||
key := newKeyFromECDSA(priv)
|
||||
if am.cache.hasAddress(key.Address) {
|
||||
return Account{}, fmt.Errorf("account already exists")
|
||||
}
|
||||
|
||||
return am.importKey(key, passphrase)
|
||||
}
|
||||
|
||||
func (am *Manager) importKey(key *Key, passphrase string) (Account, error) {
|
||||
|
@ -81,6 +81,34 @@ func TestSign(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestSignWithPassphrase(t *testing.T) {
|
||||
dir, am := tmpManager(t, true)
|
||||
defer os.RemoveAll(dir)
|
||||
|
||||
pass := "passwd"
|
||||
acc, err := am.NewAccount(pass)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if _, unlocked := am.unlocked[acc.Address]; unlocked {
|
||||
t.Fatal("expected account to be locked")
|
||||
}
|
||||
|
||||
_, err = am.SignWithPassphrase(acc.Address, pass, testSigData)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if _, unlocked := am.unlocked[acc.Address]; unlocked {
|
||||
t.Fatal("expected account to be locked")
|
||||
}
|
||||
|
||||
if _, err = am.SignWithPassphrase(acc.Address, "invalid passwd", testSigData); err == nil {
|
||||
t.Fatal("expected SignHash to fail with invalid password")
|
||||
}
|
||||
}
|
||||
|
||||
func TestTimedUnlock(t *testing.T) {
|
||||
dir, am := tmpManager(t, true)
|
||||
defer os.RemoveAll(dir)
|
||||
|
@ -14,7 +14,7 @@
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
// +build darwin,!ios freebsd linux netbsd solaris windows
|
||||
// +build darwin,!ios freebsd linux,!arm64 netbsd solaris windows
|
||||
|
||||
package accounts
|
||||
|
||||
|
@ -14,7 +14,7 @@
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
// +build ios !darwin,!freebsd,!linux,!netbsd,!solaris,!windows
|
||||
// +build ios linux,arm64 !darwin,!freebsd,!linux,!netbsd,!solaris,!windows
|
||||
|
||||
// This is the fallback implementation of directory watching.
|
||||
// It is used on unsupported platforms.
|
||||
|
26
build/win-ci-compile.bat
Normal file
@ -0,0 +1,26 @@
|
||||
@echo off
|
||||
if not exist .\build\win-ci-compile.bat (
|
||||
echo This script must be run from the root of the repository.
|
||||
exit /b
|
||||
)
|
||||
if not defined GOPATH (
|
||||
echo GOPATH is not set.
|
||||
exit /b
|
||||
)
|
||||
|
||||
set GOPATH=%GOPATH%;%cd%\Godeps\_workspace
|
||||
set GOBIN=%cd%\build\bin
|
||||
|
||||
rem set gitCommit when running from a Git checkout.
|
||||
set goLinkFlags=""
|
||||
if exist ".git\HEAD" (
|
||||
where /q git
|
||||
if not errorlevel 1 (
|
||||
for /f %%h in ('git rev-parse HEAD') do (
|
||||
set goLinkFlags="-X main.gitCommit=%%h"
|
||||
)
|
||||
)
|
||||
)
|
||||
|
||||
@echo on
|
||||
go install -v -ldflags %goLinkFlags% ./...
|
15
build/win-ci-test.bat
Normal file
@ -0,0 +1,15 @@
|
||||
@echo off
|
||||
if not exist .\build\win-ci-test.bat (
|
||||
echo This script must be run from the root of the repository.
|
||||
exit /b
|
||||
)
|
||||
if not defined GOPATH (
|
||||
echo GOPATH is not set.
|
||||
exit /b
|
||||
)
|
||||
|
||||
set GOPATH=%GOPATH%;%cd%\Godeps\_workspace
|
||||
set GOBIN=%cd%\build\bin
|
||||
|
||||
@echo on
|
||||
go test ./...
|
@ -19,15 +19,12 @@ package main
|
||||
|
||||
import (
|
||||
"crypto/ecdsa"
|
||||
"encoding/hex"
|
||||
"flag"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"os"
|
||||
|
||||
"github.com/ethereum/go-ethereum/cmd/utils"
|
||||
"github.com/ethereum/go-ethereum/crypto"
|
||||
"github.com/ethereum/go-ethereum/logger"
|
||||
"github.com/ethereum/go-ethereum/logger/glog"
|
||||
"github.com/ethereum/go-ethereum/p2p/discover"
|
||||
"github.com/ethereum/go-ethereum/p2p/nat"
|
||||
)
|
||||
@ -43,50 +40,43 @@ func main() {
|
||||
nodeKey *ecdsa.PrivateKey
|
||||
err error
|
||||
)
|
||||
flag.Var(glog.GetVerbosity(), "verbosity", "log verbosity (0-9)")
|
||||
flag.Var(glog.GetVModule(), "vmodule", "log verbosity pattern")
|
||||
glog.SetToStderr(true)
|
||||
flag.Parse()
|
||||
logger.AddLogSystem(logger.NewStdLogSystem(os.Stdout, log.LstdFlags, logger.DebugLevel))
|
||||
|
||||
if *genKey != "" {
|
||||
writeKey(*genKey)
|
||||
key, err := crypto.GenerateKey()
|
||||
if err != nil {
|
||||
utils.Fatalf("could not generate key: %v", err)
|
||||
}
|
||||
if err := crypto.SaveECDSA(*genKey, key); err != nil {
|
||||
utils.Fatalf("%v", err)
|
||||
}
|
||||
os.Exit(0)
|
||||
}
|
||||
|
||||
natm, err := nat.Parse(*natdesc)
|
||||
if err != nil {
|
||||
log.Fatalf("-nat: %v", err)
|
||||
utils.Fatalf("-nat: %v", err)
|
||||
}
|
||||
switch {
|
||||
case *nodeKeyFile == "" && *nodeKeyHex == "":
|
||||
log.Fatal("Use -nodekey or -nodekeyhex to specify a private key")
|
||||
utils.Fatalf("Use -nodekey or -nodekeyhex to specify a private key")
|
||||
case *nodeKeyFile != "" && *nodeKeyHex != "":
|
||||
log.Fatal("Options -nodekey and -nodekeyhex are mutually exclusive")
|
||||
utils.Fatalf("Options -nodekey and -nodekeyhex are mutually exclusive")
|
||||
case *nodeKeyFile != "":
|
||||
if nodeKey, err = crypto.LoadECDSA(*nodeKeyFile); err != nil {
|
||||
log.Fatalf("-nodekey: %v", err)
|
||||
utils.Fatalf("-nodekey: %v", err)
|
||||
}
|
||||
case *nodeKeyHex != "":
|
||||
if nodeKey, err = crypto.HexToECDSA(*nodeKeyHex); err != nil {
|
||||
log.Fatalf("-nodekeyhex: %v", err)
|
||||
utils.Fatalf("-nodekeyhex: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
if _, err := discover.ListenUDP(nodeKey, *listenAddr, natm, ""); err != nil {
|
||||
log.Fatal(err)
|
||||
utils.Fatalf("%v", err)
|
||||
}
|
||||
select {}
|
||||
}
|
||||
|
||||
func writeKey(target string) {
|
||||
key, err := crypto.GenerateKey()
|
||||
if err != nil {
|
||||
log.Fatalf("could not generate key: %v", err)
|
||||
}
|
||||
b := crypto.FromECDSA(key)
|
||||
if target == "-" {
|
||||
fmt.Println(hex.EncodeToString(b))
|
||||
} else {
|
||||
if err := ioutil.WriteFile(target, b, 0600); err != nil {
|
||||
log.Fatal("write error: ", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -23,6 +23,7 @@ import (
|
||||
"github.com/codegangsta/cli"
|
||||
"github.com/ethereum/go-ethereum/accounts"
|
||||
"github.com/ethereum/go-ethereum/cmd/utils"
|
||||
"github.com/ethereum/go-ethereum/console"
|
||||
"github.com/ethereum/go-ethereum/crypto"
|
||||
"github.com/ethereum/go-ethereum/logger"
|
||||
"github.com/ethereum/go-ethereum/logger/glog"
|
||||
@ -215,12 +216,12 @@ func getPassPhrase(prompt string, confirmation bool, i int, passwords []string)
|
||||
if prompt != "" {
|
||||
fmt.Println(prompt)
|
||||
}
|
||||
password, err := utils.Stdin.PasswordPrompt("Passphrase: ")
|
||||
password, err := console.Stdin.PromptPassword("Passphrase: ")
|
||||
if err != nil {
|
||||
utils.Fatalf("Failed to read passphrase: %v", err)
|
||||
}
|
||||
if confirmation {
|
||||
confirm, err := utils.Stdin.PasswordPrompt("Repeat passphrase: ")
|
||||
confirm, err := console.Stdin.PromptPassword("Repeat passphrase: ")
|
||||
if err != nil {
|
||||
utils.Fatalf("Failed to read passphrase confirmation: %v", err)
|
||||
}
|
||||
|
@ -26,6 +26,7 @@ import (
|
||||
"github.com/codegangsta/cli"
|
||||
"github.com/ethereum/go-ethereum/cmd/utils"
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/console"
|
||||
"github.com/ethereum/go-ethereum/core"
|
||||
"github.com/ethereum/go-ethereum/core/state"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
@ -116,7 +117,7 @@ func exportChain(ctx *cli.Context) {
|
||||
}
|
||||
|
||||
func removeDB(ctx *cli.Context) {
|
||||
confirm, err := utils.Stdin.ConfirmPrompt("Remove local database?")
|
||||
confirm, err := console.Stdin.PromptConfirm("Remove local database?")
|
||||
if err != nil {
|
||||
utils.Fatalf("%v", err)
|
||||
}
|
||||
|
167
cmd/geth/consolecmd.go
Normal file
@ -0,0 +1,167 @@
|
||||
// Copyright 2016 The go-ethereum Authors
|
||||
// This file is part of go-ethereum.
|
||||
//
|
||||
// go-ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// go-ethereum is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"os"
|
||||
"os/signal"
|
||||
|
||||
"github.com/codegangsta/cli"
|
||||
"github.com/ethereum/go-ethereum/cmd/utils"
|
||||
"github.com/ethereum/go-ethereum/console"
|
||||
)
|
||||
|
||||
var (
|
||||
consoleCommand = cli.Command{
|
||||
Action: localConsole,
|
||||
Name: "console",
|
||||
Usage: `Geth Console: interactive JavaScript environment`,
|
||||
Description: `
|
||||
The Geth console is an interactive shell for the JavaScript runtime environment
|
||||
which exposes a node admin interface as well as the Ðapp JavaScript API.
|
||||
See https://github.com/ethereum/go-ethereum/wiki/Javascipt-Console
|
||||
`,
|
||||
}
|
||||
attachCommand = cli.Command{
|
||||
Action: remoteConsole,
|
||||
Name: "attach",
|
||||
Usage: `Geth Console: interactive JavaScript environment (connect to node)`,
|
||||
Description: `
|
||||
The Geth console is an interactive shell for the JavaScript runtime environment
|
||||
which exposes a node admin interface as well as the Ðapp JavaScript API.
|
||||
See https://github.com/ethereum/go-ethereum/wiki/Javascipt-Console.
|
||||
This command allows opening a console on a running geth node.
|
||||
`,
|
||||
}
|
||||
javascriptCommand = cli.Command{
|
||||
Action: ephemeralConsole,
|
||||
Name: "js",
|
||||
Usage: `executes the given JavaScript files in the Geth JavaScript VM`,
|
||||
Description: `
|
||||
The JavaScript VM exposes a node admin interface as well as the Ðapp
|
||||
JavaScript API. See https://github.com/ethereum/go-ethereum/wiki/Javascipt-Console
|
||||
`,
|
||||
}
|
||||
)
|
||||
|
||||
// localConsole starts a new geth node, attaching a JavaScript console to it at the
|
||||
// same time.
|
||||
func localConsole(ctx *cli.Context) {
|
||||
// Create and start the node based on the CLI flags
|
||||
node := utils.MakeSystemNode(clientIdentifier, verString, relConfig, makeDefaultExtra(), ctx)
|
||||
startNode(ctx, node)
|
||||
defer node.Stop()
|
||||
|
||||
// Attach to the newly started node and start the JavaScript console
|
||||
client, err := node.Attach()
|
||||
if err != nil {
|
||||
utils.Fatalf("Failed to attach to the inproc geth: %v", err)
|
||||
}
|
||||
config := console.Config{
|
||||
DataDir: node.DataDir(),
|
||||
DocRoot: ctx.GlobalString(utils.JSpathFlag.Name),
|
||||
Client: client,
|
||||
Preload: utils.MakeConsolePreloads(ctx),
|
||||
}
|
||||
console, err := console.New(config)
|
||||
if err != nil {
|
||||
utils.Fatalf("Failed to start the JavaScript console: %v", err)
|
||||
}
|
||||
defer console.Stop(false)
|
||||
|
||||
// If only a short execution was requested, evaluate and return
|
||||
if script := ctx.GlobalString(utils.ExecFlag.Name); script != "" {
|
||||
console.Evaluate(script)
|
||||
return
|
||||
}
|
||||
// Otherwise print the welcome screen and enter interactive mode
|
||||
console.Welcome()
|
||||
console.Interactive()
|
||||
}
|
||||
|
||||
// remoteConsole will connect to a remote geth instance, attaching a JavaScript
|
||||
// console to it.
|
||||
func remoteConsole(ctx *cli.Context) {
|
||||
// Attach to a remotely running geth instance and start the JavaScript console
|
||||
client, err := utils.NewRemoteRPCClient(ctx)
|
||||
if err != nil {
|
||||
utils.Fatalf("Unable to attach to remote geth: %v", err)
|
||||
}
|
||||
config := console.Config{
|
||||
DataDir: utils.MustMakeDataDir(ctx),
|
||||
DocRoot: ctx.GlobalString(utils.JSpathFlag.Name),
|
||||
Client: client,
|
||||
Preload: utils.MakeConsolePreloads(ctx),
|
||||
}
|
||||
console, err := console.New(config)
|
||||
if err != nil {
|
||||
utils.Fatalf("Failed to start the JavaScript console: %v", err)
|
||||
}
|
||||
defer console.Stop(false)
|
||||
|
||||
// If only a short execution was requested, evaluate and return
|
||||
if script := ctx.GlobalString(utils.ExecFlag.Name); script != "" {
|
||||
console.Evaluate(script)
|
||||
return
|
||||
}
|
||||
// Otherwise print the welcome screen and enter interactive mode
|
||||
console.Welcome()
|
||||
console.Interactive()
|
||||
}
|
||||
|
||||
// ephemeralConsole starts a new geth node, attaches an ephemeral JavaScript
|
||||
// console to it, executes each of the files specified as arguments, and tears
// everything down.
|
||||
func ephemeralConsole(ctx *cli.Context) {
|
||||
// Create and start the node based on the CLI flags
|
||||
node := utils.MakeSystemNode(clientIdentifier, verString, relConfig, makeDefaultExtra(), ctx)
|
||||
startNode(ctx, node)
|
||||
defer node.Stop()
|
||||
|
||||
// Attach to the newly started node and start the JavaScript console
|
||||
client, err := node.Attach()
|
||||
if err != nil {
|
||||
utils.Fatalf("Failed to attach to the inproc geth: %v", err)
|
||||
}
|
||||
config := console.Config{
|
||||
DataDir: node.DataDir(),
|
||||
DocRoot: ctx.GlobalString(utils.JSpathFlag.Name),
|
||||
Client: client,
|
||||
Preload: utils.MakeConsolePreloads(ctx),
|
||||
}
|
||||
console, err := console.New(config)
|
||||
if err != nil {
|
||||
utils.Fatalf("Failed to start the JavaScript console: %v", err)
|
||||
}
|
||||
defer console.Stop(false)
|
||||
|
||||
// Evaluate each of the specified JavaScript files
|
||||
for _, file := range ctx.Args() {
|
||||
if err = console.Execute(file); err != nil {
|
||||
utils.Fatalf("Failed to execute %s: %v", file, err)
|
||||
}
|
||||
}
|
||||
// Wait for pending callbacks, but stop for Ctrl-C.
|
||||
abort := make(chan os.Signal, 1)
|
||||
signal.Notify(abort, os.Interrupt)
|
||||
|
||||
go func() {
|
||||
<-abort
|
||||
os.Exit(0)
|
||||
}()
|
||||
console.Stop(true)
|
||||
}
|
162
cmd/geth/consolecmd_test.go
Normal file
@ -0,0 +1,162 @@
|
||||
// Copyright 2016 The go-ethereum Authors
|
||||
// This file is part of go-ethereum.
|
||||
//
|
||||
// go-ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// go-ethereum is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"math/rand"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/rpc"
|
||||
)
|
||||
|
||||
// Tests that a node embedded within a console can be started up properly and
|
||||
// then terminated by closing the input stream.
|
||||
func TestConsoleWelcome(t *testing.T) {
|
||||
coinbase := "0x8605cdbbdb6d264aa742e77020dcbc58fcdce182"
|
||||
|
||||
// Start a geth console, make sure it's cleaned up and terminate the console
|
||||
geth := runGeth(t,
|
||||
"--port", "0", "--maxpeers", "0", "--nodiscover", "--nat", "none",
|
||||
"--etherbase", coinbase, "--shh",
|
||||
"console")
|
||||
|
||||
// Gather all the infos the welcome message needs to contain
|
||||
geth.setTemplateFunc("goos", func() string { return runtime.GOOS })
|
||||
geth.setTemplateFunc("gover", runtime.Version)
|
||||
geth.setTemplateFunc("gethver", func() string { return verString })
|
||||
geth.setTemplateFunc("niltime", func() string { return time.Unix(0, 0).Format(time.RFC1123) })
|
||||
geth.setTemplateFunc("apis", func() []string {
|
||||
apis := append(strings.Split(rpc.DefaultIPCApis, ","), rpc.MetadataApi)
|
||||
sort.Strings(apis)
|
||||
return apis
|
||||
})
|
||||
|
||||
// Verify the actual welcome message to the required template
|
||||
geth.expect(`
|
||||
Welcome to the Geth JavaScript console!
|
||||
|
||||
instance: Geth/v{{gethver}}/{{goos}}/{{gover}}
|
||||
coinbase: {{.Etherbase}}
|
||||
at block: 0 ({{niltime}})
|
||||
datadir: {{.Datadir}}
|
||||
modules:{{range apis}} {{.}}:1.0{{end}}
|
||||
|
||||
> {{.InputLine "exit"}}
|
||||
`)
|
||||
geth.expectExit()
|
||||
}
|
||||
|
||||
// Tests that a console can be attached to a running node via various means.
|
||||
func TestIPCAttachWelcome(t *testing.T) {
|
||||
// Configure the instance for IPC attachment
|
||||
coinbase := "0x8605cdbbdb6d264aa742e77020dcbc58fcdce182"
|
||||
var ipc string
|
||||
if runtime.GOOS == "windows" {
|
||||
ipc = `\\.\pipe\geth` + strconv.Itoa(rand.Int())
|
||||
} else {
|
||||
ws := tmpdir(t)
|
||||
defer os.RemoveAll(ws)
|
||||
ipc = filepath.Join(ws, "geth.ipc")
|
||||
}
|
||||
// Note: we need --shh because testAttachWelcome checks for the default
|
||||
// list of ipc modules and shh is included there.
|
||||
geth := runGeth(t,
|
||||
"--port", "0", "--maxpeers", "0", "--nodiscover", "--nat", "none",
|
||||
"--etherbase", coinbase, "--shh", "--ipcpath", ipc)
|
||||
|
||||
time.Sleep(2 * time.Second) // Simple way to wait for the RPC endpoint to open
|
||||
testAttachWelcome(t, geth, "ipc:"+ipc)
|
||||
|
||||
geth.interrupt()
|
||||
geth.expectExit()
|
||||
}
|
||||
|
||||
func TestHTTPAttachWelcome(t *testing.T) {
|
||||
coinbase := "0x8605cdbbdb6d264aa742e77020dcbc58fcdce182"
|
||||
port := strconv.Itoa(rand.Intn(65535-1024) + 1024) // Yeah, sometimes this will fail, sorry :P
|
||||
geth := runGeth(t,
|
||||
"--port", "0", "--maxpeers", "0", "--nodiscover", "--nat", "none",
|
||||
"--etherbase", coinbase, "--rpc", "--rpcport", port)
|
||||
|
||||
time.Sleep(2 * time.Second) // Simple way to wait for the RPC endpoint to open
|
||||
testAttachWelcome(t, geth, "http://localhost:"+port)
|
||||
|
||||
geth.interrupt()
|
||||
geth.expectExit()
|
||||
}
|
||||
|
||||
func TestWSAttachWelcome(t *testing.T) {
|
||||
coinbase := "0x8605cdbbdb6d264aa742e77020dcbc58fcdce182"
|
||||
port := strconv.Itoa(rand.Intn(65535-1024) + 1024) // Yeah, sometimes this will fail, sorry :P
|
||||
|
||||
geth := runGeth(t,
|
||||
"--port", "0", "--maxpeers", "0", "--nodiscover", "--nat", "none",
|
||||
"--etherbase", coinbase, "--ws", "--wsport", port)
|
||||
|
||||
time.Sleep(2 * time.Second) // Simple way to wait for the RPC endpoint to open
|
||||
testAttachWelcome(t, geth, "ws://localhost:"+port)
|
||||
|
||||
geth.interrupt()
|
||||
geth.expectExit()
|
||||
}
|
||||
|
||||
func testAttachWelcome(t *testing.T, geth *testgeth, endpoint string) {
|
||||
// Attach to a running geth node and terminate immediately
|
||||
attach := runGeth(t, "attach", endpoint)
|
||||
defer attach.expectExit()
|
||||
attach.stdin.Close()
|
||||
|
||||
// Gather all the infos the welcome message needs to contain
|
||||
attach.setTemplateFunc("goos", func() string { return runtime.GOOS })
|
||||
attach.setTemplateFunc("gover", runtime.Version)
|
||||
attach.setTemplateFunc("gethver", func() string { return verString })
|
||||
attach.setTemplateFunc("etherbase", func() string { return geth.Etherbase })
|
||||
attach.setTemplateFunc("niltime", func() string { return time.Unix(0, 0).Format(time.RFC1123) })
|
||||
attach.setTemplateFunc("ipc", func() bool { return strings.HasPrefix(endpoint, "ipc") })
|
||||
attach.setTemplateFunc("datadir", func() string { return geth.Datadir })
|
||||
attach.setTemplateFunc("apis", func() []string {
|
||||
var apis []string
|
||||
if strings.HasPrefix(endpoint, "ipc") {
|
||||
apis = append(strings.Split(rpc.DefaultIPCApis, ","), rpc.MetadataApi)
|
||||
} else {
|
||||
apis = append(strings.Split(rpc.DefaultHTTPApis, ","), rpc.MetadataApi)
|
||||
}
|
||||
sort.Strings(apis)
|
||||
return apis
|
||||
})
|
||||
|
||||
// Verify the actual welcome message to the required template
|
||||
attach.expect(`
|
||||
Welcome to the Geth JavaScript console!
|
||||
|
||||
instance: Geth/v{{gethver}}/{{goos}}/{{gover}}
|
||||
coinbase: {{etherbase}}
|
||||
at block: 0 ({{niltime}}){{if ipc}}
|
||||
datadir: {{datadir}}{{end}}
|
||||
modules:{{range apis}} {{.}}:1.0{{end}}
|
||||
|
||||
> {{.InputLine "exit" }}
|
||||
`)
|
||||
attach.expectExit()
|
||||
}
|
429
cmd/geth/js.go
@ -1,429 +0,0 @@
|
||||
// Copyright 2015 The go-ethereum Authors
|
||||
// This file is part of go-ethereum.
|
||||
//
|
||||
// go-ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// go-ethereum is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math/big"
|
||||
"os"
|
||||
"os/signal"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/codegangsta/cli"
|
||||
"github.com/ethereum/go-ethereum/accounts"
|
||||
"github.com/ethereum/go-ethereum/cmd/utils"
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/common/registrar"
|
||||
"github.com/ethereum/go-ethereum/eth"
|
||||
"github.com/ethereum/go-ethereum/internal/web3ext"
|
||||
re "github.com/ethereum/go-ethereum/jsre"
|
||||
"github.com/ethereum/go-ethereum/node"
|
||||
"github.com/ethereum/go-ethereum/rpc"
|
||||
"github.com/peterh/liner"
|
||||
"github.com/robertkrimen/otto"
|
||||
)
|
||||
|
||||
var (
|
||||
passwordRegexp = regexp.MustCompile("personal.[nu]")
|
||||
leadingSpace = regexp.MustCompile("^ ")
|
||||
onlyws = regexp.MustCompile("^\\s*$")
|
||||
exit = regexp.MustCompile("^\\s*exit\\s*;*\\s*$")
|
||||
)
|
||||
|
||||
type jsre struct {
|
||||
re *re.JSRE
|
||||
stack *node.Node
|
||||
wait chan *big.Int
|
||||
ps1 string
|
||||
atexit func()
|
||||
corsDomain string
|
||||
client rpc.Client
|
||||
}
|
||||
|
||||
func makeCompleter(re *jsre) liner.WordCompleter {
|
||||
return func(line string, pos int) (head string, completions []string, tail string) {
|
||||
if len(line) == 0 || pos == 0 {
|
||||
return "", nil, ""
|
||||
}
|
||||
// chuck data to relevant part for autocompletion, e.g. in case of nested lines eth.getBalance(eth.coinb<tab><tab>
|
||||
i := 0
|
||||
for i = pos - 1; i > 0; i-- {
|
||||
if line[i] == '.' || (line[i] >= 'a' && line[i] <= 'z') || (line[i] >= 'A' && line[i] <= 'Z') {
|
||||
continue
|
||||
}
|
||||
if i >= 3 && line[i] == '3' && line[i-3] == 'w' && line[i-2] == 'e' && line[i-1] == 'b' {
|
||||
continue
|
||||
}
|
||||
i += 1
|
||||
break
|
||||
}
|
||||
return line[:i], re.re.CompleteKeywords(line[i:pos]), line[pos:]
|
||||
}
|
||||
}
|
||||
|
||||
func newLightweightJSRE(docRoot string, client rpc.Client, datadir string, interactive bool) *jsre {
|
||||
js := &jsre{ps1: "> "}
|
||||
js.wait = make(chan *big.Int)
|
||||
js.client = client
|
||||
js.re = re.New(docRoot)
|
||||
if err := js.apiBindings(); err != nil {
|
||||
utils.Fatalf("Unable to initialize console - %v", err)
|
||||
}
|
||||
js.setupInput(datadir)
|
||||
return js
|
||||
}
|
||||
|
||||
func newJSRE(stack *node.Node, docRoot, corsDomain string, client rpc.Client, interactive bool) *jsre {
|
||||
js := &jsre{stack: stack, ps1: "> "}
|
||||
// set default cors domain used by startRpc from CLI flag
|
||||
js.corsDomain = corsDomain
|
||||
js.wait = make(chan *big.Int)
|
||||
js.client = client
|
||||
js.re = re.New(docRoot)
|
||||
if err := js.apiBindings(); err != nil {
|
||||
utils.Fatalf("Unable to connect - %v", err)
|
||||
}
|
||||
js.setupInput(stack.DataDir())
|
||||
return js
|
||||
}
|
||||
|
||||
func (self *jsre) setupInput(datadir string) {
|
||||
self.withHistory(datadir, func(hist *os.File) { utils.Stdin.ReadHistory(hist) })
|
||||
utils.Stdin.SetCtrlCAborts(true)
|
||||
utils.Stdin.SetWordCompleter(makeCompleter(self))
|
||||
utils.Stdin.SetTabCompletionStyle(liner.TabPrints)
|
||||
self.atexit = func() {
|
||||
self.withHistory(datadir, func(hist *os.File) {
|
||||
hist.Truncate(0)
|
||||
utils.Stdin.WriteHistory(hist)
|
||||
})
|
||||
utils.Stdin.Close()
|
||||
close(self.wait)
|
||||
}
|
||||
}
|
||||
|
||||
func (self *jsre) batch(statement string) {
|
||||
err := self.re.EvalAndPrettyPrint(statement)
|
||||
|
||||
if err != nil {
|
||||
fmt.Printf("error: %v", err)
|
||||
}
|
||||
|
||||
if self.atexit != nil {
|
||||
self.atexit()
|
||||
}
|
||||
|
||||
self.re.Stop(false)
|
||||
}
|
||||
|
||||
// show summary of current geth instance
|
||||
func (self *jsre) welcome() {
|
||||
self.re.Run(`
|
||||
(function () {
|
||||
console.log('instance: ' + web3.version.node);
|
||||
console.log("coinbase: " + eth.coinbase);
|
||||
var ts = 1000 * eth.getBlock(eth.blockNumber).timestamp;
|
||||
console.log("at block: " + eth.blockNumber + " (" + new Date(ts) + ")");
|
||||
console.log(' datadir: ' + admin.datadir);
|
||||
})();
|
||||
`)
|
||||
if modules, err := self.supportedApis(); err == nil {
|
||||
loadedModules := make([]string, 0)
|
||||
for api, version := range modules {
|
||||
loadedModules = append(loadedModules, fmt.Sprintf("%s:%s", api, version))
|
||||
}
|
||||
sort.Strings(loadedModules)
|
||||
}
|
||||
}
|
||||
|
||||
func (self *jsre) supportedApis() (map[string]string, error) {
|
||||
return self.client.SupportedModules()
|
||||
}
|
||||
|
||||
func (js *jsre) apiBindings() error {
|
||||
apis, err := js.supportedApis()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
apiNames := make([]string, 0, len(apis))
|
||||
for a, _ := range apis {
|
||||
apiNames = append(apiNames, a)
|
||||
}
|
||||
|
||||
jeth := utils.NewJeth(js.re, js.client)
|
||||
js.re.Set("jeth", struct{}{})
|
||||
t, _ := js.re.Get("jeth")
|
||||
jethObj := t.Object()
|
||||
|
||||
jethObj.Set("send", jeth.Send)
|
||||
jethObj.Set("sendAsync", jeth.Send)
|
||||
|
||||
err = js.re.Compile("bignumber.js", re.BigNumber_JS)
|
||||
if err != nil {
|
||||
utils.Fatalf("Error loading bignumber.js: %v", err)
|
||||
}
|
||||
|
||||
err = js.re.Compile("web3.js", re.Web3_JS)
|
||||
if err != nil {
|
||||
utils.Fatalf("Error loading web3.js: %v", err)
|
||||
}
|
||||
|
||||
_, err = js.re.Run("var Web3 = require('web3');")
|
||||
if err != nil {
|
||||
utils.Fatalf("Error requiring web3: %v", err)
|
||||
}
|
||||
|
||||
_, err = js.re.Run("var web3 = new Web3(jeth);")
|
||||
if err != nil {
|
||||
utils.Fatalf("Error setting web3 provider: %v", err)
|
||||
}
|
||||
|
||||
// load only supported API's in javascript runtime
|
||||
shortcuts := "var eth = web3.eth; var personal = web3.personal; "
|
||||
for _, apiName := range apiNames {
|
||||
if apiName == "web3" || apiName == "rpc" {
|
||||
continue // manually mapped or ignore
|
||||
}
|
||||
|
||||
if jsFile, ok := web3ext.Modules[apiName]; ok {
|
||||
if err = js.re.Compile(fmt.Sprintf("%s.js", apiName), jsFile); err == nil {
|
||||
shortcuts += fmt.Sprintf("var %s = web3.%s; ", apiName, apiName)
|
||||
} else {
|
||||
utils.Fatalf("Error loading %s.js: %v", apiName, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
_, err = js.re.Run(shortcuts)
|
||||
if err != nil {
|
||||
utils.Fatalf("Error setting namespaces: %v", err)
|
||||
}
|
||||
|
||||
js.re.Run(`var GlobalRegistrar = eth.contract(` + registrar.GlobalRegistrarAbi + `); registrar = GlobalRegistrar.at("` + registrar.GlobalRegistrarAddr + `");`)
|
||||
|
||||
// overrule some of the methods that require a password as input and ask for it interactively
|
||||
p, err := js.re.Get("personal")
|
||||
if err != nil {
|
||||
fmt.Println("Unable to overrule sensitive methods in personal module")
|
||||
return nil
|
||||
}
|
||||
|
||||
// Override the unlockAccount and newAccount methods on the personal object since these require user interaction.
|
||||
// Assign the original web3 callbacks to jeth.unlockAccount and jeth.newAccount in the jsre. These will be called
// by the jeth.* methods after they have obtained the password from the user, and they forward the original web3 request to the backend.
|
||||
if persObj := p.Object(); persObj != nil { // make sure the personal api is enabled over the interface
|
||||
js.re.Run(`jeth.unlockAccount = personal.unlockAccount;`)
|
||||
persObj.Set("unlockAccount", jeth.UnlockAccount)
|
||||
js.re.Run(`jeth.newAccount = personal.newAccount;`)
|
||||
persObj.Set("newAccount", jeth.NewAccount)
|
||||
}
|
||||
|
||||
// The admin.sleep and admin.sleepBlocks are offered by the console and not by the RPC layer.
|
||||
// Bind these if the admin module is available.
|
||||
if a, err := js.re.Get("admin"); err == nil {
|
||||
if adminObj := a.Object(); adminObj != nil {
|
||||
adminObj.Set("sleepBlocks", jeth.SleepBlocks)
|
||||
adminObj.Set("sleep", jeth.Sleep)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (self *jsre) AskPassword() (string, bool) {
|
||||
pass, err := utils.Stdin.PasswordPrompt("Passphrase: ")
|
||||
if err != nil {
|
||||
return "", false
|
||||
}
|
||||
return pass, true
|
||||
}
|
||||
|
||||
func (self *jsre) ConfirmTransaction(tx string) bool {
|
||||
// Retrieve the Ethereum instance from the node
|
||||
var ethereum *eth.Ethereum
|
||||
if err := self.stack.Service(ðereum); err != nil {
|
||||
return false
|
||||
}
|
||||
// If natspec is enabled, ask for permission
|
||||
if ethereum.NatSpec && false /* disabled for now */ {
|
||||
// notice := natspec.GetNotice(self.xeth, tx, ethereum.HTTPClient())
|
||||
// fmt.Println(notice)
|
||||
// answer, _ := self.Prompt("Confirm Transaction [y/n]")
|
||||
// return strings.HasPrefix(strings.Trim(answer, " "), "y")
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func (self *jsre) UnlockAccount(addr []byte) bool {
|
||||
fmt.Printf("Please unlock account %x.\n", addr)
|
||||
pass, err := utils.Stdin.PasswordPrompt("Passphrase: ")
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
// TODO: allow retry
|
||||
var ethereum *eth.Ethereum
|
||||
if err := self.stack.Service(ðereum); err != nil {
|
||||
return false
|
||||
}
|
||||
a := accounts.Account{Address: common.BytesToAddress(addr)}
|
||||
if err := ethereum.AccountManager().Unlock(a, pass); err != nil {
|
||||
return false
|
||||
} else {
|
||||
fmt.Println("Account is now unlocked for this session.")
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
// preloadJSFiles loads JS files that the user has specified with ctx.PreLoadJSFlag into
// the JSRE. If not all files could be loaded it returns an error describing the failure.
|
||||
func (self *jsre) preloadJSFiles(ctx *cli.Context) error {
|
||||
if ctx.GlobalString(utils.PreLoadJSFlag.Name) != "" {
|
||||
assetPath := ctx.GlobalString(utils.JSpathFlag.Name)
|
||||
jsFiles := strings.Split(ctx.GlobalString(utils.PreLoadJSFlag.Name), ",")
|
||||
for _, file := range jsFiles {
|
||||
filename := common.AbsolutePath(assetPath, strings.TrimSpace(file))
|
||||
if err := self.re.Exec(filename); err != nil {
|
||||
return fmt.Errorf("%s: %v", file, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// exec executes the JS file with the given filename and stops the JSRE
|
||||
func (self *jsre) exec(filename string) error {
|
||||
if err := self.re.Exec(filename); err != nil {
|
||||
self.re.Stop(false)
|
||||
return fmt.Errorf("Javascript Error: %v", err)
|
||||
}
|
||||
self.re.Stop(true)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (self *jsre) interactive() {
|
||||
// Read input lines.
|
||||
prompt := make(chan string)
|
||||
inputln := make(chan string)
|
||||
go func() {
|
||||
defer close(inputln)
|
||||
for {
|
||||
line, err := utils.Stdin.Prompt(<-prompt)
|
||||
if err != nil {
|
||||
if err == liner.ErrPromptAborted { // ctrl-C
|
||||
self.resetPrompt()
|
||||
inputln <- ""
|
||||
continue
|
||||
}
|
||||
return
|
||||
}
|
||||
inputln <- line
|
||||
}
|
||||
}()
|
||||
// Wait for Ctrl-C, too.
|
||||
sig := make(chan os.Signal, 1)
|
||||
signal.Notify(sig, os.Interrupt)
|
||||
|
||||
defer func() {
|
||||
if self.atexit != nil {
|
||||
self.atexit()
|
||||
}
|
||||
self.re.Stop(false)
|
||||
}()
|
||||
for {
|
||||
prompt <- self.ps1
|
||||
select {
|
||||
case <-sig:
|
||||
fmt.Println("caught interrupt, exiting")
|
||||
return
|
||||
case input, ok := <-inputln:
|
||||
if !ok || indentCount <= 0 && exit.MatchString(input) {
|
||||
return
|
||||
}
|
||||
if onlyws.MatchString(input) {
|
||||
continue
|
||||
}
|
||||
str += input + "\n"
|
||||
self.setIndent()
|
||||
if indentCount <= 0 {
|
||||
if mustLogInHistory(str) {
|
||||
utils.Stdin.AppendHistory(str[:len(str)-1])
|
||||
}
|
||||
self.parseInput(str)
|
||||
str = ""
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func mustLogInHistory(input string) bool {
|
||||
return len(input) == 0 ||
|
||||
passwordRegexp.MatchString(input) ||
|
||||
!leadingSpace.MatchString(input)
|
||||
}
|
||||
|
||||
func (self *jsre) withHistory(datadir string, op func(*os.File)) {
|
||||
hist, err := os.OpenFile(filepath.Join(datadir, "history"), os.O_RDWR|os.O_CREATE, os.ModePerm)
|
||||
if err != nil {
|
||||
fmt.Printf("unable to open history file: %v\n", err)
|
||||
return
|
||||
}
|
||||
op(hist)
|
||||
hist.Close()
|
||||
}
|
||||
|
||||
func (self *jsre) parseInput(code string) {
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
fmt.Println("[native] error", r)
|
||||
}
|
||||
}()
|
||||
if err := self.re.EvalAndPrettyPrint(code); err != nil {
|
||||
if ottoErr, ok := err.(*otto.Error); ok {
|
||||
fmt.Println(ottoErr.String())
|
||||
} else {
|
||||
fmt.Println(err)
|
||||
}
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
var indentCount = 0
|
||||
var str = ""
|
||||
|
||||
func (self *jsre) resetPrompt() {
|
||||
indentCount = 0
|
||||
str = ""
|
||||
self.ps1 = "> "
|
||||
}
|
||||
|
||||
func (self *jsre) setIndent() {
|
||||
open := strings.Count(str, "{")
|
||||
open += strings.Count(str, "(")
|
||||
closed := strings.Count(str, "}")
|
||||
closed += strings.Count(str, ")")
|
||||
indentCount = open - closed
|
||||
if indentCount <= 0 {
|
||||
self.ps1 = "> "
|
||||
} else {
|
||||
self.ps1 = strings.Join(make([]string, indentCount*2), "..")
|
||||
self.ps1 += " "
|
||||
}
|
||||
}
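The console evaluates input only once the braces and parentheses opened so far are balanced again; until then setIndent switches ps1 to a ".." continuation prompt. A small self-contained sketch of that accounting (promptFor is an illustrative name, not part of the code above):

package main

import (
	"fmt"
	"strings"
)

// promptFor returns the prompt that would be shown after the given, still
// unevaluated, input: "> " when balanced, a ".." continuation prompt otherwise.
// This mirrors setIndent above.
func promptFor(buffered string) (prompt string, complete bool) {
	open := strings.Count(buffered, "{") + strings.Count(buffered, "(")
	closed := strings.Count(buffered, "}") + strings.Count(buffered, ")")
	indent := open - closed
	if indent <= 0 {
		return "> ", true
	}
	return strings.Join(make([]string, indent*2), "..") + " ", false
}

func main() {
	buf := ""
	for _, line := range []string{"function mul7(a) {", "  return a * 7;", "}"} {
		buf += line + "\n"
		prompt, complete := promptFor(buf)
		fmt.Printf("after %q: complete=%v, next prompt=%q\n", line, complete, prompt)
		if complete {
			buf = "" // the console would evaluate buf here
		}
	}
}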
|
@ -1,500 +0,0 @@
|
||||
// Copyright 2015 The go-ethereum Authors
|
||||
// This file is part of go-ethereum.
|
||||
//
|
||||
// go-ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// go-ethereum is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"math/big"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/accounts"
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/common/compiler"
|
||||
"github.com/ethereum/go-ethereum/common/httpclient"
|
||||
"github.com/ethereum/go-ethereum/core"
|
||||
"github.com/ethereum/go-ethereum/crypto"
|
||||
"github.com/ethereum/go-ethereum/eth"
|
||||
"github.com/ethereum/go-ethereum/ethdb"
|
||||
"github.com/ethereum/go-ethereum/node"
|
||||
)
|
||||
|
||||
const (
|
||||
testSolcPath = ""
|
||||
solcVersion = "0.9.23"
|
||||
testAddress = "0x8605cdbbdb6d264aa742e77020dcbc58fcdce182"
|
||||
testBalance = "10000000000000000000"
|
||||
// of empty string
|
||||
testHash = "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
|
||||
)
|
||||
|
||||
var (
|
||||
versionRE = regexp.MustCompile(strconv.Quote(`"compilerVersion":"` + solcVersion + `"`))
|
||||
testNodeKey, _ = crypto.HexToECDSA("4b50fa71f5c3eeb8fdc452224b2395af2fcc3d125e06c32c82e048c0559db03f")
|
||||
testAccount, _ = crypto.HexToECDSA("e6fab74a43941f82d89cb7faa408e227cdad3153c4720e540e855c19b15e6674")
|
||||
testGenesis = `{"` + testAddress[2:] + `": {"balance": "` + testBalance + `"}}`
|
||||
)
|
||||
|
||||
type testjethre struct {
|
||||
*jsre
|
||||
lastConfirm string
|
||||
client *httpclient.HTTPClient
|
||||
}
|
||||
|
||||
// Temporarily disabled while natspec hasn't been migrated
|
||||
//func (self *testjethre) ConfirmTransaction(tx string) bool {
|
||||
// var ethereum *eth.Ethereum
|
||||
// self.stack.Service(ðereum)
|
||||
//
|
||||
// if ethereum.NatSpec {
|
||||
// self.lastConfirm = natspec.GetNotice(self.xeth, tx, self.client)
|
||||
// }
|
||||
// return true
|
||||
//}
|
||||
|
||||
func testJEthRE(t *testing.T) (string, *testjethre, *node.Node) {
|
||||
return testREPL(t, nil)
|
||||
}
|
||||
|
||||
func testREPL(t *testing.T, config func(*eth.Config)) (string, *testjethre, *node.Node) {
|
||||
tmp, err := ioutil.TempDir("", "geth-test")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
// Create a networkless protocol stack
|
||||
stack, err := node.New(&node.Config{DataDir: tmp, PrivateKey: testNodeKey, Name: "test", NoDiscovery: true})
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create node: %v", err)
|
||||
}
|
||||
// Initialize and register the Ethereum protocol
|
||||
accman := accounts.NewPlaintextManager(filepath.Join(tmp, "keystore"))
|
||||
db, _ := ethdb.NewMemDatabase()
|
||||
core.WriteGenesisBlockForTesting(db, core.GenesisAccount{
|
||||
Address: common.HexToAddress(testAddress),
|
||||
Balance: common.String2Big(testBalance),
|
||||
})
|
||||
ethConf := ð.Config{
|
||||
ChainConfig: &core.ChainConfig{HomesteadBlock: new(big.Int)},
|
||||
TestGenesisState: db,
|
||||
AccountManager: accman,
|
||||
DocRoot: "/",
|
||||
SolcPath: testSolcPath,
|
||||
PowTest: true,
|
||||
}
|
||||
if config != nil {
|
||||
config(ethConf)
|
||||
}
|
||||
if err := stack.Register(func(ctx *node.ServiceContext) (node.Service, error) {
|
||||
return eth.New(ctx, ethConf)
|
||||
}); err != nil {
|
||||
t.Fatalf("failed to register ethereum protocol: %v", err)
|
||||
}
|
||||
// Initialize all the keys for testing
|
||||
a, err := accman.ImportECDSA(testAccount, "")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := accman.Unlock(a, ""); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
// Start the node and assemble the REPL tester
|
||||
if err := stack.Start(); err != nil {
|
||||
t.Fatalf("failed to start test stack: %v", err)
|
||||
}
|
||||
var ethereum *eth.Ethereum
|
||||
stack.Service(ðereum)
|
||||
|
||||
assetPath := filepath.Join(os.Getenv("GOPATH"), "src", "github.com", "ethereum", "go-ethereum", "cmd", "mist", "assets", "ext")
|
||||
client, err := stack.Attach()
|
||||
if err != nil {
|
||||
t.Fatalf("failed to attach to node: %v", err)
|
||||
}
|
||||
tf := &testjethre{client: ethereum.HTTPClient()}
|
||||
repl := newJSRE(stack, assetPath, "", client, false)
|
||||
tf.jsre = repl
|
||||
return tmp, tf, stack
|
||||
}
|
||||
|
||||
func TestNodeInfo(t *testing.T) {
|
||||
t.Skip("broken after p2p update")
|
||||
tmp, repl, ethereum := testJEthRE(t)
|
||||
defer ethereum.Stop()
|
||||
defer os.RemoveAll(tmp)
|
||||
|
||||
want := `{"DiscPort":0,"IP":"0.0.0.0","ListenAddr":"","Name":"test","NodeID":"4cb2fc32924e94277bf94b5e4c983beedb2eabd5a0bc941db32202735c6625d020ca14a5963d1738af43b6ac0a711d61b1a06de931a499fe2aa0b1a132a902b5","NodeUrl":"enode://4cb2fc32924e94277bf94b5e4c983beedb2eabd5a0bc941db32202735c6625d020ca14a5963d1738af43b6ac0a711d61b1a06de931a499fe2aa0b1a132a902b5@0.0.0.0:0","TCPPort":0,"Td":"131072"}`
|
||||
checkEvalJSON(t, repl, `admin.nodeInfo`, want)
|
||||
}
|
||||
|
||||
func TestAccounts(t *testing.T) {
|
||||
tmp, repl, node := testJEthRE(t)
|
||||
defer node.Stop()
|
||||
defer os.RemoveAll(tmp)
|
||||
|
||||
checkEvalJSON(t, repl, `eth.accounts`, `["`+testAddress+`"]`)
|
||||
checkEvalJSON(t, repl, `eth.coinbase`, `"`+testAddress+`"`)
|
||||
val, err := repl.re.Run(`jeth.newAccount("password")`)
|
||||
if err != nil {
|
||||
t.Errorf("expected no error, got %v", err)
|
||||
}
|
||||
addr := val.String()
|
||||
if !regexp.MustCompile(`0x[0-9a-f]{40}`).MatchString(addr) {
|
||||
t.Errorf("address not hex: %q", addr)
|
||||
}
|
||||
|
||||
checkEvalJSON(t, repl, `eth.accounts`, `["`+testAddress+`","`+addr+`"]`)
|
||||
|
||||
}
|
||||
|
||||
func TestBlockChain(t *testing.T) {
|
||||
tmp, repl, node := testJEthRE(t)
|
||||
defer node.Stop()
|
||||
defer os.RemoveAll(tmp)
|
||||
// get current block dump before export/import.
|
||||
val, err := repl.re.Run("JSON.stringify(debug.dumpBlock(eth.blockNumber))")
|
||||
if err != nil {
|
||||
t.Errorf("expected no error, got %v", err)
|
||||
}
|
||||
beforeExport := val.String()
|
||||
|
||||
// do the export
|
||||
extmp, err := ioutil.TempDir("", "geth-test-export")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer os.RemoveAll(extmp)
|
||||
tmpfile := filepath.Join(extmp, "export.chain")
|
||||
tmpfileq := strconv.Quote(tmpfile)
|
||||
|
||||
var ethereum *eth.Ethereum
|
||||
node.Service(ðereum)
|
||||
ethereum.BlockChain().Reset()
|
||||
|
||||
checkEvalJSON(t, repl, `admin.exportChain(`+tmpfileq+`)`, `true`)
|
||||
if _, err := os.Stat(tmpfile); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// check import, verify that dumpBlock gives the same result.
|
||||
checkEvalJSON(t, repl, `admin.importChain(`+tmpfileq+`)`, `true`)
|
||||
checkEvalJSON(t, repl, `debug.dumpBlock(eth.blockNumber)`, beforeExport)
|
||||
}
|
||||
|
||||
func TestMining(t *testing.T) {
|
||||
tmp, repl, node := testJEthRE(t)
|
||||
defer node.Stop()
|
||||
defer os.RemoveAll(tmp)
|
||||
checkEvalJSON(t, repl, `eth.mining`, `false`)
|
||||
}
|
||||
|
||||
func TestRPC(t *testing.T) {
|
||||
tmp, repl, node := testJEthRE(t)
|
||||
defer node.Stop()
|
||||
defer os.RemoveAll(tmp)
|
||||
|
||||
checkEvalJSON(t, repl, `admin.startRPC("127.0.0.1", 5004, "*", "web3,eth,net")`, `true`)
|
||||
}
|
||||
|
||||
func TestCheckTestAccountBalance(t *testing.T) {
|
||||
t.Skip() // i don't think it tests the correct behaviour here. it's actually testing
|
||||
// internals which shouldn't be tested. This now fails because of a change in the core
|
||||
// and i have no means to fix this, sorry - @obscuren
|
||||
tmp, repl, node := testJEthRE(t)
|
||||
defer node.Stop()
|
||||
defer os.RemoveAll(tmp)
|
||||
|
||||
repl.re.Run(`primary = "` + testAddress + `"`)
|
||||
checkEvalJSON(t, repl, `eth.getBalance(primary)`, `"`+testBalance+`"`)
|
||||
}
|
||||
|
||||
func TestSignature(t *testing.T) {
|
||||
tmp, repl, node := testJEthRE(t)
|
||||
defer node.Stop()
|
||||
defer os.RemoveAll(tmp)
|
||||
|
||||
val, err := repl.re.Run(`eth.sign("` + testAddress + `", "` + testHash + `")`)
|
||||
|
||||
// This is a very preliminary test, lacking actual signature verification
|
||||
if err != nil {
|
||||
t.Errorf("Error running js: %v", err)
|
||||
return
|
||||
}
|
||||
output := val.String()
|
||||
t.Logf("Output: %v", output)
|
||||
|
||||
regex := regexp.MustCompile(`^0x[0-9a-f]{130}$`)
|
||||
if !regex.MatchString(output) {
|
||||
t.Errorf("Signature is not 65 bytes represented in hexadecimal.")
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
func TestContract(t *testing.T) {
|
||||
t.Skip("contract testing is implemented with mining in ethash test mode. This takes about 7seconds to run. Unskip and run on demand")
|
||||
coinbase := common.HexToAddress(testAddress)
|
||||
tmp, repl, ethereum := testREPL(t, func(conf *eth.Config) {
|
||||
conf.Etherbase = coinbase
|
||||
conf.PowTest = true
|
||||
})
|
||||
if err := ethereum.Start(); err != nil {
|
||||
t.Errorf("error starting ethereum: %v", err)
|
||||
return
|
||||
}
|
||||
defer ethereum.Stop()
|
||||
defer os.RemoveAll(tmp)
|
||||
|
||||
// Temporarily disabled while the registrar isn't migrated
|
||||
//reg := registrar.New(repl.xeth)
|
||||
//_, err := reg.SetGlobalRegistrar("", coinbase)
|
||||
//if err != nil {
|
||||
// t.Errorf("error setting HashReg: %v", err)
|
||||
//}
|
||||
//_, err = reg.SetHashReg("", coinbase)
|
||||
//if err != nil {
|
||||
// t.Errorf("error setting HashReg: %v", err)
|
||||
//}
|
||||
//_, err = reg.SetUrlHint("", coinbase)
|
||||
//if err != nil {
|
||||
// t.Errorf("error setting HashReg: %v", err)
|
||||
//}
|
||||
/* TODO:
|
||||
* lookup receipt and contract addresses by tx hash
|
||||
* name registration for HashReg and UrlHint addresses
|
||||
* mine those transactions
|
||||
* then set once more SetHashReg SetUrlHint
|
||||
*/
|
||||
|
||||
source := `contract test {\n` +
|
||||
" /// @notice Will multiply `a` by 7." + `\n` +
|
||||
` function multiply(uint a) returns(uint d) {\n` +
|
||||
` return a * 7;\n` +
|
||||
` }\n` +
|
||||
`}\n`
|
||||
|
||||
if checkEvalJSON(t, repl, `admin.stopNatSpec()`, `true`) != nil {
|
||||
return
|
||||
}
|
||||
|
||||
contractInfo, err := ioutil.ReadFile("info_test.json")
|
||||
if err != nil {
|
||||
t.Fatalf("%v", err)
|
||||
}
|
||||
if checkEvalJSON(t, repl, `primary = eth.accounts[0]`, `"`+testAddress+`"`) != nil {
|
||||
return
|
||||
}
|
||||
if checkEvalJSON(t, repl, `source = "`+source+`"`, `"`+source+`"`) != nil {
|
||||
return
|
||||
}
|
||||
|
||||
// if solc is found with right version, test it, otherwise read from file
|
||||
sol, err := compiler.New("")
|
||||
if err != nil {
|
||||
t.Logf("solc not found: mocking contract compilation step")
|
||||
} else if sol.Version() != solcVersion {
|
||||
t.Logf("WARNING: solc different version found (%v, test written for %v, may need to update)", sol.Version(), solcVersion)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
info, err := ioutil.ReadFile("info_test.json")
|
||||
if err != nil {
|
||||
t.Fatalf("%v", err)
|
||||
}
|
||||
_, err = repl.re.Run(`contract = JSON.parse(` + strconv.Quote(string(info)) + `)`)
|
||||
if err != nil {
|
||||
t.Errorf("%v", err)
|
||||
}
|
||||
} else {
|
||||
if checkEvalJSON(t, repl, `contract = eth.compile.solidity(source).test`, string(contractInfo)) != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
if checkEvalJSON(t, repl, `contract.code`, `"0x605880600c6000396000f3006000357c010000000000000000000000000000000000000000000000000000000090048063c6888fa114602e57005b603d6004803590602001506047565b8060005260206000f35b60006007820290506053565b91905056"`) != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if checkEvalJSON(
|
||||
t, repl,
|
||||
`contractaddress = eth.sendTransaction({from: primary, data: contract.code})`,
|
||||
`"0x46d69d55c3c4b86a924a92c9fc4720bb7bce1d74"`,
|
||||
) != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if !processTxs(repl, t, 8) {
|
||||
return
|
||||
}
|
||||
|
||||
callSetup := `abiDef = JSON.parse('[{"constant":false,"inputs":[{"name":"a","type":"uint256"}],"name":"multiply","outputs":[{"name":"d","type":"uint256"}],"type":"function"}]');
|
||||
Multiply7 = eth.contract(abiDef);
|
||||
multiply7 = Multiply7.at(contractaddress);
|
||||
`
|
||||
_, err = repl.re.Run(callSetup)
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error setting up contract, got %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
expNotice := ""
|
||||
if repl.lastConfirm != expNotice {
|
||||
t.Errorf("incorrect confirmation message: expected %v, got %v", expNotice, repl.lastConfirm)
|
||||
return
|
||||
}
|
||||
|
||||
if checkEvalJSON(t, repl, `admin.startNatSpec()`, `true`) != nil {
|
||||
return
|
||||
}
|
||||
if checkEvalJSON(t, repl, `multiply7.multiply.sendTransaction(6, { from: primary })`, `"0x4ef9088431a8033e4580d00e4eb2487275e031ff4163c7529df0ef45af17857b"`) != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if !processTxs(repl, t, 1) {
|
||||
return
|
||||
}
|
||||
|
||||
expNotice = `About to submit transaction (no NatSpec info found for contract: content hash not found for '0x87e2802265838c7f14bb69eecd2112911af6767907a702eeaa445239fb20711b'): {"params":[{"to":"0x46d69d55c3c4b86a924a92c9fc4720bb7bce1d74","data": "0xc6888fa10000000000000000000000000000000000000000000000000000000000000006"}]}`
|
||||
if repl.lastConfirm != expNotice {
|
||||
t.Errorf("incorrect confirmation message: expected\n%v, got\n%v", expNotice, repl.lastConfirm)
|
||||
return
|
||||
}
|
||||
|
||||
var contentHash = `"0x86d2b7cf1e72e9a7a3f8d96601f0151742a2f780f1526414304fbe413dc7f9bd"`
|
||||
if sol != nil && solcVersion != sol.Version() {
|
||||
modContractInfo := versionRE.ReplaceAll(contractInfo, []byte(`"compilerVersion":"`+sol.Version()+`"`))
|
||||
fmt.Printf("modified contractinfo:\n%s\n", modContractInfo)
|
||||
contentHash = `"` + common.ToHex(crypto.Keccak256([]byte(modContractInfo))) + `"`
|
||||
}
|
||||
if checkEvalJSON(t, repl, `filename = "/tmp/info.json"`, `"/tmp/info.json"`) != nil {
|
||||
return
|
||||
}
|
||||
if checkEvalJSON(t, repl, `contentHash = admin.saveInfo(contract.info, filename)`, contentHash) != nil {
|
||||
return
|
||||
}
|
||||
if checkEvalJSON(t, repl, `admin.register(primary, contractaddress, contentHash)`, `true`) != nil {
|
||||
return
|
||||
}
|
||||
if checkEvalJSON(t, repl, `admin.registerUrl(primary, contentHash, "file://"+filename)`, `true`) != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if checkEvalJSON(t, repl, `admin.startNatSpec()`, `true`) != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if !processTxs(repl, t, 3) {
|
||||
return
|
||||
}
|
||||
|
||||
if checkEvalJSON(t, repl, `multiply7.multiply.sendTransaction(6, { from: primary })`, `"0x66d7635c12ad0b231e66da2f987ca3dfdca58ffe49c6442aa55960858103fd0c"`) != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if !processTxs(repl, t, 1) {
|
||||
return
|
||||
}
|
||||
|
||||
expNotice = "Will multiply 6 by 7."
|
||||
if repl.lastConfirm != expNotice {
|
||||
t.Errorf("incorrect confirmation message: expected\n%v, got\n%v", expNotice, repl.lastConfirm)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
func pendingTransactions(repl *testjethre, t *testing.T) (txc int64, err error) {
|
||||
var ethereum *eth.Ethereum
|
||||
repl.stack.Service(ðereum)
|
||||
|
||||
txs := ethereum.TxPool().GetTransactions()
|
||||
return int64(len(txs)), nil
|
||||
}
|
||||
|
||||
func processTxs(repl *testjethre, t *testing.T, expTxc int) bool {
|
||||
var txc int64
|
||||
var err error
|
||||
for i := 0; i < 50; i++ {
|
||||
txc, err = pendingTransactions(repl, t)
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error checking pending transactions: %v", err)
|
||||
return false
|
||||
}
|
||||
if expTxc < int(txc) {
|
||||
t.Errorf("too many pending transactions: expected %v, got %v", expTxc, txc)
|
||||
return false
|
||||
} else if expTxc == int(txc) {
|
||||
break
|
||||
}
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
}
|
||||
if int(txc) != expTxc {
|
||||
t.Errorf("incorrect number of pending transactions, expected %v, got %v", expTxc, txc)
|
||||
return false
|
||||
}
|
||||
var ethereum *eth.Ethereum
|
||||
repl.stack.Service(ðereum)
|
||||
|
||||
err = ethereum.StartMining(runtime.NumCPU(), "")
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error mining: %v", err)
|
||||
return false
|
||||
}
|
||||
defer ethereum.StopMining()
|
||||
|
||||
timer := time.NewTimer(100 * time.Second)
|
||||
blockNr := ethereum.BlockChain().CurrentBlock().Number()
|
||||
height := new(big.Int).Add(blockNr, big.NewInt(1))
|
||||
repl.wait <- height
|
||||
select {
|
||||
case <-timer.C:
|
||||
// if it times out, make sure the xeth loop does not block
|
||||
go func() {
|
||||
select {
|
||||
case repl.wait <- nil:
|
||||
case <-repl.wait:
|
||||
}
|
||||
}()
|
||||
case <-repl.wait:
|
||||
}
|
||||
txc, err = pendingTransactions(repl, t)
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error checking pending transactions: %v", err)
|
||||
return false
|
||||
}
|
||||
if txc != 0 {
|
||||
t.Errorf("%d trasactions were not mined", txc)
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func checkEvalJSON(t *testing.T, re *testjethre, expr, want string) error {
|
||||
val, err := re.re.Run("JSON.stringify(" + expr + ")")
|
||||
if err == nil && val.String() != want {
|
||||
err = fmt.Errorf("Output mismatch for `%s`:\ngot: %s\nwant: %s", expr, val.String(), want)
|
||||
}
|
||||
if err != nil {
|
||||
_, file, line, _ := runtime.Caller(1)
|
||||
file = filepath.Base(file)
|
||||
fmt.Printf("\t%s:%d: %v\n", file, line, err)
|
||||
t.Fail()
|
||||
}
|
||||
return err
|
||||
}
|
186
cmd/geth/main.go
186
cmd/geth/main.go
@ -18,6 +18,7 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
@ -31,6 +32,7 @@ import (
|
||||
"github.com/ethereum/ethash"
|
||||
"github.com/ethereum/go-ethereum/cmd/utils"
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/console"
|
||||
"github.com/ethereum/go-ethereum/core"
|
||||
"github.com/ethereum/go-ethereum/eth"
|
||||
"github.com/ethereum/go-ethereum/ethdb"
|
||||
@ -40,31 +42,48 @@ import (
|
||||
"github.com/ethereum/go-ethereum/metrics"
|
||||
"github.com/ethereum/go-ethereum/node"
|
||||
"github.com/ethereum/go-ethereum/params"
|
||||
"github.com/ethereum/go-ethereum/release"
|
||||
"github.com/ethereum/go-ethereum/rlp"
|
||||
)
|
||||
|
||||
const (
|
||||
ClientIdentifier = "Geth"
|
||||
Version = "1.4.0-rc"
|
||||
VersionMajor = 1
|
||||
VersionMinor = 4
|
||||
VersionPatch = 0
|
||||
clientIdentifier = "Geth" // Client identifier to advertise over the network
|
||||
versionMajor = 1 // Major version component of the current release
|
||||
versionMinor = 4 // Minor version component of the current release
|
||||
versionPatch = 6 // Patch version component of the current release
|
||||
versionMeta = "stable" // Version metadata to append to the version string
|
||||
|
||||
versionOracle = "0xfa7b9770ca4cb04296cac84f37736d4041251cdf" // Ethereum address of the Geth release oracle
|
||||
)
|
||||
|
||||
var (
|
||||
gitCommit string // set via linker flagg
|
||||
nodeNameVersion string
|
||||
app *cli.App
|
||||
gitCommit string // Git SHA1 commit hash of the release (set via linker flags)
|
||||
verString string // Combined textual representation of all the version components
|
||||
relConfig release.Config // Structured version information and release oracle config
|
||||
app *cli.App
|
||||
)
|
||||
|
||||
func init() {
|
||||
if gitCommit == "" {
|
||||
nodeNameVersion = Version
|
||||
} else {
|
||||
nodeNameVersion = Version + "-" + gitCommit[:8]
|
||||
// Construct the textual version string from the individual components
|
||||
verString = fmt.Sprintf("%d.%d.%d", versionMajor, versionMinor, versionPatch)
|
||||
if versionMeta != "" {
|
||||
verString += "-" + versionMeta
|
||||
}
|
||||
if gitCommit != "" {
|
||||
verString += "-" + gitCommit[:8]
|
||||
}
|
||||
// Construct the version release oracle configuration
|
||||
relConfig.Oracle = common.HexToAddress(versionOracle)
|
||||
|
||||
app = utils.NewApp(Version, "the go-ethereum command line interface")
|
||||
relConfig.Major = uint32(versionMajor)
|
||||
relConfig.Minor = uint32(versionMinor)
|
||||
relConfig.Patch = uint32(versionPatch)
|
||||
|
||||
commit, _ := hex.DecodeString(gitCommit)
|
||||
copy(relConfig.Commit[:], commit)
|
||||
|
||||
// Initialize the CLI app and start Geth
|
||||
app = utils.NewApp(verString, "the go-ethereum command line interface")
|
||||
app.Action = geth
|
||||
app.HideVersion = true // we have a command to print the version
|
||||
app.Commands = []cli.Command{
|
||||
@ -76,6 +95,9 @@ func init() {
|
||||
monitorCommand,
|
||||
accountCommand,
|
||||
walletCommand,
|
||||
consoleCommand,
|
||||
attachCommand,
|
||||
javascriptCommand,
|
||||
{
|
||||
Action: makedag,
|
||||
Name: "makedag",
|
||||
@ -119,36 +141,6 @@ The output of this command is supposed to be machine-readable.
|
||||
The init command initialises a new genesis block and definition for the network.
|
||||
This is a destructive action and changes the network in which you will be
|
||||
participating.
|
||||
`,
|
||||
},
|
||||
{
|
||||
Action: console,
|
||||
Name: "console",
|
||||
Usage: `Geth Console: interactive JavaScript environment`,
|
||||
Description: `
|
||||
The Geth console is an interactive shell for the JavaScript runtime environment
|
||||
which exposes a node admin interface as well as the Ðapp JavaScript API.
|
||||
See https://github.com/ethereum/go-ethereum/wiki/Javascipt-Console
|
||||
`,
|
||||
},
|
||||
{
|
||||
Action: attach,
|
||||
Name: "attach",
|
||||
Usage: `Geth Console: interactive JavaScript environment (connect to node)`,
|
||||
Description: `
|
||||
The Geth console is an interactive shell for the JavaScript runtime environment
|
||||
which exposes a node admin interface as well as the Ðapp JavaScript API.
|
||||
See https://github.com/ethereum/go-ethereum/wiki/Javascipt-Console.
|
||||
This command allows you to open a console on a running geth node.
|
||||
`,
|
||||
},
|
||||
{
|
||||
Action: execScripts,
|
||||
Name: "js",
|
||||
Usage: `executes the given JavaScript files in the Geth JavaScript VM`,
|
||||
Description: `
|
||||
The JavaScript VM exposes a node admin interface as well as the Ðapp
|
||||
JavaScript API. See https://github.com/ethereum/go-ethereum/wiki/Javascipt-Console
|
||||
`,
|
||||
},
|
||||
}
|
||||
@ -195,7 +187,7 @@ JavaScript API. See https://github.com/ethereum/go-ethereum/wiki/Javascipt-Conso
|
||||
utils.IPCApiFlag,
|
||||
utils.IPCPathFlag,
|
||||
utils.ExecFlag,
|
||||
utils.PreLoadJSFlag,
|
||||
utils.PreloadJSFlag,
|
||||
utils.WhisperEnabledFlag,
|
||||
utils.DevModeFlag,
|
||||
utils.TestNetFlag,
|
||||
@ -205,6 +197,7 @@ JavaScript API. See https://github.com/ethereum/go-ethereum/wiki/Javascipt-Conso
|
||||
utils.NetworkIdFlag,
|
||||
utils.RPCCORSDomainFlag,
|
||||
utils.MetricsEnabledFlag,
|
||||
utils.FakePoWFlag,
|
||||
utils.SolcPathFlag,
|
||||
utils.GpoMinGasPriceFlag,
|
||||
utils.GpoMaxGasPriceFlag,
|
||||
@ -224,6 +217,12 @@ JavaScript API. See https://github.com/ethereum/go-ethereum/wiki/Javascipt-Conso
|
||||
// Start system runtime metrics collection
|
||||
go metrics.CollectProcessMetrics(3 * time.Second)
|
||||
|
||||
// This should be the only place where reporting is enabled
|
||||
// because it is not intended to run while testing.
|
||||
// In addition to this check, bad block reports are sent only
|
||||
// for chains with the main network genesis block and network id 1.
|
||||
eth.EnableBadBlockReporting = true
|
||||
|
||||
utils.SetupNetwork(ctx)
|
||||
|
||||
// Deprecation warning.
|
||||
@ -237,7 +236,7 @@ JavaScript API. See https://github.com/ethereum/go-ethereum/wiki/Javascipt-Conso
|
||||
app.After = func(ctx *cli.Context) error {
|
||||
logger.Flush()
|
||||
debug.Exit()
|
||||
utils.Stdin.Close() // Resets terminal mode.
|
||||
console.Stdin.Close() // Resets terminal mode.
|
||||
return nil
|
||||
}
|
||||
}
|
||||
@ -255,7 +254,7 @@ func makeDefaultExtra() []byte {
|
||||
Name string
|
||||
GoVersion string
|
||||
Os string
|
||||
}{uint(VersionMajor<<16 | VersionMinor<<8 | VersionPatch), ClientIdentifier, runtime.Version(), runtime.GOOS}
|
||||
}{uint(versionMajor<<16 | versionMinor<<8 | versionPatch), clientIdentifier, runtime.Version(), runtime.GOOS}
|
||||
extra, err := rlp.EncodeToBytes(clientInfo)
|
||||
if err != nil {
|
||||
glog.V(logger.Warn).Infoln("error setting canonical miner information:", err)
|
||||
@ -273,41 +272,11 @@ func makeDefaultExtra() []byte {
|
||||
// It creates a default node based on the command line arguments and runs it in
|
||||
// blocking mode, waiting for it to be shut down.
|
||||
func geth(ctx *cli.Context) {
|
||||
node := utils.MakeSystemNode(ClientIdentifier, nodeNameVersion, makeDefaultExtra(), ctx)
|
||||
node := utils.MakeSystemNode(clientIdentifier, verString, relConfig, makeDefaultExtra(), ctx)
|
||||
startNode(ctx, node)
|
||||
node.Wait()
|
||||
}
|
||||
|
||||
// attach will connect to a running geth instance, attaching a JavaScript console to it.
|
||||
func attach(ctx *cli.Context) {
|
||||
// attach to a running geth instance
|
||||
client, err := utils.NewRemoteRPCClient(ctx)
|
||||
if err != nil {
|
||||
utils.Fatalf("Unable to attach to geth: %v", err)
|
||||
}
|
||||
|
||||
repl := newLightweightJSRE(
|
||||
ctx.GlobalString(utils.JSpathFlag.Name),
|
||||
client,
|
||||
ctx.GlobalString(utils.DataDirFlag.Name),
|
||||
true,
|
||||
)
|
||||
|
||||
// preload user defined JS files into the console
|
||||
err = repl.preloadJSFiles(ctx)
|
||||
if err != nil {
|
||||
utils.Fatalf("unable to preload JS file %v", err)
|
||||
}
|
||||
|
||||
// in case the exec flag holds a JS statement execute it and return
|
||||
if ctx.GlobalString(utils.ExecFlag.Name) != "" {
|
||||
repl.batch(ctx.GlobalString(utils.ExecFlag.Name))
|
||||
} else {
|
||||
repl.welcome()
|
||||
repl.interactive()
|
||||
}
|
||||
}
|
||||
|
||||
// initGenesis will initialise the given JSON format genesis file and write it as
// the zero'd block (i.e. genesis), or will fail hard if it can't succeed.
|
||||
func initGenesis(ctx *cli.Context) {
|
||||
@ -333,62 +302,6 @@ func initGenesis(ctx *cli.Context) {
|
||||
glog.V(logger.Info).Infof("successfully wrote genesis block and/or chain rule set: %x", block.Hash())
|
||||
}
|
||||
|
||||
// console starts a new geth node, attaching a JavaScript console to it at the
|
||||
// same time.
|
||||
func console(ctx *cli.Context) {
|
||||
// Create and start the node based on the CLI flags
|
||||
node := utils.MakeSystemNode(ClientIdentifier, nodeNameVersion, makeDefaultExtra(), ctx)
|
||||
startNode(ctx, node)
|
||||
|
||||
// Attach to the newly started node, and either execute script or become interactive
|
||||
client, err := node.Attach()
|
||||
if err != nil {
|
||||
utils.Fatalf("Failed to attach to the inproc geth: %v", err)
|
||||
}
|
||||
repl := newJSRE(node,
|
||||
ctx.GlobalString(utils.JSpathFlag.Name),
|
||||
ctx.GlobalString(utils.RPCCORSDomainFlag.Name),
|
||||
client, true)
|
||||
|
||||
// preload user defined JS files into the console
|
||||
err = repl.preloadJSFiles(ctx)
|
||||
if err != nil {
|
||||
utils.Fatalf("unable to preload JS file %v", err)
|
||||
}
|
||||
|
||||
// in case the exec flag holds a JS statement execute it and return
|
||||
if script := ctx.GlobalString(utils.ExecFlag.Name); script != "" {
|
||||
repl.batch(script)
|
||||
} else {
|
||||
repl.welcome()
|
||||
repl.interactive()
|
||||
}
|
||||
node.Stop()
|
||||
}
|
||||
|
||||
// execScripts starts a new geth node based on the CLI flags, and executes each
|
||||
// of the JavaScript files specified as command arguments.
|
||||
func execScripts(ctx *cli.Context) {
|
||||
// Create and start the node based on the CLI flags
|
||||
node := utils.MakeSystemNode(ClientIdentifier, nodeNameVersion, makeDefaultExtra(), ctx)
|
||||
startNode(ctx, node)
|
||||
|
||||
// Attach to the newly started node and execute the given scripts
|
||||
client, err := node.Attach()
|
||||
if err != nil {
|
||||
utils.Fatalf("Failed to attach to the inproc geth: %v", err)
|
||||
}
|
||||
repl := newJSRE(node,
|
||||
ctx.GlobalString(utils.JSpathFlag.Name),
|
||||
ctx.GlobalString(utils.RPCCORSDomainFlag.Name),
|
||||
client, false)
|
||||
|
||||
for _, file := range ctx.Args() {
|
||||
repl.exec(file)
|
||||
}
|
||||
node.Stop()
|
||||
}
|
||||
|
||||
// startNode boots up the system node and all registered protocols, after which
|
||||
// it unlocks any requested accounts, and starts the RPC/IPC interfaces and the
|
||||
// miner.
|
||||
@ -471,11 +384,8 @@ func gpubench(ctx *cli.Context) {
|
||||
}
|
||||
|
||||
func version(c *cli.Context) {
|
||||
fmt.Println(ClientIdentifier)
|
||||
fmt.Println("Version:", Version)
|
||||
if gitCommit != "" {
|
||||
fmt.Println("Git Commit:", gitCommit)
|
||||
}
|
||||
fmt.Println(clientIdentifier)
|
||||
fmt.Println("Version:", verString)
|
||||
fmt.Println("Protocol Versions:", eth.ProtocolVersions)
|
||||
fmt.Println("Network Id:", c.GlobalInt(utils.NetworkIdFlag.Name))
|
||||
fmt.Println("Go Version:", runtime.Version())
|
||||
|
@ -20,7 +20,6 @@ import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"fmt"
|
||||
"html/template"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
@ -28,6 +27,7 @@ import (
|
||||
"regexp"
|
||||
"sync"
|
||||
"testing"
|
||||
"text/template"
|
||||
"time"
|
||||
)
|
||||
|
||||
@ -45,6 +45,7 @@ type testgeth struct {
|
||||
// template variables for expect
|
||||
Datadir string
|
||||
Executable string
|
||||
Etherbase string
|
||||
Func template.FuncMap
|
||||
|
||||
removeDatadir bool
|
||||
@ -67,11 +68,15 @@ func init() {
|
||||
func runGeth(t *testing.T, args ...string) *testgeth {
|
||||
tt := &testgeth{T: t, Executable: os.Args[0]}
|
||||
for i, arg := range args {
|
||||
if arg == "-datadir" || arg == "--datadir" {
|
||||
switch {
|
||||
case arg == "-datadir" || arg == "--datadir":
|
||||
if i < len(args)-1 {
|
||||
tt.Datadir = args[i+1]
|
||||
}
|
||||
break
|
||||
case arg == "-etherbase" || arg == "--etherbase":
|
||||
if i < len(args)-1 {
|
||||
tt.Etherbase = args[i+1]
|
||||
}
|
||||
}
|
||||
}
|
||||
if tt.Datadir == "" {
|
||||
|
@ -101,7 +101,7 @@ var AppHelpFlagGroups = []flagGroup{
|
||||
utils.RPCCORSDomainFlag,
|
||||
utils.JSpathFlag,
|
||||
utils.ExecFlag,
|
||||
utils.PreLoadJSFlag,
|
||||
utils.PreloadJSFlag,
|
||||
},
|
||||
},
|
||||
{
|
||||
@ -150,8 +150,11 @@ var AppHelpFlagGroups = []flagGroup{
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "LOGGING AND DEBUGGING",
|
||||
Flags: append([]cli.Flag{utils.MetricsEnabledFlag}, debug.Flags...),
|
||||
Name: "LOGGING AND DEBUGGING",
|
||||
Flags: append([]cli.Flag{
|
||||
utils.MetricsEnabledFlag,
|
||||
utils.FakePoWFlag,
|
||||
}, debug.Flags...),
|
||||
},
|
||||
{
|
||||
Name: "EXPERIMENTAL",
|
||||
|
@ -73,15 +73,13 @@ func StartNode(stack *node.Node) {
|
||||
<-sigc
|
||||
glog.V(logger.Info).Infoln("Got interrupt, shutting down...")
|
||||
go stack.Stop()
|
||||
logger.Flush()
|
||||
for i := 10; i > 0; i-- {
|
||||
<-sigc
|
||||
if i > 1 {
|
||||
glog.V(logger.Info).Infoln("Already shutting down, please be patient.")
|
||||
glog.V(logger.Info).Infoln("Interrupt", i-1, "more times to induce panic.")
|
||||
glog.V(logger.Info).Infof("Already shutting down, interrupt %d more times for panic.", i-1)
|
||||
}
|
||||
}
|
||||
glog.V(logger.Error).Infof("Force quitting: this might not end so well.")
|
||||
debug.Exit() // ensure trace and CPU profile data is flushed.
|
||||
debug.LoudPanic("boom")
|
||||
}()
|
||||
}
|
||||
|
54
cmd/utils/fdlimit_freebsd.go
Normal file
54
cmd/utils/fdlimit_freebsd.go
Normal file
@ -0,0 +1,54 @@
|
||||
// Copyright 2016 The go-ethereum Authors
|
||||
// This file is part of go-ethereum.
|
||||
//
|
||||
// go-ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// go-ethereum is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
// +build freebsd
|
||||
|
||||
package utils
|
||||
|
||||
import "syscall"
|
||||
|
||||
// This file is largely identical to fdlimit_unix.go,
|
||||
// but Rlimit fields have type int64 on FreeBSD so it needs
|
||||
// an extra conversion.
|
||||
|
||||
// raiseFdLimit tries to maximize the file descriptor allowance of this process
|
||||
// to the maximum hard-limit allowed by the OS.
|
||||
func raiseFdLimit(max uint64) error {
|
||||
// Get the current limit
|
||||
var limit syscall.Rlimit
|
||||
if err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &limit); err != nil {
|
||||
return err
|
||||
}
|
||||
// Try to update the limit to the max allowance
|
||||
limit.Cur = limit.Max
|
||||
if limit.Cur > int64(max) {
|
||||
limit.Cur = int64(max)
|
||||
}
|
||||
if err := syscall.Setrlimit(syscall.RLIMIT_NOFILE, &limit); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// getFdLimit retrieves the number of file descriptors allowed to be opened by this
|
||||
// process.
|
||||
func getFdLimit() (int, error) {
|
||||
var limit syscall.Rlimit
|
||||
if err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &limit); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return int(limit.Cur), nil
|
||||
}
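For illustration only, a hedged sketch of how these two helpers could be combined by a caller in the same package: query the current allowance, try to raise it toward a target, and report the result. ensureFdLimit and the target value are assumptions made here; the file above only defines raiseFdLimit and getFdLimit.

// Hypothetical caller living next to the file above (the helpers are unexported).
package utils

import "fmt"

// ensureFdLimit is an illustrative helper, not part of the file above: it reads
// the current file descriptor allowance, asks raiseFdLimit to lift it toward the
// requested target, and reports what was actually granted.
func ensureFdLimit(target uint64) {
	before, err := getFdLimit()
	if err != nil {
		fmt.Println("cannot query fd limit:", err)
		return
	}
	if err := raiseFdLimit(target); err != nil {
		fmt.Println("cannot raise fd limit:", err)
	}
	after, _ := getFdLimit()
	fmt.Printf("fd limit: %d -> %d (requested %d)\n", before, after, target)
}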
|
@ -14,7 +14,7 @@
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
// +build linux darwin
|
||||
// +build linux darwin netbsd openbsd solaris
|
||||
|
||||
package utils
|
||||
|
||||
|
@ -47,6 +47,8 @@ import (
|
||||
"github.com/ethereum/go-ethereum/p2p/discover"
|
||||
"github.com/ethereum/go-ethereum/p2p/nat"
|
||||
"github.com/ethereum/go-ethereum/params"
|
||||
"github.com/ethereum/go-ethereum/pow"
|
||||
"github.com/ethereum/go-ethereum/release"
|
||||
"github.com/ethereum/go-ethereum/rpc"
|
||||
"github.com/ethereum/go-ethereum/whisper"
|
||||
)
|
||||
@ -228,6 +230,10 @@ var (
|
||||
Name: metrics.MetricsEnabledFlag,
|
||||
Usage: "Enable metrics collection and reporting",
|
||||
}
|
||||
FakePoWFlag = cli.BoolFlag{
|
||||
Name: "fakepow",
|
||||
Usage: "Disables proof-of-work verification",
|
||||
}
|
||||
|
||||
// RPC settings
|
||||
RPCEnabledFlag = cli.BoolFlag{
|
||||
@ -296,7 +302,7 @@ var (
|
||||
Name: "exec",
|
||||
Usage: "Execute JavaScript statement (only in combination with console/attach)",
|
||||
}
|
||||
PreLoadJSFlag = cli.StringFlag{
|
||||
PreloadJSFlag = cli.StringFlag{
|
||||
Name: "preload",
|
||||
Usage: "Comma separated list of JavaScript files to preload into the console",
|
||||
}
|
||||
@ -636,7 +642,7 @@ func MakePasswordList(ctx *cli.Context) []string {
|
||||
|
||||
// MakeSystemNode sets up a local node, configures the services to launch and
|
||||
// assembles the P2P protocol stack.
|
||||
func MakeSystemNode(name, version string, extra []byte, ctx *cli.Context) *node.Node {
|
||||
func MakeSystemNode(name, version string, relconf release.Config, extra []byte, ctx *cli.Context) *node.Node {
|
||||
// Avoid conflicting network flags
|
||||
networks, netFlags := 0, []cli.BoolFlag{DevModeFlag, TestNetFlag, OlympicFlag}
|
||||
for _, flag := range netFlags {
|
||||
@ -767,7 +773,11 @@ func MakeSystemNode(name, version string, extra []byte, ctx *cli.Context) *node.
|
||||
Fatalf("Failed to register the Whisper service: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
if err := stack.Register(func(ctx *node.ServiceContext) (node.Service, error) {
|
||||
return release.NewReleaseService(ctx, relconf)
|
||||
}); err != nil {
|
||||
Fatalf("Failed to register the Geth release oracle service: %v", err)
|
||||
}
|
||||
return stack
|
||||
}
|
||||
|
||||
@ -842,13 +852,32 @@ func MakeChain(ctx *cli.Context) (chain *core.BlockChain, chainDb ethdb.Database
|
||||
glog.Fatalln(err)
|
||||
}
|
||||
}
|
||||
|
||||
chainConfig := MustMakeChainConfigFromDb(ctx, chainDb)
|
||||
|
||||
var eventMux event.TypeMux
|
||||
chain, err = core.NewBlockChain(chainDb, chainConfig, ethash.New(), &eventMux)
|
||||
pow := pow.PoW(core.FakePow{})
|
||||
if !ctx.GlobalBool(FakePoWFlag.Name) {
|
||||
pow = ethash.New()
|
||||
}
|
||||
chain, err = core.NewBlockChain(chainDb, chainConfig, pow, new(event.TypeMux))
|
||||
if err != nil {
|
||||
Fatalf("Could not start chainmanager: %v", err)
|
||||
}
|
||||
return chain, chainDb
|
||||
}
|
||||
|
||||
// MakeConsolePreloads retrieves the absolute paths for the console JavaScript
|
||||
// scripts to preload before starting.
|
||||
func MakeConsolePreloads(ctx *cli.Context) []string {
|
||||
// Skip preloading if there's nothing to preload
|
||||
if ctx.GlobalString(PreloadJSFlag.Name) == "" {
|
||||
return nil
|
||||
}
|
||||
// Otherwise resolve absolute paths and return them
|
||||
preloads := []string{}
|
||||
|
||||
assets := ctx.GlobalString(JSpathFlag.Name)
|
||||
for _, file := range strings.Split(ctx.GlobalString(PreloadJSFlag.Name), ",") {
|
||||
preloads = append(preloads, common.AbsolutePath(assets, strings.TrimSpace(file)))
|
||||
}
|
||||
return preloads
|
||||
}
|
||||
|
@ -1,98 +0,0 @@
|
||||
// Copyright 2016 The go-ethereum Authors
|
||||
// This file is part of go-ethereum.
|
||||
//
|
||||
// go-ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// go-ethereum is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package utils
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/peterh/liner"
|
||||
)
|
||||
|
||||
// Holds the stdin line reader.
|
||||
// Only this reader may be used for input because it keeps
|
||||
// an internal buffer.
|
||||
var Stdin = newUserInputReader()
|
||||
|
||||
type userInputReader struct {
|
||||
*liner.State
|
||||
warned bool
|
||||
supported bool
|
||||
normalMode liner.ModeApplier
|
||||
rawMode liner.ModeApplier
|
||||
}
|
||||
|
||||
func newUserInputReader() *userInputReader {
|
||||
r := new(userInputReader)
|
||||
// Get the original mode before calling NewLiner.
|
||||
// This is usually regular "cooked" mode where characters echo.
|
||||
normalMode, _ := liner.TerminalMode()
|
||||
// Turn on liner. It switches to raw mode.
|
||||
r.State = liner.NewLiner()
|
||||
rawMode, err := liner.TerminalMode()
|
||||
if err != nil || !liner.TerminalSupported() {
|
||||
r.supported = false
|
||||
} else {
|
||||
r.supported = true
|
||||
r.normalMode = normalMode
|
||||
r.rawMode = rawMode
|
||||
// Switch back to normal mode while we're not prompting.
|
||||
normalMode.ApplyMode()
|
||||
}
|
||||
return r
|
||||
}
|
||||
|
||||
func (r *userInputReader) Prompt(prompt string) (string, error) {
|
||||
if r.supported {
|
||||
r.rawMode.ApplyMode()
|
||||
defer r.normalMode.ApplyMode()
|
||||
} else {
|
||||
// liner tries to be smart about printing the prompt
|
||||
// and doesn't print anything if input is redirected.
|
||||
// Un-smart it by printing the prompt always.
|
||||
fmt.Print(prompt)
|
||||
prompt = ""
|
||||
defer fmt.Println()
|
||||
}
|
||||
return r.State.Prompt(prompt)
|
||||
}
|
||||
|
||||
func (r *userInputReader) PasswordPrompt(prompt string) (passwd string, err error) {
|
||||
if r.supported {
|
||||
r.rawMode.ApplyMode()
|
||||
defer r.normalMode.ApplyMode()
|
||||
return r.State.PasswordPrompt(prompt)
|
||||
}
|
||||
if !r.warned {
|
||||
fmt.Println("!! Unsupported terminal, password will be echoed.")
|
||||
r.warned = true
|
||||
}
|
||||
// Just as in Prompt, handle printing the prompt here instead of relying on liner.
|
||||
fmt.Print(prompt)
|
||||
passwd, err = r.State.Prompt("")
|
||||
fmt.Println()
|
||||
return passwd, err
|
||||
}
|
||||
|
||||
func (r *userInputReader) ConfirmPrompt(prompt string) (bool, error) {
|
||||
prompt = prompt + " [y/N] "
|
||||
input, err := r.Prompt(prompt)
|
||||
if len(input) > 0 && strings.ToUpper(input[:1]) == "Y" {
|
||||
return true, nil
|
||||
}
|
||||
return false, err
|
||||
}
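Until this change, the exported Stdin reader above was the single entry point for terminal input in the package (the diff replaces it with console.Stdin elsewhere in this set). A hedged usage sketch of the removed API; the prompt texts are invented for illustration:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/cmd/utils"
)

func main() {
	// Plain line input, with history and line editing handled by liner.
	name, err := utils.Stdin.Prompt("Name for the new node: ")
	if err != nil {
		fmt.Println("aborted:", err)
		return
	}

	// Password input; echo is suppressed on supported terminals, otherwise a
	// warning is printed and the input is echoed.
	pass, _ := utils.Stdin.PasswordPrompt("Passphrase: ")

	// Yes/no confirmation, defaulting to "no".
	ok, _ := utils.Stdin.ConfirmPrompt("Proceed with " + name + "?")
	fmt.Println("confirmed:", ok, "passphrase length:", len(pass))
}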
|
@ -1,301 +0,0 @@
|
||||
// Copyright 2015 The go-ethereum Authors
|
||||
// This file is part of go-ethereum.
|
||||
//
|
||||
// go-ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// go-ethereum is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package utils
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/jsre"
|
||||
"github.com/ethereum/go-ethereum/rpc"
|
||||
|
||||
"github.com/robertkrimen/otto"
|
||||
)
|
||||
|
||||
type Jeth struct {
|
||||
re *jsre.JSRE
|
||||
client rpc.Client
|
||||
}
|
||||
|
||||
// NewJeth creates a new backend for the JSRE console
|
||||
func NewJeth(re *jsre.JSRE, client rpc.Client) *Jeth {
|
||||
return &Jeth{re, client}
|
||||
}
|
||||
|
||||
// err returns an error object for the given error code and message.
|
||||
func (self *Jeth) err(call otto.FunctionCall, code int, msg string, id interface{}) (response otto.Value) {
|
||||
m := rpc.JSONErrResponse{
|
||||
Version: "2.0",
|
||||
Id: id,
|
||||
Error: rpc.JSONError{
|
||||
Code: code,
|
||||
Message: msg,
|
||||
},
|
||||
}
|
||||
|
||||
errObj, _ := json.Marshal(m.Error)
|
||||
errRes, _ := json.Marshal(m)
|
||||
|
||||
call.Otto.Run("ret_error = " + string(errObj))
|
||||
res, _ := call.Otto.Run("ret_response = " + string(errRes))
|
||||
|
||||
return res
|
||||
}
|
||||
|
||||
// UnlockAccount asks the user for the password and then executes the jeth.UnlockAccount callback in the jsre.
// It needs the public address of the account to unlock as its first argument.
// The second argument is an optional string with the password. If not given, the user is prompted for the password.
// The third argument is an optional integer which specifies for how long the account will be unlocked (in seconds).
|
||||
func (self *Jeth) UnlockAccount(call otto.FunctionCall) (response otto.Value) {
|
||||
var account, passwd otto.Value
|
||||
duration := otto.NullValue()
|
||||
|
||||
if !call.Argument(0).IsString() {
|
||||
fmt.Println("first argument must be the account to unlock")
|
||||
return otto.FalseValue()
|
||||
}
|
||||
|
||||
account = call.Argument(0)
|
||||
|
||||
// if the password is not given or is null, ask the user for it
|
||||
if call.Argument(1).IsUndefined() || call.Argument(1).IsNull() {
|
||||
fmt.Printf("Unlock account %s\n", account)
|
||||
if input, err := Stdin.PasswordPrompt("Passphrase: "); err != nil {
|
||||
throwJSExeception(err.Error())
|
||||
} else {
|
||||
passwd, _ = otto.ToValue(input)
|
||||
}
|
||||
} else {
|
||||
if !call.Argument(1).IsString() {
|
||||
throwJSExeception("password must be a string")
|
||||
}
|
||||
passwd = call.Argument(1)
|
||||
}
|
||||
|
||||
// The third argument is the duration for which the account should remain unlocked.
// Verify that it is a number.
|
||||
if call.Argument(2).IsDefined() && !call.Argument(2).IsNull() {
|
||||
if !call.Argument(2).IsNumber() {
|
||||
throwJSExeception("unlock duration must be a number")
|
||||
}
|
||||
duration = call.Argument(2)
|
||||
}
|
||||
|
||||
// jeth.unlockAccount will send the request to the backend.
|
||||
if val, err := call.Otto.Call("jeth.unlockAccount", nil, account, passwd, duration); err == nil {
|
||||
return val
|
||||
} else {
|
||||
throwJSExeception(err.Error())
|
||||
}
|
||||
|
||||
return otto.FalseValue()
|
||||
}
|
||||
|
||||
// NewAccount asks the user for the password and then executes the jeth.newAccount callback in the jsre
|
||||
func (self *Jeth) NewAccount(call otto.FunctionCall) (response otto.Value) {
|
||||
var passwd string
|
||||
if len(call.ArgumentList) == 0 {
|
||||
var err error
|
||||
passwd, err = Stdin.PasswordPrompt("Passphrase: ")
|
||||
if err != nil {
|
||||
return otto.FalseValue()
|
||||
}
|
||||
passwd2, err := Stdin.PasswordPrompt("Repeat passphrase: ")
|
||||
if err != nil {
|
||||
return otto.FalseValue()
|
||||
}
|
||||
|
||||
if passwd != passwd2 {
|
||||
fmt.Println("Passphrases don't match")
|
||||
return otto.FalseValue()
|
||||
}
|
||||
} else if len(call.ArgumentList) == 1 && call.Argument(0).IsString() {
|
||||
passwd, _ = call.Argument(0).ToString()
|
||||
} else {
|
||||
fmt.Println("expected 0 or 1 string argument")
|
||||
return otto.FalseValue()
|
||||
}
|
||||
|
||||
ret, err := call.Otto.Call("jeth.newAccount", nil, passwd)
|
||||
if err == nil {
|
||||
return ret
|
||||
}
|
||||
fmt.Println(err)
|
||||
return otto.FalseValue()
|
||||
}
|
||||
|
||||
// Send will serialize the first argument, send it to the node and return the response.
|
||||
func (self *Jeth) Send(call otto.FunctionCall) (response otto.Value) {
|
||||
// verify we got a batch request (array) or a single request (object)
|
||||
ro := call.Argument(0).Object()
|
||||
if ro == nil || (ro.Class() != "Array" && ro.Class() != "Object") {
|
||||
throwJSExeception("Internal Error: request must be an object or array")
|
||||
}
|
||||
|
||||
// convert otto vm arguments to go values by JSON serialising and parsing.
|
||||
data, err := call.Otto.Call("JSON.stringify", nil, ro)
|
||||
if err != nil {
|
||||
throwJSExeception(err.Error())
|
||||
}
|
||||
|
||||
jsonreq, _ := data.ToString()
|
||||
|
||||
// parse arguments to JSON rpc requests, either to an array (batch) or to a single request.
|
||||
var reqs []rpc.JSONRequest
|
||||
batch := true
|
||||
if err = json.Unmarshal([]byte(jsonreq), &reqs); err != nil {
|
||||
// single request?
|
||||
reqs = make([]rpc.JSONRequest, 1)
|
||||
if err = json.Unmarshal([]byte(jsonreq), &reqs[0]); err != nil {
|
||||
throwJSExeception("invalid request")
|
||||
}
|
||||
batch = false
|
||||
}
|
||||
|
||||
call.Otto.Set("response_len", len(reqs))
|
||||
call.Otto.Run("var ret_response = new Array(response_len);")
|
||||
|
||||
for i, req := range reqs {
|
||||
if err := self.client.Send(&req); err != nil {
|
||||
return self.err(call, -32603, err.Error(), req.Id)
|
||||
}
|
||||
|
||||
result := make(map[string]interface{})
|
||||
if err = self.client.Recv(&result); err != nil {
|
||||
return self.err(call, -32603, err.Error(), req.Id)
|
||||
}
|
||||
|
||||
id, _ := result["id"]
|
||||
jsonver, _ := result["jsonrpc"]
|
||||
|
||||
call.Otto.Set("ret_id", id)
|
||||
call.Otto.Set("ret_jsonrpc", jsonver)
|
||||
call.Otto.Set("response_idx", i)
|
||||
|
||||
// call was successful
|
||||
if res, ok := result["result"]; ok {
|
||||
payload, _ := json.Marshal(res)
|
||||
call.Otto.Set("ret_result", string(payload))
|
||||
response, err = call.Otto.Run(`
|
||||
ret_response[response_idx] = { jsonrpc: ret_jsonrpc, id: ret_id, result: JSON.parse(ret_result) };
|
||||
`)
|
||||
continue
|
||||
}
|
||||
|
||||
// request returned an error
|
||||
if res, ok := result["error"]; ok {
|
||||
payload, _ := json.Marshal(res)
|
||||
call.Otto.Set("ret_result", string(payload))
|
||||
response, err = call.Otto.Run(`
|
||||
ret_response[response_idx] = { jsonrpc: ret_jsonrpc, id: ret_id, error: JSON.parse(ret_result) };
|
||||
`)
|
||||
continue
|
||||
}
|
||||
|
||||
return self.err(call, -32603, fmt.Sprintf("Invalid response"), new(int64))
|
||||
}
|
||||
|
||||
if !batch {
|
||||
call.Otto.Run("ret_response = ret_response[0];")
|
||||
}
|
||||
|
||||
// if a callback was given execute it.
|
||||
if call.Argument(1).IsObject() {
|
||||
call.Otto.Set("callback", call.Argument(1))
|
||||
call.Otto.Run(`
|
||||
if (Object.prototype.toString.call(callback) == '[object Function]') {
|
||||
callback(null, ret_response);
|
||||
}
|
||||
`)
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// throwJSExeception panics on an otto value; the Otto VM will then throw msg as a JavaScript error.
|
||||
func throwJSExeception(msg interface{}) otto.Value {
|
||||
p, _ := otto.ToValue(msg)
|
||||
panic(p)
|
||||
}
|
||||
|
||||
// Sleep will halt the console for arg[0] seconds.
|
||||
func (self *Jeth) Sleep(call otto.FunctionCall) (response otto.Value) {
|
||||
if len(call.ArgumentList) >= 1 {
|
||||
if call.Argument(0).IsNumber() {
|
||||
sleep, _ := call.Argument(0).ToInteger()
|
||||
time.Sleep(time.Duration(sleep) * time.Second)
|
||||
return otto.TrueValue()
|
||||
}
|
||||
}
|
||||
return throwJSExeception("usage: sleep(<sleep in seconds>)")
|
||||
}
|
||||
|
||||
// SleepBlocks will wait for a specified number of new blocks or at most for a
// given number of seconds: sleepBlocks(nBlocks[, maxSleep]).
|
||||
func (self *Jeth) SleepBlocks(call otto.FunctionCall) (response otto.Value) {
|
||||
nBlocks := int64(0)
|
||||
maxSleep := int64(9999999999999999) // indefinitely
|
||||
|
||||
nArgs := len(call.ArgumentList)
|
||||
|
||||
if nArgs == 0 {
|
||||
throwJSExeception("usage: sleepBlocks(<n blocks>[, max sleep in seconds])")
|
||||
}
|
||||
|
||||
if nArgs >= 1 {
|
||||
if call.Argument(0).IsNumber() {
|
||||
nBlocks, _ = call.Argument(0).ToInteger()
|
||||
} else {
|
||||
throwJSExeception("expected number as first argument")
|
||||
}
|
||||
}
|
||||
|
||||
if nArgs >= 2 {
|
||||
if call.Argument(1).IsNumber() {
|
||||
maxSleep, _ = call.Argument(1).ToInteger()
|
||||
} else {
|
||||
throwJSExeception("expected number as second argument")
|
||||
}
|
||||
}
|
||||
|
||||
// go through the console, this will allow web3 to call the appropriate
|
||||
// callbacks if a delayed response or notification is received.
|
||||
currentBlockNr := func() int64 {
|
||||
result, err := call.Otto.Run("eth.blockNumber")
|
||||
if err != nil {
|
||||
throwJSExeception(err.Error())
|
||||
}
|
||||
blockNr, err := result.ToInteger()
|
||||
if err != nil {
|
||||
throwJSExeception(err.Error())
|
||||
}
|
||||
return blockNr
|
||||
}
|
||||
|
||||
targetBlockNr := currentBlockNr() + nBlocks
|
||||
deadline := time.Now().Add(time.Duration(maxSleep) * time.Second)
|
||||
|
||||
for time.Now().Before(deadline) {
|
||||
if currentBlockNr() >= targetBlockNr {
|
||||
return otto.TrueValue()
|
||||
}
|
||||
time.Sleep(time.Second)
|
||||
}
|
||||
|
||||
return otto.FalseValue()
|
||||
}
|
@ -149,7 +149,6 @@ func (sol *Solidity) Compile(source string) (map[string]*Contract, error) {
|
||||
compilerOptions := strings.Join(params, " ")
|
||||
|
||||
cmd := exec.Command(sol.solcPath, params...)
|
||||
cmd.Dir = wd
|
||||
cmd.Stdin = strings.NewReader(source)
|
||||
cmd.Stderr = stderr
|
||||
|
||||
|
@ -167,7 +167,7 @@ func (a Address) MarshalJSON() ([]byte, error) {
|
||||
// Parse address from raw json data
|
||||
func (a *Address) UnmarshalJSON(data []byte) error {
|
||||
if len(data) > 2 && data[0] == '"' && data[len(data)-1] == '"' {
|
||||
data = data[:len(data)-1][1:]
|
||||
data = data[1 : len(data)-1]
|
||||
}
|
||||
|
||||
if len(data) > 2 && data[0] == '0' && data[1] == 'x' {
|
||||
|
@ -16,7 +16,10 @@
|
||||
|
||||
package common
|
||||
|
||||
import "testing"
|
||||
import (
|
||||
"math/big"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestBytesConversion(t *testing.T) {
|
||||
bytes := []byte{5}
|
||||
@ -47,7 +50,38 @@ func TestHashJsonValidation(t *testing.T) {
|
||||
}
|
||||
for i, test := range tests {
|
||||
if err := h.UnmarshalJSON(append([]byte(test.Prefix), make([]byte, test.Size)...)); err != test.Error {
|
||||
t.Error(i, "expected", test.Error, "got", err)
|
||||
t.Errorf("test #%d: error mismatch: have %v, want %v", i, err, test.Error)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestAddressUnmarshalJSON(t *testing.T) {
|
||||
var a Address
|
||||
var tests = []struct {
|
||||
Input string
|
||||
ShouldErr bool
|
||||
Output *big.Int
|
||||
}{
|
||||
{"", true, nil},
|
||||
{`""`, true, nil},
|
||||
{`"0x"`, true, nil},
|
||||
{`"0x00"`, true, nil},
|
||||
{`"0xG000000000000000000000000000000000000000"`, true, nil},
|
||||
{`"0x0000000000000000000000000000000000000000"`, false, big.NewInt(0)},
|
||||
{`"0x0000000000000000000000000000000000000010"`, false, big.NewInt(16)},
|
||||
}
|
||||
for i, test := range tests {
|
||||
err := a.UnmarshalJSON([]byte(test.Input))
|
||||
if err != nil && !test.ShouldErr {
|
||||
t.Errorf("test #%d: unexpected error: %v", i, err)
|
||||
}
|
||||
if err == nil {
|
||||
if test.ShouldErr {
|
||||
t.Errorf("test #%d: expected error, got none", i)
|
||||
}
|
||||
if a.Big().Cmp(test.Output) != 0 {
|
||||
t.Errorf("test #%d: address mismatch: have %v, want %v", i, a.Big(), test.Output)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
317
console/bridge.go
Normal file
@ -0,0 +1,317 @@
|
||||
// Copyright 2015 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package console
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/logger"
|
||||
"github.com/ethereum/go-ethereum/logger/glog"
|
||||
"github.com/ethereum/go-ethereum/rpc"
|
||||
"github.com/robertkrimen/otto"
|
||||
)
|
||||
|
||||
// bridge is a collection of JavaScript utility methods to bridge the .js runtime
|
||||
// environment and the Go RPC connection backing the remote method calls.
|
||||
type bridge struct {
|
||||
client rpc.Client // RPC client to execute Ethereum requests through
|
||||
prompter UserPrompter // Input prompter to allow interactive user feedback
|
||||
printer io.Writer // Output writer to serialize any display strings to
|
||||
}
|
||||
|
||||
// newBridge creates a new JavaScript wrapper around an RPC client.
|
||||
func newBridge(client rpc.Client, prompter UserPrompter, printer io.Writer) *bridge {
|
||||
return &bridge{
|
||||
client: client,
|
||||
prompter: prompter,
|
||||
printer: printer,
|
||||
}
|
||||
}
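The bridge only carries the RPC client, the prompter and the printer; its exported methods are registered on the `jeth` object by the console during init (see console.go further down). As a minimal, hypothetical sketch of that registration pattern, not the console wiring itself, a Go callback with the same signature can be exposed to an otto VM like this:

package main

import (
	"fmt"

	"github.com/robertkrimen/otto"
)

func main() {
	vm := otto.New()

	// Expose a Go callback under the name "echo"; the console registers
	// bridge.Send, bridge.NewAccount etc. on the "jeth" object the same way.
	vm.Set("echo", func(call otto.FunctionCall) otto.Value {
		msg, _ := call.Argument(0).ToString()
		val, _ := otto.ToValue("echo: " + msg)
		return val
	})
	// Invoke the Go callback from JavaScript.
	result, _ := vm.Run(`echo("hello")`)
	fmt.Println(result) // echo: hello
}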
|
||||
|
||||
// NewAccount is a wrapper around the personal.newAccount RPC method that uses a
|
||||
// non-echoing password prompt to acquire the passphrase and executes the original
|
||||
// RPC method (saved in jeth.newAccount) with it to actually execute the RPC call.
|
||||
func (b *bridge) NewAccount(call otto.FunctionCall) (response otto.Value) {
|
||||
var (
|
||||
password string
|
||||
confirm string
|
||||
err error
|
||||
)
|
||||
switch {
|
||||
// No password was specified, prompt the user for it
|
||||
case len(call.ArgumentList) == 0:
|
||||
if password, err = b.prompter.PromptPassword("Passphrase: "); err != nil {
|
||||
throwJSException(err.Error())
|
||||
}
|
||||
if confirm, err = b.prompter.PromptPassword("Repeat passphrase: "); err != nil {
|
||||
throwJSException(err.Error())
|
||||
}
|
||||
if password != confirm {
|
||||
throwJSException("passphrases don't match!")
|
||||
}
|
||||
|
||||
// A single string password was specified, use that
|
||||
case len(call.ArgumentList) == 1 && call.Argument(0).IsString():
|
||||
password, _ = call.Argument(0).ToString()
|
||||
|
||||
// Otherwise fail with some error
|
||||
default:
|
||||
throwJSException("expected 0 or 1 string argument")
|
||||
}
|
||||
// Password acquired, execute the call and return
|
||||
ret, err := call.Otto.Call("jeth.newAccount", nil, password)
|
||||
if err != nil {
|
||||
throwJSException(err.Error())
|
||||
}
|
||||
return ret
|
||||
}
|
||||
|
||||
// UnlockAccount is a wrapper around the personal.unlockAccount RPC method that
|
||||
// uses a non-echoing password prompt to acquire the passphrase and executes the
|
||||
// original RPC method (saved in jeth.unlockAccount) with it to actually execute
|
||||
// the RPC call.
|
||||
func (b *bridge) UnlockAccount(call otto.FunctionCall) (response otto.Value) {
|
||||
// Make sure we have an account specified to unlock
|
||||
if !call.Argument(0).IsString() {
|
||||
throwJSException("first argument must be the account to unlock")
|
||||
}
|
||||
account := call.Argument(0)
|
||||
|
||||
// If password is not given or is the null value, prompt the user for it
|
||||
var passwd otto.Value
|
||||
|
||||
if call.Argument(1).IsUndefined() || call.Argument(1).IsNull() {
|
||||
fmt.Fprintf(b.printer, "Unlock account %s\n", account)
|
||||
if input, err := b.prompter.PromptPassword("Passphrase: "); err != nil {
|
||||
throwJSException(err.Error())
|
||||
} else {
|
||||
passwd, _ = otto.ToValue(input)
|
||||
}
|
||||
} else {
|
||||
if !call.Argument(1).IsString() {
|
||||
throwJSException("password must be a string")
|
||||
}
|
||||
passwd = call.Argument(1)
|
||||
}
|
||||
// Third argument is the duration for which the account should stay unlocked.
|
||||
duration := otto.NullValue()
|
||||
if call.Argument(2).IsDefined() && !call.Argument(2).IsNull() {
|
||||
if !call.Argument(2).IsNumber() {
|
||||
throwJSException("unlock duration must be a number")
|
||||
}
|
||||
duration = call.Argument(2)
|
||||
}
|
||||
// Send the request to the backend and return
|
||||
val, err := call.Otto.Call("jeth.unlockAccount", nil, account, passwd, duration)
|
||||
if err != nil {
|
||||
throwJSException(err.Error())
|
||||
}
|
||||
return val
|
||||
}
|
||||
|
||||
// Sleep will block the console for the specified number of seconds.
|
||||
func (b *bridge) Sleep(call otto.FunctionCall) (response otto.Value) {
|
||||
if call.Argument(0).IsNumber() {
|
||||
sleep, _ := call.Argument(0).ToInteger()
|
||||
time.Sleep(time.Duration(sleep) * time.Second)
|
||||
return otto.TrueValue()
|
||||
}
|
||||
return throwJSException("usage: sleep(<number of seconds>)")
|
||||
}
|
||||
|
||||
// SleepBlocks will block the console for a specified number of new blocks,
// optionally until the given timeout is reached.
|
||||
func (b *bridge) SleepBlocks(call otto.FunctionCall) (response otto.Value) {
|
||||
var (
|
||||
blocks = int64(0)
|
||||
sleep = int64(9999999999999999) // indefinitely
|
||||
)
|
||||
// Parse the input parameters for the sleep
|
||||
nArgs := len(call.ArgumentList)
|
||||
if nArgs == 0 {
|
||||
throwJSException("usage: sleepBlocks(<n blocks>[, max sleep in seconds])")
|
||||
}
|
||||
if nArgs >= 1 {
|
||||
if call.Argument(0).IsNumber() {
|
||||
blocks, _ = call.Argument(0).ToInteger()
|
||||
} else {
|
||||
throwJSException("expected number as first argument")
|
||||
}
|
||||
}
|
||||
if nArgs >= 2 {
|
||||
if call.Argument(1).IsNumber() {
|
||||
sleep, _ = call.Argument(1).ToInteger()
|
||||
} else {
|
||||
throwJSException("expected number as second argument")
|
||||
}
|
||||
}
|
||||
// Go through the console; this will allow web3 to call the appropriate
// callbacks if a delayed response or notification is received.
|
||||
blockNumber := func() int64 {
|
||||
result, err := call.Otto.Run("eth.blockNumber")
|
||||
if err != nil {
|
||||
throwJSException(err.Error())
|
||||
}
|
||||
block, err := result.ToInteger()
|
||||
if err != nil {
|
||||
throwJSException(err.Error())
|
||||
}
|
||||
return block
|
||||
}
|
||||
// Poll the current block number until either it or a timeout is reached
|
||||
targetBlockNr := blockNumber() + blocks
|
||||
deadline := time.Now().Add(time.Duration(sleep) * time.Second)
|
||||
|
||||
for time.Now().Before(deadline) {
|
||||
if blockNumber() >= targetBlockNr {
|
||||
return otto.TrueValue()
|
||||
}
|
||||
time.Sleep(time.Second)
|
||||
}
|
||||
return otto.FalseValue()
|
||||
}
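SleepBlocks boils down to a poll-until-deadline loop: read eth.blockNumber through the VM once per second and stop as soon as the target block or the deadline is reached. A self-contained sketch of that pattern, with the block query swapped for an arbitrary condition (the names below are illustrative, not part of the package):

package main

import (
	"fmt"
	"time"
)

// waitFor polls cond once per interval until it reports true or the deadline
// passes, mirroring the loop SleepBlocks runs against eth.blockNumber.
func waitFor(cond func() bool, maxSleep, interval time.Duration) bool {
	deadline := time.Now().Add(maxSleep)
	for time.Now().Before(deadline) {
		if cond() {
			return true
		}
		time.Sleep(interval)
	}
	return false
}

func main() {
	start := time.Now()
	done := func() bool { return time.Since(start) > 30*time.Millisecond }
	fmt.Println(waitFor(done, time.Second, 10*time.Millisecond)) // true
}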
|
||||
|
||||
// Send will serialize the first argument, send it to the node and return the response.
|
||||
func (b *bridge) Send(call otto.FunctionCall) (response otto.Value) {
|
||||
// Ensure that we've got a batch request (array) or a single request (object)
|
||||
arg := call.Argument(0).Object()
|
||||
if arg == nil || (arg.Class() != "Array" && arg.Class() != "Object") {
|
||||
throwJSException("request must be an object or array")
|
||||
}
|
||||
// Convert the otto VM arguments to Go values
|
||||
data, err := call.Otto.Call("JSON.stringify", nil, arg)
|
||||
if err != nil {
|
||||
throwJSException(err.Error())
|
||||
}
|
||||
reqjson, err := data.ToString()
|
||||
if err != nil {
|
||||
throwJSException(err.Error())
|
||||
}
|
||||
|
||||
var (
|
||||
reqs []rpc.JSONRequest
|
||||
batch = true
|
||||
)
|
||||
if err = json.Unmarshal([]byte(reqjson), &reqs); err != nil {
|
||||
// single request?
|
||||
reqs = make([]rpc.JSONRequest, 1)
|
||||
if err = json.Unmarshal([]byte(reqjson), &reqs[0]); err != nil {
|
||||
throwJSException("invalid request")
|
||||
}
|
||||
batch = false
|
||||
}
|
||||
// Iteratively execute the requests
|
||||
call.Otto.Set("response_len", len(reqs))
|
||||
call.Otto.Run("var ret_response = new Array(response_len);")
|
||||
|
||||
for i, req := range reqs {
|
||||
// Execute the RPC request and parse the reply
|
||||
if err = b.client.Send(&req); err != nil {
|
||||
return newErrorResponse(call, -32603, err.Error(), req.Id)
|
||||
}
|
||||
result := make(map[string]interface{})
|
||||
if err = b.client.Recv(&result); err != nil {
|
||||
return newErrorResponse(call, -32603, err.Error(), req.Id)
|
||||
}
|
||||
// Feed the reply back into the JavaScript runtime environment
|
||||
id, _ := result["id"]
|
||||
jsonver, _ := result["jsonrpc"]
|
||||
|
||||
call.Otto.Set("ret_id", id)
|
||||
call.Otto.Set("ret_jsonrpc", jsonver)
|
||||
call.Otto.Set("response_idx", i)
|
||||
|
||||
if res, ok := result["result"]; ok {
|
||||
payload, _ := json.Marshal(res)
|
||||
call.Otto.Set("ret_result", string(payload))
|
||||
response, err = call.Otto.Run(`
|
||||
ret_response[response_idx] = { jsonrpc: ret_jsonrpc, id: ret_id, result: JSON.parse(ret_result) };
|
||||
`)
|
||||
continue
|
||||
}
|
||||
if res, ok := result["error"]; ok {
|
||||
payload, _ := json.Marshal(res)
|
||||
call.Otto.Set("ret_result", string(payload))
|
||||
response, err = call.Otto.Run(`
|
||||
ret_response[response_idx] = { jsonrpc: ret_jsonrpc, id: ret_id, error: JSON.parse(ret_result) };
|
||||
`)
|
||||
continue
|
||||
}
|
||||
return newErrorResponse(call, -32603, fmt.Sprintf("Invalid response"), new(int64))
|
||||
}
|
||||
// Convert single requests back from batch ones
|
||||
if !batch {
|
||||
call.Otto.Run("ret_response = ret_response[0];")
|
||||
}
|
||||
// Execute any registered callbacks
|
||||
if call.Argument(1).IsObject() {
|
||||
call.Otto.Set("callback", call.Argument(1))
|
||||
call.Otto.Run(`
|
||||
if (Object.prototype.toString.call(callback) == '[object Function]') {
|
||||
callback(null, ret_response);
|
||||
}
|
||||
`)
|
||||
}
|
||||
return
|
||||
}
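Send accepts either a single request object or a batch array and tells them apart by attempting the array decode first, falling back to a single object. A stand-alone sketch of that detection, using a stand-in struct instead of rpc.JSONRequest:

package main

import (
	"encoding/json"
	"fmt"
)

// request is a stand-in for rpc.JSONRequest, only used to demonstrate the
// "try batch first, fall back to a single object" parsing used by Send.
type request struct {
	Method string `json:"method"`
}

func parse(raw string) (reqs []request, batch bool, err error) {
	batch = true
	if err = json.Unmarshal([]byte(raw), &reqs); err != nil {
		// Not an array, try a single request object instead.
		reqs = make([]request, 1)
		if err = json.Unmarshal([]byte(raw), &reqs[0]); err != nil {
			return nil, false, fmt.Errorf("invalid request: %v", err)
		}
		batch = false
	}
	return reqs, batch, nil
}

func main() {
	fmt.Println(parse(`{"method":"eth_blockNumber"}`))                            // single request, batch=false
	fmt.Println(parse(`[{"method":"eth_blockNumber"},{"method":"net_version"}]`)) // two requests, batch=true
}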
|
||||
|
||||
// throwJSException panics on an otto.Value. The Otto VM will recover from the
|
||||
// Go panic and throw msg as a JavaScript error.
|
||||
func throwJSException(msg interface{}) otto.Value {
|
||||
val, err := otto.ToValue(msg)
|
||||
if err != nil {
|
||||
glog.V(logger.Error).Infof("Failed to serialize JavaScript exception %v: %v", msg, err)
|
||||
}
|
||||
panic(val)
|
||||
}
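The comment above captures the whole contract: panicking with an otto.Value inside a Go callback makes the VM rethrow that value as a JavaScript exception, which scripts can catch normally. A minimal sketch of that behaviour (the function names are made up for the example):

package main

import (
	"fmt"

	"github.com/robertkrimen/otto"
)

// explode panics with an otto.Value; otto recovers the panic and rethrows it
// as a JavaScript exception inside the VM, just like throwJSException above.
func explode(call otto.FunctionCall) otto.Value {
	val, _ := otto.ToValue("boom")
	panic(val)
}

func main() {
	vm := otto.New()
	vm.Set("explode", explode)

	// The Go panic surfaces as a catchable JavaScript exception.
	result, _ := vm.Run(`
		var caught = "";
		try { explode(); } catch (err) { caught = "caught: " + err; }
		caught;
	`)
	fmt.Println(result) // caught: boom
}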
|
||||
|
||||
// newErrorResponse creates a JSON RPC error response for a specific request id,
|
||||
// containing the specified error code and error message. Beside returning the
|
||||
// error to the caller, it also sets the ret_error and ret_response JavaScript
|
||||
// variables.
|
||||
func newErrorResponse(call otto.FunctionCall, code int, msg string, id interface{}) (response otto.Value) {
|
||||
// Bundle the error into a JSON RPC call response
|
||||
res := rpc.JSONErrResponse{
|
||||
Version: rpc.JSONRPCVersion,
|
||||
Id: id,
|
||||
Error: rpc.JSONError{
|
||||
Code: code,
|
||||
Message: msg,
|
||||
},
|
||||
}
|
||||
// Serialize the error response into JavaScript variables
|
||||
errObj, err := json.Marshal(res.Error)
|
||||
if err != nil {
|
||||
glog.V(logger.Error).Infof("Failed to serialize JSON RPC error: %v", err)
|
||||
}
|
||||
resObj, err := json.Marshal(res)
|
||||
if err != nil {
|
||||
glog.V(logger.Error).Infof("Failed to serialize JSON RPC error response: %v", err)
|
||||
}
|
||||
|
||||
if _, err = call.Otto.Run("ret_error = " + string(errObj)); err != nil {
|
||||
glog.V(logger.Error).Infof("Failed to set `ret_error` to the occurred error: %v", err)
|
||||
}
|
||||
resVal, err := call.Otto.Run("ret_response = " + string(resObj))
|
||||
if err != nil {
|
||||
glog.V(logger.Error).Infof("Failed to set `ret_response` to the JSON RPC response: %v", err)
|
||||
}
|
||||
return resVal
|
||||
}
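For reference, a response built this way serialises to the usual JSON-RPC 2.0 error shape before being assigned to ret_error and ret_response. A stand-alone sketch of that payload, using local stand-in structs rather than the rpc package types:

package main

import (
	"encoding/json"
	"fmt"
)

// Stand-in types mirroring the layout of rpc.JSONErrResponse / rpc.JSONError.
type jsonError struct {
	Code    int    `json:"code"`
	Message string `json:"message"`
}

type jsonErrResponse struct {
	Version string      `json:"jsonrpc"`
	Id      interface{} `json:"id"`
	Error   jsonError   `json:"error"`
}

func main() {
	res := jsonErrResponse{
		Version: "2.0",
		Id:      1,
		Error:   jsonError{Code: -32603, Message: "connection refused"},
	}
	out, _ := json.Marshal(res)
	fmt.Println(string(out))
	// {"jsonrpc":"2.0","id":1,"error":{"code":-32603,"message":"connection refused"}}
}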
|
371
console/console.go
Normal file
@ -0,0 +1,371 @@
|
||||
// Copyright 2015 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package console
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"os/signal"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/ethereum/go-ethereum/internal/jsre"
|
||||
"github.com/ethereum/go-ethereum/internal/web3ext"
|
||||
"github.com/ethereum/go-ethereum/rpc"
|
||||
"github.com/peterh/liner"
|
||||
"github.com/robertkrimen/otto"
|
||||
)
|
||||
|
||||
var (
|
||||
passwordRegexp = regexp.MustCompile("personal.[nus]")
|
||||
onlyWhitespace = regexp.MustCompile("^\\s*$")
|
||||
exit = regexp.MustCompile("^\\s*exit\\s*;*\\s*$")
|
||||
)
|
||||
|
||||
// HistoryFile is the file within the data directory to store input scrollback.
|
||||
const HistoryFile = "history"
|
||||
|
||||
// DefaultPrompt is the default prompt line prefix to use for user input querying.
|
||||
const DefaultPrompt = "> "
|
||||
|
||||
// Config is the collection of configurations to fine-tune the behavior of the
|
||||
// JavaScript console.
|
||||
type Config struct {
|
||||
DataDir string // Data directory to store the console history at
|
||||
DocRoot string // Filesystem path from which to load JavaScript files
|
||||
Client rpc.Client // RPC client to execute Ethereum requests through
|
||||
Prompt string // Input prompt prefix string (defaults to DefaultPrompt)
|
||||
Prompter UserPrompter // Input prompter to allow interactive user feedback (defaults to TerminalPrompter)
|
||||
Printer io.Writer // Output writer to serialize any display strings to (defaults to os.Stdout)
|
||||
Preload []string // Absolute paths to JavaScript files to preload
|
||||
}
|
||||
|
||||
// Console is a JavaScript interpreted runtime environment. It is a fully fledged
|
||||
// JavaScript console attached to a running node via an external or in-process RPC
|
||||
// client.
|
||||
type Console struct {
|
||||
client rpc.Client // RPC client to execute Ethereum requests through
|
||||
jsre *jsre.JSRE // JavaScript runtime environment running the interpreter
|
||||
prompt string // Input prompt prefix string
|
||||
prompter UserPrompter // Input prompter to allow interactive user feedback
|
||||
histPath string // Absolute path to the console scrollback history
|
||||
history []string // Scroll history maintained by the console
|
||||
printer io.Writer // Output writer to serialize any display strings to
|
||||
}
|
||||
|
||||
func New(config Config) (*Console, error) {
|
||||
// Handle unset config values gracefully
|
||||
if config.Prompter == nil {
|
||||
config.Prompter = Stdin
|
||||
}
|
||||
if config.Prompt == "" {
|
||||
config.Prompt = DefaultPrompt
|
||||
}
|
||||
if config.Printer == nil {
|
||||
config.Printer = os.Stdout
|
||||
}
|
||||
// Initialize the console and return
|
||||
console := &Console{
|
||||
client: config.Client,
|
||||
jsre: jsre.New(config.DocRoot, config.Printer),
|
||||
prompt: config.Prompt,
|
||||
prompter: config.Prompter,
|
||||
printer: config.Printer,
|
||||
histPath: filepath.Join(config.DataDir, HistoryFile),
|
||||
}
|
||||
if err := console.init(config.Preload); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return console, nil
|
||||
}
|
||||
|
||||
// init retrieves the available APIs from the remote RPC provider and initializes
|
||||
// the console's JavaScript namespaces based on the exposed modules.
|
||||
func (c *Console) init(preload []string) error {
|
||||
// Initialize the JavaScript <-> Go RPC bridge
|
||||
bridge := newBridge(c.client, c.prompter, c.printer)
|
||||
c.jsre.Set("jeth", struct{}{})
|
||||
|
||||
jethObj, _ := c.jsre.Get("jeth")
|
||||
jethObj.Object().Set("send", bridge.Send)
|
||||
jethObj.Object().Set("sendAsync", bridge.Send)
|
||||
|
||||
consoleObj, _ := c.jsre.Get("console")
|
||||
consoleObj.Object().Set("log", c.consoleOutput)
|
||||
consoleObj.Object().Set("error", c.consoleOutput)
|
||||
|
||||
// Load all the internal utility JavaScript libraries
|
||||
if err := c.jsre.Compile("bignumber.js", jsre.BigNumber_JS); err != nil {
|
||||
return fmt.Errorf("bignumber.js: %v", err)
|
||||
}
|
||||
if err := c.jsre.Compile("web3.js", jsre.Web3_JS); err != nil {
|
||||
return fmt.Errorf("web3.js: %v", err)
|
||||
}
|
||||
if _, err := c.jsre.Run("var Web3 = require('web3');"); err != nil {
|
||||
return fmt.Errorf("web3 require: %v", err)
|
||||
}
|
||||
if _, err := c.jsre.Run("var web3 = new Web3(jeth);"); err != nil {
|
||||
return fmt.Errorf("web3 provider: %v", err)
|
||||
}
|
||||
// Load the supported APIs into the JavaScript runtime environment
|
||||
apis, err := c.client.SupportedModules()
|
||||
if err != nil {
|
||||
return fmt.Errorf("api modules: %v", err)
|
||||
}
|
||||
flatten := "var eth = web3.eth; var personal = web3.personal; "
|
||||
for api := range apis {
|
||||
if api == "web3" {
|
||||
continue // manually mapped or ignore
|
||||
}
|
||||
if file, ok := web3ext.Modules[api]; ok {
|
||||
if err = c.jsre.Compile(fmt.Sprintf("%s.js", api), file); err != nil {
|
||||
return fmt.Errorf("%s.js: %v", api, err)
|
||||
}
|
||||
flatten += fmt.Sprintf("var %s = web3.%s; ", api, api)
|
||||
}
|
||||
}
|
||||
if _, err = c.jsre.Run(flatten); err != nil {
|
||||
return fmt.Errorf("namespace flattening: %v", err)
|
||||
}
|
||||
// Initialize the global name register (disabled for now)
|
||||
//c.jsre.Run(`var GlobalRegistrar = eth.contract(` + registrar.GlobalRegistrarAbi + `); registrar = GlobalRegistrar.at("` + registrar.GlobalRegistrarAddr + `");`)
|
||||
|
||||
// If the console is in interactive mode, instrument password related methods to query the user
|
||||
if c.prompter != nil {
|
||||
// Retrieve the account management object to instrument
|
||||
personal, err := c.jsre.Get("personal")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// Override the unlockAccount and newAccount methods since these require user interaction.
|
||||
// Assign the original web3 callbacks to jeth.unlockAccount and jeth.newAccount in the console;
// the jeth.* methods will call these after obtaining the password from the user and then send
// the original web3 request to the backend.
|
||||
if obj := personal.Object(); obj != nil { // make sure the personal api is enabled over the interface
|
||||
if _, err = c.jsre.Run(`jeth.unlockAccount = personal.unlockAccount;`); err != nil {
|
||||
return fmt.Errorf("personal.unlockAccount: %v", err)
|
||||
}
|
||||
if _, err = c.jsre.Run(`jeth.newAccount = personal.newAccount;`); err != nil {
|
||||
return fmt.Errorf("personal.newAccount: %v", err)
|
||||
}
|
||||
obj.Set("unlockAccount", bridge.UnlockAccount)
|
||||
obj.Set("newAccount", bridge.NewAccount)
|
||||
}
|
||||
}
|
||||
// The admin.sleep and admin.sleepBlocks are offered by the console and not by the RPC layer.
|
||||
admin, err := c.jsre.Get("admin")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if obj := admin.Object(); obj != nil { // make sure the admin api is enabled over the interface
|
||||
obj.Set("sleepBlocks", bridge.SleepBlocks)
|
||||
obj.Set("sleep", bridge.Sleep)
|
||||
}
|
||||
// Preload any JavaScript files before starting the console
|
||||
for _, path := range preload {
|
||||
if err := c.jsre.Exec(path); err != nil {
|
||||
failure := err.Error()
|
||||
if ottoErr, ok := err.(*otto.Error); ok {
|
||||
failure = ottoErr.String()
|
||||
}
|
||||
return fmt.Errorf("%s: %v", path, failure)
|
||||
}
|
||||
}
|
||||
// Configure the console's input prompter for scrollback and tab completion
|
||||
if c.prompter != nil {
|
||||
if content, err := ioutil.ReadFile(c.histPath); err != nil {
|
||||
c.prompter.SetHistory(nil)
|
||||
} else {
|
||||
c.history = strings.Split(string(content), "\n")
|
||||
c.prompter.SetHistory(c.history)
|
||||
}
|
||||
c.prompter.SetWordCompleter(c.AutoCompleteInput)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// consoleOutput is an override for the console.log and console.error methods to
|
||||
// stream the output into the configured output stream instead of stdout.
|
||||
func (c *Console) consoleOutput(call otto.FunctionCall) otto.Value {
|
||||
output := []string{}
|
||||
for _, argument := range call.ArgumentList {
|
||||
output = append(output, fmt.Sprintf("%v", argument))
|
||||
}
|
||||
fmt.Fprintln(c.printer, strings.Join(output, " "))
|
||||
return otto.Value{}
|
||||
}
|
||||
|
||||
// AutoCompleteInput is a pre-assembled word completer to be used by the user
|
||||
// input prompter to provide hints to the user about the methods available.
|
||||
func (c *Console) AutoCompleteInput(line string, pos int) (string, []string, string) {
|
||||
// No completions can be provided for empty inputs
|
||||
if len(line) == 0 || pos == 0 {
|
||||
return "", nil, ""
|
||||
}
|
||||
// Chunk the input down to the relevant part for autocompletion,
// e.g. in case of nested lines like eth.getBalance(eth.coinb<tab><tab>
|
||||
start := 0
|
||||
for start = pos - 1; start > 0; start-- {
|
||||
// Skip all methods and namespaces (i.e. including the dot)
|
||||
if line[start] == '.' || (line[start] >= 'a' && line[start] <= 'z') || (line[start] >= 'A' && line[start] <= 'Z') {
|
||||
continue
|
||||
}
|
||||
// Handle web3 in a special way (i.e. other numbers aren't auto completed)
|
||||
if start >= 3 && line[start-3:start] == "web3" {
|
||||
start -= 3
|
||||
continue
|
||||
}
|
||||
// We've hit an unexpected character, autocomplete from here
|
||||
start++
|
||||
break
|
||||
}
|
||||
return line[:start], c.jsre.CompleteKeywords(line[start:pos]), line[pos:]
|
||||
}
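To make the chunking concrete: for a nested input the scan above walks left over identifiers and dots, hands the trailing expression to the keyword completer and leaves the rest of the line untouched. A simplified stand-alone sketch of just the splitting step (without the jsre keyword lookup and the web3 special case; the helper name is made up):

package main

import "fmt"

// splitForCompletion mirrors the backward scan in AutoCompleteInput: walk left
// from the cursor over identifiers and dots to find the expression to complete.
func splitForCompletion(line string, pos int) (prefix, toComplete, suffix string) {
	if len(line) == 0 || pos == 0 {
		return "", "", ""
	}
	start := pos - 1
	for ; start > 0; start-- {
		c := line[start]
		if c == '.' || (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') {
			continue
		}
		start++
		break
	}
	return line[:start], line[start:pos], line[pos:]
}

func main() {
	prefix, toComplete, suffix := splitForCompletion("eth.getBalance(eth.coinb", 24)
	fmt.Printf("%q %q %q\n", prefix, toComplete, suffix)
	// "eth.getBalance(" "eth.coinb" ""
}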
|
||||
|
||||
// Welcome shows a summary of the current Geth instance and some metadata about the
|
||||
// console's available modules.
|
||||
func (c *Console) Welcome() {
|
||||
// Print some generic Geth metadata
|
||||
fmt.Fprintf(c.printer, "Welcome to the Geth JavaScript console!\n\n")
|
||||
c.jsre.Run(`
|
||||
console.log("instance: " + web3.version.node);
|
||||
console.log("coinbase: " + eth.coinbase);
|
||||
console.log("at block: " + eth.blockNumber + " (" + new Date(1000 * eth.getBlock(eth.blockNumber).timestamp) + ")");
|
||||
console.log(" datadir: " + admin.datadir);
|
||||
`)
|
||||
// List all the supported modules for the user to call
|
||||
if apis, err := c.client.SupportedModules(); err == nil {
|
||||
modules := make([]string, 0, len(apis))
|
||||
for api, version := range apis {
|
||||
modules = append(modules, fmt.Sprintf("%s:%s", api, version))
|
||||
}
|
||||
sort.Strings(modules)
|
||||
fmt.Fprintln(c.printer, " modules:", strings.Join(modules, " "))
|
||||
}
|
||||
fmt.Fprintln(c.printer)
|
||||
}
|
||||
|
||||
// Evaluate executes code and pretty prints the result to the specified output
|
||||
// stream.
|
||||
func (c *Console) Evaluate(statement string) error {
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
fmt.Fprintf(c.printer, "[native] error: %v\n", r)
|
||||
}
|
||||
}()
|
||||
if err := c.jsre.Evaluate(statement, c.printer); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Interactive starts an interactive user session, where input is prompted from
|
||||
// the configured user prompter.
|
||||
func (c *Console) Interactive() {
|
||||
var (
|
||||
prompt = c.prompt // Current prompt line (used for multi-line inputs)
|
||||
indents = 0 // Current number of input indents (used for multi-line inputs)
|
||||
input = "" // Current user input
|
||||
scheduler = make(chan string) // Channel to send the next prompt on and receive the input
|
||||
)
|
||||
// Start a goroutine to listen for prompt requests and send back inputs
|
||||
go func() {
|
||||
for {
|
||||
// Read the next user input
|
||||
line, err := c.prompter.PromptInput(<-scheduler)
|
||||
if err != nil {
|
||||
// In case of an error, either clear the prompt or fail
|
||||
if err == liner.ErrPromptAborted { // ctrl-C
|
||||
prompt, indents, input = c.prompt, 0, ""
|
||||
scheduler <- ""
|
||||
continue
|
||||
}
|
||||
close(scheduler)
|
||||
return
|
||||
}
|
||||
// User input retrieved, send for interpretation and loop
|
||||
scheduler <- line
|
||||
}
|
||||
}()
|
||||
// Monitor Ctrl-C too in case the input is empty and we need to bail
|
||||
abort := make(chan os.Signal, 1)
|
||||
signal.Notify(abort, os.Interrupt)
|
||||
|
||||
// Start sending prompts to the user and reading back inputs
|
||||
for {
|
||||
// Send the next prompt, triggering an input read and process the result
|
||||
scheduler <- prompt
|
||||
select {
|
||||
case <-abort:
|
||||
// User forcefully quit the console
|
||||
fmt.Fprintln(c.printer, "caught interrupt, exiting")
|
||||
return
|
||||
|
||||
case line, ok := <-scheduler:
|
||||
// User input was returned by the prompter, handle special cases
|
||||
if !ok || (indents <= 0 && exit.MatchString(line)) {
|
||||
return
|
||||
}
|
||||
if onlyWhitespace.MatchString(line) {
|
||||
continue
|
||||
}
|
||||
// Append the line to the input and check for multi-line interpretation
|
||||
input += line + "\n"
|
||||
|
||||
indents = strings.Count(input, "{") + strings.Count(input, "(") - strings.Count(input, "}") - strings.Count(input, ")")
|
||||
if indents <= 0 {
|
||||
prompt = c.prompt
|
||||
} else {
|
||||
prompt = strings.Repeat("..", indents*2) + " "
|
||||
}
|
||||
// If all the needed lines are present, save the command and run
|
||||
if indents <= 0 {
|
||||
if len(input) > 0 && input[0] != ' ' && !passwordRegexp.MatchString(input) {
|
||||
if command := strings.TrimSpace(input); len(c.history) == 0 || command != c.history[len(c.history)-1] {
|
||||
c.history = append(c.history, command)
|
||||
if c.prompter != nil {
|
||||
c.prompter.AppendHistory(command)
|
||||
}
|
||||
}
|
||||
}
|
||||
c.Evaluate(input)
|
||||
input = ""
|
||||
}
|
||||
}
|
||||
}
|
||||
}
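The multi-line handling in the loop above keys off a simple indent counter: the number of unbalanced '{' and '(' characters in the accumulated input decides whether to keep prompting with '..' continuation markers or to evaluate. A stand-alone sketch of that counting rule:

package main

import (
	"fmt"
	"strings"
)

// openIndents mirrors the counting used by Interactive: any unbalanced '{' or
// '(' keeps the console in multi-line input mode.
func openIndents(input string) int {
	return strings.Count(input, "{") + strings.Count(input, "(") -
		strings.Count(input, "}") - strings.Count(input, ")")
}

func main() {
	fmt.Println(openIndents("function f() {\n"))    // 1 -> keep prompting with ".."
	fmt.Println(openIndents("function f() {\n}\n")) // 0 -> evaluate the input
}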
|
||||
|
||||
// Execute runs the JavaScript file specified as the argument.
|
||||
func (c *Console) Execute(path string) error {
|
||||
return c.jsre.Exec(path)
|
||||
}
|
||||
|
||||
// Stop cleans up the console and terminates the runtime environment.
|
||||
func (c *Console) Stop(graceful bool) error {
|
||||
if err := ioutil.WriteFile(c.histPath, []byte(strings.Join(c.history, "\n")), 0600); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := os.Chmod(c.histPath, 0600); err != nil { // Force 0600, even if it was different previously
|
||||
return err
|
||||
}
|
||||
c.jsre.Stop(graceful)
|
||||
return nil
|
||||
}
|
296
console/console_test.go
Normal file
@ -0,0 +1,296 @@
|
||||
// Copyright 2015 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package console
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"math/big"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/accounts"
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/core"
|
||||
"github.com/ethereum/go-ethereum/eth"
|
||||
"github.com/ethereum/go-ethereum/internal/jsre"
|
||||
"github.com/ethereum/go-ethereum/node"
|
||||
)
|
||||
|
||||
const (
|
||||
testInstance = "console-tester"
|
||||
testAddress = "0x8605cdbbdb6d264aa742e77020dcbc58fcdce182"
|
||||
)
|
||||
|
||||
// hookedPrompter implements UserPrompter to simulate user input via channels.
|
||||
type hookedPrompter struct {
|
||||
scheduler chan string
|
||||
}
|
||||
|
||||
func (p *hookedPrompter) PromptInput(prompt string) (string, error) {
|
||||
// Send the prompt to the tester
|
||||
select {
|
||||
case p.scheduler <- prompt:
|
||||
case <-time.After(time.Second):
|
||||
return "", errors.New("prompt timeout")
|
||||
}
|
||||
// Retrieve the response and feed to the console
|
||||
select {
|
||||
case input := <-p.scheduler:
|
||||
return input, nil
|
||||
case <-time.After(time.Second):
|
||||
return "", errors.New("input timeout")
|
||||
}
|
||||
}
|
||||
|
||||
func (p *hookedPrompter) PromptPassword(prompt string) (string, error) {
|
||||
return "", errors.New("not implemented")
|
||||
}
|
||||
func (p *hookedPrompter) PromptConfirm(prompt string) (bool, error) {
|
||||
return false, errors.New("not implemented")
|
||||
}
|
||||
func (p *hookedPrompter) SetHistory(history []string) {}
|
||||
func (p *hookedPrompter) AppendHistory(command string) {}
|
||||
func (p *hookedPrompter) SetWordCompleter(completer WordCompleter) {}
|
||||
|
||||
// tester is a console test environment for the console tests to operate on.
|
||||
type tester struct {
|
||||
workspace string
|
||||
stack *node.Node
|
||||
ethereum *eth.Ethereum
|
||||
console *Console
|
||||
input *hookedPrompter
|
||||
output *bytes.Buffer
|
||||
|
||||
lastConfirm string
|
||||
}
|
||||
|
||||
// newTester creates a test environment based on which the console can operate.
|
||||
// Please ensure you call Close() on the returned tester to avoid leaks.
|
||||
func newTester(t *testing.T, confOverride func(*eth.Config)) *tester {
|
||||
// Create a temporary storage for the node keys and initialize it
|
||||
workspace, err := ioutil.TempDir("", "console-tester-")
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create temporary keystore: %v", err)
|
||||
}
|
||||
accman := accounts.NewPlaintextManager(filepath.Join(workspace, "keystore"))
|
||||
|
||||
// Create a networkless protocol stack and start an Ethereum service within
|
||||
stack, err := node.New(&node.Config{DataDir: workspace, Name: testInstance, NoDiscovery: true})
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create node: %v", err)
|
||||
}
|
||||
ethConf := ð.Config{
|
||||
ChainConfig: &core.ChainConfig{HomesteadBlock: new(big.Int)},
|
||||
Etherbase: common.HexToAddress(testAddress),
|
||||
AccountManager: accman,
|
||||
PowTest: true,
|
||||
}
|
||||
if confOverride != nil {
|
||||
confOverride(ethConf)
|
||||
}
|
||||
if err = stack.Register(func(ctx *node.ServiceContext) (node.Service, error) { return eth.New(ctx, ethConf) }); err != nil {
|
||||
t.Fatalf("failed to register Ethereum protocol: %v", err)
|
||||
}
|
||||
// Start the node and assemble the JavaScript console around it
|
||||
if err = stack.Start(); err != nil {
|
||||
t.Fatalf("failed to start test stack: %v", err)
|
||||
}
|
||||
client, err := stack.Attach()
|
||||
if err != nil {
|
||||
t.Fatalf("failed to attach to node: %v", err)
|
||||
}
|
||||
prompter := &hookedPrompter{scheduler: make(chan string)}
|
||||
printer := new(bytes.Buffer)
|
||||
|
||||
console, err := New(Config{
|
||||
DataDir: stack.DataDir(),
|
||||
DocRoot: "testdata",
|
||||
Client: client,
|
||||
Prompter: prompter,
|
||||
Printer: printer,
|
||||
Preload: []string{"preload.js"},
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create JavaScript console: %v", err)
|
||||
}
|
||||
// Create the final tester and return
|
||||
var ethereum *eth.Ethereum
|
||||
stack.Service(ðereum)
|
||||
|
||||
return &tester{
|
||||
workspace: workspace,
|
||||
stack: stack,
|
||||
ethereum: ethereum,
|
||||
console: console,
|
||||
input: prompter,
|
||||
output: printer,
|
||||
}
|
||||
}
|
||||
|
||||
// Close cleans up any temporary data folders and held resources.
|
||||
func (env *tester) Close(t *testing.T) {
|
||||
if err := env.console.Stop(false); err != nil {
|
||||
t.Errorf("failed to stop embedded console: %v", err)
|
||||
}
|
||||
if err := env.stack.Stop(); err != nil {
|
||||
t.Errorf("failed to stop embedded node: %v", err)
|
||||
}
|
||||
os.RemoveAll(env.workspace)
|
||||
}
|
||||
|
||||
// Tests that the node lists the correct welcome message, notably that it contains
|
||||
// the instance name, coinbase account, block number, data directory and supported
|
||||
// console modules.
|
||||
func TestWelcome(t *testing.T) {
|
||||
tester := newTester(t, nil)
|
||||
defer tester.Close(t)
|
||||
|
||||
tester.console.Welcome()
|
||||
|
||||
output := string(tester.output.Bytes())
|
||||
if want := "Welcome"; !strings.Contains(output, want) {
|
||||
t.Fatalf("console output missing welcome message: have\n%s\nwant also %s", output, want)
|
||||
}
|
||||
if want := fmt.Sprintf("instance: %s", testInstance); !strings.Contains(output, want) {
|
||||
t.Fatalf("console output missing instance: have\n%s\nwant also %s", output, want)
|
||||
}
|
||||
if want := fmt.Sprintf("coinbase: %s", testAddress); !strings.Contains(output, want) {
|
||||
t.Fatalf("console output missing coinbase: have\n%s\nwant also %s", output, want)
|
||||
}
|
||||
if want := "at block: 0"; !strings.Contains(output, want) {
|
||||
t.Fatalf("console output missing sync status: have\n%s\nwant also %s", output, want)
|
||||
}
|
||||
if want := fmt.Sprintf("datadir: %s", tester.workspace); !strings.Contains(output, want) {
|
||||
t.Fatalf("console output missing coinbase: have\n%s\nwant also %s", output, want)
|
||||
}
|
||||
}
|
||||
|
||||
// Tests that JavaScript statement evaluation works as intended.
|
||||
func TestEvaluate(t *testing.T) {
|
||||
tester := newTester(t, nil)
|
||||
defer tester.Close(t)
|
||||
|
||||
tester.console.Evaluate("2 + 2")
|
||||
if output := string(tester.output.Bytes()); !strings.Contains(output, "4") {
|
||||
t.Fatalf("statement evaluation failed: have %s, want %s", output, "4")
|
||||
}
|
||||
}
|
||||
|
||||
// Tests that the console can be used in interactive mode.
|
||||
func TestInteractive(t *testing.T) {
|
||||
// Create a tester and run an interactive console in the background
|
||||
tester := newTester(t, nil)
|
||||
defer tester.Close(t)
|
||||
|
||||
go tester.console.Interactive()
|
||||
|
||||
// Wait for a prompt and send a statement back
|
||||
select {
|
||||
case <-tester.input.scheduler:
|
||||
case <-time.After(time.Second):
|
||||
t.Fatalf("initial prompt timeout")
|
||||
}
|
||||
select {
|
||||
case tester.input.scheduler <- "2+2":
|
||||
case <-time.After(time.Second):
|
||||
t.Fatalf("input feedback timeout")
|
||||
}
|
||||
// Wait for the second prompt and ensure the first statement was evaluated
|
||||
select {
|
||||
case <-tester.input.scheduler:
|
||||
case <-time.After(time.Second):
|
||||
t.Fatalf("secondary prompt timeout")
|
||||
}
|
||||
if output := string(tester.output.Bytes()); !strings.Contains(output, "4") {
|
||||
t.Fatalf("statement evaluation failed: have %s, want %s", output, "4")
|
||||
}
|
||||
}
|
||||
|
||||
// Tests that preloaded JavaScript files have been executed before the user is given
|
||||
// input.
|
||||
func TestPreload(t *testing.T) {
|
||||
tester := newTester(t, nil)
|
||||
defer tester.Close(t)
|
||||
|
||||
tester.console.Evaluate("preloaded")
|
||||
if output := string(tester.output.Bytes()); !strings.Contains(output, "some-preloaded-string") {
|
||||
t.Fatalf("preloaded variable missing: have %s, want %s", output, "some-preloaded-string")
|
||||
}
|
||||
}
|
||||
|
||||
// Tests that JavaScript scripts can be executed from the configured asset path.
|
||||
func TestExecute(t *testing.T) {
|
||||
tester := newTester(t, nil)
|
||||
defer tester.Close(t)
|
||||
|
||||
tester.console.Execute("exec.js")
|
||||
|
||||
tester.console.Evaluate("execed")
|
||||
if output := string(tester.output.Bytes()); !strings.Contains(output, "some-executed-string") {
|
||||
t.Fatalf("execed variable missing: have %s, want %s", output, "some-executed-string")
|
||||
}
|
||||
}
|
||||
|
||||
// Tests that the JavaScript objects returned by statement executions are properly
|
||||
// pretty printed instead of just displaying "[object]".
|
||||
func TestPrettyPrint(t *testing.T) {
|
||||
tester := newTester(t, nil)
|
||||
defer tester.Close(t)
|
||||
|
||||
tester.console.Evaluate("obj = {int: 1, string: 'two', list: [3, 3, 3], obj: {null: null, func: function(){}}}")
|
||||
|
||||
// Define some specially formatted fields
|
||||
var (
|
||||
one = jsre.NumberColor("1")
|
||||
two = jsre.StringColor("\"two\"")
|
||||
three = jsre.NumberColor("3")
|
||||
null = jsre.SpecialColor("null")
|
||||
fun = jsre.FunctionColor("function()")
|
||||
)
|
||||
// Assemble the actual output we're after and verify
|
||||
want := `{
|
||||
int: ` + one + `,
|
||||
list: [` + three + `, ` + three + `, ` + three + `],
|
||||
obj: {
|
||||
null: ` + null + `,
|
||||
func: ` + fun + `
|
||||
},
|
||||
string: ` + two + `
|
||||
}
|
||||
`
|
||||
if output := string(tester.output.Bytes()); output != want {
|
||||
t.Fatalf("pretty print mismatch: have %s, want %s", output, want)
|
||||
}
|
||||
}
|
||||
|
||||
// Tests that the JavaScript exceptions are properly formatted and colored.
|
||||
func TestPrettyError(t *testing.T) {
|
||||
tester := newTester(t, nil)
|
||||
defer tester.Close(t)
|
||||
tester.console.Evaluate("throw 'hello'")
|
||||
|
||||
want := jsre.ErrorColor("hello") + "\n"
|
||||
if output := string(tester.output.Bytes()); output != want {
|
||||
t.Fatalf("pretty error mismatch: have %s, want %s", output, want)
|
||||
}
|
||||
}
|
165
console/prompter.go
Normal file
@ -0,0 +1,165 @@
|
||||
// Copyright 2016 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package console
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/peterh/liner"
|
||||
)
|
||||
|
||||
// Stdin holds the stdin line reader (also using stdout for printing prompts).
|
||||
// Only this reader may be used for input because it keeps an internal buffer.
|
||||
var Stdin = newTerminalPrompter()
|
||||
|
||||
// UserPrompter defines the methods needed by the console to prompt the user for
|
||||
// various types of inputs.
|
||||
type UserPrompter interface {
|
||||
// PromptInput displays the given prompt to the user and requests some textual
|
||||
// data to be entered, returning the input of the user.
|
||||
PromptInput(prompt string) (string, error)
|
||||
|
||||
// PromptPassword displays the given prompt to the user and requests some textual
|
||||
// data to be entered, but one which must not be echoed out into the terminal.
|
||||
// The method returns the input provided by the user.
|
||||
PromptPassword(prompt string) (string, error)
|
||||
|
||||
// PromptConfirm displays the given prompt to the user and requests a boolean
|
||||
// choice to be made, returning that choice.
|
||||
PromptConfirm(prompt string) (bool, error)
|
||||
|
||||
// SetHistory sets the input scrollback history that the prompter will allow
// the user to scroll back to.
|
||||
SetHistory(history []string)
|
||||
|
||||
// AppendHistory appends an entry to the scrollback history. It should be called
|
||||
// if and only if the prompt to append was a valid command.
|
||||
AppendHistory(command string)
|
||||
|
||||
// SetWordCompleter sets the completion function that the prompter will call to
|
||||
// fetch completion candidates when the user presses tab.
|
||||
SetWordCompleter(completer WordCompleter)
|
||||
}
|
||||
|
||||
// WordCompleter takes the currently edited line with the cursor position and
|
||||
// returns the completion candidates for the partial word to be completed. If
|
||||
// the line is "Hello, wo!!!" and the cursor is before the first '!', ("Hello,
|
||||
// wo!!!", 9) is passed to the completer which may returns ("Hello, ", {"world",
|
||||
// "Word"}, "!!!") to have "Hello, world!!!".
|
||||
type WordCompleter func(line string, pos int) (string, []string, string)
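As a toy illustration of that contract (not the console's own completer, which is Console.AutoCompleteInput), a WordCompleter over a fixed candidate list could look like the sketch below; the helper name and word list are made up for the example:

package main

import (
	"fmt"
	"strings"
)

// wordCompleter mirrors the WordCompleter signature defined above.
type wordCompleter func(line string, pos int) (string, []string, string)

// completeFromList builds a completer over a fixed candidate list, following
// the (head, completions, tail) contract from the documentation.
func completeFromList(words []string) wordCompleter {
	return func(line string, pos int) (string, []string, string) {
		// Find the start of the partial word ending at the cursor.
		start := pos
		for start > 0 && line[start-1] != ' ' {
			start--
		}
		prefix := line[start:pos]

		var matches []string
		for _, w := range words {
			if strings.HasPrefix(w, prefix) {
				matches = append(matches, w)
			}
		}
		return line[:start], matches, line[pos:]
	}
}

func main() {
	complete := completeFromList([]string{"world", "word"})
	head, matches, tail := complete("Hello, wo!!!", 9)
	fmt.Println(head, matches, tail) // Hello,  [world word] !!!
}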
|
||||
|
||||
// terminalPrompter is a UserPrompter backed by the liner package. It supports
|
||||
// prompting the user for various input, among others for non-echoing password
|
||||
// input.
|
||||
type terminalPrompter struct {
|
||||
*liner.State
|
||||
warned bool
|
||||
supported bool
|
||||
normalMode liner.ModeApplier
|
||||
rawMode liner.ModeApplier
|
||||
}
|
||||
|
||||
// newTerminalPrompter creates a liner based user input prompter working off the
|
||||
// standard input and output streams.
|
||||
func newTerminalPrompter() *terminalPrompter {
|
||||
p := new(terminalPrompter)
|
||||
// Get the original mode before calling NewLiner.
|
||||
// This is usually regular "cooked" mode where characters echo.
|
||||
normalMode, _ := liner.TerminalMode()
|
||||
// Turn on liner. It switches to raw mode.
|
||||
p.State = liner.NewLiner()
|
||||
rawMode, err := liner.TerminalMode()
|
||||
if err != nil || !liner.TerminalSupported() {
|
||||
p.supported = false
|
||||
} else {
|
||||
p.supported = true
|
||||
p.normalMode = normalMode
|
||||
p.rawMode = rawMode
|
||||
// Switch back to normal mode while we're not prompting.
|
||||
normalMode.ApplyMode()
|
||||
}
|
||||
p.SetCtrlCAborts(true)
|
||||
p.SetTabCompletionStyle(liner.TabPrints)
|
||||
|
||||
return p
|
||||
}
|
||||
|
||||
// PromptInput displays the given prompt to the user and requests some textual
|
||||
// data to be entered, returning the input of the user.
|
||||
func (p *terminalPrompter) PromptInput(prompt string) (string, error) {
|
||||
if p.supported {
|
||||
p.rawMode.ApplyMode()
|
||||
defer p.normalMode.ApplyMode()
|
||||
} else {
|
||||
// liner tries to be smart about printing the prompt
|
||||
// and doesn't print anything if input is redirected.
|
||||
// Un-smart it by printing the prompt always.
|
||||
fmt.Print(prompt)
|
||||
prompt = ""
|
||||
defer fmt.Println()
|
||||
}
|
||||
return p.State.Prompt(prompt)
|
||||
}
|
||||
|
||||
// PromptPassword displays the given prompt to the user and requests some textual
|
||||
// data to be entered, but one which must not be echoed out into the terminal.
|
||||
// The method returns the input provided by the user.
|
||||
func (p *terminalPrompter) PromptPassword(prompt string) (passwd string, err error) {
|
||||
if p.supported {
|
||||
p.rawMode.ApplyMode()
|
||||
defer p.normalMode.ApplyMode()
|
||||
return p.State.PasswordPrompt(prompt)
|
||||
}
|
||||
if !p.warned {
|
||||
fmt.Println("!! Unsupported terminal, password will be echoed.")
|
||||
p.warned = true
|
||||
}
|
||||
// Just as in Prompt, handle printing the prompt here instead of relying on liner.
|
||||
fmt.Print(prompt)
|
||||
passwd, err = p.State.Prompt("")
|
||||
fmt.Println()
|
||||
return passwd, err
|
||||
}
|
||||
|
||||
// PromptConfirm displays the given prompt to the user and requests a boolean
|
||||
// choice to be made, returning that choice.
|
||||
func (p *terminalPrompter) PromptConfirm(prompt string) (bool, error) {
|
||||
input, err := p.Prompt(prompt + " [y/N] ")
|
||||
if len(input) > 0 && strings.ToUpper(input[:1]) == "Y" {
|
||||
return true, nil
|
||||
}
|
||||
return false, err
|
||||
}
|
||||
|
||||
// SetHistory sets the input scrollback history that the prompter will allow
// the user to scroll back to.
|
||||
func (p *terminalPrompter) SetHistory(history []string) {
|
||||
p.State.ReadHistory(strings.NewReader(strings.Join(history, "\n")))
|
||||
}
|
||||
|
||||
// AppendHistory appends an entry to the scrollback history. It should be called
|
||||
// if and only if the prompt to append was a valid command.
|
||||
func (p *terminalPrompter) AppendHistory(command string) {
|
||||
p.State.AppendHistory(command)
|
||||
}
|
||||
|
||||
// SetWordCompleter sets the completion function that the prompter will call to
|
||||
// fetch completion candidates when the user presses tab.
|
||||
func (p *terminalPrompter) SetWordCompleter(completer WordCompleter) {
|
||||
p.State.SetWordCompleter(liner.WordCompleter(completer))
|
||||
}
|
1
console/testdata/exec.js
vendored
Normal file
@ -0,0 +1 @@
|
||||
var execed = "some-executed-string";
|
1
console/testdata/preload.js
vendored
Normal file
@ -0,0 +1 @@
|
||||
var preloaded = "some-preloaded-string";
|
@ -1,72 +0,0 @@
// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package core

import (
"bytes"
"encoding/json"
"io/ioutil"
"net/http"

"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/logger"
"github.com/ethereum/go-ethereum/logger/glog"
"github.com/ethereum/go-ethereum/rlp"
)

// DisabledBadBlockReporting can be set to prevent blocks being reported.
var DisableBadBlockReporting = true

// ReportBlock reports the block to the block reporting tool found at
// badblocks.ethdev.com
func ReportBlock(block *types.Block, err error) {
if DisableBadBlockReporting {
return
}

const url = "https://badblocks.ethdev.com"

blockRlp, _ := rlp.EncodeToBytes(block)
data := map[string]interface{}{
"block": common.Bytes2Hex(blockRlp),
"errortype": err.Error(),
"hints": map[string]interface{}{
"receipts": "NYI",
"vmtrace": "NYI",
},
}
jsonStr, _ := json.Marshal(map[string]interface{}{"method": "eth_badBlock", "params": []interface{}{data}, "id": "1", "jsonrpc": "2.0"})

req, err := http.NewRequest("POST", url, bytes.NewBuffer(jsonStr))
req.Header.Set("Content-Type", "application/json")

client := &http.Client{}
resp, err := client.Do(req)
if err != nil {
glog.V(logger.Error).Infoln("POST err:", err)
return
}
defer resp.Body.Close()

if glog.V(logger.Debug) {
glog.Infoln("response Status:", resp.Status)
glog.Infoln("response Headers:", resp.Header)
body, _ := ioutil.ReadAll(resp.Body)
glog.Infoln("response Body:", string(body))
}
}
@ -292,7 +292,7 @@ func calcDifficultyHomestead(time, parentTime uint64, parentNumber, parentDiff *

// minimum difficulty can ever be (before exponential factor)
if x.Cmp(params.MinimumDifficulty) < 0 {
x = params.MinimumDifficulty
x.Set(params.MinimumDifficulty)
}

// for the exponential factor
@ -325,7 +325,7 @@ func calcDifficultyFrontier(time, parentTime uint64, parentNumber, parentDiff *b
diff.Sub(parentDiff, adjust)
}
if diff.Cmp(params.MinimumDifficulty) < 0 {
diff = params.MinimumDifficulty
diff.Set(params.MinimumDifficulty)
}

periodCount := new(big.Int).Add(parentNumber, common.Big1)
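The minimum-difficulty hunks above replace a pointer assignment with big.Int's Set. A minimal sketch, standard library only and with illustrative values, of why that matters: assigning the pointer aliases the shared minimum, so a later in-place mutation would corrupt it, while Set copies the value.

```go
package main

import (
	"fmt"
	"math/big"
)

func main() {
	minimum := big.NewInt(131072) // shared constant, like params.MinimumDifficulty

	// Pointer assignment: x and minimum now refer to the same big.Int.
	x := minimum
	x.Add(x, big.NewInt(1)) // mutates the shared "constant" too
	fmt.Println(minimum)    // 131073

	// Value copy: y gets its own big.Int holding the same value.
	minimum.SetInt64(131072)
	y := new(big.Int).Set(minimum)
	y.Add(y, big.NewInt(1))
	fmt.Println(minimum, y) // 131072 131073 -- minimum stays intact
}
```
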
@ -819,6 +819,7 @@ func (self *BlockChain) InsertChain(chain types.Blocks) (int, error) {
tstart = time.Now()

nonceChecked = make([]bool, len(chain))
statedb *state.StateDB
)

// Start the parallel nonce verifier.
@ -885,7 +886,11 @@ func (self *BlockChain) InsertChain(chain types.Blocks) (int, error) {

// Create a new statedb using the parent block and report an
// error if it fails.
statedb, err := state.New(self.GetBlock(block.ParentHash()).Root(), self.chainDb)
if statedb == nil {
statedb, err = state.New(self.GetBlock(block.ParentHash()).Root(), self.chainDb)
} else {
err = statedb.Reset(chain[i-1].Root())
}
if err != nil {
reportBlock(block, err)
return i, err
@ -1117,15 +1122,12 @@ func (self *BlockChain) update() {
}
}

// reportBlock reports the given block and error using the canonical block
// reporting tool. Reporting the block to the service is handled in a separate
// goroutine.
// reportBlock logs a bad block error.
func reportBlock(block *types.Block, err error) {
if glog.V(logger.Error) {
glog.Errorf("Bad block #%v (%s)\n", block.Number(), block.Hash().Hex())
glog.Errorf(" %v", err)
}
go ReportBlock(block, err)
}

// InsertHeaderChain attempts to insert the given header chain in to the local
@ -1213,3 +1215,6 @@ func (self *BlockChain) GetBlockHashesFromHash(hash common.Hash, max uint64) []c
func (self *BlockChain) GetHeaderByNumber(number uint64) *types.Header {
return self.hc.GetHeaderByNumber(number)
}

// Config retrieves the blockchain's chain configuration.
func (self *BlockChain) Config() *ChainConfig { return self.config }
@ -43,7 +43,7 @@ func WriteGenesisBlock(chainDb ethdb.Database, reader io.Reader) (*types.Block,
}

var genesis struct {
ChainConfig *ChainConfig
ChainConfig *ChainConfig `json:"config"`
Nonce string
Timestamp string
ParentHash string
@ -68,6 +68,28 @@ func New(root common.Hash, db ethdb.Database) (*StateDB, error) {
}, nil
}

// Reset clears out all emphemeral state objects from the state db, but keeps
// the underlying state trie to avoid reloading data for the next operations.
func (self *StateDB) Reset(root common.Hash) error {
var (
err error
tr = self.trie
)
if self.trie.Hash() != root {
if tr, err = trie.NewSecure(root, self.db); err != nil {
return err
}
}
*self = StateDB{
db: self.db,
trie: tr,
stateObjects: make(map[string]*StateObject),
refund: new(big.Int),
logs: make(map[common.Hash]vm.Logs),
}
return nil
}

func (self *StateDB) StartRecord(thash, bhash common.Hash, ti int) {
self.thash = thash
self.bhash = bhash
@ -127,7 +149,7 @@ func (self *StateDB) GetNonce(addr common.Address) uint64 {
return stateObject.nonce
}

return 0
return StartingNonce
}

func (self *StateDB) GetCode(addr common.Address) []byte {
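The new StateDB.Reset above keeps the trie when its root already matches and only reopens it otherwise, letting InsertChain reuse one state object across consecutive blocks. A hedged, standard-library-only sketch of the same reuse-or-rebuild shape with made-up types (not the real StateDB API):

```go
package main

import "fmt"

// store stands in for the expensive-to-open trie; purely illustrative.
type store struct{ root string }

func openStore(root string) *store { return &store{root: root} }

// cache mimics the Reset idea: keep the backing store when the root is
// unchanged, drop only the ephemeral objects accumulated per block.
type cache struct {
	backing   *store
	ephemeral map[string]int
}

func (c *cache) Reset(root string) {
	if c.backing == nil || c.backing.root != root {
		c.backing = openStore(root) // root moved: reopen the backing store
	}
	c.ephemeral = make(map[string]int) // always clear the per-block objects
}

func main() {
	c := &cache{}
	c.Reset("root-A")
	c.ephemeral["tx1"] = 1
	c.Reset("root-A") // same root: backing store reused, ephemeral state cleared
	fmt.Println(len(c.ephemeral), c.backing.root)
}
```
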
@ -60,8 +60,7 @@ type stateFn func() (*state.StateDB, error)
// two states over time as they are received and processed.
type TxPool struct {
config *ChainConfig
quit chan bool // Quitting channel
currentState stateFn // The state function which will allow us to do some pre checks
currentState stateFn // The state function which will allow us to do some pre checks
pendingState *state.ManagedState
gasLimit func() *big.Int // The current gas limit function callback
minGasPrice *big.Int
@ -72,6 +71,8 @@ type TxPool struct {
pending map[common.Hash]*types.Transaction // processable transactions
queue map[common.Address]map[common.Hash]*types.Transaction

wg sync.WaitGroup // for shutdown sync

homestead bool
}

@ -80,7 +81,6 @@ func NewTxPool(config *ChainConfig, eventMux *event.TypeMux, currentStateFn stat
config: config,
pending: make(map[common.Hash]*types.Transaction),
queue: make(map[common.Address]map[common.Hash]*types.Transaction),
quit: make(chan bool),
eventMux: eventMux,
currentState: currentStateFn,
gasLimit: gasLimitFn,
@ -90,12 +90,15 @@ func NewTxPool(config *ChainConfig, eventMux *event.TypeMux, currentStateFn stat
events: eventMux.Subscribe(ChainHeadEvent{}, GasPriceChanged{}, RemovedTransactionEvent{}),
}

pool.wg.Add(1)
go pool.eventLoop()

return pool
}

func (pool *TxPool) eventLoop() {
defer pool.wg.Done()

// Track chain events. When a chain events occurs (new chain canon block)
// we need to know the new state. The new state will help us determine
// the nonces in the managed state
@ -155,8 +158,8 @@ func (pool *TxPool) resetState() {
}

func (pool *TxPool) Stop() {
close(pool.quit)
pool.events.Unsubscribe()
pool.wg.Wait()
glog.V(logger.Info).Infoln("Transaction pool stopped")
}

@ -365,6 +368,9 @@ func (self *TxPool) AddTransactions(txs []*types.Transaction) {
// GetTransaction returns a transaction if it is contained in the pool
// and nil otherwise.
func (tp *TxPool) GetTransaction(hash common.Hash) *types.Transaction {
tp.mu.RLock()
defer tp.mu.RUnlock()

// check the txs first
if tx, ok := tp.pending[hash]; ok {
return tx
@ -418,12 +424,18 @@ func (self *TxPool) RemoveTransactions(txs types.Transactions) {
self.mu.Lock()
defer self.mu.Unlock()
for _, tx := range txs {
self.RemoveTx(tx.Hash())
self.removeTx(tx.Hash())
}
}

// RemoveTx removes the transaction with the given hash from the pool.
func (pool *TxPool) RemoveTx(hash common.Hash) {
pool.mu.Lock()
defer pool.mu.Unlock()
pool.removeTx(hash)
}

func (pool *TxPool) removeTx(hash common.Hash) {
// delete from pending pool
delete(pool.pending, hash)
// delete from queue
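The pool now shuts its event loop down by unsubscribing from the event mux and waiting on a WaitGroup instead of closing a quit channel. A small standard-library sketch of that shutdown shape, with illustrative names rather than the real TxPool types: the loop exits when its event channel is closed, and Stop blocks until the goroutine has fully drained.

```go
package main

import (
	"fmt"
	"sync"
)

type pool struct {
	events chan string // stands in for the event mux subscription
	wg     sync.WaitGroup
}

func newPool() *pool {
	p := &pool{events: make(chan string, 4)}
	p.wg.Add(1)
	go p.eventLoop()
	return p
}

func (p *pool) eventLoop() {
	defer p.wg.Done()
	for ev := range p.events { // ends when the subscription is closed
		fmt.Println("handled", ev)
	}
}

func (p *pool) Stop() {
	close(p.events) // analogous to events.Unsubscribe()
	p.wg.Wait()     // guarantees the loop has exited before Stop returns
	fmt.Println("Transaction pool stopped")
}

func main() {
	p := newPool()
	p.events <- "ChainHeadEvent"
	p.Stop()
}
```
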
@ -141,8 +141,10 @@ type Block struct {
// of the chain up to and including the block.
td *big.Int

// ReceivedAt is used by package eth to track block propagation time.
ReceivedAt time.Time
// These fields are used by package eth to track
// inter-peer block relay.
ReceivedAt time.Time
ReceivedFrom interface{}
}

// DeprecatedTd is an old relic for extracting the TD of a block. It is in the
@ -39,12 +39,12 @@ func TestBloom(t *testing.T) {
}

for _, data := range positive {
if !bloom.Test(new(big.Int).SetBytes([]byte(data))) {
if !bloom.TestBytes([]byte(data)) {
t.Error("expected", data, "to test true")
}
}
for _, data := range negative {
if bloom.Test(new(big.Int).SetBytes([]byte(data))) {
if bloom.TestBytes([]byte(data)) {
t.Error("did not expect", data, "to test true")
}
}
219 eth/api.go
@ -18,6 +18,7 @@ package eth

import (
"bytes"
"encoding/hex"
"encoding/json"
"errors"
"fmt"
@ -97,8 +98,11 @@ type PublicEthereumAPI struct {
}

// NewPublicEthereumAPI creates a new Ethereum protocol API.
func NewPublicEthereumAPI(e *Ethereum, gpo *GasPriceOracle) *PublicEthereumAPI {
return &PublicEthereumAPI{e, gpo}
func NewPublicEthereumAPI(e *Ethereum) *PublicEthereumAPI {
return &PublicEthereumAPI{
e: e,
gpo: e.gpo,
}
}

// GasPrice returns a suggestion for a gas price.
@ -109,7 +113,7 @@ func (s *PublicEthereumAPI) GasPrice() *big.Int {
// GetCompilers returns the collection of available smart contract compilers
func (s *PublicEthereumAPI) GetCompilers() ([]string, error) {
solc, err := s.e.Solc()
if err != nil && solc != nil {
if err == nil && solc != nil {
return []string{"Solidity"}, nil
}

@ -135,7 +139,7 @@ func (s *PublicEthereumAPI) Etherbase() (common.Address, error) {
return s.e.Etherbase()
}

// see Etherbase
// Coinbase is the address that mining rewards will be send to (alias for Etherbase)
func (s *PublicEthereumAPI) Coinbase() (common.Address, error) {
return s.Etherbase()
}
@ -204,18 +208,17 @@ func (s *PublicMinerAPI) SubmitWork(nonce rpc.HexNumber, solution, digest common
// result[0], 32 bytes hex encoded current block header pow-hash
// result[1], 32 bytes hex encoded seed hash used for DAG
// result[2], 32 bytes hex encoded boundary condition ("target"), 2^256/difficulty
func (s *PublicMinerAPI) GetWork() ([]string, error) {
func (s *PublicMinerAPI) GetWork() (work [3]string, err error) {
if !s.e.IsMining() {
if err := s.e.StartMining(0, ""); err != nil {
return nil, err
return work, err
}
}
if work, err := s.agent.GetWork(); err == nil {
return work[:], nil
} else {
glog.Infof("%v\n", err)
if work, err = s.agent.GetWork(); err == nil {
return
}
return nil, fmt.Errorf("mining not ready")
glog.V(logger.Debug).Infof("%v", err)
return work, fmt.Errorf("mining not ready")
}

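The GetWork comment above defines result[2] as the boundary condition 2^256/difficulty. A standard-library sketch of that arithmetic (the difficulty value here is just an example):

```go
package main

import (
	"fmt"
	"math/big"
)

func main() {
	difficulty := big.NewInt(131072) // example difficulty

	// target = 2^256 / difficulty
	target := new(big.Int).Div(new(big.Int).Lsh(big.NewInt(1), 256), difficulty)

	// Left-pad to the fixed 32-byte width before hex encoding.
	b := target.Bytes()
	buf := make([]byte, 32)
	copy(buf[32-len(b):], b)
	fmt.Printf("0x%x\n", buf)
}
```
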
// SubmitHashrate can be used for remote miners to submit their hash rate. This enables the node to report the combined
@ -410,14 +413,23 @@ func (s *PublicAccountAPI) Accounts() []accounts.Account {
}

// PrivateAccountAPI provides an API to access accounts managed by this node.
// It offers methods to create, (un)lock en list accounts.
// It offers methods to create, (un)lock en list accounts. Some methods accept
// passwords and are therefore considered private by default.
type PrivateAccountAPI struct {
am *accounts.Manager
am *accounts.Manager
txPool *core.TxPool
txMu *sync.Mutex
gpo *GasPriceOracle
}

// NewPrivateAccountAPI create a new PrivateAccountAPI.
func NewPrivateAccountAPI(am *accounts.Manager) *PrivateAccountAPI {
return &PrivateAccountAPI{am}
func NewPrivateAccountAPI(e *Ethereum) *PrivateAccountAPI {
return &PrivateAccountAPI{
am: e.accountManager,
txPool: e.txPool,
txMu: &e.txMu,
gpo: e.gpo,
}
}

// ListAccounts will return a list of addresses for accounts this node manages.
@ -439,6 +451,18 @@ func (s *PrivateAccountAPI) NewAccount(password string) (common.Address, error)
return common.Address{}, err
}

// ImportRawKey stores the given hex encoded ECDSA key into the key directory,
// encrypting it with the passphrase.
func (s *PrivateAccountAPI) ImportRawKey(privkey string, password string) (common.Address, error) {
hexkey, err := hex.DecodeString(privkey)
if err != nil {
return common.Address{}, err
}

acc, err := s.am.ImportECDSA(crypto.ToECDSA(hexkey), password)
return acc.Address, err
}

// UnlockAccount will unlock the account associated with the given address with
// the given password for duration seconds. If duration is nil it will use a
// default of 300 seconds. It returns an indication if the account was unlocked.
@ -459,6 +483,34 @@ func (s *PrivateAccountAPI) LockAccount(addr common.Address) bool {
return s.am.Lock(addr) == nil
}

// SignAndSendTransaction will create a transaction from the given arguments and
// tries to sign it with the key associated with args.To. If the given passwd isn't
// able to decrypt the key it fails.
func (s *PrivateAccountAPI) SignAndSendTransaction(args SendTxArgs, passwd string) (common.Hash, error) {
args = prepareSendTxArgs(args, s.gpo)

s.txMu.Lock()
defer s.txMu.Unlock()

if args.Nonce == nil {
args.Nonce = rpc.NewHexNumber(s.txPool.State().GetNonce(args.From))
}

var tx *types.Transaction
if args.To == nil {
tx = types.NewContractCreation(args.Nonce.Uint64(), args.Value.BigInt(), args.Gas.BigInt(), args.GasPrice.BigInt(), common.FromHex(args.Data))
} else {
tx = types.NewTransaction(args.Nonce.Uint64(), *args.To, args.Value.BigInt(), args.Gas.BigInt(), args.GasPrice.BigInt(), common.FromHex(args.Data))
}

signature, err := s.am.SignWithPassphrase(args.From, passwd, tx.SigHash().Bytes())
if err != nil {
return common.Hash{}, err
}

return submitTransaction(s.txPool, tx, signature)
}

// PublicBlockChainAPI provides an API to access the Ethereum blockchain.
// It offers only methods that operate on public data that is freely available to anyone.
type PublicBlockChainAPI struct {
@ -622,15 +674,14 @@ func (s *PublicBlockChainAPI) NewBlocks(ctx context.Context, args NewBlocksArgs)
// add a callback that is called on chain events which will format the block and notify the client
s.muNewBlockSubscriptions.Lock()
s.newBlockSubscriptions[subscription.ID()] = func(e core.ChainEvent) error {
if notification, err := s.rpcOutputBlock(e.Block, args.IncludeTransactions, args.TransactionDetails); err == nil {
notification, err := s.rpcOutputBlock(e.Block, args.IncludeTransactions, args.TransactionDetails)
if err == nil {
return subscription.Notify(notification)
} else {
glog.V(logger.Warn).Info("unable to format block %v\n", err)
}
glog.V(logger.Warn).Info("unable to format block %v\n", err)
return nil
}
s.muNewBlockSubscriptions.Unlock()

return subscription, nil
}

@ -677,6 +728,7 @@ func (m callmsg) Gas() *big.Int { return m.gas }
func (m callmsg) Value() *big.Int { return m.value }
func (m callmsg) Data() []byte { return m.data }

// CallArgs represents the arguments for a call.
type CallArgs struct {
From common.Address `json:"from"`
To *common.Address `json:"to"`
@ -882,24 +934,24 @@ type PublicTransactionPoolAPI struct {
miner *miner.Miner
am *accounts.Manager
txPool *core.TxPool
txMu sync.Mutex
txMu *sync.Mutex
muPendingTxSubs sync.Mutex
pendingTxSubs map[string]rpc.Subscription
}

// NewPublicTransactionPoolAPI creates a new RPC service with methods specific for the transaction pool.
func NewPublicTransactionPoolAPI(e *Ethereum, gpo *GasPriceOracle) *PublicTransactionPoolAPI {
func NewPublicTransactionPoolAPI(e *Ethereum) *PublicTransactionPoolAPI {
api := &PublicTransactionPoolAPI{
eventMux: e.EventMux(),
gpo: gpo,
chainDb: e.ChainDb(),
bc: e.BlockChain(),
am: e.AccountManager(),
txPool: e.TxPool(),
miner: e.Miner(),
eventMux: e.eventMux,
gpo: e.gpo,
chainDb: e.chainDb,
bc: e.blockchain,
am: e.accountManager,
txPool: e.txPool,
txMu: &e.txMu,
miner: e.miner,
pendingTxSubs: make(map[string]rpc.Subscription),
}

go api.subscriptionLoop()

return api
@ -1096,6 +1148,7 @@ func (s *PublicTransactionPoolAPI) sign(addr common.Address, tx *types.Transacti
return tx.WithSignature(signature)
}

// SendTxArgs represents the arguments to sumbit a new transaction into the transaction pool.
type SendTxArgs struct {
From common.Address `json:"from"`
To *common.Address `json:"to"`
@ -1106,18 +1159,47 @@ type SendTxArgs struct {
Nonce *rpc.HexNumber `json:"nonce"`
}

// SendTransaction will create a transaction for the given transaction argument, sign it and submit it to the
// transaction pool.
func (s *PublicTransactionPoolAPI) SendTransaction(args SendTxArgs) (common.Hash, error) {
// prepareSendTxArgs is a helper function that fills in default values for unspecified tx fields.
func prepareSendTxArgs(args SendTxArgs, gpo *GasPriceOracle) SendTxArgs {
if args.Gas == nil {
args.Gas = rpc.NewHexNumber(defaultGas)
}
if args.GasPrice == nil {
args.GasPrice = rpc.NewHexNumber(s.gpo.SuggestPrice())
args.GasPrice = rpc.NewHexNumber(gpo.SuggestPrice())
}
if args.Value == nil {
args.Value = rpc.NewHexNumber(0)
}
return args
}

// submitTransaction is a helper function that submits tx to txPool and creates a log entry.
func submitTransaction(txPool *core.TxPool, tx *types.Transaction, signature []byte) (common.Hash, error) {
signedTx, err := tx.WithSignature(signature)
if err != nil {
return common.Hash{}, err
}

txPool.SetLocal(signedTx)
if err := txPool.Add(signedTx); err != nil {
return common.Hash{}, err
}

if signedTx.To() == nil {
from, _ := signedTx.From()
addr := crypto.CreateAddress(from, signedTx.Nonce())
glog.V(logger.Info).Infof("Tx(%s) created: %s\n", signedTx.Hash().Hex(), addr.Hex())
} else {
glog.V(logger.Info).Infof("Tx(%s) to: %s\n", signedTx.Hash().Hex(), tx.To().Hex())
}

return signedTx.Hash(), nil
}

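prepareSendTxArgs and submitTransaction above pull the gas/price/value defaulting and the pool submission out of SendTransaction so SignAndSendTransaction can share them. A tiny hedged sketch of the same fill-in-missing-defaults shape, using made-up types rather than the real SendTxArgs:

```go
package main

import "fmt"

type sendArgs struct {
	Gas, GasPrice, Value *int64 // nil means "not supplied by the caller"
}

// prepareArgs fills every unset field with a default, mirroring the helper above.
func prepareArgs(args sendArgs, suggestedPrice int64) sendArgs {
	def := func(p *int64, v int64) *int64 {
		if p == nil {
			return &v
		}
		return p
	}
	args.Gas = def(args.Gas, 90000)
	args.GasPrice = def(args.GasPrice, suggestedPrice)
	args.Value = def(args.Value, 0)
	return args
}

func main() {
	out := prepareArgs(sendArgs{}, 20000000000)
	fmt.Println(*out.Gas, *out.GasPrice, *out.Value)
}
```
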
// SendTransaction creates a transaction for the given argument, sign it and submit it to the
// transaction pool.
func (s *PublicTransactionPoolAPI) SendTransaction(args SendTxArgs) (common.Hash, error) {
args = prepareSendTxArgs(args, s.gpo)

s.txMu.Lock()
defer s.txMu.Unlock()
@ -1127,32 +1209,18 @@ func (s *PublicTransactionPoolAPI) SendTransaction(args SendTxArgs) (common.Hash
}

var tx *types.Transaction
contractCreation := (args.To == nil)

if contractCreation {
if args.To == nil {
tx = types.NewContractCreation(args.Nonce.Uint64(), args.Value.BigInt(), args.Gas.BigInt(), args.GasPrice.BigInt(), common.FromHex(args.Data))
} else {
tx = types.NewTransaction(args.Nonce.Uint64(), *args.To, args.Value.BigInt(), args.Gas.BigInt(), args.GasPrice.BigInt(), common.FromHex(args.Data))
}

signedTx, err := s.sign(args.From, tx)
signature, err := s.am.Sign(args.From, tx.SigHash().Bytes())
if err != nil {
return common.Hash{}, err
}

s.txPool.SetLocal(signedTx)
if err := s.txPool.Add(signedTx); err != nil {
return common.Hash{}, err
}

if contractCreation {
addr := crypto.CreateAddress(args.From, args.Nonce.Uint64())
glog.V(logger.Info).Infof("Tx(%s) created: %s\n", signedTx.Hash().Hex(), addr.Hex())
} else {
glog.V(logger.Info).Infof("Tx(%s) to: %s\n", signedTx.Hash().Hex(), tx.To().Hex())
}

return signedTx.Hash(), nil
return submitTransaction(s.txPool, tx, signature)
}

// SendRawTransaction will add the signed transaction to the transaction pool.
@ -1189,6 +1257,7 @@ func (s *PublicTransactionPoolAPI) Sign(addr common.Address, hash common.Hash) (
return common.ToHex(signature), error
}

// SignTransactionArgs represents the arguments to sign a transaction.
type SignTransactionArgs struct {
From common.Address
To *common.Address
@ -1215,6 +1284,7 @@ type Tx struct {
Hash common.Hash `json:"hash"`
}

// UnmarshalJSON parses JSON data into tx.
func (tx *Tx) UnmarshalJSON(b []byte) (err error) {
req := struct {
To *common.Address `json:"to"`
@ -1255,8 +1325,7 @@ func (tx *Tx) UnmarshalJSON(b []byte) (err error) {
tx.GasPrice = rpc.NewHexNumber(int64(50000000000))
}

contractCreation := (req.To == nil)
if contractCreation {
if req.To == nil {
tx.tx = types.NewContractCreation(tx.Nonce.Uint64(), tx.Value.BigInt(), tx.GasLimit.BigInt(), tx.GasPrice.BigInt(), data)
} else {
tx.tx = types.NewTransaction(tx.Nonce.Uint64(), *tx.To, tx.Value.BigInt(), tx.GasLimit.BigInt(), tx.GasPrice.BigInt(), data)
@ -1265,6 +1334,7 @@ func (tx *Tx) UnmarshalJSON(b []byte) (err error) {
return nil
}

// SignTransactionResult represents a RLP encoded signed transaction.
type SignTransactionResult struct {
Raw string `json:"raw"`
Tx *Tx `json:"tx"`
@ -1307,9 +1377,7 @@ func (s *PublicTransactionPoolAPI) SignTransaction(args SignTransactionArgs) (*S
}

var tx *types.Transaction
contractCreation := (args.To == nil)

if contractCreation {
if args.To == nil {
tx = types.NewContractCreation(args.Nonce.Uint64(), args.Value.BigInt(), args.Gas.BigInt(), args.GasPrice.BigInt(), common.FromHex(args.Data))
} else {
tx = types.NewTransaction(args.Nonce.Uint64(), *args.To, args.Value.BigInt(), args.Gas.BigInt(), args.GasPrice.BigInt(), common.FromHex(args.Data))
@ -1325,14 +1393,14 @@ func (s *PublicTransactionPoolAPI) SignTransaction(args SignTransactionArgs) (*S
return nil, err
}

return &SignTransactionResult{"0x" + common.Bytes2Hex(data), newTx(tx)}, nil
return &SignTransactionResult{"0x" + common.Bytes2Hex(data), newTx(signedTx)}, nil
}

// PendingTransactions returns the transactions that are in the transaction pool and have a from address that is one of
// the accounts this node manages.
func (s *PublicTransactionPoolAPI) PendingTransactions() []*RPCTransaction {
pending := s.txPool.GetTransactions()
transactions := make([]*RPCTransaction, 0)
transactions := make([]*RPCTransaction, 0, len(pending))
for _, tx := range pending {
from, _ := tx.FromFrontier()
if s.am.HasAddress(from) {
@ -1342,7 +1410,7 @@ func (s *PublicTransactionPoolAPI) PendingTransactions() []*RPCTransaction {
return transactions
}

// NewPendingTransaction creates a subscription that is triggered each time a transaction enters the transaction pool
// NewPendingTransactions creates a subscription that is triggered each time a transaction enters the transaction pool
// and is send from one of the transactions this nodes manages.
func (s *PublicTransactionPoolAPI) NewPendingTransactions(ctx context.Context) (rpc.Subscription, error) {
notifier, supported := rpc.NotifierFromContext(ctx)
@ -1382,8 +1450,7 @@ func (s *PublicTransactionPoolAPI) Resend(tx Tx, gasPrice, gasLimit *rpc.HexNumb
}

var newTx *types.Transaction
contractCreation := (tx.tx.To() == nil)
if contractCreation {
if tx.tx.To() == nil {
newTx = types.NewContractCreation(tx.tx.Nonce(), tx.tx.Value(), gasPrice.BigInt(), gasLimit.BigInt(), tx.tx.Data())
} else {
newTx = types.NewTransaction(tx.tx.Nonce(), *tx.tx.To(), tx.tx.Value(), gasPrice.BigInt(), gasLimit.BigInt(), tx.tx.Data())
@ -1584,7 +1651,7 @@ func (api *PrivateDebugAPI) ChaindbProperty(property string) (string, error) {
return ldb.LDB().GetProperty(property)
}

// BlockTraceResults is the returned value when replaying a block to check for
// BlockTraceResult is the returned value when replaying a block to check for
// consensus results and full VM trace logs for all included transactions.
type BlockTraceResult struct {
Validated bool `json:"validated"`
@ -1619,7 +1686,7 @@ func (api *PrivateDebugAPI) TraceBlockFromFile(file string, config *vm.Config) B
return api.TraceBlock(blockRlp, config)
}

// TraceProcessBlock processes the block by canonical block number.
// TraceBlockByNumber processes the block by canonical block number.
func (api *PrivateDebugAPI) TraceBlockByNumber(number uint64, config *vm.Config) BlockTraceResult {
// Fetch the block that we aim to reprocess
block := api.eth.BlockChain().GetBlockByNumber(number)
@ -1724,15 +1791,6 @@ type structLogRes struct {
Storage map[string]string `json:"storage"`
}

// VmLoggerOptions are the options used for debugging transactions and capturing
// specific data.
type VmLoggerOptions struct {
DisableMemory bool // disable memory capture
DisableStack bool // disable stack capture
DisableStorage bool // disable storage capture
FullStorage bool // show full storage (slow)
}

// formatLogs formats EVM returned structured logs for json output
func formatLogs(structLogs []vm.StructLog) []structLogRes {
formattedStructLogs := make([]structLogRes, len(structLogs))
@ -1774,25 +1832,25 @@ func formatError(err error) string {

// TraceTransaction returns the structured logs created during the execution of EVM
// and returns them as a JSON object.
func (s *PrivateDebugAPI) TraceTransaction(txHash common.Hash, logger *vm.LogConfig) (*ExecutionResult, error) {
func (api *PrivateDebugAPI) TraceTransaction(txHash common.Hash, logger *vm.LogConfig) (*ExecutionResult, error) {
if logger == nil {
logger = new(vm.LogConfig)
}
// Retrieve the tx from the chain and the containing block
tx, blockHash, _, txIndex := core.GetTransaction(s.eth.ChainDb(), txHash)
tx, blockHash, _, txIndex := core.GetTransaction(api.eth.ChainDb(), txHash)
if tx == nil {
return nil, fmt.Errorf("transaction %x not found", txHash)
}
block := s.eth.BlockChain().GetBlock(blockHash)
block := api.eth.BlockChain().GetBlock(blockHash)
if block == nil {
return nil, fmt.Errorf("block %x not found", blockHash)
}
// Create the state database to mutate and eventually trace
parent := s.eth.BlockChain().GetBlock(block.ParentHash())
parent := api.eth.BlockChain().GetBlock(block.ParentHash())
if parent == nil {
return nil, fmt.Errorf("block parent %x not found", block.ParentHash())
}
stateDb, err := state.New(parent.Root(), s.eth.ChainDb())
stateDb, err := state.New(parent.Root(), api.eth.ChainDb())
if err != nil {
return nil, err
}
@ -1813,7 +1871,7 @@ func (s *PrivateDebugAPI) TraceTransaction(txHash common.Hash, logger *vm.LogCon
}
// Mutate the state if we haven't reached the tracing transaction yet
if uint64(idx) < txIndex {
vmenv := core.NewEnv(stateDb, s.config, s.eth.BlockChain(), msg, parent.Header(), vm.Config{})
vmenv := core.NewEnv(stateDb, api.config, api.eth.BlockChain(), msg, block.Header(), vm.Config{})
_, _, err := core.ApplyMessage(vmenv, msg, new(core.GasPool).AddGas(tx.Gas()))
if err != nil {
return nil, fmt.Errorf("mutation failed: %v", err)
@ -1821,7 +1879,7 @@ func (s *PrivateDebugAPI) TraceTransaction(txHash common.Hash, logger *vm.LogCon
continue
}
// Otherwise trace the transaction and return
vmenv := core.NewEnv(stateDb, s.config, s.eth.BlockChain(), msg, parent.Header(), vm.Config{Debug: true, Logger: *logger})
vmenv := core.NewEnv(stateDb, api.config, api.eth.BlockChain(), msg, block.Header(), vm.Config{Debug: true, Logger: *logger})
ret, gas, err := core.ApplyMessage(vmenv, msg, new(core.GasPool).AddGas(tx.Gas()))
if err != nil {
return nil, fmt.Errorf("tracing failed: %v", err)
@ -1835,6 +1893,7 @@ func (s *PrivateDebugAPI) TraceTransaction(txHash common.Hash, logger *vm.LogCon
return nil, errors.New("database inconsistency")
}

// TraceCall executes a call and returns the amount of gas, created logs and optionally returned values.
func (s *PublicBlockChainAPI) TraceCall(args CallArgs, blockNr rpc.BlockNumber) (*ExecutionResult, error) {
// Fetch the state associated with the block number
stateDb, block, err := stateAndBlockByNumber(s.miner, s.bc, blockNr, s.chainDb)
@ -1903,12 +1962,12 @@ func (s *PublicNetAPI) Listening() bool {
return true // always listening
}

// Peercount returns the number of connected peers
// PeerCount returns the number of connected peers
func (s *PublicNetAPI) PeerCount() *rpc.HexNumber {
return rpc.NewHexNumber(s.net.PeerCount())
}

// ProtocolVersion returns the current ethereum protocol version.
// Version returns the current ethereum protocol version.
func (s *PublicNetAPI) Version() string {
return fmt.Sprintf("%d", s.networkVersion)
}
@ -26,6 +26,7 @@ import (
"path/filepath"
"regexp"
"strings"
"sync"
"time"

"github.com/ethereum/ethash"
@ -113,12 +114,14 @@ type Ethereum struct {

// Handlers
txPool *core.TxPool
txMu sync.Mutex
blockchain *core.BlockChain
accountManager *accounts.Manager
pow *ethash.Ethash
protocolManager *ProtocolManager
SolcPath string
solc *compiler.Solidity
gpo *GasPriceOracle

GpoMinGasPrice *big.Int
GpoMaxGasPrice *big.Int
@ -260,6 +263,8 @@ func New(ctx *node.ServiceContext, config *Config) (*Ethereum, error) {
}
return nil, err
}
eth.gpo = NewGasPriceOracle(eth)

newPool := core.NewTxPool(eth.chainConfig, eth.EventMux(), eth.blockchain.State, eth.blockchain.GasLimit)
eth.txPool = newPool

@ -276,34 +281,31 @@ func New(ctx *node.ServiceContext, config *Config) (*Ethereum, error) {
// APIs returns the collection of RPC services the ethereum package offers.
// NOTE, some of these services probably need to be moved to somewhere else.
func (s *Ethereum) APIs() []rpc.API {
// share gas price oracle in API's
gpo := NewGasPriceOracle(s)

return []rpc.API{
{
Namespace: "eth",
Version: "1.0",
Service: NewPublicEthereumAPI(s, gpo),
Service: NewPublicEthereumAPI(s),
Public: true,
}, {
Namespace: "eth",
Version: "1.0",
Service: NewPublicAccountAPI(s.AccountManager()),
Service: NewPublicAccountAPI(s.accountManager),
Public: true,
}, {
Namespace: "personal",
Version: "1.0",
Service: NewPrivateAccountAPI(s.AccountManager()),
Service: NewPrivateAccountAPI(s),
Public: false,
}, {
Namespace: "eth",
Version: "1.0",
Service: NewPublicBlockChainAPI(s.chainConfig, s.BlockChain(), s.Miner(), s.ChainDb(), gpo, s.EventMux(), s.AccountManager()),
Service: NewPublicBlockChainAPI(s.chainConfig, s.blockchain, s.miner, s.chainDb, s.gpo, s.eventMux, s.accountManager),
Public: true,
}, {
Namespace: "eth",
Version: "1.0",
Service: NewPublicTransactionPoolAPI(s, gpo),
Service: NewPublicTransactionPoolAPI(s),
Public: true,
}, {
Namespace: "eth",
@ -313,7 +315,7 @@ func (s *Ethereum) APIs() []rpc.API {
}, {
Namespace: "eth",
Version: "1.0",
Service: downloader.NewPublicDownloaderAPI(s.Downloader(), s.EventMux()),
Service: downloader.NewPublicDownloaderAPI(s.protocolManager.downloader, s.eventMux),
Public: true,
}, {
Namespace: "miner",
@ -328,7 +330,7 @@ func (s *Ethereum) APIs() []rpc.API {
}, {
Namespace: "eth",
Version: "1.0",
Service: filters.NewPublicFilterAPI(s.ChainDb(), s.EventMux()),
Service: filters.NewPublicFilterAPI(s.chainDb, s.eventMux),
Public: true,
}, {
Namespace: "admin",
@ -351,7 +353,7 @@ func (s *Ethereum) APIs() []rpc.API {
}, {
Namespace: "admin",
Version: "1.0",
Service: ethreg.NewPrivateRegistarAPI(s.chainConfig, s.BlockChain(), s.ChainDb(), s.TxPool(), s.AccountManager()),
Service: ethreg.NewPrivateRegistarAPI(s.chainConfig, s.blockchain, s.chainDb, s.txPool, s.accountManager),
},
}
}
@ -416,6 +418,7 @@ func (s *Ethereum) Stop() error {
s.blockchain.Stop()
s.protocolManager.Stop()
s.txPool.Stop()
s.miner.Stop()
s.eventMux.Stop()

s.StopAutoDAG()
74 eth/bad_block.go Normal file
@ -0,0 +1,74 @@
// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package eth

import (
"bytes"
"encoding/json"
"fmt"
"net/http"
"time"

"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/logger"
"github.com/ethereum/go-ethereum/logger/glog"
"github.com/ethereum/go-ethereum/rlp"
)

const (
// The Ethereum main network genesis block.
defaultGenesisHash = "0xd4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3"
badBlocksURL = "https://badblocks.ethdev.com"
)

var EnableBadBlockReporting = false

func sendBadBlockReport(block *types.Block, err error) {
if !EnableBadBlockReporting {
return
}

var (
blockRLP, _ = rlp.EncodeToBytes(block)
params = map[string]interface{}{
"block": common.Bytes2Hex(blockRLP),
"blockHash": block.Hash().Hex(),
"errortype": err.Error(),
"client": "go",
}
)
if !block.ReceivedAt.IsZero() {
params["receivedAt"] = block.ReceivedAt.UTC().String()
}
if p, ok := block.ReceivedFrom.(*peer); ok {
params["receivedFrom"] = map[string]interface{}{
"enode": fmt.Sprintf("enode://%x@%v", p.ID(), p.RemoteAddr()),
"name": p.Name(),
"protocolVersion": p.version,
}
}
jsonStr, _ := json.Marshal(map[string]interface{}{"method": "eth_badBlock", "id": "1", "jsonrpc": "2.0", "params": []interface{}{params}})
client := http.Client{Timeout: 8 * time.Second}
resp, err := client.Post(badBlocksURL, "application/json", bytes.NewReader(jsonStr))
if err != nil {
glog.V(logger.Debug).Infoln(err)
return
}
glog.V(logger.Debug).Infof("Bad Block Report posted (%d)", resp.StatusCode)
resp.Body.Close()
}
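The new sendBadBlockReport above posts an eth_badBlock JSON-RPC call with an 8-second client timeout. A self-contained sketch of that request shape using only the standard library; the endpoint and payload field names mirror the code above, while the block data here is a placeholder.

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
	"time"
)

func main() {
	params := map[string]interface{}{
		"block":     "f90218...", // RLP-encoded block as hex (placeholder)
		"blockHash": "0xabc123",  // placeholder hash
		"errortype": "invalid uncle hash",
		"client":    "go",
	}
	payload, _ := json.Marshal(map[string]interface{}{
		"method":  "eth_badBlock",
		"id":      "1",
		"jsonrpc": "2.0",
		"params":  []interface{}{params},
	})

	// Bounded timeout so a slow reporting endpoint cannot stall the caller.
	client := http.Client{Timeout: 8 * time.Second}
	resp, err := client.Post("https://badblocks.ethdev.com", "application/json", bytes.NewReader(payload))
	if err != nil {
		fmt.Println("report failed:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println("report posted:", resp.StatusCode)
}
```
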
114 eth/bind.go Normal file
@ -0,0 +1,114 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package eth

import (
"math/big"

"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/rpc"
)

// ContractBackend implements bind.ContractBackend with direct calls to Ethereum
// internals to support operating on contracts within subprotocols like eth and
// swarm.
//
// Internally this backend uses the already exposed API endpoints of the Ethereum
// object. These should be rewritten to internal Go method calls when the Go API
// is refactored to support a clean library use.
type ContractBackend struct {
eapi *PublicEthereumAPI // Wrapper around the Ethereum object to access metadata
bcapi *PublicBlockChainAPI // Wrapper around the blockchain to access chain data
txapi *PublicTransactionPoolAPI // Wrapper around the transaction pool to access transaction data
}

// NewContractBackend creates a new native contract backend using an existing
// Etheruem object.
func NewContractBackend(eth *Ethereum) *ContractBackend {
return &ContractBackend{
eapi: NewPublicEthereumAPI(eth),
bcapi: NewPublicBlockChainAPI(eth.chainConfig, eth.blockchain, eth.miner, eth.chainDb, eth.gpo, eth.eventMux, eth.accountManager),
txapi: NewPublicTransactionPoolAPI(eth),
}
}

// HasCode implements bind.ContractVerifier.HasCode by retrieving any code associated
// with the contract from the local API, and checking its size.
func (b *ContractBackend) HasCode(contract common.Address, pending bool) (bool, error) {
block := rpc.LatestBlockNumber
if pending {
block = rpc.PendingBlockNumber
}
out, err := b.bcapi.GetCode(contract, block)
return len(common.FromHex(out)) > 0, err
}

// ContractCall implements bind.ContractCaller executing an Ethereum contract
// call with the specified data as the input. The pending flag requests execution
// against the pending block, not the stable head of the chain.
func (b *ContractBackend) ContractCall(contract common.Address, data []byte, pending bool) ([]byte, error) {
// Convert the input args to the API spec
args := CallArgs{
To: &contract,
Data: common.ToHex(data),
}
block := rpc.LatestBlockNumber
if pending {
block = rpc.PendingBlockNumber
}
// Execute the call and convert the output back to Go types
out, err := b.bcapi.Call(args, block)
return common.FromHex(out), err
}

// PendingAccountNonce implements bind.ContractTransactor retrieving the current
// pending nonce associated with an account.
func (b *ContractBackend) PendingAccountNonce(account common.Address) (uint64, error) {
out, err := b.txapi.GetTransactionCount(account, rpc.PendingBlockNumber)
return out.Uint64(), err
}

// SuggestGasPrice implements bind.ContractTransactor retrieving the currently
// suggested gas price to allow a timely execution of a transaction.
func (b *ContractBackend) SuggestGasPrice() (*big.Int, error) {
return b.eapi.GasPrice(), nil
}

// EstimateGasLimit implements bind.ContractTransactor triing to estimate the gas
// needed to execute a specific transaction based on the current pending state of
// the backend blockchain. There is no guarantee that this is the true gas limit
// requirement as other transactions may be added or removed by miners, but it
// should provide a basis for setting a reasonable default.
func (b *ContractBackend) EstimateGasLimit(sender common.Address, contract *common.Address, value *big.Int, data []byte) (*big.Int, error) {
out, err := b.bcapi.EstimateGas(CallArgs{
From: sender,
To: contract,
Value: *rpc.NewHexNumber(value),
Data: common.ToHex(data),
})
return out.BigInt(), err
}

// SendTransaction implements bind.ContractTransactor injects the transaction
// into the pending pool for execution.
func (b *ContractBackend) SendTransaction(tx *types.Transaction) error {
raw, _ := rlp.EncodeToBytes(tx)
_, err := b.txapi.SendRawTransaction(common.ToHex(raw))
return err
}
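ContractBackend above adapts the node's public API objects to the contract-binding interfaces by delegating each call and mapping the pending flag onto a block selector. A minimal hedged sketch of that adapter shape with stand-in interfaces (not the real bind package types):

```go
package main

import "fmt"

// caller stands in for the interface the bindings expect.
type caller interface {
	ContractCall(contract string, data []byte, pending bool) ([]byte, error)
}

// chainAPI stands in for the node-side API object being wrapped.
type chainAPI struct{}

func (chainAPI) Call(contract string, data []byte, block string) ([]byte, error) {
	return []byte(fmt.Sprintf("call %s at %s with %d bytes", contract, block, len(data))), nil
}

// backend adapts chainAPI to the caller interface, choosing the block the
// same way ContractBackend does: pending when requested, latest otherwise.
type backend struct{ bcapi chainAPI }

func (b backend) ContractCall(contract string, data []byte, pending bool) ([]byte, error) {
	block := "latest"
	if pending {
		block = "pending"
	}
	return b.bcapi.Call(contract, data, block)
}

func main() {
	var c caller = backend{}
	out, _ := c.ContractCall("0xdeadbeef", []byte{0x01, 0x02}, true)
	fmt.Println(string(out))
}
```
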
File diff suppressed because it is too large
@ -43,8 +43,9 @@ var (
genesis = core.GenesisBlockForTesting(testdb, testAddress, big.NewInt(1000000000))
)

// Reduce the block cache limit, otherwise the tests will be very heavy.
// Reduce some of the parameters to make the tester faster.
func init() {
MaxForkAncestry = uint64(10000)
blockCacheLimit = 1024
}

@ -52,11 +53,15 @@ func init() {
// the returned hash chain is ordered head->parent. In addition, every 3rd block
// contains a transaction and every 5th an uncle to allow testing correct block
// reassembly.
func makeChain(n int, seed byte, parent *types.Block, parentReceipts types.Receipts) ([]common.Hash, map[common.Hash]*types.Header, map[common.Hash]*types.Block, map[common.Hash]types.Receipts) {
func makeChain(n int, seed byte, parent *types.Block, parentReceipts types.Receipts, heavy bool) ([]common.Hash, map[common.Hash]*types.Header, map[common.Hash]*types.Block, map[common.Hash]types.Receipts) {
// Generate the block chain
blocks, receipts := core.GenerateChain(parent, testdb, n, func(i int, block *core.BlockGen) {
block.SetCoinbase(common.Address{seed})

// If a heavy chain is requested, delay blocks to raise difficulty
if heavy {
block.OffsetTime(-1)
}
// If the block number is multiple of 3, send a bonus transaction to the miner
if parent == genesis && i%3 == 0 {
tx, err := types.NewTransaction(block.TxNonce(testAddress), common.Address{seed}, big.NewInt(1000), params.TxGas, nil, nil).SignECDSA(testKey)
@ -97,15 +102,19 @@ func makeChain(n int, seed byte, parent *types.Block, parentReceipts types.Recei

// makeChainFork creates two chains of length n, such that h1[:f] and
// h2[:f] are different but have a common suffix of length n-f.
func makeChainFork(n, f int, parent *types.Block, parentReceipts types.Receipts) ([]common.Hash, []common.Hash, map[common.Hash]*types.Header, map[common.Hash]*types.Header, map[common.Hash]*types.Block, map[common.Hash]*types.Block, map[common.Hash]types.Receipts, map[common.Hash]types.Receipts) {
func makeChainFork(n, f int, parent *types.Block, parentReceipts types.Receipts, balanced bool) ([]common.Hash, []common.Hash, map[common.Hash]*types.Header, map[common.Hash]*types.Header, map[common.Hash]*types.Block, map[common.Hash]*types.Block, map[common.Hash]types.Receipts, map[common.Hash]types.Receipts) {
// Create the common suffix
hashes, headers, blocks, receipts := makeChain(n-f, 0, parent, parentReceipts)
hashes, headers, blocks, receipts := makeChain(n-f, 0, parent, parentReceipts, false)

// Create the forks
hashes1, headers1, blocks1, receipts1 := makeChain(f, 1, blocks[hashes[0]], receipts[hashes[0]])
// Create the forks, making the second heavyer if non balanced forks were requested
hashes1, headers1, blocks1, receipts1 := makeChain(f, 1, blocks[hashes[0]], receipts[hashes[0]], false)
hashes1 = append(hashes1, hashes[1:]...)

hashes2, headers2, blocks2, receipts2 := makeChain(f, 2, blocks[hashes[0]], receipts[hashes[0]])
heavy := false
if !balanced {
heavy = true
}
hashes2, headers2, blocks2, receipts2 := makeChain(f, 2, blocks[hashes[0]], receipts[hashes[0]], heavy)
hashes2 = append(hashes2, hashes[1:]...)

for hash, header := range headers {
@ -140,22 +149,25 @@ type downloadTester struct {
peerReceipts map[string]map[common.Hash]types.Receipts // Receipts belonging to different test peers
peerChainTds map[string]map[common.Hash]*big.Int // Total difficulties of the blocks in the peer chains

peerMissingStates map[string]map[common.Hash]bool // State entries that fast sync should not return

lock sync.RWMutex
}

// newTester creates a new downloader test mocker.
func newTester() *downloadTester {
tester := &downloadTester{
ownHashes: []common.Hash{genesis.Hash()},
ownHeaders: map[common.Hash]*types.Header{genesis.Hash(): genesis.Header()},
ownBlocks: map[common.Hash]*types.Block{genesis.Hash(): genesis},
ownReceipts: map[common.Hash]types.Receipts{genesis.Hash(): nil},
ownChainTd: map[common.Hash]*big.Int{genesis.Hash(): genesis.Difficulty()},
peerHashes: make(map[string][]common.Hash),
peerHeaders: make(map[string]map[common.Hash]*types.Header),
peerBlocks: make(map[string]map[common.Hash]*types.Block),
peerReceipts: make(map[string]map[common.Hash]types.Receipts),
peerChainTds: make(map[string]map[common.Hash]*big.Int),
ownHashes: []common.Hash{genesis.Hash()},
ownHeaders: map[common.Hash]*types.Header{genesis.Hash(): genesis.Header()},
ownBlocks: map[common.Hash]*types.Block{genesis.Hash(): genesis},
ownReceipts: map[common.Hash]types.Receipts{genesis.Hash(): nil},
ownChainTd: map[common.Hash]*big.Int{genesis.Hash(): genesis.Difficulty()},
peerHashes: make(map[string][]common.Hash),
peerHeaders: make(map[string]map[common.Hash]*types.Header),
peerBlocks: make(map[string]map[common.Hash]*types.Block),
peerReceipts: make(map[string]map[common.Hash]types.Receipts),
peerChainTds: make(map[string]map[common.Hash]*big.Int),
peerMissingStates: make(map[string]map[common.Hash]bool),
}
tester.stateDb, _ = ethdb.NewMemDatabase()
tester.stateDb.Put(genesis.Root().Bytes(), []byte{0x00})
@ -167,6 +179,12 @@ func newTester() *downloadTester {
return tester
}

// terminate aborts any operations on the embedded downloader and releases all
// held resources.
func (dl *downloadTester) terminate() {
dl.downloader.Terminate()
}

// sync starts synchronizing with a remote peer, blocking until it completes.
func (dl *downloadTester) sync(id string, td *big.Int, mode SyncMode) error {
dl.lock.RLock()
@ -179,7 +197,17 @@ func (dl *downloadTester) sync(id string, td *big.Int, mode SyncMode) error {
}
}
dl.lock.RUnlock()
return dl.downloader.synchronise(id, hash, td, mode)

// Synchronise with the chosen peer and ensure proper cleanup afterwards
err := dl.downloader.synchronise(id, hash, td, mode)
select {
case <-dl.downloader.cancelCh:
// Ok, downloader fully cancelled after sync cycle
default:
// Downloader is still accepting packets, can block a peer up
panic("downloader active post sync cycle") // panic will be caught by tester
}
return err
}

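The test harness's sync above uses a select with a default case to assert that the downloader's cancel channel is already closed once a sync cycle ends. A standard-library sketch of that non-blocking check, with illustrative names:

```go
package main

import "fmt"

// cancelled reports whether ch is closed without ever blocking: receiving
// from a closed channel succeeds immediately, otherwise the default case fires.
func cancelled(ch chan struct{}) bool {
	select {
	case <-ch:
		return true
	default:
		return false
	}
}

func main() {
	ch := make(chan struct{})
	fmt.Println(cancelled(ch)) // false: still active
	close(ch)
	fmt.Println(cancelled(ch)) // true: fully cancelled
}
```
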
// hasHeader checks if a header is present in the testers canonical chain.
|
||||
@ -389,6 +417,7 @@ func (dl *downloadTester) newSlowPeer(id string, version int, hashes []common.Ha
|
||||
dl.peerBlocks[id] = make(map[common.Hash]*types.Block)
|
||||
dl.peerReceipts[id] = make(map[common.Hash]types.Receipts)
|
||||
dl.peerChainTds[id] = make(map[common.Hash]*big.Int)
|
||||
dl.peerMissingStates[id] = make(map[common.Hash]bool)
|
||||
|
||||
genesis := hashes[len(hashes)-1]
|
||||
if header := headers[genesis]; header != nil {
|
||||
@ -551,8 +580,8 @@ func (dl *downloadTester) peerGetAbsHeadersFn(id string, delay time.Duration) fu
|
||||
hashes := dl.peerHashes[id]
|
||||
headers := dl.peerHeaders[id]
|
||||
result := make([]*types.Header, 0, amount)
|
||||
for i := 0; i < amount && len(hashes)-int(origin)-1-i >= 0; i++ {
|
||||
if header, ok := headers[hashes[len(hashes)-int(origin)-1-i]]; ok {
|
||||
for i := 0; i < amount && len(hashes)-int(origin)-1-i*(skip+1) >= 0; i++ {
|
||||
if header, ok := headers[hashes[len(hashes)-int(origin)-1-i*(skip+1)]]; ok {
|
||||
result = append(result, header)
|
||||
}
|
||||
}
|
||||
@ -629,7 +658,9 @@ func (dl *downloadTester) peerGetNodeDataFn(id string, delay time.Duration) func
|
||||
results := make([][]byte, 0, len(hashes))
|
||||
for _, hash := range hashes {
|
||||
if data, err := testdb.Get(hash.Bytes()); err == nil {
|
||||
results = append(results, data)
|
||||
if !dl.peerMissingStates[id][hash] {
|
||||
results = append(results, data)
|
||||
}
|
||||
}
|
||||
}
|
||||
go dl.downloader.DeliverNodeData(id, results)
|
||||
@ -712,9 +743,11 @@ func testCanonicalSynchronisation(t *testing.T, protocol int, mode SyncMode) {
|
||||
|
||||
// Create a small enough block chain to download
|
||||
targetBlocks := blockCacheLimit - 15
|
||||
hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil)
|
||||
hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil, false)
|
||||
|
||||
tester := newTester()
|
||||
defer tester.terminate()
|
||||
|
||||
tester.newPeer("peer", protocol, hashes, headers, blocks, receipts)
|
||||
|
||||
// Synchronise with the peer and make sure all relevant data was retrieved
|
||||
@ -736,9 +769,11 @@ func TestThrottling64Fast(t *testing.T) { testThrottling(t, 64, FastSync) }
|
||||
func testThrottling(t *testing.T, protocol int, mode SyncMode) {
|
||||
// Create a long block chain to download and the tester
|
||||
targetBlocks := 8 * blockCacheLimit
|
||||
hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil)
|
||||
hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil, false)
|
||||
|
||||
tester := newTester()
|
||||
defer tester.terminate()
|
||||
|
||||
tester.newPeer("peer", protocol, hashes, headers, blocks, receipts)
|
||||
|
||||
// Wrap the importer to allow stepping
|
||||
@ -810,22 +845,24 @@ func testThrottling(t *testing.T, protocol int, mode SyncMode) {
|
||||
// Tests that simple synchronization against a forked chain works correctly. In
|
||||
// this test common ancestor lookup should *not* be short circuited, and a full
|
||||
// binary search should be executed.
|
||||
func TestForkedSynchronisation61(t *testing.T) { testForkedSynchronisation(t, 61, FullSync) }
|
||||
func TestForkedSynchronisation62(t *testing.T) { testForkedSynchronisation(t, 62, FullSync) }
|
||||
func TestForkedSynchronisation63Full(t *testing.T) { testForkedSynchronisation(t, 63, FullSync) }
|
||||
func TestForkedSynchronisation63Fast(t *testing.T) { testForkedSynchronisation(t, 63, FastSync) }
|
||||
func TestForkedSynchronisation64Full(t *testing.T) { testForkedSynchronisation(t, 64, FullSync) }
|
||||
func TestForkedSynchronisation64Fast(t *testing.T) { testForkedSynchronisation(t, 64, FastSync) }
|
||||
func TestForkedSynchronisation64Light(t *testing.T) { testForkedSynchronisation(t, 64, LightSync) }
func TestForkedSync61(t *testing.T) { testForkedSync(t, 61, FullSync) }
func TestForkedSync62(t *testing.T) { testForkedSync(t, 62, FullSync) }
func TestForkedSync63Full(t *testing.T) { testForkedSync(t, 63, FullSync) }
func TestForkedSync63Fast(t *testing.T) { testForkedSync(t, 63, FastSync) }
func TestForkedSync64Full(t *testing.T) { testForkedSync(t, 64, FullSync) }
func TestForkedSync64Fast(t *testing.T) { testForkedSync(t, 64, FastSync) }
func TestForkedSync64Light(t *testing.T) { testForkedSync(t, 64, LightSync) }

func testForkedSynchronisation(t *testing.T, protocol int, mode SyncMode) {
func testForkedSync(t *testing.T, protocol int, mode SyncMode) {
t.Parallel()

// Create a long enough forked chain
common, fork := MaxHashFetch, 2*MaxHashFetch
hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB := makeChainFork(common+fork, fork, genesis, nil)
hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB := makeChainFork(common+fork, fork, genesis, nil, true)

tester := newTester()
defer tester.terminate()

tester.newPeer("fork A", protocol, hashesA, headersA, blocksA, receiptsA)
tester.newPeer("fork B", protocol, hashesB, headersB, blocksB, receiptsB)

@@ -842,6 +879,42 @@ func testForkedSynchronisation(t *testing.T, protocol int, mode SyncMode) {
assertOwnForkedChain(t, tester, common+1, []int{common + fork + 1, common + fork + 1})
}

// Tests that synchronising against a much shorter but much heavier fork works
// correctly and is not dropped.
func TestHeavyForkedSync61(t *testing.T) { testHeavyForkedSync(t, 61, FullSync) }
func TestHeavyForkedSync62(t *testing.T) { testHeavyForkedSync(t, 62, FullSync) }
func TestHeavyForkedSync63Full(t *testing.T) { testHeavyForkedSync(t, 63, FullSync) }
func TestHeavyForkedSync63Fast(t *testing.T) { testHeavyForkedSync(t, 63, FastSync) }
func TestHeavyForkedSync64Full(t *testing.T) { testHeavyForkedSync(t, 64, FullSync) }
func TestHeavyForkedSync64Fast(t *testing.T) { testHeavyForkedSync(t, 64, FastSync) }
func TestHeavyForkedSync64Light(t *testing.T) { testHeavyForkedSync(t, 64, LightSync) }

func testHeavyForkedSync(t *testing.T, protocol int, mode SyncMode) {
t.Parallel()

// Create a long enough forked chain
common, fork := MaxHashFetch, 4*MaxHashFetch
hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB := makeChainFork(common+fork, fork, genesis, nil, false)

tester := newTester()
defer tester.terminate()

tester.newPeer("light", protocol, hashesA, headersA, blocksA, receiptsA)
tester.newPeer("heavy", protocol, hashesB[fork/2:], headersB, blocksB, receiptsB)

// Synchronise with the peer and make sure all blocks were retrieved
if err := tester.sync("light", nil, mode); err != nil {
t.Fatalf("failed to synchronise blocks: %v", err)
}
assertOwnChain(t, tester, common+fork+1)

// Synchronise with the second peer and make sure that fork is pulled too
if err := tester.sync("heavy", nil, mode); err != nil {
t.Fatalf("failed to synchronise blocks: %v", err)
}
assertOwnForkedChain(t, tester, common+1, []int{common + fork + 1, common + fork/2 + 1})
}
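All of the chain-builder call sites in this diff gain a trailing boolean: makeChain(..., false) and makeChainFork(..., true/false). Judging from how the tests use it, the flag appears to choose between an evenly weighted fork and a shorter-but-heavier variant; the parameter name in the sketch below is only an illustrative guess, not taken from the diff.

// Hypothetical signature sketch (the "balanced" name is an assumption):
// func makeChainFork(n, f int, parent *types.Block, seed []byte, balanced bool) (...)
//
// balanced == true  -> both fork branches carry comparable difficulty (plain fork tests)
// balanced == false -> one branch is shorter but heavier (the *HeavyForkedSync tests above)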
// Tests that an inactive downloader will not accept incoming hashes and blocks.
func TestInactiveDownloader61(t *testing.T) {
t.Parallel()
@@ -856,11 +929,85 @@ func TestInactiveDownloader61(t *testing.T) {
}
}

// Tests that chain forks are contained within a certain interval of the current
// chain head, ensuring that malicious peers cannot waste resources by feeding
// long dead chains.
func TestBoundedForkedSync61(t *testing.T) { testBoundedForkedSync(t, 61, FullSync) }
func TestBoundedForkedSync62(t *testing.T) { testBoundedForkedSync(t, 62, FullSync) }
func TestBoundedForkedSync63Full(t *testing.T) { testBoundedForkedSync(t, 63, FullSync) }
func TestBoundedForkedSync63Fast(t *testing.T) { testBoundedForkedSync(t, 63, FastSync) }
func TestBoundedForkedSync64Full(t *testing.T) { testBoundedForkedSync(t, 64, FullSync) }
func TestBoundedForkedSync64Fast(t *testing.T) { testBoundedForkedSync(t, 64, FastSync) }
func TestBoundedForkedSync64Light(t *testing.T) { testBoundedForkedSync(t, 64, LightSync) }

func testBoundedForkedSync(t *testing.T, protocol int, mode SyncMode) {
t.Parallel()

// Create a long enough forked chain
common, fork := 13, int(MaxForkAncestry+17)
hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB := makeChainFork(common+fork, fork, genesis, nil, true)

tester := newTester()
defer tester.terminate()

tester.newPeer("original", protocol, hashesA, headersA, blocksA, receiptsA)
tester.newPeer("rewriter", protocol, hashesB, headersB, blocksB, receiptsB)

// Synchronise with the peer and make sure all blocks were retrieved
if err := tester.sync("original", nil, mode); err != nil {
t.Fatalf("failed to synchronise blocks: %v", err)
}
assertOwnChain(t, tester, common+fork+1)

// Synchronise with the second peer and ensure that the fork is rejected for being too old
if err := tester.sync("rewriter", nil, mode); err != errInvalidAncestor {
t.Fatalf("sync failure mismatch: have %v, want %v", err, errInvalidAncestor)
}
}

// Tests that chain forks are contained within a certain interval of the current
// chain head for short but heavy forks too. These are a bit special because they
// take different ancestor lookup paths.
func TestBoundedHeavyForkedSync61(t *testing.T) { testBoundedHeavyForkedSync(t, 61, FullSync) }
func TestBoundedHeavyForkedSync62(t *testing.T) { testBoundedHeavyForkedSync(t, 62, FullSync) }
func TestBoundedHeavyForkedSync63Full(t *testing.T) { testBoundedHeavyForkedSync(t, 63, FullSync) }
func TestBoundedHeavyForkedSync63Fast(t *testing.T) { testBoundedHeavyForkedSync(t, 63, FastSync) }
func TestBoundedHeavyForkedSync64Full(t *testing.T) { testBoundedHeavyForkedSync(t, 64, FullSync) }
func TestBoundedHeavyForkedSync64Fast(t *testing.T) { testBoundedHeavyForkedSync(t, 64, FastSync) }
func TestBoundedHeavyForkedSync64Light(t *testing.T) { testBoundedHeavyForkedSync(t, 64, LightSync) }

func testBoundedHeavyForkedSync(t *testing.T, protocol int, mode SyncMode) {
t.Parallel()

// Create a long enough forked chain
common, fork := 13, int(MaxForkAncestry+17)
hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB := makeChainFork(common+fork, fork, genesis, nil, false)

tester := newTester()
defer tester.terminate()

tester.newPeer("original", protocol, hashesA, headersA, blocksA, receiptsA)
tester.newPeer("heavy-rewriter", protocol, hashesB[MaxForkAncestry-17:], headersB, blocksB, receiptsB) // Root the fork below the ancestor limit

// Synchronise with the peer and make sure all blocks were retrieved
if err := tester.sync("original", nil, mode); err != nil {
t.Fatalf("failed to synchronise blocks: %v", err)
}
assertOwnChain(t, tester, common+fork+1)

// Synchronise with the second peer and ensure that the fork is rejected for being too old
if err := tester.sync("heavy-rewriter", nil, mode); err != errInvalidAncestor {
t.Fatalf("sync failure mismatch: have %v, want %v", err, errInvalidAncestor)
}
}
// Tests that an inactive downloader will not accept incoming block headers and
// bodies.
func TestInactiveDownloader62(t *testing.T) {
t.Parallel()

tester := newTester()
defer tester.terminate()

// Check that neither block headers nor bodies are accepted
if err := tester.downloader.DeliverHeaders("bad peer", []*types.Header{}); err != errNoSyncActive {
@@ -875,7 +1022,9 @@ func TestInactiveDownloader62(t *testing.T) {
// bodies and receipts.
func TestInactiveDownloader63(t *testing.T) {
t.Parallel()

tester := newTester()
defer tester.terminate()

// Check that neither block headers nor bodies are accepted
if err := tester.downloader.DeliverHeaders("bad peer", []*types.Header{}); err != errNoSyncActive {
@@ -909,9 +1058,11 @@ func testCancel(t *testing.T, protocol int, mode SyncMode) {
if targetBlocks >= MaxHeaderFetch {
targetBlocks = MaxHeaderFetch - 15
}
hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil)
hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil, false)

tester := newTester()
defer tester.terminate()

tester.newPeer("peer", protocol, hashes, headers, blocks, receipts)

// Make sure canceling works with a pristine downloader
@@ -944,9 +1095,11 @@ func testMultiSynchronisation(t *testing.T, protocol int, mode SyncMode) {
// Create various peers with various parts of the chain
targetPeers := 8
targetBlocks := targetPeers*blockCacheLimit - 15
hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil)
hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil, false)

tester := newTester()
defer tester.terminate()

for i := 0; i < targetPeers; i++ {
id := fmt.Sprintf("peer #%d", i)
tester.newPeer(id, protocol, hashes[i*blockCacheLimit:], headers, blocks, receipts)
@@ -972,10 +1125,12 @@ func testMultiProtoSync(t *testing.T, protocol int, mode SyncMode) {

// Create a small enough block chain to download
targetBlocks := blockCacheLimit - 15
hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil)
hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil, false)

// Create peers of every type
tester := newTester()
defer tester.terminate()

tester.newPeer("peer 61", 61, hashes, nil, blocks, nil)
tester.newPeer("peer 62", 62, hashes, headers, blocks, nil)
tester.newPeer("peer 63", 63, hashes, headers, blocks, receipts)
@@ -1010,9 +1165,11 @@ func testEmptyShortCircuit(t *testing.T, protocol int, mode SyncMode) {

// Create a block chain to download
targetBlocks := 2*blockCacheLimit - 15
hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil)
hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil, false)

tester := newTester()
defer tester.terminate()

tester.newPeer("peer", protocol, hashes, headers, blocks, receipts)

// Instrument the downloader to signal body requests
@@ -1063,9 +1220,10 @@ func testMissingHeaderAttack(t *testing.T, protocol int, mode SyncMode) {

// Create a small enough block chain to download
targetBlocks := blockCacheLimit - 15
hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil)
hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil, false)

tester := newTester()
defer tester.terminate()

// Attempt a full sync with an attacker feeding gapped headers
tester.newPeer("attack", protocol, hashes, headers, blocks, receipts)
@@ -1095,9 +1253,10 @@ func TestShiftedHeaderAttack64Light(t *testing.T) { testShiftedHeaderAttack(t, 6
func testShiftedHeaderAttack(t *testing.T, protocol int, mode SyncMode) {
// Create a small enough block chain to download
targetBlocks := blockCacheLimit - 15
hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil)
hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil, false)

tester := newTester()
defer tester.terminate()

// Attempt a full sync with an attacker feeding shifted headers
tester.newPeer("attack", protocol, hashes, headers, blocks, receipts)
@@ -1126,9 +1285,10 @@ func TestInvalidHeaderRollback64Light(t *testing.T) { testInvalidHeaderRollback(
func testInvalidHeaderRollback(t *testing.T, protocol int, mode SyncMode) {
// Create a small enough block chain to download
targetBlocks := 3*fsHeaderSafetyNet + fsMinFullBlocks
hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil)
hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil, false)

tester := newTester()
defer tester.terminate()

// Attempt to sync with an attacker that feeds junk during the fast sync phase.
// This should result in the last fsHeaderSafetyNet headers being rolled back.
@@ -1147,6 +1307,7 @@ func testInvalidHeaderRollback(t *testing.T, protocol int, mode SyncMode) {
// rolled back, and also the pivot point being reverted to a non-block status.
tester.newPeer("block-attack", protocol, hashes, headers, blocks, receipts)
missing = 3*fsHeaderSafetyNet + MaxHeaderFetch + 1
delete(tester.peerHeaders["fast-attack"], hashes[len(hashes)-missing]) // Make sure the fast-attacker doesn't fill in
delete(tester.peerHeaders["block-attack"], hashes[len(hashes)-missing])

if err := tester.sync("block-attack", nil, mode); err == nil {
@@ -1166,7 +1327,7 @@ func testInvalidHeaderRollback(t *testing.T, protocol int, mode SyncMode) {
tester.newPeer("withhold-attack", protocol, hashes, headers, blocks, receipts)
missing = 3*fsHeaderSafetyNet + MaxHeaderFetch + 1

tester.downloader.noFast = false
tester.downloader.fsPivotFails = 0
tester.downloader.syncInitHook = func(uint64, uint64) {
for i := missing; i <= len(hashes); i++ {
delete(tester.peerHeaders["withhold-attack"], hashes[len(hashes)-i])
@@ -1185,6 +1346,8 @@ func testInvalidHeaderRollback(t *testing.T, protocol int, mode SyncMode) {
t.Errorf("fast sync pivot block #%d not rolled back", head)
}
}
tester.downloader.fsPivotFails = fsCriticalTrials

// Synchronise with the valid peer and make sure sync succeeds. Since the last
// rollback should also disable fast syncing for this process, verify that we
// did a fresh full sync. Note, we can't assert anything about the receipts
@@ -1217,9 +1380,11 @@ func testHighTDStarvationAttack(t *testing.T, protocol int, mode SyncMode) {
t.Parallel()

tester := newTester()
hashes, headers, blocks, receipts := makeChain(0, 0, genesis, nil)
defer tester.terminate()

hashes, headers, blocks, receipts := makeChain(0, 0, genesis, nil, false)
tester.newPeer("attack", protocol, []common.Hash{hashes[0]}, headers, blocks, receipts)

if err := tester.sync("attack", big.NewInt(1000000), mode); err != errStallingPeer {
t.Fatalf("synchronisation error mismatch: have %v, want %v", err, errStallingPeer)
}
@@ -1237,29 +1402,33 @@ func testBlockHeaderAttackerDropping(t *testing.T, protocol int) {
result error
drop bool
}{
{nil, false}, // Sync succeeded, all is well
{errBusy, false}, // Sync is already in progress, no problem
{errUnknownPeer, false}, // Peer is unknown, was already dropped, don't double drop
{errBadPeer, true}, // Peer was deemed bad for some reason, drop it
{errStallingPeer, true}, // Peer was detected to be stalling, drop it
{errNoPeers, false}, // No peers to download from, soft race, no issue
{errTimeout, true}, // No hashes received in due time, drop the peer
{errEmptyHashSet, true}, // No hashes were returned as a response, drop as it's a dead end
{errEmptyHeaderSet, true}, // No headers were returned as a response, drop as it's a dead end
{errPeersUnavailable, true}, // Nobody had the advertised blocks, drop the advertiser
{errInvalidChain, true}, // Hash chain was detected as invalid, definitely drop
{errInvalidBlock, false}, // A bad peer was detected, but not the sync origin
{errInvalidBody, false}, // A bad peer was detected, but not the sync origin
{errInvalidReceipt, false}, // A bad peer was detected, but not the sync origin
{errCancelHashFetch, false}, // Synchronisation was canceled, origin may be innocent, don't drop
{errCancelBlockFetch, false}, // Synchronisation was canceled, origin may be innocent, don't drop
{errCancelHeaderFetch, false}, // Synchronisation was canceled, origin may be innocent, don't drop
{errCancelBodyFetch, false}, // Synchronisation was canceled, origin may be innocent, don't drop
{errCancelReceiptFetch, false}, // Synchronisation was canceled, origin may be innocent, don't drop
{errCancelProcessing, false}, // Synchronisation was canceled, origin may be innocent, don't drop
{nil, false}, // Sync succeeded, all is well
{errBusy, false}, // Sync is already in progress, no problem
{errUnknownPeer, false}, // Peer is unknown, was already dropped, don't double drop
{errBadPeer, true}, // Peer was deemed bad for some reason, drop it
{errStallingPeer, true}, // Peer was detected to be stalling, drop it
{errNoPeers, false}, // No peers to download from, soft race, no issue
{errTimeout, true}, // No hashes received in due time, drop the peer
{errEmptyHashSet, true}, // No hashes were returned as a response, drop as it's a dead end
{errEmptyHeaderSet, true}, // No headers were returned as a response, drop as it's a dead end
{errPeersUnavailable, true}, // Nobody had the advertised blocks, drop the advertiser
{errInvalidAncestor, true}, // Agreed upon ancestor is not acceptable, drop the chain rewriter
{errInvalidChain, true}, // Hash chain was detected as invalid, definitely drop
{errInvalidBlock, false}, // A bad peer was detected, but not the sync origin
{errInvalidBody, false}, // A bad peer was detected, but not the sync origin
{errInvalidReceipt, false}, // A bad peer was detected, but not the sync origin
{errCancelHashFetch, false}, // Synchronisation was canceled, origin may be innocent, don't drop
{errCancelBlockFetch, false}, // Synchronisation was canceled, origin may be innocent, don't drop
{errCancelHeaderFetch, false}, // Synchronisation was canceled, origin may be innocent, don't drop
{errCancelBodyFetch, false}, // Synchronisation was canceled, origin may be innocent, don't drop
{errCancelReceiptFetch, false}, // Synchronisation was canceled, origin may be innocent, don't drop
{errCancelHeaderProcessing, false}, // Synchronisation was canceled, origin may be innocent, don't drop
{errCancelContentProcessing, false}, // Synchronisation was canceled, origin may be innocent, don't drop
}
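Condensed, the table above encodes a single policy question: which synchronisation failures implicate the peer chosen as the sync origin. A minimal sketch of that decision, derived only from the entries marked true:

func shouldDropOrigin(err error) bool {
	switch err {
	case errBadPeer, errStallingPeer, errTimeout, errEmptyHashSet, errEmptyHeaderSet,
		errPeersUnavailable, errInvalidAncestor, errInvalidChain:
		return true // the origin peer misbehaved or fed an unusable chain
	default:
		return false // busy, cancelled or third-party failures leave the origin connected
	}
}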
// Run the tests and check disconnection status
tester := newTester()
defer tester.terminate()

for i, tt := range tests {
// Register a new peer and ensure its presence
id := fmt.Sprintf("test %d", i)
@@ -1294,13 +1463,15 @@ func testSyncProgress(t *testing.T, protocol int, mode SyncMode) {
// Create a small enough block chain to download
targetBlocks := blockCacheLimit - 15
hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil)
hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil, false)

// Set a sync init hook to catch progress changes
starting := make(chan struct{})
progress := make(chan struct{})

tester := newTester()
defer tester.terminate()

tester.downloader.syncInitHook = func(origin, latest uint64) {
starting <- struct{}{}
<-progress
@@ -1366,13 +1537,15 @@ func testForkedSyncProgress(t *testing.T, protocol int, mode SyncMode) {

// Create a forked chain to simulate origin reversal
common, fork := MaxHashFetch, 2*MaxHashFetch
hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB := makeChainFork(common+fork, fork, genesis, nil)
hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB := makeChainFork(common+fork, fork, genesis, nil, true)

// Set a sync init hook to catch progress changes
starting := make(chan struct{})
progress := make(chan struct{})

tester := newTester()
defer tester.terminate()

tester.downloader.syncInitHook = func(origin, latest uint64) {
starting <- struct{}{}
<-progress
@@ -1441,13 +1614,15 @@ func testFailedSyncProgress(t *testing.T, protocol int, mode SyncMode) {

// Create a small enough block chain to download
targetBlocks := blockCacheLimit - 15
hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil)
hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil, false)

// Set a sync init hook to catch progress changes
starting := make(chan struct{})
progress := make(chan struct{})

tester := newTester()
defer tester.terminate()

tester.downloader.syncInitHook = func(origin, latest uint64) {
starting <- struct{}{}
<-progress
@@ -1517,13 +1692,15 @@ func testFakedSyncProgress(t *testing.T, protocol int, mode SyncMode) {

// Create a small block chain
targetBlocks := blockCacheLimit - 15
hashes, headers, blocks, receipts := makeChain(targetBlocks+3, 0, genesis, nil)
hashes, headers, blocks, receipts := makeChain(targetBlocks+3, 0, genesis, nil, false)

// Set a sync init hook to catch progress changes
starting := make(chan struct{})
progress := make(chan struct{})

tester := newTester()
defer tester.terminate()

tester.downloader.syncInitHook = func(origin, latest uint64) {
starting <- struct{}{}
<-progress
@@ -1590,7 +1767,7 @@ func TestDeliverHeadersHang64Light(t *testing.T) { testDeliverHeadersHang(t, 64,

func testDeliverHeadersHang(t *testing.T, protocol int, mode SyncMode) {
t.Parallel()
hashes, headers, blocks, receipts := makeChain(5, 0, genesis, nil)
hashes, headers, blocks, receipts := makeChain(5, 0, genesis, nil, false)
fakeHeads := []*types.Header{{}, {}, {}, {}}
for i := 0; i < 200; i++ {
tester := newTester()
@@ -1610,7 +1787,7 @@ func testDeliverHeadersHang(t *testing.T, protocol int, mode SyncMode) {
impl := tester.peerGetAbsHeadersFn("peer", 0)
go impl(from, count, skip, reverse)
// None of the extra deliveries should block.
timeout := time.After(5 * time.Second)
timeout := time.After(15 * time.Second)
for i := 0; i < cap(deliveriesDone); i++ {
select {
case <-deliveriesDone:
@@ -1623,5 +1800,48 @@ func testDeliverHeadersHang(t *testing.T, protocol int, mode SyncMode) {
if err := tester.sync("peer", nil, mode); err != nil {
t.Errorf("sync failed: %v", err)
}
tester.terminate()
}
}
// Tests that if fast sync aborts in the critical section, it can restart a few
// times before giving up.
func TestFastCriticalRestarts63(t *testing.T) { testFastCriticalRestarts(t, 63) }
func TestFastCriticalRestarts64(t *testing.T) { testFastCriticalRestarts(t, 64) }

func testFastCriticalRestarts(t *testing.T, protocol int) {
t.Parallel()

// Create a large enough blockchain to actually fast sync on
targetBlocks := fsMinFullBlocks + 2*fsPivotInterval - 15
hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil, false)

// Create a tester peer with the critical section state roots missing (force failures)
tester := newTester()
defer tester.terminate()

tester.newPeer("peer", protocol, hashes, headers, blocks, receipts)
for i := 0; i < fsPivotInterval; i++ {
tester.peerMissingStates["peer"][headers[hashes[fsMinFullBlocks+i]].Root] = true
}
// Synchronise with the peer a few times and make sure they fail until the retry limit
for i := 0; i < fsCriticalTrials; i++ {
// Attempt a sync and ensure it fails properly
if err := tester.sync("peer", nil, FastSync); err == nil {
t.Fatalf("failing fast sync succeeded: %v", err)
}
time.Sleep(500 * time.Millisecond) // Make sure no in-flight requests remain

// If it's the first failure, pivot should be locked => reenable all others to detect pivot changes
if i == 0 {
tester.lock.Lock()
tester.peerMissingStates["peer"] = map[common.Hash]bool{tester.downloader.fsPivotLock.Root: true}
tester.lock.Unlock()
}
}
// Retry limit exhausted, downloader will switch to full sync, should succeed
if err := tester.sync("peer", nil, FastSync); err != nil {
t.Fatalf("failed to synchronise blocks in slow sync: %v", err)
}
assertOwnChain(t, tester, targetBlocks+1)
}
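The test above exercises a bounded retry policy around the fast-sync pivot: each failed attempt inside the critical section counts towards fsCriticalTrials, and once the budget is spent the downloader is expected to finish with a full, block-by-block sync instead. A rough sketch of that shape, with syncOnce and fullSync as purely illustrative names:

for trial := 0; trial < fsCriticalTrials; trial++ {
	if err := syncOnce(); err == nil { // fast sync attempt over the locked pivot
		return nil
	}
}
return fullSync() // retry budget exhausted, fall back to full sync (illustrative only)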
@@ -23,6 +23,8 @@ import (
"errors"
"fmt"
"math"
"sort"
"strings"
"sync"
"sync/atomic"
"time"
@@ -31,8 +33,8 @@ import (
)

const (
maxLackingHashes = 4096 // Maximum number of entries allowed on the list or lacking items
throughputImpact = 0.1 // The impact a single measurement has on a peer's final throughput value.
maxLackingHashes = 4096 // Maximum number of entries allowed on the list or lacking items
measurementImpact = 0.1 // The impact a single measurement has on a peer's final throughput value.
)

// Hash and block fetchers belonging to eth/61 and below
@@ -58,15 +60,20 @@ type peer struct {
id string // Unique identifier of the peer
head common.Hash // Hash of the peers latest known block

headerIdle int32 // Current header activity state of the peer (idle = 0, active = 1)
blockIdle int32 // Current block activity state of the peer (idle = 0, active = 1)
receiptIdle int32 // Current receipt activity state of the peer (idle = 0, active = 1)
stateIdle int32 // Current node data activity state of the peer (idle = 0, active = 1)

headerThroughput float64 // Number of headers measured to be retrievable per second
blockThroughput float64 // Number of blocks (bodies) measured to be retrievable per second
receiptThroughput float64 // Number of receipts measured to be retrievable per second
stateThroughput float64 // Number of node data pieces measured to be retrievable per second

blockStarted time.Time // Time instance when the last block (body)fetch was started
rtt time.Duration // Request round trip time to track responsiveness (QoS)

headerStarted time.Time // Time instance when the last header fetch was started
blockStarted time.Time // Time instance when the last block (body) fetch was started
receiptStarted time.Time // Time instance when the last receipt fetch was started
stateStarted time.Time // Time instance when the last node data fetch was started
@@ -118,10 +125,12 @@ func (p *peer) Reset() {
p.lock.Lock()
defer p.lock.Unlock()

atomic.StoreInt32(&p.headerIdle, 0)
atomic.StoreInt32(&p.blockIdle, 0)
atomic.StoreInt32(&p.receiptIdle, 0)
atomic.StoreInt32(&p.stateIdle, 0)

p.headerThroughput = 0
p.blockThroughput = 0
p.receiptThroughput = 0
p.stateThroughput = 0
@@ -151,6 +160,24 @@ func (p *peer) Fetch61(request *fetchRequest) error {
return nil
}

// FetchHeaders sends a header retrieval request to the remote peer.
func (p *peer) FetchHeaders(from uint64, count int) error {
// Sanity check the protocol version
if p.version < 62 {
panic(fmt.Sprintf("header fetch [eth/62+] requested on eth/%d", p.version))
}
// Short circuit if the peer is already fetching
if !atomic.CompareAndSwapInt32(&p.headerIdle, 0, 1) {
return errAlreadyFetching
}
p.headerStarted = time.Now()

// Issue the header retrieval request (absolute upwards without gaps)
go p.getAbsHeaders(from, count, 0, false)

return nil
}

// FetchBodies sends a block body retrieval request to the remote peer.
func (p *peer) FetchBodies(request *fetchRequest) error {
// Sanity check the protocol version
@@ -217,6 +244,13 @@ func (p *peer) FetchNodeData(request *fetchRequest) error {
return nil
}

// SetHeadersIdle sets the peer to idle, allowing it to execute new header retrieval
// requests. Its estimated header retrieval throughput is updated with that measured
// just now.
func (p *peer) SetHeadersIdle(delivered int) {
p.setIdle(p.headerStarted, delivered, &p.headerThroughput, &p.headerIdle)
}

// SetBlocksIdle sets the peer to idle, allowing it to execute new block retrieval
// requests. Its estimated block retrieval throughput is updated with that measured
// just now.
@@ -260,35 +294,47 @@ func (p *peer) setIdle(started time.Time, delivered int, throughput *float64, id
return
}
// Otherwise update the throughput with a new measurement
measured := float64(delivered) / (float64(time.Since(started)+1) / float64(time.Second)) // +1 (ns) to ensure non-zero divisor
*throughput = (1-throughputImpact)*(*throughput) + throughputImpact*measured
elapsed := time.Since(started) + 1 // +1 (ns) to ensure non-zero divisor
measured := float64(delivered) / (float64(elapsed) / float64(time.Second))

*throughput = (1-measurementImpact)*(*throughput) + measurementImpact*measured
p.rtt = time.Duration((1-measurementImpact)*float64(p.rtt) + measurementImpact*float64(elapsed))
}
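The replacement lines above fold every delivery into two exponentially weighted estimates, using the same measurementImpact weight for throughput and for the newly tracked RTT. A small self-contained sketch of the update rule and what it does to a single outlier measurement:

// ewma mirrors the smoothing used by setIdle: each sample moves the estimate
// only measurementImpact (10%) of the way towards the new measurement.
func ewma(old, sample float64) float64 {
	const measurementImpact = 0.1
	return (1-measurementImpact)*old + measurementImpact*sample
}

// Example: a peer estimated at 100 headers/s that suddenly delivers at 120 headers/s
// is re-estimated at ewma(100, 120) == 102 headers/s, so one fast (or slow) response
// nudges the figure rather than replacing it.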
// HeaderCapacity retrieves the peers header download allowance based on its
// previously discovered throughput.
func (p *peer) HeaderCapacity(targetRTT time.Duration) int {
p.lock.RLock()
defer p.lock.RUnlock()

return int(math.Min(1+math.Max(1, p.headerThroughput*float64(targetRTT)/float64(time.Second)), float64(MaxHeaderFetch)))
}

// BlockCapacity retrieves the peers block download allowance based on its
// previously discovered throughput.
func (p *peer) BlockCapacity() int {
func (p *peer) BlockCapacity(targetRTT time.Duration) int {
p.lock.RLock()
defer p.lock.RUnlock()

return int(math.Max(1, math.Min(p.blockThroughput*float64(blockTargetRTT)/float64(time.Second), float64(MaxBlockFetch))))
return int(math.Min(1+math.Max(1, p.blockThroughput*float64(targetRTT)/float64(time.Second)), float64(MaxBlockFetch)))
}

// ReceiptCapacity retrieves the peers receipt download allowance based on its
// previously discovered throughput.
func (p *peer) ReceiptCapacity() int {
func (p *peer) ReceiptCapacity(targetRTT time.Duration) int {
p.lock.RLock()
defer p.lock.RUnlock()

return int(math.Max(1, math.Min(p.receiptThroughput*float64(receiptTargetRTT)/float64(time.Second), float64(MaxReceiptFetch))))
return int(math.Min(1+math.Max(1, p.receiptThroughput*float64(targetRTT)/float64(time.Second)), float64(MaxReceiptFetch)))
}

// NodeDataCapacity retrieves the peers state download allowance based on its
// previously discovered throughput.
func (p *peer) NodeDataCapacity() int {
func (p *peer) NodeDataCapacity(targetRTT time.Duration) int {
p.lock.RLock()
defer p.lock.RUnlock()

return int(math.Max(1, math.Min(p.stateThroughput*float64(stateTargetRTT)/float64(time.Second), float64(MaxStateFetch))))
return int(math.Min(1+math.Max(1, p.stateThroughput*float64(targetRTT)/float64(time.Second)), float64(MaxStateFetch)))
}
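All four capacity methods now share one shape: scale the measured per-second throughput by the caller-supplied target round trip time, keep at least one item, and clamp at the protocol's per-request maximum. A hedged stand-alone version, with math and time assumed imported and the maximum passed in explicitly since the real code uses MaxHeaderFetch, MaxBlockFetch and friends:

// capacity returns how many items to request from a peer so that the response
// should arrive within targetRTT, given its measured throughput.
func capacity(throughput float64, targetRTT time.Duration, max int) int {
	return int(math.Min(1+math.Max(1, throughput*float64(targetRTT)/float64(time.Second)), float64(max)))
}

// Example: 50 headers/s and a 3s target RTT yields min(1+max(1, 150), max) = 151
// headers per request, unless the protocol cap is lower.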
// MarkLacking appends a new entity to the set of items (blocks, receipts, states)
@@ -322,15 +368,17 @@ func (p *peer) String() string {
p.lock.RLock()
defer p.lock.RUnlock()

return fmt.Sprintf("Peer %s [%s]", p.id,
fmt.Sprintf("blocks %3.2f/s, ", p.blockThroughput)+
fmt.Sprintf("receipts %3.2f/s, ", p.receiptThroughput)+
fmt.Sprintf("states %3.2f/s, ", p.stateThroughput)+
fmt.Sprintf("lacking %4d", len(p.lacking)),
)
return fmt.Sprintf("Peer %s [%s]", p.id, strings.Join([]string{
fmt.Sprintf("hs %3.2f/s", p.headerThroughput),
fmt.Sprintf("bs %3.2f/s", p.blockThroughput),
fmt.Sprintf("rs %3.2f/s", p.receiptThroughput),
fmt.Sprintf("ss %3.2f/s", p.stateThroughput),
fmt.Sprintf("miss %4d", len(p.lacking)),
fmt.Sprintf("rtt %v", p.rtt),
}, ", "))
}

// peerSet represents the collection of active peer participating in the block
// peerSet represents the collection of active peer participating in the chain
// download procedure.
type peerSet struct {
peers map[string]*peer
@@ -359,9 +407,13 @@ func (ps *peerSet) Reset() {
// peer is already known.
//
// The method also sets the starting throughput values of the new peer to the
// average of all existing peers, to give it a realistic change of being used
// average of all existing peers, to give it a realistic chance of being used
// for data retrievals.
func (ps *peerSet) Register(p *peer) error {
// Retrieve the current median RTT as a sane default
p.rtt = ps.medianRTT()

// Register the new peer with some meaningful defaults
ps.lock.Lock()
defer ps.lock.Unlock()
@@ -369,15 +421,17 @@ func (ps *peerSet) Register(p *peer) error {
return errAlreadyRegistered
}
if len(ps.peers) > 0 {
p.blockThroughput, p.receiptThroughput, p.stateThroughput = 0, 0, 0
p.headerThroughput, p.blockThroughput, p.receiptThroughput, p.stateThroughput = 0, 0, 0, 0

for _, peer := range ps.peers {
peer.lock.RLock()
p.headerThroughput += peer.headerThroughput
p.blockThroughput += peer.blockThroughput
p.receiptThroughput += peer.receiptThroughput
p.stateThroughput += peer.stateThroughput
peer.lock.RUnlock()
}
p.headerThroughput /= float64(len(ps.peers))
p.blockThroughput /= float64(len(ps.peers))
p.receiptThroughput /= float64(len(ps.peers))
p.stateThroughput /= float64(len(ps.peers))
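A freshly registered peer therefore inherits the mean of the already connected peers rather than starting from zero, which keeps its first requests reasonably sized. For example, with three peers measured at 120, 80 and 40 headers/s:

seed := (120.0 + 80.0 + 40.0) / 3 // = 80 headers/s assigned to the newcomer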
@@ -441,6 +495,20 @@ func (ps *peerSet) BlockIdlePeers() ([]*peer, int) {
return ps.idlePeers(61, 61, idle, throughput)
}

// HeaderIdlePeers retrieves a flat list of all the currently header-idle peers
// within the active peer set, ordered by their reputation.
func (ps *peerSet) HeaderIdlePeers() ([]*peer, int) {
idle := func(p *peer) bool {
return atomic.LoadInt32(&p.headerIdle) == 0
}
throughput := func(p *peer) float64 {
p.lock.RLock()
defer p.lock.RUnlock()
return p.headerThroughput
}
return ps.idlePeers(62, 64, idle, throughput)
}

// BodyIdlePeers retrieves a flat list of all the currently body-idle peers within
// the active peer set, ordered by their reputation.
func (ps *peerSet) BodyIdlePeers() ([]*peer, int) {
@@ -508,3 +576,34 @@ func (ps *peerSet) idlePeers(minProtocol, maxProtocol int, idleCheck func(*peer)
}
return idle, total
}
// medianRTT returns the median RTT of the peerset, considering only the tuning
// peers if there are more peers available.
func (ps *peerSet) medianRTT() time.Duration {
// Gather all the currently measured round trip times
ps.lock.RLock()
defer ps.lock.RUnlock()

rtts := make([]float64, 0, len(ps.peers))
for _, p := range ps.peers {
p.lock.RLock()
rtts = append(rtts, float64(p.rtt))
p.lock.RUnlock()
}
sort.Float64s(rtts)

median := rttMaxEstimate
if qosTuningPeers <= len(rtts) {
median = time.Duration(rtts[qosTuningPeers/2]) // Median of our tuning peers
} else if len(rtts) > 0 {
median = time.Duration(rtts[len(rtts)/2]) // Median of our connected peers (maintain even like this some baseline qos)
}
// Restrict the RTT into some QoS defaults, irrelevant of true RTT
if median < rttMinEstimate {
median = rttMinEstimate
}
if median > rttMaxEstimate {
median = rttMaxEstimate
}
return median
}
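A worked example for medianRTT: with five peers reporting round trip times of 300ms, 500ms, 700ms, 2s and 9s, and qosTuningPeers assumed to be 5, the middle of the sorted samples is taken and then clamped into the [rttMinEstimate, rttMaxEstimate] band, so a single 9s straggler cannot drag the QoS target out of range.

rtts := []float64{0.3, 0.5, 0.7, 2.0, 9.0} // seconds, already sorted
median := rtts[len(rtts)/2]                // 0.7s becomes the default RTT handed to newly registered peers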
@@ -40,7 +40,7 @@ import (

var (
blockCacheLimit = 8192 // Maximum number of blocks to cache before throttling the download
maxInFlightStates = 4096 // Maximum number of state downloads to allow concurrently
maxInFlightStates = 8192 // Maximum number of state downloads to allow concurrently
)

var (
@@ -52,6 +52,7 @@ var (
// fetchRequest is a currently running data retrieval operation.
type fetchRequest struct {
Peer *peer // Peer to which the request was sent
From uint64 // [eth/62] Requested chain element index (used for skeleton fills only)
Hashes map[common.Hash]int // [eth/61] Requested hashes with their insertion index (priority)
Headers []*types.Header // [eth/62] Requested headers, sorted by request order
Time time.Time // Time when the request was made
@@ -79,6 +80,18 @@ type queue struct {

headerHead common.Hash // [eth/62] Hash of the last queued header to verify order

// Headers are "special", they download in batches, supported by a skeleton chain
headerTaskPool map[uint64]*types.Header // [eth/62] Pending header retrieval tasks, mapping starting indexes to skeleton headers
headerTaskQueue *prque.Prque // [eth/62] Priority queue of the skeleton indexes to fetch the filling headers for
headerPeerMiss map[string]map[uint64]struct{} // [eth/62] Set of per-peer header batches known to be unavailable
headerPendPool map[string]*fetchRequest // [eth/62] Currently pending header retrieval operations
headerDonePool map[uint64]struct{} // [eth/62] Set of the completed header fetches
headerResults []*types.Header // [eth/62] Result cache accumulating the completed headers
headerProced int // [eth/62] Number of headers already processed from the results
headerOffset uint64 // [eth/62] Number of the first header in the result cache
headerContCh chan bool // [eth/62] Channel to notify when header download finishes

// All data retrievals below are based on an already assembled header chain
blockTaskPool map[common.Hash]*types.Header // [eth/62] Pending block (body) retrieval tasks, mapping hashes to headers
blockTaskQueue *prque.Prque // [eth/62] Priority queue of the headers to fetch the blocks (bodies) for
blockPendPool map[string]*fetchRequest // [eth/62] Currently pending block (body) retrieval operations
@@ -113,6 +126,8 @@ func newQueue(stateDb ethdb.Database) *queue {
return &queue{
hashPool: make(map[common.Hash]int),
hashQueue: prque.New(),
headerPendPool: make(map[string]*fetchRequest),
headerContCh: make(chan bool),
blockTaskPool: make(map[common.Hash]*types.Header),
blockTaskQueue: prque.New(),
blockPendPool: make(map[string]*fetchRequest),
@@ -149,6 +164,8 @@ func (q *queue) Reset() {

q.headerHead = common.Hash{}

q.headerPendPool = make(map[string]*fetchRequest)

q.blockTaskPool = make(map[common.Hash]*types.Header)
q.blockTaskQueue.Reset()
q.blockPendPool = make(map[string]*fetchRequest)
@@ -178,6 +195,14 @@ func (q *queue) Close() {
q.active.Broadcast()
}

// PendingHeaders retrieves the number of header requests pending for retrieval.
func (q *queue) PendingHeaders() int {
q.lock.Lock()
defer q.lock.Unlock()

return q.headerTaskQueue.Size()
}

// PendingBlocks retrieves the number of block (body) requests pending for retrieval.
func (q *queue) PendingBlocks() int {
q.lock.Lock()
@@ -205,6 +230,15 @@ func (q *queue) PendingNodeData() int {
return 0
}

// InFlightHeaders retrieves whether there are header fetch requests currently
// in flight.
func (q *queue) InFlightHeaders() bool {
q.lock.Lock()
defer q.lock.Unlock()

return len(q.headerPendPool) > 0
}

// InFlightBlocks retrieves whether there are block fetch requests currently in
// flight.
func (q *queue) InFlightBlocks() bool {
@@ -317,6 +351,45 @@ func (q *queue) Schedule61(hashes []common.Hash, fifo bool) []common.Hash {
return inserts
}
// ScheduleSkeleton adds a batch of header retrieval tasks to the queue to fill
// up an already retrieved header skeleton.
func (q *queue) ScheduleSkeleton(from uint64, skeleton []*types.Header) {
q.lock.Lock()
defer q.lock.Unlock()

// No skeleton retrieval can be in progress, fail hard if so (huge implementation bug)
if q.headerResults != nil {
panic("skeleton assembly already in progress")
}
// Schedule all the header retrieval tasks for the skeleton assembly
q.headerTaskPool = make(map[uint64]*types.Header)
q.headerTaskQueue = prque.New()
q.headerPeerMiss = make(map[string]map[uint64]struct{}) // Reset availability to correct invalid chains
q.headerResults = make([]*types.Header, len(skeleton)*MaxHeaderFetch)
q.headerProced = 0
q.headerOffset = from
q.headerContCh = make(chan bool, 1)

for i, header := range skeleton {
index := from + uint64(i*MaxHeaderFetch)

q.headerTaskPool[index] = header
q.headerTaskQueue.Push(index, -float32(index))
}
}
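Each skeleton entry therefore owns a fixed MaxHeaderFetch-wide slot in the result cache, and the task index is simply the first block number of that slot. For instance, with from = 1000, a three-entry skeleton and MaxHeaderFetch assumed to be 192:

// fill tasks are scheduled at 1000, 1192 and 1384, and headerResults holds 3*192 slots
for i := 0; i < 3; i++ {
	fmt.Println(1000 + uint64(i)*192) // start of the 192-header batch whose last header must be skeleton[i]
}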
// RetrieveHeaders retrieves the header chain assembled based on the scheduled
// skeleton.
func (q *queue) RetrieveHeaders() ([]*types.Header, int) {
q.lock.Lock()
defer q.lock.Unlock()

headers, proced := q.headerResults, q.headerProced
q.headerResults, q.headerProced = nil, 0

return headers, proced
}

// Schedule adds a set of headers for the download queue for scheduling, returning
// the new headers encountered.
func (q *queue) Schedule(headers []*types.Header, from uint64) []*types.Header {
@@ -437,6 +510,46 @@ func (q *queue) countProcessableItems() int {
return len(q.resultCache)
}
// ReserveHeaders reserves a set of headers for the given peer, skipping any
// previously failed batches.
func (q *queue) ReserveHeaders(p *peer, count int) *fetchRequest {
q.lock.Lock()
defer q.lock.Unlock()

// Short circuit if the peer's already downloading something (sanity check to
// not corrupt state)
if _, ok := q.headerPendPool[p.id]; ok {
return nil
}
// Retrieve a batch of hashes, skipping previously failed ones
send, skip := uint64(0), []uint64{}
for send == 0 && !q.headerTaskQueue.Empty() {
from, _ := q.headerTaskQueue.Pop()
if q.headerPeerMiss[p.id] != nil {
if _, ok := q.headerPeerMiss[p.id][from.(uint64)]; ok {
skip = append(skip, from.(uint64))
continue
}
}
send = from.(uint64)
}
// Merge all the skipped batches back
for _, from := range skip {
q.headerTaskQueue.Push(from, -float32(from))
}
// Assemble and return the block download request
if send == 0 {
return nil
}
request := &fetchRequest{
Peer: p,
From: send,
Time: time.Now(),
}
q.headerPendPool[p.id] = request
return request
}

// ReserveBlocks reserves a set of block hashes for the given peer, skipping any
// previously failed download.
func (q *queue) ReserveBlocks(p *peer, count int) *fetchRequest {
@@ -635,6 +748,11 @@ func (q *queue) reserveHeaders(p *peer, count int, taskPool map[common.Hash]*typ
return request, progress, nil
}
// CancelHeaders aborts a fetch request, returning all pending skeleton indexes to the queue.
func (q *queue) CancelHeaders(request *fetchRequest) {
q.cancel(request, q.headerTaskQueue, q.headerPendPool)
}

// CancelBlocks aborts a fetch request, returning all pending hashes to the queue.
func (q *queue) CancelBlocks(request *fetchRequest) {
q.cancel(request, q.hashQueue, q.blockPendPool)
@@ -663,6 +781,9 @@ func (q *queue) cancel(request *fetchRequest, taskQueue *prque.Prque, pendPool m
q.lock.Lock()
defer q.lock.Unlock()

if request.From > 0 {
taskQueue.Push(request.From, -float32(request.From))
}
for hash, index := range request.Hashes {
taskQueue.Push(hash, float32(index))
}
@@ -702,6 +823,15 @@ func (q *queue) Revoke(peerId string) {
}
}

// ExpireHeaders checks for in flight requests that exceeded a timeout allowance,
// canceling them and returning the responsible peers for penalisation.
func (q *queue) ExpireHeaders(timeout time.Duration) map[string]int {
q.lock.Lock()
defer q.lock.Unlock()

return q.expire(timeout, q.headerPendPool, q.headerTaskQueue, headerTimeoutMeter)
}

// ExpireBlocks checks for in flight requests that exceeded a timeout allowance,
// canceling them and returning the responsible peers for penalisation.
func (q *queue) ExpireBlocks(timeout time.Duration) map[string]int {
@@ -753,6 +883,9 @@ func (q *queue) expire(timeout time.Duration, pendPool map[string]*fetchRequest,
timeoutMeter.Mark(1)

// Return any non satisfied requests to the pool
if request.From > 0 {
taskQueue.Push(request.From, -float32(request.From))
}
for hash, index := range request.Hashes {
taskQueue.Push(hash, float32(index))
}
@@ -842,6 +975,94 @@ func (q *queue) DeliverBlocks(id string, blocks []*types.Block) (int, error) {
}
}
// DeliverHeaders injects a header retrieval response into the header results
// cache. This method either accepts all headers it received, or none of them
// if they do not map correctly to the skeleton.
//
// If the headers are accepted, the method makes an attempt to deliver the set
// of ready headers to the processor to keep the pipeline full. However it will
// not block to prevent stalling other pending deliveries.
func (q *queue) DeliverHeaders(id string, headers []*types.Header, headerProcCh chan []*types.Header) (int, error) {
q.lock.Lock()
defer q.lock.Unlock()

// Short circuit if the data was never requested
request := q.headerPendPool[id]
if request == nil {
return 0, errNoFetchesPending
}
headerReqTimer.UpdateSince(request.Time)
delete(q.headerPendPool, id)

// Ensure headers can be mapped onto the skeleton chain
target := q.headerTaskPool[request.From].Hash()

accepted := len(headers) == MaxHeaderFetch
if accepted {
if headers[0].Number.Uint64() != request.From {
glog.V(logger.Detail).Infof("Peer %s: first header #%v [%x] broke chain ordering, expected %d", id, headers[0].Number, headers[0].Hash().Bytes()[:4], request.From)
accepted = false
} else if headers[len(headers)-1].Hash() != target {
glog.V(logger.Detail).Infof("Peer %s: last header #%v [%x] broke skeleton structure, expected %x", id, headers[len(headers)-1].Number, headers[len(headers)-1].Hash().Bytes()[:4], target[:4])
accepted = false
}
}
if accepted {
for i, header := range headers[1:] {
hash := header.Hash()
if want := request.From + 1 + uint64(i); header.Number.Uint64() != want {
glog.V(logger.Warn).Infof("Peer %s: header #%v [%x] broke chain ordering, expected %d", id, header.Number, hash[:4], want)
accepted = false
break
}
if headers[i].Hash() != header.ParentHash {
glog.V(logger.Warn).Infof("Peer %s: header #%v [%x] broke chain ancestry", id, header.Number, hash[:4])
accepted = false
break
}
}
}
// If the batch of headers wasn't accepted, mark as unavailable
if !accepted {
glog.V(logger.Detail).Infof("Peer %s: skeleton filling from header #%d not accepted", id, request.From)

miss := q.headerPeerMiss[id]
if miss == nil {
q.headerPeerMiss[id] = make(map[uint64]struct{})
miss = q.headerPeerMiss[id]
}
miss[request.From] = struct{}{}

q.headerTaskQueue.Push(request.From, -float32(request.From))
return 0, errors.New("delivery not accepted")
}
// Clean up a successful fetch and try to deliver any sub-results
copy(q.headerResults[request.From-q.headerOffset:], headers)
delete(q.headerTaskPool, request.From)

ready := 0
for q.headerProced+ready < len(q.headerResults) && q.headerResults[q.headerProced+ready] != nil {
ready += MaxHeaderFetch
}
if ready > 0 {
// Headers are ready for delivery, gather them and push forward (non blocking)
process := make([]*types.Header, ready)
copy(process, q.headerResults[q.headerProced:q.headerProced+ready])

select {
case headerProcCh <- process:
glog.V(logger.Detail).Infof("%s: pre-scheduled %d headers from #%v", id, len(process), process[0].Number)
q.headerProced += len(process)
default:
}
}
// Check for termination and return
if len(q.headerTaskPool) == 0 {
q.headerContCh <- false
}
return len(headers), nil
}
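Condensing the checks above: a skeleton fill is accepted only when it is a complete MaxHeaderFetch batch that starts at the requested number, terminates exactly on the skeleton header, and forms an unbroken parent-hash chain in between; anything else pushes the task back onto the queue and records a per-peer miss. A minimal predicate with the same logic:

func fillAccepted(headers []*types.Header, from uint64, target common.Hash) bool {
	if len(headers) != MaxHeaderFetch || headers[0].Number.Uint64() != from || headers[len(headers)-1].Hash() != target {
		return false
	}
	for i := 1; i < len(headers); i++ {
		if headers[i].Number.Uint64() != from+uint64(i) || headers[i].ParentHash != headers[i-1].Hash() {
			return false
		}
	}
	return true
}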
// DeliverBodies injects a block body retrieval response into the results queue.
// The method returns the number of block bodies accepted from the delivery and
// also wakes any threads waiting for data delivery.
@@ -1041,13 +1262,19 @@ func (q *queue) deliverNodeData(results []trie.SyncResult, callback func(error,

// Prepare configures the result cache to allow accepting and caching inbound
// fetch results.
func (q *queue) Prepare(offset uint64, mode SyncMode, pivot uint64) {
func (q *queue) Prepare(offset uint64, mode SyncMode, pivot uint64, head *types.Header) {
q.lock.Lock()
defer q.lock.Unlock()

// Prepare the queue for sync results
if q.resultOffset < offset {
q.resultOffset = offset
}
q.fastSyncPivot = pivot
q.mode = mode

// If long running fast sync, also start up a head state retrieval immediately
if mode == FastSync && pivot > 0 {
q.stateScheduler = state.NewStateSync(head.Root, q.stateDatabase)
}
}
@@ -233,6 +233,7 @@ func (s *PublicFilterAPI) newLogFilter(earliest, latest int64, addresses []commo
return id, nil
}

// Logs creates a subscription that fires for all new logs that match the given filter criteria.
func (s *PublicFilterAPI) Logs(ctx context.Context, args NewFilterArgs) (rpc.Subscription, error) {
notifier, supported := rpc.NotifierFromContext(ctx)
if !supported {
@@ -291,12 +292,13 @@ type NewFilterArgs struct {
Topics [][]common.Hash
}

// UnmarshalJSON sets *args fields with given data.
func (args *NewFilterArgs) UnmarshalJSON(data []byte) error {
type input struct {
From *rpc.BlockNumber `json:"fromBlock"`
ToBlock *rpc.BlockNumber `json:"toBlock"`
Addresses interface{} `json:"address"`
Topics interface{} `json:"topics"`
Topics []interface{} `json:"topics"`
}

var raw input
@@ -321,7 +323,6 @@ func (args *NewFilterArgs) UnmarshalJSON(data []byte) error {
if raw.Addresses != nil {
// raw.Address can contain a single address or an array of addresses
var addresses []common.Address

if strAddrs, ok := raw.Addresses.([]interface{}); ok {
for i, addr := range strAddrs {
if strAddr, ok := addr.(string); ok {
@@ -352,56 +353,53 @@ func (args *NewFilterArgs) UnmarshalJSON(data []byte) error {
args.Addresses = addresses
}

// helper function which parses a string to a topic hash
topicConverter := func(raw string) (common.Hash, error) {
if len(raw) == 0 {
return common.Hash{}, nil
}

if len(raw) >= 2 && raw[0] == '0' && (raw[1] == 'x' || raw[1] == 'X') {
raw = raw[2:]
}

if len(raw) != 2 * common.HashLength {
return common.Hash{}, errors.New("invalid topic(s)")
}
if decAddr, err := hex.DecodeString(raw); err == nil {
return common.BytesToHash(decAddr), nil
}

return common.Hash{}, errors.New("invalid topic given")
return common.Hash{}, errors.New("invalid topic(s)")
}

// topics is an array consisting of strings or arrays of strings
if raw.Topics != nil {
topics, ok := raw.Topics.([]interface{})
if ok {
parsedTopics := make([][]common.Hash, len(topics))
for i, topic := range topics {
if topic == nil {
parsedTopics[i] = []common.Hash{common.StringToHash("")}
} else if strTopic, ok := topic.(string); ok {
if t, err := topicConverter(strTopic); err != nil {
return fmt.Errorf("invalid topic on index %d", i)
} else {
parsedTopics[i] = []common.Hash{t}
}
} else if arrTopic, ok := topic.([]interface{}); ok {
parsedTopics[i] = make([]common.Hash, len(arrTopic))
for j := 0; j < len(parsedTopics[i]); i++ {
if arrTopic[j] == nil {
parsedTopics[i][j] = common.StringToHash("")
} else if str, ok := arrTopic[j].(string); ok {
if t, err := topicConverter(str); err != nil {
return fmt.Errorf("invalid topic on index %d", i)
} else {
parsedTopics[i] = []common.Hash{t}
}
} else {
return fmt.Errorf("topic[%d][%d] not a string", i, j)
}
}
} else {
return fmt.Errorf("topic[%d] invalid", i)
// topics is an array consisting of strings and/or arrays of strings.
// JSON null values are converted to common.Hash{} and ignored by the filter manager.
if len(raw.Topics) > 0 {
args.Topics = make([][]common.Hash, len(raw.Topics))
for i, t := range raw.Topics {
if t == nil { // ignore topic when matching logs
args.Topics[i] = []common.Hash{common.Hash{}}
} else if topic, ok := t.(string); ok { // match specific topic
top, err := topicConverter(topic)
if err != nil {
return err
}
args.Topics[i] = []common.Hash{top}
} else if topics, ok := t.([]interface{}); ok { // or case e.g. [null, "topic0", "topic1"]
for _, rawTopic := range topics {
if rawTopic == nil {
args.Topics[i] = append(args.Topics[i], common.Hash{})
} else if topic, ok := rawTopic.(string); ok {
parsed, err := topicConverter(topic)
if err != nil {
return err
}
args.Topics[i] = append(args.Topics[i], parsed)
} else {
return fmt.Errorf("invalid topic(s)")
}
}
} else {
return fmt.Errorf("invalid topic(s)")
}
args.Topics = parsedTopics
}
}
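The rewritten parser maps the JSON "topics" array straight onto [][]common.Hash: the outer position is an AND condition, an inner list is an OR of alternatives, and a JSON null becomes the zero hash, which the filter manager ignores when matching. For example:

// {"topics": [topicA, null, [topicB, topicC]]} parses to
//   [][]common.Hash{{topicA}, {common.Hash{}}, {topicB, topicC}}
// i.e. position 0 must equal topicA, position 1 matches anything, and
// position 2 matches either topicB or topicC.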
198
eth/filters/api_test.go
Normal file
@@ -0,0 +1,198 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package filters_test

import (
"encoding/json"
"fmt"
"testing"

"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/eth/filters"
"github.com/ethereum/go-ethereum/rpc"
)

func TestUnmarshalJSONNewFilterArgs(t *testing.T) {
var (
fromBlock rpc.BlockNumber = 0x123435
toBlock rpc.BlockNumber = 0xabcdef
address0 = common.StringToAddress("70c87d191324e6712a591f304b4eedef6ad9bb9d")
address1 = common.StringToAddress("9b2055d370f73ec7d8a03e965129118dc8f5bf83")
topic0 = common.HexToHash("3ac225168df54212a25c1c01fd35bebfea408fdac2e31ddd6f80a4bbf9a5f1ca")
topic1 = common.HexToHash("9084a792d2f8b16a62b882fd56f7860c07bf5fa91dd8a2ae7e809e5180fef0b3")
topic2 = common.HexToHash("6ccae1c4af4152f460ff510e573399795dfab5dcf1fa60d1f33ac8fdc1e480ce")
nullTopic = common.Hash{}
)

// default values
var test0 filters.NewFilterArgs
if err := json.Unmarshal([]byte("{}"), &test0); err != nil {
t.Fatal(err)
}
if test0.FromBlock != rpc.LatestBlockNumber {
t.Fatalf("expected %d, got %d", rpc.LatestBlockNumber, test0.FromBlock)
}
if test0.ToBlock != rpc.LatestBlockNumber {
t.Fatalf("expected %d, got %d", rpc.LatestBlockNumber, test0.ToBlock)
}
if len(test0.Addresses) != 0 {
t.Fatalf("expected 0 addresses, got %d", len(test0.Addresses))
}
if len(test0.Topics) != 0 {
t.Fatalf("expected 0 topics, got %d topics", len(test0.Topics))
}

// from, to block number
var test1 filters.NewFilterArgs
vector := fmt.Sprintf(`{"fromBlock":"0x%x","toBlock":"0x%x"}`, fromBlock, toBlock)
if err := json.Unmarshal([]byte(vector), &test1); err != nil {
t.Fatal(err)
}
if test1.FromBlock != fromBlock {
t.Fatalf("expected FromBlock %d, got %d", fromBlock, test1.FromBlock)
}
if test1.ToBlock != toBlock {
t.Fatalf("expected ToBlock %d, got %d", toBlock, test1.ToBlock)
}

// single address
var test2 filters.NewFilterArgs
vector = fmt.Sprintf(`{"address": "%s"}`, address0.Hex())
if err := json.Unmarshal([]byte(vector), &test2); err != nil {
t.Fatal(err)
}
if len(test2.Addresses) != 1 {
t.Fatalf("expected 1 address, got %d address(es)", len(test2.Addresses))
}
if test2.Addresses[0] != address0 {
t.Fatalf("expected address %x, got %x", address0, test2.Addresses[0])
}

// multiple address
var test3 filters.NewFilterArgs
vector = fmt.Sprintf(`{"address": ["%s", "%s"]}`, address0.Hex(), address1.Hex())
if err := json.Unmarshal([]byte(vector), &test3); err != nil {
t.Fatal(err)
}
if len(test3.Addresses) != 2 {
t.Fatalf("expected 2 addresses, got %d address(es)", len(test3.Addresses))
}
if test3.Addresses[0] != address0 {
t.Fatalf("expected address %x, got %x", address0, test3.Addresses[0])
}
if test3.Addresses[1] != address1 {
t.Fatalf("expected address %x, got %x", address1, test3.Addresses[1])
}

// single topic
var test4 filters.NewFilterArgs
vector = fmt.Sprintf(`{"topics": ["%s"]}`, topic0.Hex())
if err := json.Unmarshal([]byte(vector), &test4); err != nil {
t.Fatal(err)
}
if len(test4.Topics) != 1 {
t.Fatalf("expected 1 topic, got %d", len(test4.Topics))
}
if len(test4.Topics[0]) != 1 {
t.Fatalf("expected len(topics[0]) to be 1, got %d", len(test4.Topics[0]))
}
if test4.Topics[0][0] != topic0 {
t.Fatalf("got %x, expected %x", test4.Topics[0][0], topic0)
}

// test multiple "AND" topics
var test5 filters.NewFilterArgs
vector = fmt.Sprintf(`{"topics": ["%s", "%s"]}`, topic0.Hex(), topic1.Hex())
if err := json.Unmarshal([]byte(vector), &test5); err != nil {
t.Fatal(err)
}
if len(test5.Topics) != 2 {
t.Fatalf("expected 2 topics, got %d", len(test5.Topics))
}
if len(test5.Topics[0]) != 1 {
t.Fatalf("expected 1 topic, got %d", len(test5.Topics[0]))
}
if test5.Topics[0][0] != topic0 {
t.Fatalf("got %x, expected %x", test5.Topics[0][0], topic0)
}
if len(test5.Topics[1]) != 1 {
t.Fatalf("expected 1 topic, got %d", len(test5.Topics[1]))
}
if test5.Topics[1][0] != topic1 {
t.Fatalf("got %x, expected %x", test5.Topics[1][0], topic1)
}

// test optional topic
var test6 filters.NewFilterArgs
vector = fmt.Sprintf(`{"topics": ["%s", null, "%s"]}`, topic0.Hex(), topic2.Hex())
if err := json.Unmarshal([]byte(vector), &test6); err != nil {
t.Fatal(err)
}
if len(test6.Topics) != 3 {
t.Fatalf("expected 3 topics, got %d", len(test6.Topics))
}
if len(test6.Topics[0]) != 1 {
t.Fatalf("expected 1 topic, got %d", len(test6.Topics[0]))
}
if test6.Topics[0][0] != topic0 {
t.Fatalf("got %x, expected %x", test6.Topics[0][0], topic0)
}
if len(test6.Topics[1]) != 1 {
t.Fatalf("expected 1 topic, got %d", len(test6.Topics[1]))
|
||||
}
|
||||
if test6.Topics[1][0] != nullTopic {
|
||||
t.Fatalf("got %x, expected empty hash", test6.Topics[1][0])
|
||||
}
|
||||
if len(test6.Topics[2]) != 1 {
|
||||
t.Fatalf("expected 1 topic, got %d", len(test6.Topics[2]))
|
||||
}
|
||||
if test6.Topics[2][0] != topic2 {
|
||||
t.Fatalf("got %x, expected %x", test6.Topics[2][0], topic2)
|
||||
}
|
||||
|
||||
// test OR topics
|
||||
var test7 filters.NewFilterArgs
|
||||
vector = fmt.Sprintf(`{"topics": [["%s", "%s"], null, ["%s", null]]}`, topic0.Hex(), topic1.Hex(), topic2.Hex())
|
||||
if err := json.Unmarshal([]byte(vector), &test7); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if len(test7.Topics) != 3 {
|
||||
t.Fatalf("expected 3 topics, got %d topics", len(test7.Topics))
|
||||
}
|
||||
if len(test7.Topics[0]) != 2 {
|
||||
t.Fatalf("expected 2 topics, got %d topics", len(test7.Topics[0]))
|
||||
}
|
||||
if test7.Topics[0][0] != topic0 || test7.Topics[0][1] != topic1 {
|
||||
t.Fatalf("invalid topics expected [%x,%x], got [%x,%x]",
|
||||
topic0, topic1, test7.Topics[0][0], test7.Topics[0][1],
|
||||
)
|
||||
}
|
||||
if len(test7.Topics[1]) != 1 {
|
||||
t.Fatalf("expected 1 topic, got %d topics", len(test7.Topics[1]))
|
||||
}
|
||||
if test7.Topics[1][0] != nullTopic {
|
||||
t.Fatalf("expected empty hash, got %x", test7.Topics[1][0])
|
||||
}
|
||||
if len(test7.Topics[2]) != 2 {
|
||||
t.Fatalf("expected 2 topics, got %d topics", len(test7.Topics[2]))
|
||||
}
|
||||
if test7.Topics[2][0] != topic2 || test7.Topics[2][1] != nullTopic {
|
||||
t.Fatalf("invalid topics expected [%x,%x], got [%x,%x]",
|
||||
topic2, nullTopic, test7.Topics[2][0], test7.Topics[2][1],
|
||||
)
|
||||
}
|
||||
}
|
@ -164,7 +164,7 @@ func (fs *FilterSystem) filterLoop() {
|
||||
fs.filterMu.RLock()
|
||||
for _, filter := range fs.logFilters {
|
||||
if filter.LogCallback != nil && !filter.created.After(event.Time) {
|
||||
for _, removedLog := range ev.Logs {
|
||||
for _, removedLog := range filter.FilterLogs(ev.Logs) {
|
||||
filter.LogCallback(removedLog, true)
|
||||
}
|
||||
}
|
||||
102 eth/handler.go
@ -22,6 +22,7 @@ import (
|
||||
"math"
|
||||
"math/big"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
@ -58,7 +59,9 @@ type blockFetcherFn func([]common.Hash) error
|
||||
type ProtocolManager struct {
|
||||
networkId int
|
||||
|
||||
fastSync bool
|
||||
fastSync uint32 // Flag whether fast sync is enabled (gets disabled if we already have blocks)
|
||||
synced uint32 // Flag whether we're considered synchronised (enables transaction processing)
|
||||
|
||||
txpool txPool
|
||||
blockchain *core.BlockChain
|
||||
chaindb ethdb.Database
|
||||
@ -74,36 +77,41 @@ type ProtocolManager struct {
|
||||
minedBlockSub event.Subscription
|
||||
|
||||
// channels for fetcher, syncer, txsyncLoop
|
||||
newPeerCh chan *peer
|
||||
txsyncCh chan *txsync
|
||||
quitSync chan struct{}
|
||||
newPeerCh chan *peer
|
||||
txsyncCh chan *txsync
|
||||
quitSync chan struct{}
|
||||
noMorePeers chan struct{}
|
||||
|
||||
// wait group is used for graceful shutdowns during downloading
|
||||
// and processing
|
||||
wg sync.WaitGroup
|
||||
quit bool
|
||||
wg sync.WaitGroup
|
||||
|
||||
badBlockReportingEnabled bool
|
||||
}
|
||||
|
||||
// NewProtocolManager returns a new Ethereum sub-protocol manager. The Ethereum sub-protocol manages peers capable
// of operating with the Ethereum network.
|
||||
func NewProtocolManager(config *core.ChainConfig, fastSync bool, networkId int, mux *event.TypeMux, txpool txPool, pow pow.PoW, blockchain *core.BlockChain, chaindb ethdb.Database) (*ProtocolManager, error) {
|
||||
// Create the protocol manager with the base fields
|
||||
manager := &ProtocolManager{
|
||||
networkId: networkId,
|
||||
eventMux: mux,
|
||||
txpool: txpool,
|
||||
blockchain: blockchain,
|
||||
chaindb: chaindb,
|
||||
peers: newPeerSet(),
|
||||
newPeerCh: make(chan *peer),
|
||||
noMorePeers: make(chan struct{}),
|
||||
txsyncCh: make(chan *txsync),
|
||||
quitSync: make(chan struct{}),
|
||||
}
|
||||
// Figure out whether to allow fast sync or not
|
||||
if fastSync && blockchain.CurrentBlock().NumberU64() > 0 {
|
||||
glog.V(logger.Info).Infof("blockchain not empty, fast sync disabled")
|
||||
fastSync = false
|
||||
}
|
||||
// Create the protocol manager with the base fields
|
||||
manager := &ProtocolManager{
|
||||
networkId: networkId,
|
||||
fastSync: fastSync,
|
||||
eventMux: mux,
|
||||
txpool: txpool,
|
||||
blockchain: blockchain,
|
||||
chaindb: chaindb,
|
||||
peers: newPeerSet(),
|
||||
newPeerCh: make(chan *peer, 1),
|
||||
txsyncCh: make(chan *txsync),
|
||||
quitSync: make(chan struct{}),
|
||||
if fastSync {
|
||||
manager.fastSync = uint32(1)
|
||||
}
|
||||
// Initiate a sub-protocol for every implemented version we can handle
|
||||
manager.SubProtocols = make([]p2p.Protocol, 0, len(ProtocolVersions))
|
||||
@ -120,8 +128,14 @@ func NewProtocolManager(config *core.ChainConfig, fastSync bool, networkId int,
|
||||
Length: ProtocolLengths[i],
|
||||
Run: func(p *p2p.Peer, rw p2p.MsgReadWriter) error {
|
||||
peer := manager.newPeer(int(version), p, rw)
|
||||
manager.newPeerCh <- peer
|
||||
return manager.handle(peer)
|
||||
select {
|
||||
case manager.newPeerCh <- peer:
|
||||
manager.wg.Add(1)
|
||||
defer manager.wg.Done()
|
||||
return manager.handle(peer)
|
||||
case <-manager.quitSync:
|
||||
return p2p.DiscQuitting
|
||||
}
|
||||
},
|
||||
NodeInfo: func() interface{} {
|
||||
return manager.NodeInfo()
|
||||
@ -140,7 +154,7 @@ func NewProtocolManager(config *core.ChainConfig, fastSync bool, networkId int,
|
||||
// Construct the different synchronisation mechanisms
|
||||
manager.downloader = downloader.New(chaindb, manager.eventMux, blockchain.HasHeader, blockchain.HasBlockAndState, blockchain.GetHeader,
|
||||
blockchain.GetBlock, blockchain.CurrentHeader, blockchain.CurrentBlock, blockchain.CurrentFastBlock, blockchain.FastSyncCommitHead,
|
||||
blockchain.GetTd, blockchain.InsertHeaderChain, blockchain.InsertChain, blockchain.InsertReceiptChain, blockchain.Rollback,
|
||||
blockchain.GetTd, blockchain.InsertHeaderChain, manager.insertChain, blockchain.InsertReceiptChain, blockchain.Rollback,
|
||||
manager.removePeer)
|
||||
|
||||
validator := func(block *types.Block, parent *types.Block) error {
|
||||
@ -149,11 +163,28 @@ func NewProtocolManager(config *core.ChainConfig, fastSync bool, networkId int,
|
||||
heighter := func() uint64 {
|
||||
return blockchain.CurrentBlock().NumberU64()
|
||||
}
|
||||
manager.fetcher = fetcher.New(blockchain.GetBlock, validator, manager.BroadcastBlock, heighter, blockchain.InsertChain, manager.removePeer)
|
||||
inserter := func(blocks types.Blocks) (int, error) {
|
||||
atomic.StoreUint32(&manager.synced, 1) // Mark initial sync done on any fetcher import
|
||||
return manager.insertChain(blocks)
|
||||
}
|
||||
manager.fetcher = fetcher.New(blockchain.GetBlock, validator, manager.BroadcastBlock, heighter, inserter, manager.removePeer)
|
||||
|
||||
if blockchain.Genesis().Hash().Hex() == defaultGenesisHash && networkId == 1 {
|
||||
glog.V(logger.Debug).Infoln("Bad Block Reporting is enabled")
|
||||
manager.badBlockReportingEnabled = true
|
||||
}
|
||||
|
||||
return manager, nil
|
||||
}
|
||||
|
||||
func (pm *ProtocolManager) insertChain(blocks types.Blocks) (i int, err error) {
|
||||
i, err = pm.blockchain.InsertChain(blocks)
|
||||
if pm.badBlockReportingEnabled && core.IsValidationErr(err) && i < len(blocks) {
|
||||
go sendBadBlockReport(blocks[i], err)
|
||||
}
|
||||
return i, err
|
||||
}
|
||||
|
||||
func (pm *ProtocolManager) removePeer(id string) {
|
||||
// Short circuit if the peer was already removed
|
||||
peer := pm.peers.Peer(id)
|
||||
@ -187,16 +218,25 @@ func (pm *ProtocolManager) Start() {
|
||||
}
|
||||
|
||||
func (pm *ProtocolManager) Stop() {
|
||||
// Show a log message. During download / processing this could actually
// take between 5 and 10 seconds, therefore feedback is required.
|
||||
glog.V(logger.Info).Infoln("Stopping ethereum protocol handler...")
|
||||
|
||||
pm.quit = true
|
||||
pm.txSub.Unsubscribe() // quits txBroadcastLoop
|
||||
pm.minedBlockSub.Unsubscribe() // quits blockBroadcastLoop
|
||||
close(pm.quitSync) // quits syncer, fetcher, txsyncLoop
|
||||
|
||||
// Wait for any process action
|
||||
// Quit the sync loop.
|
||||
// After this send has completed, no new peers will be accepted.
|
||||
pm.noMorePeers <- struct{}{}
|
||||
|
||||
// Quit fetcher, txsyncLoop.
|
||||
close(pm.quitSync)
|
||||
|
||||
// Disconnect existing sessions.
|
||||
// This also closes the gate for any new registrations on the peer set.
|
||||
// sessions which are already established but not added to pm.peers yet
|
||||
// will exit when they try to register.
|
||||
pm.peers.Close()
|
||||
|
||||
// Wait for all peer handler goroutines and the loops to come down.
|
||||
pm.wg.Wait()
|
||||
|
||||
glog.V(logger.Info).Infoln("Ethereum protocol handler stopped")
|
||||
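The Stop rework above enforces a strict shutdown order: stop accepting new peers, quit the sync loops, disconnect existing sessions, then wait for all handlers to return. A minimal standalone sketch of that ordering follows; the type, field names and the disconnect callback are illustrative only, not the real ProtocolManager API.

```go
package main

import (
	"fmt"
	"sync"
)

// manager is a stripped-down stand-in for ProtocolManager.
type manager struct {
	noMorePeers chan struct{}  // unbuffered: the send completes only once the sync loop receives it
	quitSync    chan struct{}  // closed to stop fetcher and txsync loops
	wg          sync.WaitGroup // tracks running peer handler goroutines
}

func (m *manager) stop(disconnectPeers func()) {
	m.noMorePeers <- struct{}{} // 1. after this send completes, no new peers are accepted
	close(m.quitSync)           // 2. quit fetcher, txsyncLoop
	disconnectPeers()           // 3. tear down sessions that are already established
	m.wg.Wait()                 // 4. wait for all peer handlers and loops to come down
}

func main() {
	m := &manager{noMorePeers: make(chan struct{}), quitSync: make(chan struct{})}
	go func() { <-m.noMorePeers }() // stand-in for the syncer loop
	m.stop(func() { fmt.Println("peers disconnected") })
	fmt.Println("protocol handler stopped")
}
```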
@ -359,6 +399,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
|
||||
// Update the receive timestamp of each block
|
||||
for _, block := range blocks {
|
||||
block.ReceivedAt = msg.ReceivedAt
|
||||
block.ReceivedFrom = p
|
||||
}
|
||||
// Filter out any explicitly requested blocks, deliver the rest to the downloader
|
||||
if blocks := pm.fetcher.FilterBlocks(blocks); len(blocks) > 0 {
|
||||
@ -645,6 +686,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
|
||||
return errResp(ErrDecode, "block validation %v: %v", msg, err)
|
||||
}
|
||||
request.Block.ReceivedAt = msg.ReceivedAt
|
||||
request.Block.ReceivedFrom = p
|
||||
|
||||
// Mark the peer as owning the block and schedule it for import
|
||||
p.MarkBlock(request.Block.Hash())
|
||||
@ -662,7 +704,11 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
|
||||
}
|
||||
|
||||
case msg.Code == TxMsg:
|
||||
// Transactions arrived, parse all of them and deliver to the pool
|
||||
// Transactions arrived, make sure we have a valid and fresh chain to handle them
|
||||
if atomic.LoadUint32(&pm.synced) == 0 {
|
||||
break
|
||||
}
|
||||
// Transactions can be processed, parse all of them and deliver to the pool
|
||||
var txs []*types.Transaction
|
||||
if err := msg.Decode(&txs); err != nil {
|
||||
return errResp(ErrDecode, "msg %v: %v", msg, err)
|
||||
|
@ -140,14 +140,14 @@ func newTestPeer(name string, version int, pm *ProtocolManager, shake bool) (*te
|
||||
// Start the peer on a new thread
|
||||
errc := make(chan error, 1)
|
||||
go func() {
|
||||
pm.newPeerCh <- peer
|
||||
errc <- pm.handle(peer)
|
||||
select {
|
||||
case pm.newPeerCh <- peer:
|
||||
errc <- pm.handle(peer)
|
||||
case <-pm.quitSync:
|
||||
errc <- p2p.DiscQuitting
|
||||
}
|
||||
}()
|
||||
tp := &testPeer{
|
||||
app: app,
|
||||
net: net,
|
||||
peer: peer,
|
||||
}
|
||||
tp := &testPeer{app: app, net: net, peer: peer}
|
||||
// Execute any implicitly requested handshakes and return
|
||||
if shake {
|
||||
td, head, genesis := pm.blockchain.Status()
|
||||
21 eth/peer.go
@ -34,6 +34,7 @@ import (
|
||||
)
|
||||
|
||||
var (
|
||||
errClosed = errors.New("peer set is closed")
|
||||
errAlreadyRegistered = errors.New("peer is already registered")
|
||||
errNotRegistered = errors.New("peer is not registered")
|
||||
)
|
||||
@ -351,8 +352,9 @@ func (p *peer) String() string {
|
||||
// peerSet represents the collection of active peers currently participating in
|
||||
// the Ethereum sub-protocol.
|
||||
type peerSet struct {
|
||||
peers map[string]*peer
|
||||
lock sync.RWMutex
|
||||
peers map[string]*peer
|
||||
lock sync.RWMutex
|
||||
closed bool
|
||||
}
|
||||
|
||||
// newPeerSet creates a new peer set to track the active participants.
|
||||
@ -368,6 +370,9 @@ func (ps *peerSet) Register(p *peer) error {
|
||||
ps.lock.Lock()
|
||||
defer ps.lock.Unlock()
|
||||
|
||||
if ps.closed {
|
||||
return errClosed
|
||||
}
|
||||
if _, ok := ps.peers[p.id]; ok {
|
||||
return errAlreadyRegistered
|
||||
}
|
||||
@ -450,3 +455,15 @@ func (ps *peerSet) BestPeer() *peer {
|
||||
}
|
||||
return bestPeer
|
||||
}
|
||||
|
||||
// Close disconnects all peers.
|
||||
// No new peers can be registered after Close has returned.
|
||||
func (ps *peerSet) Close() {
|
||||
ps.lock.Lock()
|
||||
defer ps.lock.Unlock()
|
||||
|
||||
for _, p := range ps.peers {
|
||||
p.Disconnect(p2p.DiscQuitting)
|
||||
}
|
||||
ps.closed = true
|
||||
}
|
||||
|
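The new peerSet.Close above pairs a closed flag with the existing mutex so that late registrations fail fast instead of leaking a session. A compact, dependency-free sketch of that guard, using hypothetical names:

```go
package main

import (
	"errors"
	"fmt"
	"sync"
)

var errClosed = errors.New("peer set is closed")

// registry mirrors the closed-flag guard added to peerSet above; the type is illustrative.
type registry struct {
	lock   sync.RWMutex
	peers  map[string]bool
	closed bool
}

func (r *registry) register(id string) error {
	r.lock.Lock()
	defer r.lock.Unlock()
	if r.closed {
		return errClosed // registrations after close fail instead of lingering
	}
	r.peers[id] = true
	return nil
}

func (r *registry) close() {
	r.lock.Lock()
	defer r.lock.Unlock()
	r.closed = true // the gate is shut for any new registrations
}

func main() {
	r := &registry{peers: make(map[string]bool)}
	fmt.Println(r.register("a")) // <nil>
	r.close()
	fmt.Println(r.register("b")) // peer set is closed
}
```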
@ -97,6 +97,7 @@ func TestRecvTransactions63(t *testing.T) { testRecvTransactions(t, 63) }
|
||||
func testRecvTransactions(t *testing.T, protocol int) {
|
||||
txAdded := make(chan []*types.Transaction)
|
||||
pm := newTestProtocolManagerMust(t, false, 0, nil, txAdded)
|
||||
pm.synced = 1 // mark synced to accept transactions
|
||||
p, _ := newTestPeer("peer", protocol, pm, true)
|
||||
defer pm.Stop()
|
||||
defer p.close()
|
||||
11 eth/sync.go
@ -18,6 +18,7 @@ package eth
|
||||
|
||||
import (
|
||||
"math/rand"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
@ -148,7 +149,7 @@ func (pm *ProtocolManager) syncer() {
|
||||
// Force a sync even if not enough peers are present
|
||||
go pm.synchronise(pm.peers.BestPeer())
|
||||
|
||||
case <-pm.quitSync:
|
||||
case <-pm.noMorePeers:
|
||||
return
|
||||
}
|
||||
}
|
||||
@ -167,18 +168,20 @@ func (pm *ProtocolManager) synchronise(peer *peer) {
|
||||
}
|
||||
// Otherwise try to sync with the downloader
|
||||
mode := downloader.FullSync
|
||||
if pm.fastSync {
|
||||
if atomic.LoadUint32(&pm.fastSync) == 1 {
|
||||
mode = downloader.FastSync
|
||||
}
|
||||
if err := pm.downloader.Synchronise(peer.id, peer.Head(), peer.Td(), mode); err != nil {
|
||||
return
|
||||
}
|
||||
atomic.StoreUint32(&pm.synced, 1) // Mark initial sync done
|
||||
|
||||
// If fast sync was enabled, and we synced up, disable it
|
||||
if pm.fastSync {
|
||||
if atomic.LoadUint32(&pm.fastSync) == 1 {
|
||||
// Disable fast sync if we indeed have something in our chain
|
||||
if pm.blockchain.CurrentBlock().NumberU64() > 0 {
|
||||
glog.V(logger.Info).Infof("fast sync complete, auto disabling")
|
||||
pm.fastSync = false
|
||||
atomic.StoreUint32(&pm.fastSync, 0)
|
||||
}
|
||||
}
|
||||
}
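The synchronise hunk above swaps the plain fastSync bool for an atomic uint32 so the flag can be read and cleared from concurrent goroutines without taking a lock. A self-contained sketch of that pattern; the type and field names are placeholders, not the real ProtocolManager:

```go
package main

import (
	"fmt"
	"sync/atomic"
)

// syncState stands in for ProtocolManager's fastSync/synced flags.
type syncState struct {
	fastSync uint32 // 1 while fast sync is allowed, 0 once disabled
	synced   uint32 // 1 once an initial sync has completed
}

func (s *syncState) synchronise(headBlocks uint64) {
	mode := "full"
	if atomic.LoadUint32(&s.fastSync) == 1 {
		mode = "fast"
	}
	fmt.Println("syncing in", mode, "mode")

	atomic.StoreUint32(&s.synced, 1) // mark initial sync done
	// Once the chain is non-empty, fast sync must never run again.
	if atomic.LoadUint32(&s.fastSync) == 1 && headBlocks > 0 {
		atomic.StoreUint32(&s.fastSync, 0)
	}
}

func main() {
	s := &syncState{fastSync: 1}
	s.synchronise(1024)
	fmt.Println("fastSync now:", atomic.LoadUint32(&s.fastSync)) // 0
}
```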
|
||||
|
@ -17,6 +17,7 @@
|
||||
package eth
|
||||
|
||||
import (
|
||||
"sync/atomic"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
@ -29,12 +30,12 @@ import (
|
||||
func TestFastSyncDisabling(t *testing.T) {
|
||||
// Create a pristine protocol manager, check that fast sync is left enabled
|
||||
pmEmpty := newTestProtocolManagerMust(t, true, 0, nil, nil)
|
||||
if !pmEmpty.fastSync {
|
||||
if atomic.LoadUint32(&pmEmpty.fastSync) == 0 {
|
||||
t.Fatalf("fast sync disabled on pristine blockchain")
|
||||
}
|
||||
// Create a full protocol manager, check that fast sync gets disabled
|
||||
pmFull := newTestProtocolManagerMust(t, true, 1024, nil, nil)
|
||||
if pmFull.fastSync {
|
||||
if atomic.LoadUint32(&pmFull.fastSync) == 1 {
|
||||
t.Fatalf("fast sync not disabled on non-empty blockchain")
|
||||
}
|
||||
// Sync up the two peers
|
||||
@ -47,7 +48,7 @@ func TestFastSyncDisabling(t *testing.T) {
|
||||
pmEmpty.synchronise(pmEmpty.peers.BestPeer())
|
||||
|
||||
// Check that fast sync was disabled
|
||||
if pmEmpty.fastSync {
|
||||
if atomic.LoadUint32(&pmEmpty.fastSync) == 1 {
|
||||
t.Fatalf("fast sync not disabled after successful synchronisation")
|
||||
}
|
||||
}
|
||||
|
@ -66,6 +66,9 @@ func (mux *TypeMux) Subscribe(types ...interface{}) Subscription {
|
||||
mux.mutex.Lock()
|
||||
defer mux.mutex.Unlock()
|
||||
if mux.stopped {
|
||||
// set the status to closed so that calling Unsubscribe after this
// call will short circuit
|
||||
sub.closed = true
|
||||
close(sub.postC)
|
||||
} else {
|
||||
if mux.subm == nil {
|
||||
|
@ -25,6 +25,14 @@ import (
|
||||
|
||||
type testEvent int
|
||||
|
||||
func TestSubCloseUnsub(t *testing.T) {
|
||||
// the point of this test is **not** to panic
|
||||
var mux TypeMux
|
||||
mux.Stop()
|
||||
sub := mux.Subscribe(int(0))
|
||||
sub.Unsubscribe()
|
||||
}
|
||||
|
||||
func TestSub(t *testing.T) {
|
||||
mux := new(TypeMux)
|
||||
defer mux.Stop()
|
||||
|
@ -51,7 +51,7 @@ type HandlerT struct {
|
||||
traceFile string
|
||||
}
|
||||
|
||||
// Verbosity sets the glog verbosity floor.
|
||||
// Verbosity sets the glog verbosity ceiling.
|
||||
// The verbosity of individual packages and source files
|
||||
// can be raised using Vmodule.
|
||||
func (*HandlerT) Verbosity(level int) {
|
||||
@ -131,14 +131,14 @@ func (h *HandlerT) StopCPUProfile() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Trace turns on tracing for nsec seconds and writes
|
||||
// GoTrace turns on tracing for nsec seconds and writes
|
||||
// trace data to file.
|
||||
func (h *HandlerT) Trace(file string, nsec uint) error {
|
||||
if err := h.StartTrace(file); err != nil {
|
||||
func (h *HandlerT) GoTrace(file string, nsec uint) error {
|
||||
if err := h.StartGoTrace(file); err != nil {
|
||||
return err
|
||||
}
|
||||
time.Sleep(time.Duration(nsec) * time.Second)
|
||||
h.StopTrace()
|
||||
h.StopGoTrace()
|
||||
return nil
|
||||
}
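The renamed GoTrace helper above wraps Go's execution tracer with a start / sleep / stop sequence. Below is a minimal sketch of the same shape against the standard runtime/trace package, with the handler bookkeeping trimmed away; the function name is made up for illustration.

```go
package main

import (
	"os"
	"runtime/trace"
	"time"
)

// timedTrace writes an execution trace covering roughly d to the given file.
func timedTrace(file string, d time.Duration) error {
	f, err := os.Create(file)
	if err != nil {
		return err
	}
	defer f.Close()

	if err := trace.Start(f); err != nil {
		return err
	}
	time.Sleep(d) // let the program run while the tracer records
	trace.Stop()
	return nil
}

func main() {
	if err := timedTrace("go.trace", 100*time.Millisecond); err != nil {
		panic(err)
	}
}
```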
|
||||
|
||||
|
@ -89,7 +89,7 @@ func Setup(ctx *cli.Context) error {
|
||||
runtime.MemProfileRate = ctx.GlobalInt(memprofilerateFlag.Name)
|
||||
Handler.SetBlockProfileRate(ctx.GlobalInt(blockprofilerateFlag.Name))
|
||||
if traceFile := ctx.GlobalString(traceFlag.Name); traceFile != "" {
|
||||
if err := Handler.StartTrace(traceFile); err != nil {
|
||||
if err := Handler.StartGoTrace(traceFile); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
@ -114,5 +114,5 @@ func Setup(ctx *cli.Context) error {
|
||||
// respective file.
|
||||
func Exit() {
|
||||
Handler.StopCPUProfile()
|
||||
Handler.StopTrace()
|
||||
Handler.StopGoTrace()
|
||||
}
|
||||
|
@ -27,8 +27,8 @@ import (
|
||||
"github.com/ethereum/go-ethereum/logger/glog"
|
||||
)
|
||||
|
||||
// StartTrace turns on tracing, writing to the given file.
|
||||
func (h *HandlerT) StartTrace(file string) error {
|
||||
// StartGoTrace turns on tracing, writing to the given file.
|
||||
func (h *HandlerT) StartGoTrace(file string) error {
|
||||
h.mu.Lock()
|
||||
defer h.mu.Unlock()
|
||||
if h.traceW != nil {
|
||||
@ -49,7 +49,7 @@ func (h *HandlerT) StartTrace(file string) error {
|
||||
}
|
||||
|
||||
// StopTrace stops an ongoing trace.
|
||||
func (h *HandlerT) StopTrace() error {
|
||||
func (h *HandlerT) StopGoTrace() error {
|
||||
h.mu.Lock()
|
||||
defer h.mu.Unlock()
|
||||
trace.Stop()
|
||||
|
@ -22,10 +22,10 @@ package debug
|
||||
|
||||
import "errors"
|
||||
|
||||
func (*HandlerT) StartTrace(string) error {
|
||||
func (*HandlerT) StartGoTrace(string) error {
|
||||
return errors.New("tracing is not supported on Go < 1.5")
|
||||
}
|
||||
|
||||
func (*HandlerT) StopTrace() error {
|
||||
func (*HandlerT) StopGoTrace() error {
|
||||
return errors.New("tracing is not supported on Go < 1.5")
|
||||
}
|
||||
|
@ -17,12 +17,13 @@
|
||||
package jsre
|
||||
|
||||
import (
|
||||
"os"
|
||||
"reflect"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestCompleteKeywords(t *testing.T) {
|
||||
re := New("")
|
||||
re := New("", os.Stdout)
|
||||
re.Run(`
|
||||
function theClass() {
|
||||
this.foo = 3;
|
@ -3911,7 +3911,12 @@ var outputSyncingFormatter = function(result) {
|
||||
result.startingBlock = utils.toDecimal(result.startingBlock);
|
||||
result.currentBlock = utils.toDecimal(result.currentBlock);
|
||||
result.highestBlock = utils.toDecimal(result.highestBlock);
|
||||
|
||||
if (result.knownStates !== undefined) {
|
||||
result.knownStates = utils.toDecimal(result.knownStates);
|
||||
}
|
||||
if (result.pulledStates !== undefined) {
|
||||
result.pulledStates = utils.toDecimal(result.pulledStates);
|
||||
}
|
||||
return result;
|
||||
};
|
||||
|
@ -21,9 +21,9 @@ import (
|
||||
crand "crypto/rand"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"math/rand"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
@ -40,9 +40,10 @@ It provides some helper functions to
|
||||
*/
|
||||
type JSRE struct {
|
||||
assetPath string
|
||||
output io.Writer
|
||||
evalQueue chan *evalReq
|
||||
stopEventLoop chan bool
|
||||
loopWg sync.WaitGroup
|
||||
closed chan struct{}
|
||||
}
|
||||
|
||||
// jsTimer is a single timer instance with a callback function
|
||||
@ -60,13 +61,14 @@ type evalReq struct {
|
||||
}
|
||||
|
||||
// runtime must be stopped with Stop() after use and cannot be used after stopping
|
||||
func New(assetPath string) *JSRE {
|
||||
func New(assetPath string, output io.Writer) *JSRE {
|
||||
re := &JSRE{
|
||||
assetPath: assetPath,
|
||||
output: output,
|
||||
closed: make(chan struct{}),
|
||||
evalQueue: make(chan *evalReq),
|
||||
stopEventLoop: make(chan bool),
|
||||
}
|
||||
re.loopWg.Add(1)
|
||||
go re.runEventLoop()
|
||||
re.Set("loadScript", re.loadScript)
|
||||
re.Set("inspect", prettyPrintJS)
|
||||
@ -95,6 +97,8 @@ func randomSource() *rand.Rand {
|
||||
// functions should be used if and only if running a routine that was already
|
||||
// called from JS through an RPC call.
|
||||
func (self *JSRE) runEventLoop() {
|
||||
defer close(self.closed)
|
||||
|
||||
vm := otto.New()
|
||||
r := randomSource()
|
||||
vm.SetRandomSource(r.Float64)
|
||||
@ -210,8 +214,6 @@ loop:
|
||||
timer.timer.Stop()
|
||||
delete(registry, timer)
|
||||
}
|
||||
|
||||
self.loopWg.Done()
|
||||
}
|
||||
|
||||
// Do executes the given function on the JS event loop.
|
||||
@ -224,8 +226,11 @@ func (self *JSRE) Do(fn func(*otto.Otto)) {
|
||||
|
||||
// stops the event loop before exit, optionally waits for all timers to expire
|
||||
func (self *JSRE) Stop(waitForCallbacks bool) {
|
||||
self.stopEventLoop <- waitForCallbacks
|
||||
self.loopWg.Wait()
|
||||
select {
|
||||
case <-self.closed:
|
||||
case self.stopEventLoop <- waitForCallbacks:
|
||||
<-self.closed
|
||||
}
|
||||
}
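The Stop change above makes shutdown safe to call more than once: if the event loop has already exited, the closed channel is readable and the stop request is skipped. A standalone sketch of that select-on-closed pattern with placeholder names:

```go
package main

import "fmt"

// looper is a stand-in for JSRE: a goroutine-backed loop plus a stop request channel.
type looper struct {
	stop   chan bool     // stop request, carries the wait-for-callbacks flag
	closed chan struct{} // closed by the loop goroutine when it exits
}

func newLooper() *looper {
	l := &looper{stop: make(chan bool), closed: make(chan struct{})}
	go func() {
		defer close(l.closed) // signal exit no matter how the loop ends
		<-l.stop              // the real loop would also process timers and eval requests here
	}()
	return l
}

// Stop is safe to call repeatedly: once closed is readable, the send is skipped.
func (l *looper) Stop(wait bool) {
	select {
	case <-l.closed:
	case l.stop <- wait:
		<-l.closed
	}
}

func main() {
	l := newLooper()
	l.Stop(false)
	l.Stop(false) // second call returns immediately instead of blocking forever
	fmt.Println("stopped twice without deadlock")
}
```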
|
||||
|
||||
// Exec(file) loads and runs the contents of a file
|
||||
@ -235,7 +240,14 @@ func (self *JSRE) Exec(file string) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
self.Do(func(vm *otto.Otto) { _, err = vm.Run(code) })
|
||||
var script *otto.Script
|
||||
self.Do(func(vm *otto.Otto) {
|
||||
script, err = vm.Compile(file, code)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
_, err = vm.Run(script)
|
||||
})
|
||||
return err
|
||||
}
|
||||
|
||||
@ -285,19 +297,21 @@ func (self *JSRE) loadScript(call otto.FunctionCall) otto.Value {
|
||||
return otto.TrueValue()
|
||||
}
|
||||
|
||||
// EvalAndPrettyPrint evaluates code and pretty prints the result to
|
||||
// standard output.
|
||||
func (self *JSRE) EvalAndPrettyPrint(code string) (err error) {
|
||||
// Evaluate executes code and pretty prints the result to the specified output
|
||||
// stream.
|
||||
func (self *JSRE) Evaluate(code string, w io.Writer) error {
|
||||
var fail error
|
||||
|
||||
self.Do(func(vm *otto.Otto) {
|
||||
var val otto.Value
|
||||
val, err = vm.Run(code)
|
||||
val, err := vm.Run(code)
|
||||
if err != nil {
|
||||
return
|
||||
prettyError(vm, err, w)
|
||||
} else {
|
||||
prettyPrint(vm, val, w)
|
||||
}
|
||||
prettyPrint(vm, val)
|
||||
fmt.Println()
|
||||
fmt.Fprintln(w)
|
||||
})
|
||||
return err
|
||||
return fail
|
||||
}
|
||||
|
||||
// Compile compiles and then runs a piece of JS code.
|
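With the signature changes above, a JSRE is constructed with an output writer and Evaluate takes the writer explicitly instead of always printing to standard output. A usage sketch based only on the signatures shown in this diff (error handling elided since the result is just printed):

```go
package main

import (
	"os"

	"github.com/ethereum/go-ethereum/jsre"
)

func main() {
	// New now takes the writer used for console / pretty-print output.
	re := jsre.New("", os.Stdout)
	defer re.Stop(false)

	// Evaluate runs the snippet and pretty-prints the result (or the error)
	// to the writer passed in.
	re.Evaluate(`[1, 2, 3].map(function(x) { return x * 2; })`, os.Stdout)
}
```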
@ -51,7 +51,7 @@ func newWithTestJS(t *testing.T, testjs string) (*JSRE, string) {
|
||||
t.Fatal("cannot create test.js:", err)
|
||||
}
|
||||
}
|
||||
return New(dir), dir
|
||||
return New(dir, os.Stdout), dir
|
||||
}
|
||||
|
||||
func TestExec(t *testing.T) {
|
||||
@ -102,7 +102,7 @@ func TestNatto(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestBind(t *testing.T) {
|
||||
jsre := New("")
|
||||
jsre := New("", os.Stdout)
|
||||
defer jsre.Stop(false)
|
||||
|
||||
jsre.Bind("no", &testNativeObjectBinding{})
|
@ -18,6 +18,7 @@ package jsre
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
@ -32,10 +33,11 @@ const (
|
||||
)
|
||||
|
||||
var (
|
||||
functionColor = color.New(color.FgMagenta)
|
||||
specialColor = color.New(color.Bold)
|
||||
numberColor = color.New(color.FgRed)
|
||||
stringColor = color.New(color.FgGreen)
|
||||
FunctionColor = color.New(color.FgMagenta).SprintfFunc()
|
||||
SpecialColor = color.New(color.Bold).SprintfFunc()
|
||||
NumberColor = color.New(color.FgRed).SprintfFunc()
|
||||
StringColor = color.New(color.FgGreen).SprintfFunc()
|
||||
ErrorColor = color.New(color.FgHiRed).SprintfFunc()
|
||||
)
|
||||
|
||||
// these fields are hidden when printing objects.
|
||||
@ -50,19 +52,39 @@ var boringKeys = map[string]bool{
|
||||
}
|
||||
|
||||
// prettyPrint writes value to standard output.
|
||||
func prettyPrint(vm *otto.Otto, value otto.Value) {
|
||||
ppctx{vm}.printValue(value, 0, false)
|
||||
func prettyPrint(vm *otto.Otto, value otto.Value, w io.Writer) {
|
||||
ppctx{vm: vm, w: w}.printValue(value, 0, false)
|
||||
}
|
||||
|
||||
func prettyPrintJS(call otto.FunctionCall) otto.Value {
|
||||
// prettyError writes err to standard output.
|
||||
func prettyError(vm *otto.Otto, err error, w io.Writer) {
|
||||
failure := err.Error()
|
||||
if ottoErr, ok := err.(*otto.Error); ok {
|
||||
failure = ottoErr.String()
|
||||
}
|
||||
fmt.Fprint(w, ErrorColor("%s", failure))
|
||||
}
|
||||
|
||||
// jsErrorString adds a backtrace to errors generated by otto.
|
||||
func jsErrorString(err error) string {
|
||||
if ottoErr, ok := err.(*otto.Error); ok {
|
||||
return ottoErr.String()
|
||||
}
|
||||
return err.Error()
|
||||
}
|
||||
|
||||
func prettyPrintJS(call otto.FunctionCall, w io.Writer) otto.Value {
|
||||
for _, v := range call.ArgumentList {
|
||||
prettyPrint(call.Otto, v)
|
||||
fmt.Println()
|
||||
prettyPrint(call.Otto, v, w)
|
||||
fmt.Fprintln(w)
|
||||
}
|
||||
return otto.UndefinedValue()
|
||||
}
|
||||
|
||||
type ppctx struct{ vm *otto.Otto }
|
||||
type ppctx struct {
|
||||
vm *otto.Otto
|
||||
w io.Writer
|
||||
}
|
||||
|
||||
func (ctx ppctx) indent(level int) string {
|
||||
return strings.Repeat(indentString, level)
|
||||
@ -73,22 +95,22 @@ func (ctx ppctx) printValue(v otto.Value, level int, inArray bool) {
|
||||
case v.IsObject():
|
||||
ctx.printObject(v.Object(), level, inArray)
|
||||
case v.IsNull():
|
||||
specialColor.Print("null")
|
||||
fmt.Fprint(ctx.w, SpecialColor("null"))
|
||||
case v.IsUndefined():
|
||||
specialColor.Print("undefined")
|
||||
fmt.Fprint(ctx.w, SpecialColor("undefined"))
|
||||
case v.IsString():
|
||||
s, _ := v.ToString()
|
||||
stringColor.Printf("%q", s)
|
||||
fmt.Fprint(ctx.w, StringColor("%q", s))
|
||||
case v.IsBoolean():
|
||||
b, _ := v.ToBoolean()
|
||||
specialColor.Printf("%t", b)
|
||||
fmt.Fprint(ctx.w, SpecialColor("%t", b))
|
||||
case v.IsNaN():
|
||||
numberColor.Printf("NaN")
|
||||
fmt.Fprint(ctx.w, NumberColor("NaN"))
|
||||
case v.IsNumber():
|
||||
s, _ := v.ToString()
|
||||
numberColor.Printf("%s", s)
|
||||
fmt.Fprint(ctx.w, NumberColor("%s", s))
|
||||
default:
|
||||
fmt.Printf("<unprintable>")
|
||||
fmt.Fprint(ctx.w, "<unprintable>")
|
||||
}
|
||||
}
|
||||
|
||||
@ -98,75 +120,75 @@ func (ctx ppctx) printObject(obj *otto.Object, level int, inArray bool) {
|
||||
lv, _ := obj.Get("length")
|
||||
len, _ := lv.ToInteger()
|
||||
if len == 0 {
|
||||
fmt.Printf("[]")
|
||||
fmt.Fprintf(ctx.w, "[]")
|
||||
return
|
||||
}
|
||||
if level > maxPrettyPrintLevel {
|
||||
fmt.Print("[...]")
|
||||
fmt.Fprint(ctx.w, "[...]")
|
||||
return
|
||||
}
|
||||
fmt.Print("[")
|
||||
fmt.Fprint(ctx.w, "[")
|
||||
for i := int64(0); i < len; i++ {
|
||||
el, err := obj.Get(strconv.FormatInt(i, 10))
|
||||
if err == nil {
|
||||
ctx.printValue(el, level+1, true)
|
||||
}
|
||||
if i < len-1 {
|
||||
fmt.Printf(", ")
|
||||
fmt.Fprintf(ctx.w, ", ")
|
||||
}
|
||||
}
|
||||
fmt.Print("]")
|
||||
fmt.Fprint(ctx.w, "]")
|
||||
|
||||
case "Object":
|
||||
// Print values from bignumber.js as regular numbers.
|
||||
if ctx.isBigNumber(obj) {
|
||||
numberColor.Print(toString(obj))
|
||||
fmt.Fprint(ctx.w, NumberColor("%s", toString(obj)))
|
||||
return
|
||||
}
|
||||
// Otherwise, print all fields indented, but stop if we're too deep.
|
||||
keys := ctx.fields(obj)
|
||||
if len(keys) == 0 {
|
||||
fmt.Print("{}")
|
||||
fmt.Fprint(ctx.w, "{}")
|
||||
return
|
||||
}
|
||||
if level > maxPrettyPrintLevel {
|
||||
fmt.Print("{...}")
|
||||
fmt.Fprint(ctx.w, "{...}")
|
||||
return
|
||||
}
|
||||
fmt.Println("{")
|
||||
fmt.Fprintln(ctx.w, "{")
|
||||
for i, k := range keys {
|
||||
v, _ := obj.Get(k)
|
||||
fmt.Printf("%s%s: ", ctx.indent(level+1), k)
|
||||
fmt.Fprintf(ctx.w, "%s%s: ", ctx.indent(level+1), k)
|
||||
ctx.printValue(v, level+1, false)
|
||||
if i < len(keys)-1 {
|
||||
fmt.Printf(",")
|
||||
fmt.Fprintf(ctx.w, ",")
|
||||
}
|
||||
fmt.Println()
|
||||
fmt.Fprintln(ctx.w)
|
||||
}
|
||||
if inArray {
|
||||
level--
|
||||
}
|
||||
fmt.Printf("%s}", ctx.indent(level))
|
||||
fmt.Fprintf(ctx.w, "%s}", ctx.indent(level))
|
||||
|
||||
case "Function":
|
||||
// Use toString() to display the argument list if possible.
|
||||
if robj, err := obj.Call("toString"); err != nil {
|
||||
functionColor.Print("function()")
|
||||
fmt.Fprint(ctx.w, FunctionColor("function()"))
|
||||
} else {
|
||||
desc := strings.Trim(strings.Split(robj.String(), "{")[0], " \t\n")
|
||||
desc = strings.Replace(desc, " (", "(", 1)
|
||||
functionColor.Print(desc)
|
||||
fmt.Fprint(ctx.w, FunctionColor("%s", desc))
|
||||
}
|
||||
|
||||
case "RegExp":
|
||||
stringColor.Print(toString(obj))
|
||||
fmt.Fprint(ctx.w, StringColor("%s", toString(obj)))
|
||||
|
||||
default:
|
||||
if v, _ := obj.Get("toString"); v.IsFunction() && level <= maxPrettyPrintLevel {
|
||||
s, _ := obj.Call("toString")
|
||||
fmt.Printf("<%s %s>", obj.Class(), s.String())
|
||||
fmt.Fprintf(ctx.w, "<%s %s>", obj.Class(), s.String())
|
||||
} else {
|
||||
fmt.Printf("<%s>", obj.Class())
|
||||
fmt.Fprintf(ctx.w, "<%s>", obj.Class())
|
||||
}
|
||||
}
|
||||
}
|
@ -18,43 +18,17 @@
|
||||
package web3ext
|
||||
|
||||
var Modules = map[string]string{
|
||||
"txpool": TxPool_JS,
|
||||
"admin": Admin_JS,
|
||||
"eth": Eth_JS,
|
||||
"miner": Miner_JS,
|
||||
"debug": Debug_JS,
|
||||
"net": Net_JS,
|
||||
"admin": Admin_JS,
|
||||
"debug": Debug_JS,
|
||||
"eth": Eth_JS,
|
||||
"miner": Miner_JS,
|
||||
"net": Net_JS,
|
||||
"personal": Personal_JS,
|
||||
"rpc": RPC_JS,
|
||||
"shh": Shh_JS,
|
||||
"txpool": TxPool_JS,
|
||||
}
|
||||
|
||||
const TxPool_JS = `
|
||||
web3._extend({
|
||||
property: 'txpool',
|
||||
methods:
|
||||
[
|
||||
],
|
||||
properties:
|
||||
[
|
||||
new web3._extend.Property({
|
||||
name: 'content',
|
||||
getter: 'txpool_content'
|
||||
}),
|
||||
new web3._extend.Property({
|
||||
name: 'inspect',
|
||||
getter: 'txpool_inspect'
|
||||
}),
|
||||
new web3._extend.Property({
|
||||
name: 'status',
|
||||
getter: 'txpool_status',
|
||||
outputFormatter: function(status) {
|
||||
status.pending = web3._extend.utils.toDecimal(status.pending);
|
||||
status.queued = web3._extend.utils.toDecimal(status.queued);
|
||||
return status;
|
||||
}
|
||||
})
|
||||
]
|
||||
});
|
||||
`
|
||||
|
||||
const Admin_JS = `
|
||||
web3._extend({
|
||||
property: 'admin',
|
||||
@ -175,74 +149,6 @@ web3._extend({
|
||||
});
|
||||
`
|
||||
|
||||
const Eth_JS = `
|
||||
web3._extend({
|
||||
property: 'eth',
|
||||
methods:
|
||||
[
|
||||
new web3._extend.Method({
|
||||
name: 'sign',
|
||||
call: 'eth_sign',
|
||||
params: 2,
|
||||
inputFormatter: [web3._extend.formatters.inputAddressFormatter, null]
|
||||
}),
|
||||
new web3._extend.Method({
|
||||
name: 'resend',
|
||||
call: 'eth_resend',
|
||||
params: 3,
|
||||
inputFormatter: [web3._extend.formatters.inputTransactionFormatter, web3._extend.utils.fromDecimal, web3._extend.utils.fromDecimal]
|
||||
}),
|
||||
new web3._extend.Method({
|
||||
name: 'getNatSpec',
|
||||
call: 'eth_getNatSpec',
|
||||
params: 1,
|
||||
inputFormatter: [web3._extend.formatters.inputTransactionFormatter]
|
||||
}),
|
||||
new web3._extend.Method({
|
||||
name: 'signTransaction',
|
||||
call: 'eth_signTransaction',
|
||||
params: 1,
|
||||
inputFormatter: [web3._extend.formatters.inputTransactionFormatter]
|
||||
}),
|
||||
new web3._extend.Method({
|
||||
name: 'submitTransaction',
|
||||
call: 'eth_submitTransaction',
|
||||
params: 1,
|
||||
inputFormatter: [web3._extend.formatters.inputTransactionFormatter]
|
||||
})
|
||||
],
|
||||
properties:
|
||||
[
|
||||
new web3._extend.Property({
|
||||
name: 'pendingTransactions',
|
||||
getter: 'eth_pendingTransactions',
|
||||
outputFormatter: function(txs) {
|
||||
var formatted = [];
|
||||
for (var i = 0; i < txs.length; i++) {
|
||||
formatted.push(web3._extend.formatters.outputTransactionFormatter(txs[i]));
|
||||
formatted[i].blockHash = null;
|
||||
}
|
||||
return formatted;
|
||||
}
|
||||
})
|
||||
]
|
||||
});
|
||||
`
|
||||
|
||||
const Net_JS = `
|
||||
web3._extend({
|
||||
property: 'net',
|
||||
methods: [],
|
||||
properties:
|
||||
[
|
||||
new web3._extend.Property({
|
||||
name: 'version',
|
||||
getter: 'net_version'
|
||||
})
|
||||
]
|
||||
});
|
||||
`
|
||||
|
||||
const Debug_JS = `
|
||||
web3._extend({
|
||||
property: 'debug',
|
||||
@ -351,18 +257,18 @@ web3._extend({
|
||||
params: 0
|
||||
}),
|
||||
new web3._extend.Method({
|
||||
name: 'trace',
|
||||
call: 'debug_trace',
|
||||
name: 'goTrace',
|
||||
call: 'debug_goTrace',
|
||||
params: 2
|
||||
}),
|
||||
new web3._extend.Method({
|
||||
name: 'startTrace',
|
||||
call: 'debug_startTrace',
|
||||
name: 'startGoTrace',
|
||||
call: 'debug_startGoTrace',
|
||||
params: 1
|
||||
}),
|
||||
new web3._extend.Method({
|
||||
name: 'stopTrace',
|
||||
call: 'debug_stopTrace',
|
||||
name: 'stopGoTrace',
|
||||
call: 'debug_stopGoTrace',
|
||||
params: 0
|
||||
}),
|
||||
new web3._extend.Method({
|
||||
@ -395,6 +301,60 @@ web3._extend({
|
||||
});
|
||||
`
|
||||
|
||||
const Eth_JS = `
|
||||
web3._extend({
|
||||
property: 'eth',
|
||||
methods:
|
||||
[
|
||||
new web3._extend.Method({
|
||||
name: 'sign',
|
||||
call: 'eth_sign',
|
||||
params: 2,
|
||||
inputFormatter: [web3._extend.formatters.inputAddressFormatter, null]
|
||||
}),
|
||||
new web3._extend.Method({
|
||||
name: 'resend',
|
||||
call: 'eth_resend',
|
||||
params: 3,
|
||||
inputFormatter: [web3._extend.formatters.inputTransactionFormatter, web3._extend.utils.fromDecimal, web3._extend.utils.fromDecimal]
|
||||
}),
|
||||
new web3._extend.Method({
|
||||
name: 'getNatSpec',
|
||||
call: 'eth_getNatSpec',
|
||||
params: 1,
|
||||
inputFormatter: [web3._extend.formatters.inputTransactionFormatter]
|
||||
}),
|
||||
new web3._extend.Method({
|
||||
name: 'signTransaction',
|
||||
call: 'eth_signTransaction',
|
||||
params: 1,
|
||||
inputFormatter: [web3._extend.formatters.inputTransactionFormatter]
|
||||
}),
|
||||
new web3._extend.Method({
|
||||
name: 'submitTransaction',
|
||||
call: 'eth_submitTransaction',
|
||||
params: 1,
|
||||
inputFormatter: [web3._extend.formatters.inputTransactionFormatter]
|
||||
})
|
||||
],
|
||||
properties:
|
||||
[
|
||||
new web3._extend.Property({
|
||||
name: 'pendingTransactions',
|
||||
getter: 'eth_pendingTransactions',
|
||||
outputFormatter: function(txs) {
|
||||
var formatted = [];
|
||||
for (var i = 0; i < txs.length; i++) {
|
||||
formatted.push(web3._extend.formatters.outputTransactionFormatter(txs[i]));
|
||||
formatted[i].blockHash = null;
|
||||
}
|
||||
return formatted;
|
||||
}
|
||||
})
|
||||
]
|
||||
});
|
||||
`
|
||||
|
||||
const Miner_JS = `
|
||||
web3._extend({
|
||||
property: 'miner',
|
||||
@ -425,7 +385,7 @@ web3._extend({
|
||||
name: 'setGasPrice',
|
||||
call: 'miner_setGasPrice',
|
||||
params: 1,
|
||||
inputFormatter: [web3._extend.utils.fromDecial]
|
||||
inputFormatter: [web3._extend.utils.fromDecimal]
|
||||
}),
|
||||
new web3._extend.Method({
|
||||
name: 'startAutoDAG',
|
||||
@ -448,6 +408,54 @@ web3._extend({
|
||||
});
|
||||
`
|
||||
|
||||
const Net_JS = `
|
||||
web3._extend({
|
||||
property: 'net',
|
||||
methods: [],
|
||||
properties:
|
||||
[
|
||||
new web3._extend.Property({
|
||||
name: 'version',
|
||||
getter: 'net_version'
|
||||
})
|
||||
]
|
||||
});
|
||||
`
|
||||
|
||||
const Personal_JS = `
|
||||
web3._extend({
|
||||
property: 'personal',
|
||||
methods:
|
||||
[
|
||||
new web3._extend.Method({
|
||||
name: 'importRawKey',
|
||||
call: 'personal_importRawKey',
|
||||
params: 2
|
||||
}),
|
||||
new web3._extend.Method({
|
||||
name: 'signAndSendTransaction',
|
||||
call: 'personal_signAndSendTransaction',
|
||||
params: 2,
|
||||
inputFormatter: [web3._extend.formatters.inputTransactionFormatter, null]
|
||||
})
|
||||
]
|
||||
});
|
||||
`
|
||||
|
||||
const RPC_JS = `
|
||||
web3._extend({
|
||||
property: 'rpc',
|
||||
methods: [],
|
||||
properties:
|
||||
[
|
||||
new web3._extend.Property({
|
||||
name: 'modules',
|
||||
getter: 'rpc_modules'
|
||||
})
|
||||
]
|
||||
});
|
||||
`
|
||||
|
||||
const Shh_JS = `
|
||||
web3._extend({
|
||||
property: 'shh',
|
||||
@ -456,7 +464,35 @@ web3._extend({
|
||||
[
|
||||
new web3._extend.Property({
|
||||
name: 'version',
|
||||
getter: 'shh_version'
|
||||
getter: 'shh_version',
|
||||
outputFormatter: web3._extend.utils.toDecimal
|
||||
})
|
||||
]
|
||||
});
|
||||
`
|
||||
|
||||
const TxPool_JS = `
|
||||
web3._extend({
|
||||
property: 'txpool',
|
||||
methods: [],
|
||||
properties:
|
||||
[
|
||||
new web3._extend.Property({
|
||||
name: 'content',
|
||||
getter: 'txpool_content'
|
||||
}),
|
||||
new web3._extend.Property({
|
||||
name: 'inspect',
|
||||
getter: 'txpool_inspect'
|
||||
}),
|
||||
new web3._extend.Property({
|
||||
name: 'status',
|
||||
getter: 'txpool_status',
|
||||
outputFormatter: function(status) {
|
||||
status.pending = web3._extend.utils.toDecimal(status.pending);
|
||||
status.queued = web3._extend.utils.toDecimal(status.queued);
|
||||
return status;
|
||||
}
|
||||
})
|
||||
]
|
||||
});
|
||||
|
@ -94,10 +94,13 @@ type worker struct {
|
||||
|
||||
mu sync.Mutex
|
||||
|
||||
// update loop
|
||||
mux *event.TypeMux
|
||||
events event.Subscription
|
||||
wg sync.WaitGroup
|
||||
|
||||
agents map[Agent]struct{}
|
||||
recv chan *Result
|
||||
mux *event.TypeMux
|
||||
quit chan struct{}
|
||||
pow pow.PoW
|
||||
|
||||
eth core.Backend
|
||||
@ -138,13 +141,13 @@ func newWorker(config *core.ChainConfig, coinbase common.Address, eth core.Backe
|
||||
possibleUncles: make(map[common.Hash]*types.Block),
|
||||
coinbase: coinbase,
|
||||
txQueue: make(map[common.Hash]*types.Transaction),
|
||||
quit: make(chan struct{}),
|
||||
agents: make(map[Agent]struct{}),
|
||||
fullValidation: false,
|
||||
}
|
||||
worker.events = worker.mux.Subscribe(core.ChainHeadEvent{}, core.ChainSideEvent{}, core.TxPreEvent{})
|
||||
go worker.update()
|
||||
go worker.wait()
|
||||
|
||||
go worker.wait()
|
||||
worker.commitNewWork()
|
||||
|
||||
return worker
|
||||
@ -184,9 +187,10 @@ func (self *worker) start() {
|
||||
}
|
||||
|
||||
func (self *worker) stop() {
|
||||
self.wg.Wait()
|
||||
|
||||
self.mu.Lock()
|
||||
defer self.mu.Unlock()
|
||||
|
||||
if atomic.LoadInt32(&self.mining) == 1 {
|
||||
// Stop all agents.
|
||||
for agent := range self.agents {
|
||||
@ -217,36 +221,22 @@ func (self *worker) unregister(agent Agent) {
|
||||
}
|
||||
|
||||
func (self *worker) update() {
|
||||
eventSub := self.mux.Subscribe(core.ChainHeadEvent{}, core.ChainSideEvent{}, core.TxPreEvent{})
|
||||
defer eventSub.Unsubscribe()
|
||||
|
||||
eventCh := eventSub.Chan()
|
||||
for {
|
||||
select {
|
||||
case event, ok := <-eventCh:
|
||||
if !ok {
|
||||
// Event subscription closed, set the channel to nil to stop spinning
|
||||
eventCh = nil
|
||||
continue
|
||||
for event := range self.events.Chan() {
|
||||
// A real event arrived, process interesting content
|
||||
switch ev := event.Data.(type) {
|
||||
case core.ChainHeadEvent:
|
||||
self.commitNewWork()
|
||||
case core.ChainSideEvent:
|
||||
self.uncleMu.Lock()
|
||||
self.possibleUncles[ev.Block.Hash()] = ev.Block
|
||||
self.uncleMu.Unlock()
|
||||
case core.TxPreEvent:
|
||||
// Apply transaction to the pending state if we're not mining
|
||||
if atomic.LoadInt32(&self.mining) == 0 {
|
||||
self.currentMu.Lock()
|
||||
self.current.commitTransactions(self.mux, types.Transactions{ev.Tx}, self.gasPrice, self.chain)
|
||||
self.currentMu.Unlock()
|
||||
}
|
||||
// A real event arrived, process interesting content
|
||||
switch ev := event.Data.(type) {
|
||||
case core.ChainHeadEvent:
|
||||
self.commitNewWork()
|
||||
case core.ChainSideEvent:
|
||||
self.uncleMu.Lock()
|
||||
self.possibleUncles[ev.Block.Hash()] = ev.Block
|
||||
self.uncleMu.Unlock()
|
||||
case core.TxPreEvent:
|
||||
// Apply transaction to the pending state if we're not mining
|
||||
if atomic.LoadInt32(&self.mining) == 0 {
|
||||
self.currentMu.Lock()
|
||||
self.current.commitTransactions(self.mux, types.Transactions{ev.Tx}, self.gasPrice, self.chain)
|
||||
self.currentMu.Unlock()
|
||||
}
|
||||
}
|
||||
case <-self.quit:
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
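The worker.update rework above drops the manual quit channel and simply ranges over the subscription's channel, type-switching on each payload and exiting when the channel is closed on unsubscribe. A dependency-free sketch of the same shape; the event types and names are illustrative, not the real core events.

```go
package main

import "fmt"

// Illustrative payloads standing in for ChainHeadEvent / TxPreEvent.
type chainHeadEvent struct{ number uint64 }
type txPreEvent struct{ hash string }

// update drains events until the channel is closed (the real code ranges over
// events.Chan() and the loop ends when the subscription is unsubscribed).
func update(events <-chan interface{}) {
	for ev := range events {
		switch ev := ev.(type) {
		case chainHeadEvent:
			fmt.Println("new head at block", ev.number) // commitNewWork() in the worker
		case txPreEvent:
			fmt.Println("pending tx", ev.hash) // applied to pending state when not mining
		}
	}
	fmt.Println("subscription closed, update loop done")
}

func main() {
	events := make(chan interface{}, 2)
	events <- chainHeadEvent{number: 100}
	events <- txPreEvent{hash: "0xabc"}
	close(events)
	update(events)
}
```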
12 node/api.go
@ -68,7 +68,11 @@ func (api *PrivateAdminAPI) StartRPC(host *string, port *rpc.HexNumber, cors *st
|
||||
}
|
||||
|
||||
if host == nil {
|
||||
host = &api.node.httpHost
|
||||
h := common.DefaultHTTPHost
|
||||
if api.node.httpHost != "" {
|
||||
h = api.node.httpHost
|
||||
}
|
||||
host = &h
|
||||
}
|
||||
if port == nil {
|
||||
port = rpc.NewHexNumber(api.node.httpPort)
|
||||
@ -113,7 +117,11 @@ func (api *PrivateAdminAPI) StartWS(host *string, port *rpc.HexNumber, allowedOr
|
||||
}
|
||||
|
||||
if host == nil {
|
||||
host = &api.node.wsHost
|
||||
h := common.DefaultWSHost
|
||||
if api.node.wsHost != "" {
|
||||
h = api.node.wsHost
|
||||
}
|
||||
host = &h
|
||||
}
|
||||
if port == nil {
|
||||
port = rpc.NewHexNumber(api.node.wsPort)
|
||||
|
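Both StartRPC and StartWS above now fall back to a package-level default when the node was configured with an empty host. A tiny sketch of that fallback; the helper name is made up for illustration.

```go
package main

import "fmt"

// hostOrDefault returns the configured host, or def when the node was started
// without an HTTP/WS host (mirrors the nil-argument fallback added above).
func hostOrDefault(configured, def string) string {
	if configured != "" {
		return configured
	}
	return def
}

func main() {
	fmt.Println(hostOrDefault("", "localhost"))        // localhost
	fmt.Println(hostOrDefault("0.0.0.0", "localhost")) // 0.0.0.0
}
```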
10 node/node.go
@ -49,7 +49,7 @@ type Node struct {
|
||||
datadir string // Path to the currently used data directory
|
||||
eventmux *event.TypeMux // Event multiplexer used between the services of a stack
|
||||
|
||||
serverConfig *p2p.Server // Configuration of the underlying P2P networking layer
|
||||
serverConfig p2p.Config
|
||||
server *p2p.Server // Currently running P2P networking layer
|
||||
|
||||
serviceFuncs []ServiceConstructor // Service constructors (in dependency order)
|
||||
@ -97,7 +97,7 @@ func New(conf *Config) (*Node, error) {
|
||||
}
|
||||
return &Node{
|
||||
datadir: conf.DataDir,
|
||||
serverConfig: &p2p.Server{
|
||||
serverConfig: p2p.Config{
|
||||
PrivateKey: conf.NodeKey(),
|
||||
Name: conf.Name,
|
||||
Discovery: !conf.NoDiscovery,
|
||||
@ -151,9 +151,7 @@ func (n *Node) Start() error {
|
||||
return ErrNodeRunning
|
||||
}
|
||||
// Otherwise copy and specialize the P2P configuration
|
||||
running := new(p2p.Server)
|
||||
*running = *n.serverConfig
|
||||
|
||||
running := &p2p.Server{Config: n.serverConfig}
|
||||
services := make(map[reflect.Type]Service)
|
||||
for _, constructor := range n.serviceFuncs {
|
||||
// Create a new context for the particular service
|
||||
@ -311,7 +309,7 @@ func (n *Node) startIPC(apis []rpc.API) error {
|
||||
glog.V(logger.Error).Infof("IPC accept failed: %v", err)
|
||||
continue
|
||||
}
|
||||
go handler.ServeCodec(rpc.NewJSONCodec(conn), rpc.OptionMethodInvocation | rpc.OptionSubscriptions)
|
||||
go handler.ServeCodec(rpc.NewJSONCodec(conn), rpc.OptionMethodInvocation|rpc.OptionSubscriptions)
|
||||
}
|
||||
}()
|
||||
// All listeners booted successfully
|
||||
|
@ -68,7 +68,7 @@ type ServiceConstructor func(ctx *ServiceContext) (Service, error)
|
||||
// - Restart logic is not required as the node will create a fresh instance
|
||||
// every time a service is started.
|
||||
type Service interface {
|
||||
// Protocol retrieves the P2P protocols the service wishes to start.
|
||||
// Protocols retrieves the P2P protocols the service wishes to start.
|
||||
Protocols() []p2p.Protocol
|
||||
|
||||
// APIs retrieves the list of RPC descriptors the service provides
|
||||
|
@ -478,7 +478,8 @@ func TestDialResolve(t *testing.T) {
|
||||
}
|
||||
|
||||
// Now run the task, it should resolve the ID once.
|
||||
srv := &Server{ntab: table, Dialer: &net.Dialer{Deadline: time.Now().Add(-5 * time.Minute)}}
|
||||
config := Config{Dialer: &net.Dialer{Deadline: time.Now().Add(-5 * time.Minute)}}
|
||||
srv := &Server{ntab: table, Config: config}
|
||||
tasks[0].Do(srv)
|
||||
if !reflect.DeepEqual(table.resolveCalls, []discover.NodeID{dest.ID}) {
|
||||
t.Fatalf("wrong resolve calls, got %v", table.resolveCalls)
|
||||
|
@ -25,6 +25,7 @@ package discover
|
||||
import (
|
||||
"crypto/rand"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net"
|
||||
"sort"
|
||||
@ -457,6 +458,9 @@ func (tab *Table) bondall(nodes []*Node) (result []*Node) {
|
||||
// If pinged is true, the remote node has just pinged us and one half
|
||||
// of the process can be skipped.
|
||||
func (tab *Table) bond(pinged bool, id NodeID, addr *net.UDPAddr, tcpPort uint16) (*Node, error) {
|
||||
if id == tab.self.ID {
|
||||
return nil, errors.New("is self")
|
||||
}
|
||||
// Retrieve a previously known node and any recent findnode failures
|
||||
node, fails := tab.db.node(id), 0
|
||||
if node != nil {
|
||||
|
@ -54,12 +54,8 @@ var errServerStopped = errors.New("server stopped")
|
||||
|
||||
var srvjslog = logger.NewJsonLogger()
|
||||
|
||||
// Server manages all peer connections.
|
||||
//
|
||||
// The fields of Server are used as configuration parameters.
|
||||
// You should set them before starting the Server. Fields may not be
|
||||
// modified while the server is running.
|
||||
type Server struct {
|
||||
// Config holds Server options.
|
||||
type Config struct {
|
||||
// This field must be set to a valid secp256k1 private key.
|
||||
PrivateKey *ecdsa.PrivateKey
|
||||
|
||||
@ -120,6 +116,12 @@ type Server struct {
|
||||
|
||||
// If NoDial is true, the server will not dial any peers.
|
||||
NoDial bool
|
||||
}
|
||||
|
||||
// Server manages all peer connections.
|
||||
type Server struct {
|
||||
// Config fields may not be modified while the server is running.
|
||||
Config
|
||||
|
||||
// Hooks for testing. These are useful because we can inhibit
|
||||
// the whole protocol stack.
|
||||
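The hunk above splits the old Server fields into a Config struct that Server embeds, so callers now populate Config and hand it to the server in one piece. A usage sketch matching the construction style shown in the updated tests; the key generation and the absence of protocols are assumptions for brevity.

```go
package main

import (
	"log"

	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/p2p"
)

func main() {
	key, err := crypto.GenerateKey()
	if err != nil {
		log.Fatal(err)
	}
	// Options now live in p2p.Config, which Server embeds; the fields must not
	// be modified while the server is running.
	srv := &p2p.Server{Config: p2p.Config{
		PrivateKey: key,
		Name:       "sketch-node",
		MaxPeers:   10,
		ListenAddr: "127.0.0.1:0",
		NoDial:     true,
	}}
	if err := srv.Start(); err != nil {
		log.Fatal(err)
	}
	defer srv.Stop()
}
```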
@ -398,12 +400,11 @@ type dialer interface {
|
||||
func (srv *Server) run(dialstate dialer) {
|
||||
defer srv.loopWG.Done()
|
||||
var (
|
||||
peers = make(map[discover.NodeID]*Peer)
|
||||
trusted = make(map[discover.NodeID]bool, len(srv.TrustedNodes))
|
||||
|
||||
tasks []task
|
||||
pendingTasks []task
|
||||
peers = make(map[discover.NodeID]*Peer)
|
||||
trusted = make(map[discover.NodeID]bool, len(srv.TrustedNodes))
|
||||
taskdone = make(chan task, maxActiveDialTasks)
|
||||
runningTasks []task
|
||||
queuedTasks []task // tasks that can't run yet
|
||||
)
|
||||
// Put trusted nodes into a map to speed up checks.
|
||||
// Trusted peers are loaded on startup and cannot be
|
||||
@ -412,39 +413,39 @@ func (srv *Server) run(dialstate dialer) {
|
||||
trusted[n.ID] = true
|
||||
}
|
||||
|
||||
// Some task list helpers.
|
||||
// removes t from runningTasks
|
||||
delTask := func(t task) {
|
||||
for i := range tasks {
|
||||
if tasks[i] == t {
|
||||
tasks = append(tasks[:i], tasks[i+1:]...)
|
||||
for i := range runningTasks {
|
||||
if runningTasks[i] == t {
|
||||
runningTasks = append(runningTasks[:i], runningTasks[i+1:]...)
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
scheduleTasks := func(new []task) {
|
||||
pt := append(pendingTasks, new...)
|
||||
start := maxActiveDialTasks - len(tasks)
|
||||
if len(pt) < start {
|
||||
start = len(pt)
|
||||
// starts until max number of active tasks is satisfied
|
||||
startTasks := func(ts []task) (rest []task) {
|
||||
i := 0
|
||||
for ; len(runningTasks) < maxActiveDialTasks && i < len(ts); i++ {
|
||||
t := ts[i]
|
||||
glog.V(logger.Detail).Infoln("new task:", t)
|
||||
go func() { t.Do(srv); taskdone <- t }()
|
||||
runningTasks = append(runningTasks, t)
|
||||
}
|
||||
if start > 0 {
|
||||
tasks = append(tasks, pt[:start]...)
|
||||
for _, t := range pt[:start] {
|
||||
t := t
|
||||
glog.V(logger.Detail).Infoln("new task:", t)
|
||||
go func() { t.Do(srv); taskdone <- t }()
|
||||
}
|
||||
copy(pt, pt[start:])
|
||||
pendingTasks = pt[:len(pt)-start]
|
||||
return ts[i:]
|
||||
}
|
||||
scheduleTasks := func() {
|
||||
// Start from queue first.
|
||||
queuedTasks = append(queuedTasks[:0], startTasks(queuedTasks)...)
|
||||
// Query dialer for new tasks and start as many as possible now.
|
||||
if len(runningTasks) < maxActiveDialTasks {
|
||||
nt := dialstate.newTasks(len(runningTasks)+len(queuedTasks), peers, time.Now())
|
||||
queuedTasks = append(queuedTasks, startTasks(nt)...)
|
||||
}
|
||||
}
|
||||
|
||||
running:
|
||||
for {
|
||||
// Query the dialer for new tasks and launch them.
|
||||
now := time.Now()
|
||||
nt := dialstate.newTasks(len(pendingTasks)+len(tasks), peers, now)
|
||||
scheduleTasks(nt)
|
||||
scheduleTasks()
|
||||
|
||||
select {
|
||||
case <-srv.quit:
|
||||
@ -466,7 +467,7 @@ running:
|
||||
// can update its state and remove it from the active
|
||||
// tasks list.
|
||||
glog.V(logger.Detail).Infoln("<-taskdone:", t)
|
||||
dialstate.taskDone(t, now)
|
||||
dialstate.taskDone(t, time.Now())
|
||||
delTask(t)
|
||||
case c := <-srv.posthandshake:
|
||||
// A connection has passed the encryption handshake so
|
||||
@ -513,7 +514,7 @@ running:
|
||||
// Wait for peers to shut down. Pending connections and tasks are
|
||||
// not handled here and will terminate soon-ish because srv.quit
|
||||
// is closed.
|
||||
glog.V(logger.Detail).Infof("ignoring %d pending tasks at spindown", len(tasks))
|
||||
glog.V(logger.Detail).Infof("ignoring %d pending tasks at spindown", len(runningTasks))
|
||||
for len(peers) > 0 {
|
||||
p := <-srv.delpeer
|
||||
glog.V(logger.Detail).Infoln("<-delpeer (spindown):", p)
|
||||
|
@ -67,11 +67,14 @@ func (c *testTransport) close(err error) {
|
||||
}
|
||||
|
||||
func startTestServer(t *testing.T, id discover.NodeID, pf func(*Peer)) *Server {
|
||||
config := Config{
|
||||
Name: "test",
|
||||
MaxPeers: 10,
|
||||
ListenAddr: "127.0.0.1:0",
|
||||
PrivateKey: newkey(),
|
||||
}
|
||||
server := &Server{
|
||||
Name: "test",
|
||||
MaxPeers: 10,
|
||||
ListenAddr: "127.0.0.1:0",
|
||||
PrivateKey: newkey(),
|
||||
Config: config,
|
||||
newPeerHook: pf,
|
||||
newTransport: func(fd net.Conn) transport { return newTestTransport(id, fd) },
|
||||
}
|
||||
@ -200,10 +203,10 @@ func TestServerTaskScheduling(t *testing.T) {
|
||||
// The Server in this test isn't actually running
|
||||
// because we're only interested in what run does.
|
||||
srv := &Server{
|
||||
MaxPeers: 10,
|
||||
quit: make(chan struct{}),
|
||||
ntab: fakeTable{},
|
||||
running: true,
|
||||
Config: Config{MaxPeers: 10},
|
||||
quit: make(chan struct{}),
|
||||
ntab: fakeTable{},
|
||||
running: true,
|
||||
}
|
||||
srv.loopWG.Add(1)
|
||||
go func() {
|
||||
@ -235,6 +238,56 @@ func TestServerTaskScheduling(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
// This test checks that Server doesn't drop tasks,
|
||||
// even if newTasks returns more than the maximum number of tasks.
|
||||
func TestServerManyTasks(t *testing.T) {
|
||||
alltasks := make([]task, 300)
|
||||
for i := range alltasks {
|
||||
alltasks[i] = &testTask{index: i}
|
||||
}
|
||||
|
||||
var (
|
||||
srv = &Server{quit: make(chan struct{}), ntab: fakeTable{}, running: true}
|
||||
done = make(chan *testTask)
|
||||
start, end = 0, 0
|
||||
)
|
||||
defer srv.Stop()
|
||||
srv.loopWG.Add(1)
|
||||
go srv.run(taskgen{
|
||||
newFunc: func(running int, peers map[discover.NodeID]*Peer) []task {
|
||||
start, end = end, end+maxActiveDialTasks+10
|
||||
if end > len(alltasks) {
|
||||
end = len(alltasks)
|
||||
}
|
||||
return alltasks[start:end]
|
||||
},
|
||||
doneFunc: func(tt task) {
|
||||
done <- tt.(*testTask)
|
||||
},
|
||||
})
|
||||
|
||||
doneset := make(map[int]bool)
|
||||
timeout := time.After(2 * time.Second)
|
||||
for len(doneset) < len(alltasks) {
|
||||
select {
|
||||
case tt := <-done:
|
||||
if doneset[tt.index] {
|
||||
t.Errorf("task %d got done more than once", tt.index)
|
||||
} else {
|
||||
doneset[tt.index] = true
|
||||
}
|
||||
case <-timeout:
|
||||
t.Errorf("%d of %d tasks got done within 2s", len(doneset), len(alltasks))
|
||||
for i := 0; i < len(alltasks); i++ {
|
||||
if !doneset[i] {
|
||||
t.Logf("task %d not done", i)
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type taskgen struct {
	newFunc  func(running int, peers map[discover.NodeID]*Peer) []task
	doneFunc func(task)
@ -264,10 +317,12 @@ func (t *testTask) Do(srv *Server) {
func TestServerAtCap(t *testing.T) {
	trustedID := randomID()
	srv := &Server{
		PrivateKey:   newkey(),
		MaxPeers:     10,
		NoDial:       true,
		TrustedNodes: []*discover.Node{{ID: trustedID}},
		Config: Config{
			PrivateKey:   newkey(),
			MaxPeers:     10,
			NoDial:       true,
			TrustedNodes: []*discover.Node{{ID: trustedID}},
		},
	}
	if err := srv.Start(); err != nil {
		t.Fatalf("could not start: %v", err)
@ -365,10 +420,12 @@ func TestServerSetupConn(t *testing.T) {

	for i, test := range tests {
		srv := &Server{
			PrivateKey: srvkey,
			MaxPeers:   10,
			NoDial:     true,
			Protocols:  []Protocol{discard},
			Config: Config{
				PrivateKey: srvkey,
				MaxPeers:   10,
				NoDial:     true,
				Protocols:  []Protocol{discard},
			},
			newTransport: func(fd net.Conn) transport { return test.tt },
		}
		if !test.dontstart {
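The server_test.go hunks above all follow the same pattern: the user-facing Server fields move into an embedded Config value. As a minimal sketch of what a caller looks like after this refactor, assuming only the field names visible in the diff (the concrete values, the client name and the listen address are illustrative):

// Sketch only: constructing a p2p.Server via the embedded Config struct,
// based on the fields exercised in the tests above.
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/p2p"
)

func main() {
	key, err := crypto.GenerateKey()
	if err != nil {
		panic(err)
	}
	// All user-facing options now live in the Config struct.
	srv := &p2p.Server{Config: p2p.Config{
		PrivateKey: key,           // node identity key
		MaxPeers:   10,            // connection cap, as in the tests above
		Name:       "example",     // illustrative client name
		ListenAddr: "127.0.0.1:0", // illustrative: bind to a random local port
	}}
	if err := srv.Start(); err != nil {
		panic(err)
	}
	defer srv.Stop()
	fmt.Println("devp2p server started")
}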
432
release/contract.go
Normal file
432
release/contract.go
Normal file
File diff suppressed because one or more lines are too long
249
release/contract.sol
Normal file
249
release/contract.sol
Normal file
@ -0,0 +1,249 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// ReleaseOracle is an Ethereum contract to store the current and previous
// versions of the go-ethereum implementation. Its goal is to allow Geth to
// check for new releases automatically without the need to consult a central
// repository.
//
// The contract takes a vote-based approach both on assigning authorised signers
// and on signing off on new Geth releases.
//
// Note, when a signer is demoted, the currently pending release is auto-nuked.
// The reason is to prevent surprises where a demotion tilts the votes in favor
// of one voter party, pushing out a new release as a consequence of a simple
// demotion.
contract ReleaseOracle {
  // Votes is an internal data structure to count votes on a specific proposal
  struct Votes {
    address[] pass; // List of signers voting to pass a proposal
    address[] fail; // List of signers voting to fail a proposal
  }

  // Version is the version details of a particular Geth release
  struct Version {
    uint32  major;  // Major version component of the release
    uint32  minor;  // Minor version component of the release
    uint32  patch;  // Patch version component of the release
    bytes20 commit; // Git SHA1 commit hash of the release

    uint64  time;  // Timestamp of the release approval
    Votes   votes; // Votes that passed this release
  }

  // Oracle authorization details
  mapping(address => bool) authorised; // Set of accounts allowed to vote on updating the contract
  address[]                voters;     // List of addresses currently accepted as signers

  // Various proposals being voted on
  mapping(address => Votes) authProps; // Currently running user authorization proposals
  address[]                 authPend;  // List of addresses being voted on (map indexes)

  Version   verProp;  // Currently proposed release being voted on
  Version[] releases; // All the positively voted releases

  // isSigner is a modifier to authorize contract transactions.
  modifier isSigner() {
    if (authorised[msg.sender]) {
      _
    }
  }

  // Constructor to assign the initial set of signers.
  function ReleaseOracle(address[] signers) {
    // If no signers were specified, assign the creator as the sole signer
    if (signers.length == 0) {
      authorised[msg.sender] = true;
      voters.push(msg.sender);
      return;
    }
    // Otherwise assign the individual signers one by one
    for (uint i = 0; i < signers.length; i++) {
      authorised[signers[i]] = true;
      voters.push(signers[i]);
    }
  }

  // signers is an accessor method to retrieve all the signers (the public accessor
  // generates an indexed one, not a retrieve-all version).
  function signers() constant returns(address[]) {
    return voters;
  }

  // authProposals retrieves the list of addresses that authorization proposals
  // are currently being voted on.
  function authProposals() constant returns(address[]) {
    return authPend;
  }

  // authVotes retrieves the current authorization votes for a particular user
  // to promote him into the list of signers, or demote him from there.
  function authVotes(address user) constant returns(address[] promote, address[] demote) {
    return (authProps[user].pass, authProps[user].fail);
  }

  // currentVersion retrieves the semantic version, commit hash and release time
  // of the currently voted active release.
  function currentVersion() constant returns (uint32 major, uint32 minor, uint32 patch, bytes20 commit, uint time) {
    if (releases.length == 0) {
      return (0, 0, 0, 0, 0);
    }
    var release = releases[releases.length - 1];

    return (release.major, release.minor, release.patch, release.commit, release.time);
  }

  // proposedVersion retrieves the semantic version, commit hash and the current
  // votes for the next proposed release.
  function proposedVersion() constant returns (uint32 major, uint32 minor, uint32 patch, bytes20 commit, address[] pass, address[] fail) {
    return (verProp.major, verProp.minor, verProp.patch, verProp.commit, verProp.votes.pass, verProp.votes.fail);
  }

  // promote pitches in on a voting campaign to promote a new user to a signer
  // position.
  function promote(address user) {
    updateSigner(user, true);
  }

  // demote pitches in on a voting campaign to demote an authorised user from
  // its signer position.
  function demote(address user) {
    updateSigner(user, false);
  }

  // release votes for a particular version to be included as the next release.
  function release(uint32 major, uint32 minor, uint32 patch, bytes20 commit) {
    updateRelease(major, minor, patch, commit, true);
  }

  // nuke votes for the currently proposed version to not be included as the next
  // release. Nuking doesn't require a specific version number for simplicity.
  function nuke() {
    updateRelease(0, 0, 0, 0, false);
  }

  // updateSigner marks a vote for changing the status of an Ethereum user, either
  // for or against the user being an authorised signer.
  function updateSigner(address user, bool authorize) internal isSigner {
    // Gather the current votes and ensure we don't double vote
    Votes votes = authProps[user];
    for (uint i = 0; i < votes.pass.length; i++) {
      if (votes.pass[i] == msg.sender) {
        return;
      }
    }
    for (i = 0; i < votes.fail.length; i++) {
      if (votes.fail[i] == msg.sender) {
        return;
      }
    }
    // If no authorization proposal is open, add the user to the index for later lookups
    if (votes.pass.length == 0 && votes.fail.length == 0) {
      authPend.push(user);
    }
    // Cast the vote and return if the proposal cannot be resolved yet
    if (authorize) {
      votes.pass.push(msg.sender);
      if (votes.pass.length <= voters.length / 2) {
        return;
      }
    } else {
      votes.fail.push(msg.sender);
      if (votes.fail.length <= voters.length / 2) {
        return;
      }
    }
    // Proposal resolved in our favor, execute whatever we voted on
    if (authorize && !authorised[user]) {
      authorised[user] = true;
      voters.push(user);
    } else if (!authorize && authorised[user]) {
      authorised[user] = false;

      for (i = 0; i < voters.length; i++) {
        if (voters[i] == user) {
          voters[i] = voters[voters.length - 1];
          voters.length--;

          delete verProp; // Nuke any version proposal (no surprise releases!)
          break;
        }
      }
    }
    // Finally delete the resolved proposal, index and garbage collect
    delete authProps[user];

    for (i = 0; i < authPend.length; i++) {
      if (authPend[i] == user) {
        authPend[i] = authPend[authPend.length - 1];
        authPend.length--;
        break;
      }
    }
  }

  // updateRelease votes for a particular version to be included as the next release,
  // or for the currently proposed release to be nuked out.
  function updateRelease(uint32 major, uint32 minor, uint32 patch, bytes20 commit, bool release) internal isSigner {
    // Skip nuke votes if no proposal is pending
    if (!release && verProp.votes.pass.length == 0) {
      return;
    }
    // Mark a new release if no proposal is pending
    if (verProp.votes.pass.length == 0) {
      verProp.major  = major;
      verProp.minor  = minor;
      verProp.patch  = patch;
      verProp.commit = commit;
    }
    // Make sure positive votes match the current proposal
    if (release && (verProp.major != major || verProp.minor != minor || verProp.patch != patch || verProp.commit != commit)) {
      return;
    }
    // Gather the current votes and ensure we don't double vote
    Votes votes = verProp.votes;
    for (uint i = 0; i < votes.pass.length; i++) {
      if (votes.pass[i] == msg.sender) {
        return;
      }
    }
    for (i = 0; i < votes.fail.length; i++) {
      if (votes.fail[i] == msg.sender) {
        return;
      }
    }
    // Cast the vote and return if the proposal cannot be resolved yet
    if (release) {
      votes.pass.push(msg.sender);
      if (votes.pass.length <= voters.length / 2) {
        return;
      }
    } else {
      votes.fail.push(msg.sender);
      if (votes.fail.length <= voters.length / 2) {
        return;
      }
    }
    // Proposal resolved in our favor, execute whatever we voted on
    if (release) {
      verProp.time = uint64(now);
      releases.push(verProp);
      delete verProp;
    } else {
      delete verProp;
    }
  }
}
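Both updateSigner and updateRelease resolve a proposal with the same majority rule: a vote only executes the change once the matching count strictly exceeds voters.length / 2 (Solidity integer division); anything at or below that threshold just records the vote. A small Go sketch of that arithmetic, for illustration only (votesNeeded is a made-up helper, not part of the contract or its generated bindings):

package main

import "fmt"

// votesNeeded mirrors the contract's check `votes.length <= voters.length / 2`:
// the proposal resolves on the first vote that pushes the count past half of
// the signer set.
func votesNeeded(signers int) int {
	return signers/2 + 1
}

func main() {
	for _, signers := range []int{1, 2, 3, 4, 5} {
		fmt.Printf("%d signer(s): proposal resolves after %d matching vote(s)\n",
			signers, votesNeeded(signers))
	}
}

So with four signers a promotion, demotion or release needs three matching votes, which is also why a resolved demotion deletes any pending release proposal: the remaining voters must re-vote it under the new, smaller signer set.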
Some files were not shown because too many files have changed in this diff