Compare commits
60 Commits
SHA1
0f036f6209
71a89b7c75
ecb8e23e88
058c5fe960
a29bdf547c
44b912ec64
3d69970c15
8b90a49f3d
c046126c87
cd134178f7
4918c820c6
5904d58a96
7c90a2e42e
c39de61a0a
af53767e16
7632acf6b4
9ccb70da7b
8fefee7132
7a4073a758
ab522d3bc7
c45c424073
8d2775e3d7
170036289b
8ebbd9b7c7
7df36e5ec1
5fb29fd45f
90beb6112e
efa2b3da7e
e3b3c298df
3752507a11
a269a713d6
27df30f30f
311f5a0ed1
68ae6b52e9
1776c717bf
0f6e3e873a
7a7a5acc9f
66d74dfb75
b950a2977c
8ea3c88e44
94ad694a26
4c6953606e
fc0638f9d8
4b11f207cb
7e5c49cafa
efcfa2209b
aa18aad7ad
594328c112
2e6b9c141b
b38cea6654
dd083aa34e
f213a9d8e8
6826b2040f
2a3657d8d9
566af6ef92
290e851f57
8f96d66241
4b9de75623
d52a693f80
8241fa5227
2	.github/CONTRIBUTING.md (vendored)
@@ -8,7 +8,7 @@ and help.
## Contributing

If you'd like to contribute to go-ethereum please fork, fix, commit and
send a pull request. Commits who do not comply with the coding standards
send a pull request. Commits which do not comply with the coding standards
are ignored (use gofmt!). If you send pull requests make absolute sure that you
commit on the `develop` branch and that you do not merge to master.
Commits that are directly based on master are simply ignored.
@@ -1,10 +1,12 @@
language: go
go:
  - 1.4.2
  - 1.5.4
  - 1.6.2
install:
  # - go get code.google.com/p/go.tools/cmd/goimports
  # - go get github.com/golang/lint/golint
  # - go get golang.org/x/tools/cmd/vet
  # - go get golang.org/x/tools/cmd/vet
  - go get golang.org/x/tools/cmd/cover
before_script:
  # - gofmt -l -w .
@@ -24,6 +26,6 @@ notifications:
  webhooks:
    urls:
      - https://webhooks.gitter.im/e/e09ccdce1048c5e03445
    on_success: change
    on_success: change
    on_failure: always
    on_start: false
    on_start: false
12	README.md
@@ -36,6 +36,10 @@ Once the dependencies are installed, run

    make geth

or, to build the full suite of utilities:

    make all

## Executables

The go-ethereum project comes with several wrappers/executables found in the `cmd` directory.
@@ -58,14 +62,14 @@ anyone on the internet, and are grateful for even the smallest of fixes!
If you'd like to contribute to go-ethereum, please fork, fix, commit and send a pull request
for the maintainers to review and merge into the main code base. If you wish to submit more
complex changes though, please check up with the core devs first on [our gitter channel](https://gitter.im/ethereum/go-ethereum)
to ensure those changes are in line with the general philosopy of the project and/or get some
to ensure those changes are in line with the general philosophy of the project and/or get some
early feedback which can make both your efforts much lighter as well as our review and merge
procedures quick and simple.

Please make sure your contributions adhere to our coding guidlines:
Please make sure your contributions adhere to our coding guidelines:

* Code must adhere to the official Go [formatting](https://golang.org/doc/effective_go.html#formatting) guidelines (i.e. uses [gofmt](https://golang.org/cmd/gofmt/)).
* Code must be documented adherign to the official Go [commentary](https://golang.org/doc/effective_go.html#commentary) guidelines.
* Code must be documented adhering to the official Go [commentary](https://golang.org/doc/effective_go.html#commentary) guidelines.
* Pull requests need to be based on and opened against the `develop` branch.
* Commit messages should be prefixed with the package(s) they modify.
  * E.g. "eth, rpc: make trace configs optional"
@@ -82,3 +86,5 @@ included in our repository in the `COPYING.LESSER` file.
The go-ethereum binaries (i.e. all code inside of the `cmd` directory) is licensed under the
[GNU General Public License v3.0](http://www.gnu.org/licenses/gpl-3.0.en.html), also included
in our repository in the `COPYING` file.
@@ -238,8 +238,16 @@ func (abi ABI) Unpack(v interface{}, name string, output []byte) error {
		return fmt.Errorf("abi: unmarshalling empty output")
	}

	value := reflect.ValueOf(v).Elem()
	typ := value.Type()
	// make sure the passed value is a pointer
	valueOf := reflect.ValueOf(v)
	if reflect.Ptr != valueOf.Kind() {
		return fmt.Errorf("abi: Unpack(non-pointer %T)", v)
	}

	var (
		value = valueOf.Elem()
		typ   = value.Type()
	)

	if len(method.Outputs) > 1 {
		switch value.Kind() {
@@ -268,6 +276,25 @@ func (abi ABI) Unpack(v interface{}, name string, output []byte) error {
			return fmt.Errorf("abi: cannot marshal tuple in to slice %T (only []interface{} is supported)", v)
		}

		// if the slice already contains values, set those instead of the interface slice itself.
		if value.Len() > 0 {
			if len(method.Outputs) > value.Len() {
				return fmt.Errorf("abi: cannot marshal in to slices of unequal size (require: %v, got: %v)", len(method.Outputs), value.Len())
			}

			for i := 0; i < len(method.Outputs); i++ {
				marshalledValue, err := toGoType(i, method.Outputs[i], output)
				if err != nil {
					return err
				}
				reflectValue := reflect.ValueOf(marshalledValue)
				if err := set(value.Index(i).Elem(), reflectValue, method.Outputs[i]); err != nil {
					return err
				}
			}
			return nil
		}

		// create a new slice and start appending the unmarshalled
		// values to the new interface slice.
		z := reflect.MakeSlice(typ, 0, len(method.Outputs))
@@ -296,34 +323,6 @@ func (abi ABI) Unpack(v interface{}, name string, output []byte) error {
	return nil
}

// set attempts to assign src to dst by either setting, copying or otherwise.
//
// set is a bit more lenient when it comes to assignment and doesn't force an as
// strict ruleset as bare `reflect` does.
func set(dst, src reflect.Value, output Argument) error {
	dstType := dst.Type()
	srcType := src.Type()

	switch {
	case dstType.AssignableTo(src.Type()):
		dst.Set(src)
	case dstType.Kind() == reflect.Array && srcType.Kind() == reflect.Slice:
		if !dstType.Elem().AssignableTo(r_byte) {
			return fmt.Errorf("abi: cannot unmarshal %v in to array of elem %v", src.Type(), dstType.Elem())
		}

		if dst.Len() < output.Type.SliceSize {
			return fmt.Errorf("abi: cannot unmarshal src (len=%d) in to dst (len=%d)", output.Type.SliceSize, dst.Len())
		}
		reflect.Copy(dst, src)
	case dstType.Kind() == reflect.Interface:
		dst.Set(src)
	default:
		return fmt.Errorf("abi: cannot unmarshal %v in to %v", src.Type(), dst.Type())
	}
	return nil
}

func (abi *ABI) UnmarshalJSON(data []byte) error {
	var fields []struct {
		Type string
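Taken together, these hunks change the calling convention of Unpack: the destination must now be a pointer, and a pointer to a pre-populated []interface{} is filled through the pointers it already holds instead of being replaced. A minimal caller-side sketch, assuming the abi package API of this revision; the "ints" ABI and the padded return words mirror the test in the next hunk, and the error text in the comment follows the %T format shown above:

package main

import (
	"fmt"
	"strings"

	"github.com/ethereum/go-ethereum/accounts/abi"
)

func main() {
	def := `[{"type":"function", "name":"ints", "outputs":[{"type":"uint8"}, {"type":"uint8"}]}]`
	parsed, err := abi.JSON(strings.NewReader(def))
	if err != nil {
		panic(err)
	}
	// Two ABI-encoded uint8 return values, 1 and 2, each right-aligned in a 32-byte word.
	ret := make([]byte, 64)
	ret[31], ret[63] = 1, 2

	var a, b uint8
	out := []interface{}{&a, &b}

	// A non-pointer destination is now rejected up front by the reflect.Ptr check.
	fmt.Println(parsed.Unpack(out, "ints", ret)) // abi: Unpack(non-pointer []interface {})

	// A pointer to the pre-filled slice takes the new in-place branch and writes
	// through the stored *uint8 values instead of replacing the slice.
	if err := parsed.Unpack(&out, "ints", ret); err == nil {
		fmt.Println(a, b) // 1 2
	}
}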
@@ -289,6 +289,37 @@ func TestSimpleMethodUnpack(t *testing.T) {
	}
}

func TestUnpackSetInterfaceSlice(t *testing.T) {
	var (
		var1 = new(uint8)
		var2 = new(uint8)
	)
	out := []interface{}{var1, var2}
	abi, err := JSON(strings.NewReader(`[{"type":"function", "name":"ints", "outputs":[{"type":"uint8"}, {"type":"uint8"}]}]`))
	if err != nil {
		t.Fatal(err)
	}
	marshalledReturn := append(pad([]byte{1}, 32, true), pad([]byte{2}, 32, true)...)
	err = abi.Unpack(&out, "ints", marshalledReturn)
	if err != nil {
		t.Fatal(err)
	}
	if *var1 != 1 {
		t.Error("expected var1 to be 1, got", *var1)
	}
	if *var2 != 2 {
		t.Error("expected var2 to be 2, got", *var2)
	}

	out = []interface{}{var1}
	err = abi.Unpack(&out, "ints", marshalledReturn)

	expErr := "abi: cannot marshal in to slices of unequal size (require: 2, got: 1)"
	if err == nil || err.Error() != expErr {
		t.Error("expected err:", expErr, "Got:", err)
	}
}

func TestPack(t *testing.T) {
	for i, test := range []struct {
		typ string
@@ -27,15 +27,16 @@ import (
// ErrNoCode is returned by call and transact operations for which the requested
// recipient contract to operate on does not exist in the state db or does not
// have any code associated with it (i.e. suicided).
//
// Please note, this error string is part of the RPC API and is expected by the
// native contract bindings to signal this particular error. Do not change this
// as it will break all dependent code!
var ErrNoCode = errors.New("no contract code at given address")

// ContractCaller defines the methods needed to allow operating with contract on a read
// only basis.
type ContractCaller interface {
	// HasCode checks if the contract at the given address has any code associated
	// with it or not. This is needed to differentiate between contract internal
	// errors and the local chain being out of sync.
	HasCode(contract common.Address, pending bool) (bool, error)

	// ContractCall executes an Ethereum contract call with the specified data as
	// the input. The pending flag requests execution against the pending block, not
	// the stable head of the chain.
@@ -55,6 +56,11 @@ type ContractTransactor interface {
	// execution of a transaction.
	SuggestGasPrice() (*big.Int, error)

	// HasCode checks if the contract at the given address has any code associated
	// with it or not. This is needed to differentiate between contract internal
	// errors and the local chain being out of sync.
	HasCode(contract common.Address, pending bool) (bool, error)

	// EstimateGasLimit tries to estimate the gas needed to execute a specific
	// transaction based on the current pending state of the backend blockchain.
	// There is no guarantee that this is the true gas limit requirement as other
@@ -68,7 +74,38 @@ type ContractTransactor interface {

// ContractBackend defines the methods needed to allow operating with contract
// on a read-write basis.
//
// This interface is essentially the union of ContractCaller and ContractTransactor
// but due to a bug in the Go compiler (https://github.com/golang/go/issues/6977),
// we cannot simply list it as the two interfaces. The other solution is to add a
// third interface containing the common methods, but that convolutes the user API
// as it introduces yet another parameter to require for initialization.
type ContractBackend interface {
	ContractCaller
	ContractTransactor
	// HasCode checks if the contract at the given address has any code associated
	// with it or not. This is needed to differentiate between contract internal
	// errors and the local chain being out of sync.
	HasCode(contract common.Address, pending bool) (bool, error)

	// ContractCall executes an Ethereum contract call with the specified data as
	// the input. The pending flag requests execution against the pending block, not
	// the stable head of the chain.
	ContractCall(contract common.Address, data []byte, pending bool) ([]byte, error)

	// PendingAccountNonce retrieves the current pending nonce associated with an
	// account.
	PendingAccountNonce(account common.Address) (uint64, error)

	// SuggestGasPrice retrieves the currently suggested gas price to allow a timely
	// execution of a transaction.
	SuggestGasPrice() (*big.Int, error)

	// EstimateGasLimit tries to estimate the gas needed to execute a specific
	// transaction based on the current pending state of the backend blockchain.
	// There is no guarantee that this is the true gas limit requirement as other
	// transactions may be added or removed by miners, but it should provide a basis
	// for setting a reasonable default.
	EstimateGasLimit(sender common.Address, contract *common.Address, value *big.Int, data []byte) (*big.Int, error)

	// SendTransaction injects the transaction into the pending pool for execution.
	SendTransaction(tx *types.Transaction) error
}
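The HasCode probe plus the shared ErrNoCode sentinel let bindings tell "there is no contract at this address (or the local chain is still syncing)" apart from a call that merely returned empty data. A minimal sketch of the intended call-site pattern; the package name and helper are my own, not part of the bind package:

package contractutil

import (
	"github.com/ethereum/go-ethereum/accounts/abi/bind"
	"github.com/ethereum/go-ethereum/common"
)

// callIfDeployed is a hypothetical helper: it probes the latest state with HasCode
// first, so a missing contract surfaces as bind.ErrNoCode instead of an opaque
// empty return value from ContractCall.
func callIfDeployed(backend bind.ContractBackend, contract common.Address, input []byte) ([]byte, error) {
	deployed, err := backend.HasCode(contract, false) // false = latest block, not pending
	if err != nil {
		return nil, err
	}
	if !deployed {
		return nil, bind.ErrNoCode
	}
	return backend.ContractCall(contract, input, false)
}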
@@ -38,6 +38,7 @@ func (*nilBackend) ContractCall(common.Address, []byte, bool) ([]byte, error) {
func (*nilBackend) EstimateGasLimit(common.Address, *common.Address, *big.Int, []byte) (*big.Int, error) {
	panic("not implemented")
}
func (*nilBackend) HasCode(common.Address, bool) (bool, error)         { panic("not implemented") }
func (*nilBackend) SuggestGasPrice() (*big.Int, error)                 { panic("not implemented") }
func (*nilBackend) PendingAccountNonce(common.Address) (uint64, error) { panic("not implemented") }
func (*nilBackend) SendTransaction(*types.Transaction) error           { panic("not implemented") }
@@ -111,6 +111,26 @@ func (b *rpcBackend) request(method string, params []interface{}) (json.RawMessa
	return res.Result, nil
}

// HasCode implements ContractVerifier.HasCode by retrieving any code associated
// with the contract from the remote node, and checking its size.
func (b *rpcBackend) HasCode(contract common.Address, pending bool) (bool, error) {
	// Execute the RPC code retrieval
	block := "latest"
	if pending {
		block = "pending"
	}
	res, err := b.request("eth_getCode", []interface{}{contract.Hex(), block})
	if err != nil {
		return false, err
	}
	var hex string
	if err := json.Unmarshal(res, &hex); err != nil {
		return false, err
	}
	// Convert the response back to a Go byte slice and return
	return len(common.FromHex(hex)) > 0, nil
}

// ContractCall implements ContractCaller.ContractCall, delegating the execution of
// a contract call to the remote node, returning the reply to for local processing.
func (b *rpcBackend) ContractCall(contract common.Address, data []byte, pending bool) ([]byte, error) {
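The remote backend implements the check with nothing more exotic than the standard eth_getCode RPC method: any result longer than "0x" means code is deployed. A standalone sketch of the same check over plain HTTP JSON-RPC, independent of the rpcBackend internals; the endpoint is a placeholder and the address reuses the test coinbase that appears later in this diff:

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

// hasCode asks a node for the code at "contract" in the given block ("latest" or
// "pending") and reports whether anything is deployed there.
func hasCode(endpoint, contract, block string) (bool, error) {
	req := map[string]interface{}{
		"jsonrpc": "2.0",
		"id":      1,
		"method":  "eth_getCode",
		"params":  []interface{}{contract, block},
	}
	payload, _ := json.Marshal(req)
	resp, err := http.Post(endpoint, "application/json", bytes.NewReader(payload))
	if err != nil {
		return false, err
	}
	defer resp.Body.Close()

	var reply struct {
		Result string `json:"result"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&reply); err != nil {
		return false, err
	}
	return len(reply.Result) > len("0x"), nil // "0x" means no code at that address
}

func main() {
	ok, err := hasCode("http://localhost:8545", "0x8605cdbbdb6d264aa742e77020dcbc58fcdce182", "latest")
	fmt.Println(ok, err)
}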
@@ -78,6 +78,16 @@ func (b *SimulatedBackend) Rollback() {
	b.pendingState, _ = state.New(b.pendingBlock.Root(), b.database)
}

// HasCode implements ContractVerifier.HasCode, checking whether there is any
// code associated with a certain account in the blockchain.
func (b *SimulatedBackend) HasCode(contract common.Address, pending bool) (bool, error) {
	if pending {
		return len(b.pendingState.GetCode(contract)) > 0, nil
	}
	statedb, _ := b.blockchain.State()
	return len(statedb.GetCode(contract)) > 0, nil
}

// ContractCall implements ContractCaller.ContractCall, executing the specified
// contract with the given input data.
func (b *SimulatedBackend) ContractCall(contract common.Address, data []byte, pending bool) ([]byte, error) {
@@ -20,6 +20,7 @@ import (
	"errors"
	"fmt"
	"math/big"
	"sync/atomic"

	"github.com/ethereum/go-ethereum/accounts/abi"
	"github.com/ethereum/go-ethereum/common"
@@ -56,6 +57,9 @@ type BoundContract struct {
	abi        abi.ABI            // Reflect based ABI to access the correct Ethereum methods
	caller     ContractCaller     // Read interface to interact with the blockchain
	transactor ContractTransactor // Write interface to interact with the blockchain

	latestHasCode  uint32 // Cached verification that the latest state contains code for this contract
	pendingHasCode uint32 // Cached verification that the pending state contains code for this contract
}

// NewBoundContract creates a low level contract interface through which calls
@@ -96,6 +100,19 @@ func (c *BoundContract) Call(opts *CallOpts, result interface{}, method string,
	if opts == nil {
		opts = new(CallOpts)
	}
	// Make sure we have a contract to operate on, and bail out otherwise
	if (opts.Pending && atomic.LoadUint32(&c.pendingHasCode) == 0) || (!opts.Pending && atomic.LoadUint32(&c.latestHasCode) == 0) {
		if code, err := c.caller.HasCode(c.address, opts.Pending); err != nil {
			return err
		} else if !code {
			return ErrNoCode
		}
		if opts.Pending {
			atomic.StoreUint32(&c.pendingHasCode, 1)
		} else {
			atomic.StoreUint32(&c.latestHasCode, 1)
		}
	}
	// Pack the input, call and unpack the results
	input, err := c.abi.Pack(method, params...)
	if err != nil {
@@ -153,6 +170,16 @@ func (c *BoundContract) transact(opts *TransactOpts, contract *common.Address, i
	}
	gasLimit := opts.GasLimit
	if gasLimit == nil {
		// Gas estimation cannot succeed without code for method invocations
		if contract != nil && atomic.LoadUint32(&c.pendingHasCode) == 0 {
			if code, err := c.transactor.HasCode(c.address, true); err != nil {
				return nil, err
			} else if !code {
				return nil, ErrNoCode
			}
			atomic.StoreUint32(&c.pendingHasCode, 1)
		}
		// If the contract surely has code (or code is not needed), estimate the transaction
		gasLimit, err = c.transactor.EstimateGasLimit(opts.From, contract, value, input)
		if err != nil {
			return nil, fmt.Errorf("failed to exstimate gas needed: %v", err)
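Both Call and transact memoize the result of the HasCode probe in the two uint32 fields, using sync/atomic so concurrent invocations never repeat the round trip and never need a mutex. A stripped-down sketch of that once-only pattern; the type and names are hypothetical, not part of the bind package:

package cache

import (
	"errors"
	"sync/atomic"
)

var errNoCode = errors.New("no contract code at given address") // stand-in for bind.ErrNoCode

// codeChecker memoizes an expensive "does the contract have code?" probe the same
// way BoundContract does above: a uint32 flag flipped with atomic operations.
type codeChecker struct {
	verified uint32
	probe    func() (bool, error) // e.g. wraps backend.HasCode(addr, pending)
}

func (c *codeChecker) ensure() error {
	if atomic.LoadUint32(&c.verified) == 1 {
		return nil // verified once already, skip the round trip
	}
	ok, err := c.probe()
	if err != nil {
		return err
	}
	if !ok {
		return errNoCode
	}
	atomic.StoreUint32(&c.verified, 1)
	return nil
}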
@@ -194,12 +194,44 @@ var bindTests = []struct {
			}
		`,
	},
	// Tests that plain values can be properly returned and deserialized
	{
		`Getter`,
		`
			contract Getter {
				function getter() constant returns (string, int, bytes32) {
					return ("Hi", 1, sha3(""));
				}
			}
		`,
		`606060405260dc8060106000396000f3606060405260e060020a6000350463993a04b78114601a575b005b600060605260c0604052600260809081527f486900000000000000000000000000000000000000000000000000000000000060a05260017fc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a47060e0829052610100819052606060c0908152600261012081905281906101409060a09080838184600060046012f1505081517fffff000000000000000000000000000000000000000000000000000000000000169091525050604051610160819003945092505050f3`,
		`[{"constant":true,"inputs":[],"name":"getter","outputs":[{"name":"","type":"string"},{"name":"","type":"int256"},{"name":"","type":"bytes32"}],"type":"function"}]`,
		`
			// Generate a new random account and a funded simulator
			key, _ := crypto.GenerateKey()
			auth := bind.NewKeyedTransactor(key)
			sim := backends.NewSimulatedBackend(core.GenesisAccount{Address: auth.From, Balance: big.NewInt(10000000000)})

			// Deploy a tuple tester contract and execute a structured call on it
			_, _, getter, err := DeployGetter(auth, sim)
			if err != nil {
				t.Fatalf("Failed to deploy getter contract: %v", err)
			}
			sim.Commit()

			if str, num, _, err := getter.Getter(nil); err != nil {
				t.Fatalf("Failed to call anonymous field retriever: %v", err)
			} else if str != "Hi" || num.Cmp(big.NewInt(1)) != 0 {
				t.Fatalf("Retrieved value mismatch: have %v/%v, want %v/%v", str, num, "Hi", 1)
			}
		`,
	},
	// Tests that tuples can be properly returned and deserialized
	{
		`Tupler`,
		`
			contract Tupler {
				function tuple() returns (string a, int b, bytes32 c) {
				function tuple() constant returns (string a, int b, bytes32 c) {
					return ("Hi", 1, sha3(""));
				}
			}
@@ -219,8 +251,10 @@ var bindTests = []struct {
			}
			sim.Commit()

			if _, err := tupler.Tuple(nil); err != nil {
			if res, err := tupler.Tuple(nil); err != nil {
				t.Fatalf("Failed to call structure retriever: %v", err)
			} else if res.A != "Hi" || res.B.Cmp(big.NewInt(1)) != 0 {
				t.Fatalf("Retrieved value mismatch: have %v/%v, want %v/%v", res.A, res.B, "Hi", 1)
			}
		`,
	},
@@ -211,7 +211,7 @@ package {{.Package}}
		{{range $i, $_ := .Normalized.Outputs}}ret{{$i}} = new({{bindtype .Type}})
		{{end}}
	){{end}}
	out := {{if .Structured}}ret{{else}}{{if eq (len .Normalized.Outputs) 1}}ret0{{else}}[]interface{}{
	out := {{if .Structured}}ret{{else}}{{if eq (len .Normalized.Outputs) 1}}ret0{{else}}&[]interface{}{
		{{range $i, $_ := .Normalized.Outputs}}ret{{$i}},
		{{end}}
	}{{end}}{{end}}
@@ -16,7 +16,10 @@

package abi

import "reflect"
import (
	"fmt"
	"reflect"
)

// indirect recursively dereferences the value until it either gets the value
// or finds a big.Int
@@ -62,3 +65,33 @@ func mustArrayToByteSlice(value reflect.Value) reflect.Value {
	reflect.Copy(slice, value)
	return slice
}

// set attempts to assign src to dst by either setting, copying or otherwise.
//
// set is a bit more lenient when it comes to assignment and doesn't force an as
// strict ruleset as bare `reflect` does.
func set(dst, src reflect.Value, output Argument) error {
	dstType := dst.Type()
	srcType := src.Type()

	switch {
	case dstType.AssignableTo(src.Type()):
		dst.Set(src)
	case dstType.Kind() == reflect.Array && srcType.Kind() == reflect.Slice:
		if !dstType.Elem().AssignableTo(r_byte) {
			return fmt.Errorf("abi: cannot unmarshal %v in to array of elem %v", src.Type(), dstType.Elem())
		}

		if dst.Len() < output.Type.SliceSize {
			return fmt.Errorf("abi: cannot unmarshal src (len=%d) in to dst (len=%d)", output.Type.SliceSize, dst.Len())
		}
		reflect.Copy(dst, src)
	case dstType.Kind() == reflect.Interface:
		dst.Set(src)
	case dstType.Kind() == reflect.Ptr:
		return set(dst.Elem(), src, output)
	default:
		return fmt.Errorf("abi: cannot unmarshal %v in to %v", src.Type(), dst.Type())
	}
	return nil
}
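The only functional change in the moved set function is the new reflect.Ptr case: when the destination is a pointer (as each element of a caller-supplied []interface{} of pointers is), it dereferences and recurses so the pointee gets written. A tiny standalone illustration of that dereference-and-recurse idea, using plain reflect rather than the abi package:

package main

import (
	"fmt"
	"reflect"
)

// assign mirrors the pointer branch of set() in isolation: when dst is a pointer,
// recurse on its element so the value it points at is updated.
func assign(dst, src reflect.Value) {
	if dst.Kind() == reflect.Ptr {
		assign(dst.Elem(), src)
		return
	}
	dst.Set(src)
}

func main() {
	v := new(uint8)
	out := []interface{}{v}

	// out[0] is an interface holding a *uint8; Elem() unwraps the interface and
	// assign then follows the pointer, so the original v is updated in place.
	assign(reflect.ValueOf(out).Index(0).Elem(), reflect.ValueOf(uint8(42)))
	fmt.Println(*v) // 42
}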
@@ -147,9 +147,21 @@ func (am *Manager) Sign(addr common.Address, hash []byte) (signature []byte, err
	return crypto.Sign(hash, unlockedKey.PrivateKey)
}

// SignWithPassphrase signs hash if the private key matching the given address can be
// decrypted with the given passphrase.
func (am *Manager) SignWithPassphrase(addr common.Address, passphrase string, hash []byte) (signature []byte, err error) {
	_, key, err := am.getDecryptedKey(Account{Address: addr}, passphrase)
	if err != nil {
		return nil, err
	}

	defer zeroKey(key.PrivateKey)
	return crypto.Sign(hash, key.PrivateKey)
}

// Unlock unlocks the given account indefinitely.
func (am *Manager) Unlock(a Account, keyAuth string) error {
	return am.TimedUnlock(a, keyAuth, 0)
func (am *Manager) Unlock(a Account, passphrase string) error {
	return am.TimedUnlock(a, passphrase, 0)
}

// Lock removes the private key with the given address from memory.
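SignWithPassphrase decrypts, signs and zeroes the key entirely inside the call, so unlike Unlock followed by Sign it never leaves the account in the manager's unlocked map (the test below asserts exactly that). A small sketch of how the two entry points can be combined; the helper and its package are my own, not part of go-ethereum:

package signing

import (
	"github.com/ethereum/go-ethereum/accounts"
	"github.com/ethereum/go-ethereum/common"
)

// signDigest prefers the session-unlocked path and falls back to the one-shot
// SignWithPassphrase introduced above, which never persists the decrypted key.
func signDigest(am *accounts.Manager, addr common.Address, passphrase string, digest []byte) ([]byte, error) {
	if sig, err := am.Sign(addr, digest); err == nil {
		return sig, nil // account was already unlocked for this session
	}
	return am.SignWithPassphrase(addr, passphrase, digest)
}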
@@ -81,6 +81,34 @@ func TestSign(t *testing.T) {
	}
}

func TestSignWithPassphrase(t *testing.T) {
	dir, am := tmpManager(t, true)
	defer os.RemoveAll(dir)

	pass := "passwd"
	acc, err := am.NewAccount(pass)
	if err != nil {
		t.Fatal(err)
	}

	if _, unlocked := am.unlocked[acc.Address]; unlocked {
		t.Fatal("expected account to be locked")
	}

	_, err = am.SignWithPassphrase(acc.Address, pass, testSigData)
	if err != nil {
		t.Fatal(err)
	}

	if _, unlocked := am.unlocked[acc.Address]; unlocked {
		t.Fatal("expected account to be locked")
	}

	if _, err = am.SignWithPassphrase(acc.Address, "invalid passwd", testSigData); err == nil {
		t.Fatal("expected SignHash to fail with invalid password")
	}
}

func TestTimedUnlock(t *testing.T) {
	dir, am := tmpManager(t, true)
	defer os.RemoveAll(dir)
@@ -23,6 +23,7 @@ import (
	"github.com/codegangsta/cli"
	"github.com/ethereum/go-ethereum/accounts"
	"github.com/ethereum/go-ethereum/cmd/utils"
	"github.com/ethereum/go-ethereum/console"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/logger"
	"github.com/ethereum/go-ethereum/logger/glog"
@@ -215,12 +216,12 @@ func getPassPhrase(prompt string, confirmation bool, i int, passwords []string)
	if prompt != "" {
		fmt.Println(prompt)
	}
	password, err := utils.Stdin.PasswordPrompt("Passphrase: ")
	password, err := console.Stdin.PromptPassword("Passphrase: ")
	if err != nil {
		utils.Fatalf("Failed to read passphrase: %v", err)
	}
	if confirmation {
		confirm, err := utils.Stdin.PasswordPrompt("Repeat passphrase: ")
		confirm, err := console.Stdin.PromptPassword("Repeat passphrase: ")
		if err != nil {
			utils.Fatalf("Failed to read passphrase confirmation: %v", err)
		}
@@ -26,6 +26,7 @@ import (
	"github.com/codegangsta/cli"
	"github.com/ethereum/go-ethereum/cmd/utils"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/console"
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/core/state"
	"github.com/ethereum/go-ethereum/core/types"
@@ -116,7 +117,7 @@ func exportChain(ctx *cli.Context) {
}

func removeDB(ctx *cli.Context) {
	confirm, err := utils.Stdin.ConfirmPrompt("Remove local database?")
	confirm, err := console.Stdin.PromptConfirm("Remove local database?")
	if err != nil {
		utils.Fatalf("%v", err)
	}
167	cmd/geth/consolecmd.go (new file)
@@ -0,0 +1,167 @@
|
||||
// Copyright 2016 The go-ethereum Authors
|
||||
// This file is part of go-ethereum.
|
||||
//
|
||||
// go-ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// go-ethereum is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"os"
|
||||
"os/signal"
|
||||
|
||||
"github.com/codegangsta/cli"
|
||||
"github.com/ethereum/go-ethereum/cmd/utils"
|
||||
"github.com/ethereum/go-ethereum/console"
|
||||
)
|
||||
|
||||
var (
|
||||
consoleCommand = cli.Command{
|
||||
Action: localConsole,
|
||||
Name: "console",
|
||||
Usage: `Geth Console: interactive JavaScript environment`,
|
||||
Description: `
|
||||
The Geth console is an interactive shell for the JavaScript runtime environment
|
||||
which exposes a node admin interface as well as the Ðapp JavaScript API.
|
||||
See https://github.com/ethereum/go-ethereum/wiki/Javascipt-Console
|
||||
`,
|
||||
}
|
||||
attachCommand = cli.Command{
|
||||
Action: remoteConsole,
|
||||
Name: "attach",
|
||||
Usage: `Geth Console: interactive JavaScript environment (connect to node)`,
|
||||
Description: `
|
||||
The Geth console is an interactive shell for the JavaScript runtime environment
|
||||
which exposes a node admin interface as well as the Ðapp JavaScript API.
|
||||
See https://github.com/ethereum/go-ethereum/wiki/Javascipt-Console.
|
||||
This command allows to open a console on a running geth node.
|
||||
`,
|
||||
}
|
||||
javascriptCommand = cli.Command{
|
||||
Action: ephemeralConsole,
|
||||
Name: "js",
|
||||
Usage: `executes the given JavaScript files in the Geth JavaScript VM`,
|
||||
Description: `
|
||||
The JavaScript VM exposes a node admin interface as well as the Ðapp
|
||||
JavaScript API. See https://github.com/ethereum/go-ethereum/wiki/Javascipt-Console
|
||||
`,
|
||||
}
|
||||
)
|
||||
|
||||
// localConsole starts a new geth node, attaching a JavaScript console to it at the
|
||||
// same time.
|
||||
func localConsole(ctx *cli.Context) {
|
||||
// Create and start the node based on the CLI flags
|
||||
node := utils.MakeSystemNode(clientIdentifier, verString, relConfig, makeDefaultExtra(), ctx)
|
||||
startNode(ctx, node)
|
||||
defer node.Stop()
|
||||
|
||||
// Attach to the newly started node and start the JavaScript console
|
||||
client, err := node.Attach()
|
||||
if err != nil {
|
||||
utils.Fatalf("Failed to attach to the inproc geth: %v", err)
|
||||
}
|
||||
config := console.Config{
|
||||
DataDir: node.DataDir(),
|
||||
DocRoot: ctx.GlobalString(utils.JSpathFlag.Name),
|
||||
Client: client,
|
||||
Preload: utils.MakeConsolePreloads(ctx),
|
||||
}
|
||||
console, err := console.New(config)
|
||||
if err != nil {
|
||||
utils.Fatalf("Failed to start the JavaScript console: %v", err)
|
||||
}
|
||||
defer console.Stop(false)
|
||||
|
||||
// If only a short execution was requested, evaluate and return
|
||||
if script := ctx.GlobalString(utils.ExecFlag.Name); script != "" {
|
||||
console.Evaluate(script)
|
||||
return
|
||||
}
|
||||
// Otherwise print the welcome screen and enter interactive mode
|
||||
console.Welcome()
|
||||
console.Interactive()
|
||||
}
|
||||
|
||||
// remoteConsole will connect to a remote geth instance, attaching a JavaScript
|
||||
// console to it.
|
||||
func remoteConsole(ctx *cli.Context) {
|
||||
// Attach to a remotely running geth instance and start the JavaScript console
|
||||
client, err := utils.NewRemoteRPCClient(ctx)
|
||||
if err != nil {
|
||||
utils.Fatalf("Unable to attach to remote geth: %v", err)
|
||||
}
|
||||
config := console.Config{
|
||||
DataDir: utils.MustMakeDataDir(ctx),
|
||||
DocRoot: ctx.GlobalString(utils.JSpathFlag.Name),
|
||||
Client: client,
|
||||
Preload: utils.MakeConsolePreloads(ctx),
|
||||
}
|
||||
console, err := console.New(config)
|
||||
if err != nil {
|
||||
utils.Fatalf("Failed to start the JavaScript console: %v", err)
|
||||
}
|
||||
defer console.Stop(false)
|
||||
|
||||
// If only a short execution was requested, evaluate and return
|
||||
if script := ctx.GlobalString(utils.ExecFlag.Name); script != "" {
|
||||
console.Evaluate(script)
|
||||
return
|
||||
}
|
||||
// Otherwise print the welcome screen and enter interactive mode
|
||||
console.Welcome()
|
||||
console.Interactive()
|
||||
}
|
||||
|
||||
// ephemeralConsole starts a new geth node, attaches an ephemeral JavaScript
|
||||
// console to it, and each of the files specified as arguments and tears the
|
||||
// everything down.
|
||||
func ephemeralConsole(ctx *cli.Context) {
|
||||
// Create and start the node based on the CLI flags
|
||||
node := utils.MakeSystemNode(clientIdentifier, verString, relConfig, makeDefaultExtra(), ctx)
|
||||
startNode(ctx, node)
|
||||
defer node.Stop()
|
||||
|
||||
// Attach to the newly started node and start the JavaScript console
|
||||
client, err := node.Attach()
|
||||
if err != nil {
|
||||
utils.Fatalf("Failed to attach to the inproc geth: %v", err)
|
||||
}
|
||||
config := console.Config{
|
||||
DataDir: node.DataDir(),
|
||||
DocRoot: ctx.GlobalString(utils.JSpathFlag.Name),
|
||||
Client: client,
|
||||
Preload: utils.MakeConsolePreloads(ctx),
|
||||
}
|
||||
console, err := console.New(config)
|
||||
if err != nil {
|
||||
utils.Fatalf("Failed to start the JavaScript console: %v", err)
|
||||
}
|
||||
defer console.Stop(false)
|
||||
|
||||
// Evaluate each of the specified JavaScript files
|
||||
for _, file := range ctx.Args() {
|
||||
if err = console.Execute(file); err != nil {
|
||||
utils.Fatalf("Failed to execute %s: %v", file, err)
|
||||
}
|
||||
}
|
||||
// Wait for pending callbacks, but stop for Ctrl-C.
|
||||
abort := make(chan os.Signal, 1)
|
||||
signal.Notify(abort, os.Interrupt)
|
||||
|
||||
go func() {
|
||||
<-abort
|
||||
os.Exit(0)
|
||||
}()
|
||||
console.Stop(true)
|
||||
}
|
162	cmd/geth/consolecmd_test.go (new file)
@@ -0,0 +1,162 @@
|
||||
// Copyright 2016 The go-ethereum Authors
|
||||
// This file is part of go-ethereum.
|
||||
//
|
||||
// go-ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// go-ethereum is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"math/rand"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/rpc"
|
||||
)
|
||||
|
||||
// Tests that a node embedded within a console can be started up properly and
|
||||
// then terminated by closing the input stream.
|
||||
func TestConsoleWelcome(t *testing.T) {
|
||||
coinbase := "0x8605cdbbdb6d264aa742e77020dcbc58fcdce182"
|
||||
|
||||
// Start a geth console, make sure it's cleaned up and terminate the console
|
||||
geth := runGeth(t,
|
||||
"--port", "0", "--maxpeers", "0", "--nodiscover", "--nat", "none",
|
||||
"--etherbase", coinbase, "--shh",
|
||||
"console")
|
||||
|
||||
// Gather all the infos the welcome message needs to contain
|
||||
geth.setTemplateFunc("goos", func() string { return runtime.GOOS })
|
||||
geth.setTemplateFunc("gover", runtime.Version)
|
||||
geth.setTemplateFunc("gethver", func() string { return verString })
|
||||
geth.setTemplateFunc("niltime", func() string { return time.Unix(0, 0).Format(time.RFC1123) })
|
||||
geth.setTemplateFunc("apis", func() []string {
|
||||
apis := append(strings.Split(rpc.DefaultIPCApis, ","), rpc.MetadataApi)
|
||||
sort.Strings(apis)
|
||||
return apis
|
||||
})
|
||||
|
||||
// Verify the actual welcome message to the required template
|
||||
geth.expect(`
|
||||
Welcome to the Geth JavaScript console!
|
||||
|
||||
instance: Geth/v{{gethver}}/{{goos}}/{{gover}}
|
||||
coinbase: {{.Etherbase}}
|
||||
at block: 0 ({{niltime}})
|
||||
datadir: {{.Datadir}}
|
||||
modules:{{range apis}} {{.}}:1.0{{end}}
|
||||
|
||||
> {{.InputLine "exit"}}
|
||||
`)
|
||||
geth.expectExit()
|
||||
}
|
||||
|
||||
// Tests that a console can be attached to a running node via various means.
|
||||
func TestIPCAttachWelcome(t *testing.T) {
|
||||
// Configure the instance for IPC attachement
|
||||
coinbase := "0x8605cdbbdb6d264aa742e77020dcbc58fcdce182"
|
||||
var ipc string
|
||||
if runtime.GOOS == "windows" {
|
||||
ipc = `\\.\pipe\geth` + strconv.Itoa(rand.Int())
|
||||
} else {
|
||||
ws := tmpdir(t)
|
||||
defer os.RemoveAll(ws)
|
||||
ipc = filepath.Join(ws, "geth.ipc")
|
||||
}
|
||||
// Note: we need --shh because testAttachWelcome checks for default
|
||||
// list of ipc modules and shh is included there.
|
||||
geth := runGeth(t,
|
||||
"--port", "0", "--maxpeers", "0", "--nodiscover", "--nat", "none",
|
||||
"--etherbase", coinbase, "--shh", "--ipcpath", ipc)
|
||||
|
||||
time.Sleep(2 * time.Second) // Simple way to wait for the RPC endpoint to open
|
||||
testAttachWelcome(t, geth, "ipc:"+ipc)
|
||||
|
||||
geth.interrupt()
|
||||
geth.expectExit()
|
||||
}
|
||||
|
||||
func TestHTTPAttachWelcome(t *testing.T) {
|
||||
coinbase := "0x8605cdbbdb6d264aa742e77020dcbc58fcdce182"
|
||||
port := strconv.Itoa(rand.Intn(65535-1024) + 1024) // Yeah, sometimes this will fail, sorry :P
|
||||
geth := runGeth(t,
|
||||
"--port", "0", "--maxpeers", "0", "--nodiscover", "--nat", "none",
|
||||
"--etherbase", coinbase, "--rpc", "--rpcport", port)
|
||||
|
||||
time.Sleep(2 * time.Second) // Simple way to wait for the RPC endpoint to open
|
||||
testAttachWelcome(t, geth, "http://localhost:"+port)
|
||||
|
||||
geth.interrupt()
|
||||
geth.expectExit()
|
||||
}
|
||||
|
||||
func TestWSAttachWelcome(t *testing.T) {
|
||||
coinbase := "0x8605cdbbdb6d264aa742e77020dcbc58fcdce182"
|
||||
port := strconv.Itoa(rand.Intn(65535-1024) + 1024) // Yeah, sometimes this will fail, sorry :P
|
||||
|
||||
geth := runGeth(t,
|
||||
"--port", "0", "--maxpeers", "0", "--nodiscover", "--nat", "none",
|
||||
"--etherbase", coinbase, "--ws", "--wsport", port)
|
||||
|
||||
time.Sleep(2 * time.Second) // Simple way to wait for the RPC endpoint to open
|
||||
testAttachWelcome(t, geth, "ws://localhost:"+port)
|
||||
|
||||
geth.interrupt()
|
||||
geth.expectExit()
|
||||
}
|
||||
|
||||
func testAttachWelcome(t *testing.T, geth *testgeth, endpoint string) {
|
||||
// Attach to a running geth note and terminate immediately
|
||||
attach := runGeth(t, "attach", endpoint)
|
||||
defer attach.expectExit()
|
||||
attach.stdin.Close()
|
||||
|
||||
// Gather all the infos the welcome message needs to contain
|
||||
attach.setTemplateFunc("goos", func() string { return runtime.GOOS })
|
||||
attach.setTemplateFunc("gover", runtime.Version)
|
||||
attach.setTemplateFunc("gethver", func() string { return verString })
|
||||
attach.setTemplateFunc("etherbase", func() string { return geth.Etherbase })
|
||||
attach.setTemplateFunc("niltime", func() string { return time.Unix(0, 0).Format(time.RFC1123) })
|
||||
attach.setTemplateFunc("ipc", func() bool { return strings.HasPrefix(endpoint, "ipc") })
|
||||
attach.setTemplateFunc("datadir", func() string { return geth.Datadir })
|
||||
attach.setTemplateFunc("apis", func() []string {
|
||||
var apis []string
|
||||
if strings.HasPrefix(endpoint, "ipc") {
|
||||
apis = append(strings.Split(rpc.DefaultIPCApis, ","), rpc.MetadataApi)
|
||||
} else {
|
||||
apis = append(strings.Split(rpc.DefaultHTTPApis, ","), rpc.MetadataApi)
|
||||
}
|
||||
sort.Strings(apis)
|
||||
return apis
|
||||
})
|
||||
|
||||
// Verify the actual welcome message to the required template
|
||||
attach.expect(`
|
||||
Welcome to the Geth JavaScript console!
|
||||
|
||||
instance: Geth/v{{gethver}}/{{goos}}/{{gover}}
|
||||
coinbase: {{etherbase}}
|
||||
at block: 0 ({{niltime}}){{if ipc}}
|
||||
datadir: {{datadir}}{{end}}
|
||||
modules:{{range apis}} {{.}}:1.0{{end}}
|
||||
|
||||
> {{.InputLine "exit" }}
|
||||
`)
|
||||
attach.expectExit()
|
||||
}
|
427	cmd/geth/js.go (deleted)
@@ -1,427 +0,0 @@
|
||||
// Copyright 2015 The go-ethereum Authors
|
||||
// This file is part of go-ethereum.
|
||||
//
|
||||
// go-ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// go-ethereum is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math/big"
|
||||
"os"
|
||||
"os/signal"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/codegangsta/cli"
|
||||
"github.com/ethereum/go-ethereum/accounts"
|
||||
"github.com/ethereum/go-ethereum/cmd/utils"
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/common/registrar"
|
||||
"github.com/ethereum/go-ethereum/eth"
|
||||
"github.com/ethereum/go-ethereum/internal/web3ext"
|
||||
re "github.com/ethereum/go-ethereum/jsre"
|
||||
"github.com/ethereum/go-ethereum/node"
|
||||
"github.com/ethereum/go-ethereum/rpc"
|
||||
"github.com/peterh/liner"
|
||||
"github.com/robertkrimen/otto"
|
||||
)
|
||||
|
||||
var (
|
||||
passwordRegexp = regexp.MustCompile("personal.[nu]")
|
||||
leadingSpace = regexp.MustCompile("^ ")
|
||||
onlyws = regexp.MustCompile("^\\s*$")
|
||||
exit = regexp.MustCompile("^\\s*exit\\s*;*\\s*$")
|
||||
)
|
||||
|
||||
type jsre struct {
|
||||
re *re.JSRE
|
||||
stack *node.Node
|
||||
wait chan *big.Int
|
||||
ps1 string
|
||||
atexit func()
|
||||
corsDomain string
|
||||
client rpc.Client
|
||||
}
|
||||
|
||||
func makeCompleter(re *jsre) liner.WordCompleter {
|
||||
return func(line string, pos int) (head string, completions []string, tail string) {
|
||||
if len(line) == 0 || pos == 0 {
|
||||
return "", nil, ""
|
||||
}
|
||||
// chuck data to relevant part for autocompletion, e.g. in case of nested lines eth.getBalance(eth.coinb<tab><tab>
|
||||
i := 0
|
||||
for i = pos - 1; i > 0; i-- {
|
||||
if line[i] == '.' || (line[i] >= 'a' && line[i] <= 'z') || (line[i] >= 'A' && line[i] <= 'Z') {
|
||||
continue
|
||||
}
|
||||
if i >= 3 && line[i] == '3' && line[i-3] == 'w' && line[i-2] == 'e' && line[i-1] == 'b' {
|
||||
continue
|
||||
}
|
||||
i += 1
|
||||
break
|
||||
}
|
||||
return line[:i], re.re.CompleteKeywords(line[i:pos]), line[pos:]
|
||||
}
|
||||
}
|
||||
|
||||
func newLightweightJSRE(docRoot string, client rpc.Client, datadir string, interactive bool) *jsre {
|
||||
js := &jsre{ps1: "> "}
|
||||
js.wait = make(chan *big.Int)
|
||||
js.client = client
|
||||
js.re = re.New(docRoot)
|
||||
if err := js.apiBindings(); err != nil {
|
||||
utils.Fatalf("Unable to initialize console - %v", err)
|
||||
}
|
||||
js.setupInput(datadir)
|
||||
return js
|
||||
}
|
||||
|
||||
func newJSRE(stack *node.Node, docRoot, corsDomain string, client rpc.Client, interactive bool) *jsre {
|
||||
js := &jsre{stack: stack, ps1: "> "}
|
||||
// set default cors domain used by startRpc from CLI flag
|
||||
js.corsDomain = corsDomain
|
||||
js.wait = make(chan *big.Int)
|
||||
js.client = client
|
||||
js.re = re.New(docRoot)
|
||||
if err := js.apiBindings(); err != nil {
|
||||
utils.Fatalf("Unable to connect - %v", err)
|
||||
}
|
||||
js.setupInput(stack.DataDir())
|
||||
return js
|
||||
}
|
||||
|
||||
func (self *jsre) setupInput(datadir string) {
|
||||
self.withHistory(datadir, func(hist *os.File) { utils.Stdin.ReadHistory(hist) })
|
||||
utils.Stdin.SetCtrlCAborts(true)
|
||||
utils.Stdin.SetWordCompleter(makeCompleter(self))
|
||||
utils.Stdin.SetTabCompletionStyle(liner.TabPrints)
|
||||
self.atexit = func() {
|
||||
self.withHistory(datadir, func(hist *os.File) {
|
||||
hist.Truncate(0)
|
||||
utils.Stdin.WriteHistory(hist)
|
||||
})
|
||||
utils.Stdin.Close()
|
||||
close(self.wait)
|
||||
}
|
||||
}
|
||||
|
||||
func (self *jsre) batch(statement string) {
|
||||
err := self.re.EvalAndPrettyPrint(statement)
|
||||
|
||||
if err != nil {
|
||||
fmt.Printf("%v", jsErrorString(err))
|
||||
}
|
||||
|
||||
if self.atexit != nil {
|
||||
self.atexit()
|
||||
}
|
||||
|
||||
self.re.Stop(false)
|
||||
}
|
||||
|
||||
// show summary of current geth instance
|
||||
func (self *jsre) welcome() {
|
||||
self.re.Run(`
|
||||
(function () {
|
||||
console.log('instance: ' + web3.version.node);
|
||||
console.log("coinbase: " + eth.coinbase);
|
||||
var ts = 1000 * eth.getBlock(eth.blockNumber).timestamp;
|
||||
console.log("at block: " + eth.blockNumber + " (" + new Date(ts) + ")");
|
||||
console.log(' datadir: ' + admin.datadir);
|
||||
})();
|
||||
`)
|
||||
if modules, err := self.supportedApis(); err == nil {
|
||||
loadedModules := make([]string, 0)
|
||||
for api, version := range modules {
|
||||
loadedModules = append(loadedModules, fmt.Sprintf("%s:%s", api, version))
|
||||
}
|
||||
sort.Strings(loadedModules)
|
||||
}
|
||||
}
|
||||
|
||||
func (self *jsre) supportedApis() (map[string]string, error) {
|
||||
return self.client.SupportedModules()
|
||||
}
|
||||
|
||||
func (js *jsre) apiBindings() error {
|
||||
apis, err := js.supportedApis()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
apiNames := make([]string, 0, len(apis))
|
||||
for a, _ := range apis {
|
||||
apiNames = append(apiNames, a)
|
||||
}
|
||||
|
||||
jeth := utils.NewJeth(js.re, js.client)
|
||||
js.re.Set("jeth", struct{}{})
|
||||
t, _ := js.re.Get("jeth")
|
||||
jethObj := t.Object()
|
||||
|
||||
jethObj.Set("send", jeth.Send)
|
||||
jethObj.Set("sendAsync", jeth.Send)
|
||||
|
||||
err = js.re.Compile("bignumber.js", re.BigNumber_JS)
|
||||
if err != nil {
|
||||
utils.Fatalf("Error loading bignumber.js: %v", err)
|
||||
}
|
||||
|
||||
err = js.re.Compile("web3.js", re.Web3_JS)
|
||||
if err != nil {
|
||||
utils.Fatalf("Error loading web3.js: %v", err)
|
||||
}
|
||||
|
||||
_, err = js.re.Run("var Web3 = require('web3');")
|
||||
if err != nil {
|
||||
utils.Fatalf("Error requiring web3: %v", err)
|
||||
}
|
||||
|
||||
_, err = js.re.Run("var web3 = new Web3(jeth);")
|
||||
if err != nil {
|
||||
utils.Fatalf("Error setting web3 provider: %v", err)
|
||||
}
|
||||
|
||||
// load only supported API's in javascript runtime
|
||||
shortcuts := "var eth = web3.eth; var personal = web3.personal; "
|
||||
for _, apiName := range apiNames {
|
||||
if apiName == "web3" || apiName == "rpc" {
|
||||
continue // manually mapped or ignore
|
||||
}
|
||||
|
||||
if jsFile, ok := web3ext.Modules[apiName]; ok {
|
||||
if err = js.re.Compile(fmt.Sprintf("%s.js", apiName), jsFile); err == nil {
|
||||
shortcuts += fmt.Sprintf("var %s = web3.%s; ", apiName, apiName)
|
||||
} else {
|
||||
utils.Fatalf("Error loading %s.js: %v", apiName, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
_, err = js.re.Run(shortcuts)
|
||||
if err != nil {
|
||||
utils.Fatalf("Error setting namespaces: %v", err)
|
||||
}
|
||||
|
||||
js.re.Run(`var GlobalRegistrar = eth.contract(` + registrar.GlobalRegistrarAbi + `); registrar = GlobalRegistrar.at("` + registrar.GlobalRegistrarAddr + `");`)
|
||||
|
||||
// overrule some of the methods that require password as input and ask for it interactively
|
||||
p, err := js.re.Get("personal")
|
||||
if err != nil {
|
||||
fmt.Println("Unable to overrule sensitive methods in personal module")
|
||||
return nil
|
||||
}
|
||||
|
||||
// Override the unlockAccount and newAccount methods on the personal object since these require user interaction.
|
||||
// Assign the jeth.unlockAccount and jeth.newAccount in the jsre the original web3 callbacks. These will be called
|
||||
// by the jeth.* methods after they got the password from the user and send the original web3 request to the backend.
|
||||
if persObj := p.Object(); persObj != nil { // make sure the personal api is enabled over the interface
|
||||
js.re.Run(`jeth.unlockAccount = personal.unlockAccount;`)
|
||||
persObj.Set("unlockAccount", jeth.UnlockAccount)
|
||||
js.re.Run(`jeth.newAccount = personal.newAccount;`)
|
||||
persObj.Set("newAccount", jeth.NewAccount)
|
||||
}
|
||||
|
||||
// The admin.sleep and admin.sleepBlocks are offered by the console and not by the RPC layer.
|
||||
// Bind these if the admin module is available.
|
||||
if a, err := js.re.Get("admin"); err == nil {
|
||||
if adminObj := a.Object(); adminObj != nil {
|
||||
adminObj.Set("sleepBlocks", jeth.SleepBlocks)
|
||||
adminObj.Set("sleep", jeth.Sleep)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (self *jsre) AskPassword() (string, bool) {
|
||||
pass, err := utils.Stdin.PasswordPrompt("Passphrase: ")
|
||||
if err != nil {
|
||||
return "", false
|
||||
}
|
||||
return pass, true
|
||||
}
|
||||
|
||||
func (self *jsre) ConfirmTransaction(tx string) bool {
|
||||
// Retrieve the Ethereum instance from the node
|
||||
var ethereum *eth.Ethereum
|
||||
if err := self.stack.Service(ðereum); err != nil {
|
||||
return false
|
||||
}
|
||||
// If natspec is enabled, ask for permission
|
||||
if ethereum.NatSpec && false /* disabled for now */ {
|
||||
// notice := natspec.GetNotice(self.xeth, tx, ethereum.HTTPClient())
|
||||
// fmt.Println(notice)
|
||||
// answer, _ := self.Prompt("Confirm Transaction [y/n]")
|
||||
// return strings.HasPrefix(strings.Trim(answer, " "), "y")
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func (self *jsre) UnlockAccount(addr []byte) bool {
|
||||
fmt.Printf("Please unlock account %x.\n", addr)
|
||||
pass, err := utils.Stdin.PasswordPrompt("Passphrase: ")
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
// TODO: allow retry
|
||||
var ethereum *eth.Ethereum
|
||||
if err := self.stack.Service(ðereum); err != nil {
|
||||
return false
|
||||
}
|
||||
a := accounts.Account{Address: common.BytesToAddress(addr)}
|
||||
if err := ethereum.AccountManager().Unlock(a, pass); err != nil {
|
||||
return false
|
||||
} else {
|
||||
fmt.Println("Account is now unlocked for this session.")
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
// preloadJSFiles loads JS files that the user has specified with ctx.PreLoadJSFlag into
|
||||
// the JSRE. If not all files could be loaded it will return an error describing the error.
|
||||
func (self *jsre) preloadJSFiles(ctx *cli.Context) error {
|
||||
if ctx.GlobalString(utils.PreLoadJSFlag.Name) != "" {
|
||||
assetPath := ctx.GlobalString(utils.JSpathFlag.Name)
|
||||
jsFiles := strings.Split(ctx.GlobalString(utils.PreLoadJSFlag.Name), ",")
|
||||
for _, file := range jsFiles {
|
||||
filename := common.AbsolutePath(assetPath, strings.TrimSpace(file))
|
||||
if err := self.re.Exec(filename); err != nil {
|
||||
return fmt.Errorf("%s: %v", file, jsErrorString(err))
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// jsErrorString adds a backtrace to errors generated by otto.
|
||||
func jsErrorString(err error) string {
|
||||
if ottoErr, ok := err.(*otto.Error); ok {
|
||||
return ottoErr.String()
|
||||
}
|
||||
return err.Error()
|
||||
}
|
||||
|
||||
func (self *jsre) interactive() {
|
||||
// Read input lines.
|
||||
prompt := make(chan string)
|
||||
inputln := make(chan string)
|
||||
go func() {
|
||||
defer close(inputln)
|
||||
for {
|
||||
line, err := utils.Stdin.Prompt(<-prompt)
|
||||
if err != nil {
|
||||
if err == liner.ErrPromptAborted { // ctrl-C
|
||||
self.resetPrompt()
|
||||
inputln <- ""
|
||||
continue
|
||||
}
|
||||
return
|
||||
}
|
||||
inputln <- line
|
||||
}
|
||||
}()
|
||||
// Wait for Ctrl-C, too.
|
||||
sig := make(chan os.Signal, 1)
|
||||
signal.Notify(sig, os.Interrupt)
|
||||
|
||||
defer func() {
|
||||
if self.atexit != nil {
|
||||
self.atexit()
|
||||
}
|
||||
self.re.Stop(false)
|
||||
}()
|
||||
for {
|
||||
prompt <- self.ps1
|
||||
select {
|
||||
case <-sig:
|
||||
fmt.Println("caught interrupt, exiting")
|
||||
return
|
||||
case input, ok := <-inputln:
|
||||
if !ok || indentCount <= 0 && exit.MatchString(input) {
|
||||
return
|
||||
}
|
||||
if onlyws.MatchString(input) {
|
||||
continue
|
||||
}
|
||||
str += input + "\n"
|
||||
self.setIndent()
|
||||
if indentCount <= 0 {
|
||||
if mustLogInHistory(str) {
|
||||
utils.Stdin.AppendHistory(str[:len(str)-1])
|
||||
}
|
||||
self.parseInput(str)
|
||||
str = ""
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func mustLogInHistory(input string) bool {
|
||||
return len(input) == 0 ||
|
||||
passwordRegexp.MatchString(input) ||
|
||||
!leadingSpace.MatchString(input)
|
||||
}
|
||||
|
||||
func (self *jsre) withHistory(datadir string, op func(*os.File)) {
|
||||
hist, err := os.OpenFile(filepath.Join(datadir, "history"), os.O_RDWR|os.O_CREATE, os.ModePerm)
|
||||
if err != nil {
|
||||
fmt.Printf("unable to open history file: %v\n", err)
|
||||
return
|
||||
}
|
||||
op(hist)
|
||||
hist.Close()
|
||||
}
|
||||
|
||||
func (self *jsre) parseInput(code string) {
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
fmt.Println("[native] error", r)
|
||||
}
|
||||
}()
|
||||
if err := self.re.EvalAndPrettyPrint(code); err != nil {
|
||||
if ottoErr, ok := err.(*otto.Error); ok {
|
||||
fmt.Println(ottoErr.String())
|
||||
} else {
|
||||
fmt.Println(err)
|
||||
}
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
var indentCount = 0
|
||||
var str = ""
|
||||
|
||||
func (self *jsre) resetPrompt() {
|
||||
indentCount = 0
|
||||
str = ""
|
||||
self.ps1 = "> "
|
||||
}
|
||||
|
||||
func (self *jsre) setIndent() {
|
||||
open := strings.Count(str, "{")
|
||||
open += strings.Count(str, "(")
|
||||
closed := strings.Count(str, "}")
|
||||
closed += strings.Count(str, ")")
|
||||
indentCount = open - closed
|
||||
if indentCount <= 0 {
|
||||
self.ps1 = "> "
|
||||
} else {
|
||||
self.ps1 = strings.Join(make([]string, indentCount*2), "..")
|
||||
self.ps1 += " "
|
||||
}
|
||||
}
|
@@ -1,500 +0,0 @@
|
||||
// Copyright 2015 The go-ethereum Authors
|
||||
// This file is part of go-ethereum.
|
||||
//
|
||||
// go-ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// go-ethereum is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"math/big"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/accounts"
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/common/compiler"
|
||||
"github.com/ethereum/go-ethereum/common/httpclient"
|
||||
"github.com/ethereum/go-ethereum/core"
|
||||
"github.com/ethereum/go-ethereum/crypto"
|
||||
"github.com/ethereum/go-ethereum/eth"
|
||||
"github.com/ethereum/go-ethereum/ethdb"
|
||||
"github.com/ethereum/go-ethereum/node"
|
||||
)
|
||||
|
||||
const (
|
||||
testSolcPath = ""
|
||||
solcVersion = "0.9.23"
|
||||
testAddress = "0x8605cdbbdb6d264aa742e77020dcbc58fcdce182"
|
||||
testBalance = "10000000000000000000"
|
||||
// of empty string
|
||||
testHash = "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
|
||||
)
|
||||
|
||||
var (
|
||||
versionRE = regexp.MustCompile(strconv.Quote(`"compilerVersion":"` + solcVersion + `"`))
|
||||
testNodeKey, _ = crypto.HexToECDSA("4b50fa71f5c3eeb8fdc452224b2395af2fcc3d125e06c32c82e048c0559db03f")
|
||||
testAccount, _ = crypto.HexToECDSA("e6fab74a43941f82d89cb7faa408e227cdad3153c4720e540e855c19b15e6674")
|
||||
testGenesis = `{"` + testAddress[2:] + `": {"balance": "` + testBalance + `"}}`
|
||||
)
|
||||
|
||||
type testjethre struct {
|
||||
*jsre
|
||||
lastConfirm string
|
||||
client *httpclient.HTTPClient
|
||||
}
|
||||
|
||||
// Temporary disabled while natspec hasn't been migrated
|
||||
//func (self *testjethre) ConfirmTransaction(tx string) bool {
|
||||
// var ethereum *eth.Ethereum
|
||||
// self.stack.Service(ðereum)
|
||||
//
|
||||
// if ethereum.NatSpec {
|
||||
// self.lastConfirm = natspec.GetNotice(self.xeth, tx, self.client)
|
||||
// }
|
||||
// return true
|
||||
//}
|
||||
|
||||
func testJEthRE(t *testing.T) (string, *testjethre, *node.Node) {
|
||||
return testREPL(t, nil)
|
||||
}
|
||||
|
||||
func testREPL(t *testing.T, config func(*eth.Config)) (string, *testjethre, *node.Node) {
|
||||
tmp, err := ioutil.TempDir("", "geth-test")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
// Create a networkless protocol stack
|
||||
stack, err := node.New(&node.Config{DataDir: tmp, PrivateKey: testNodeKey, Name: "test", NoDiscovery: true})
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create node: %v", err)
|
||||
}
|
||||
// Initialize and register the Ethereum protocol
|
||||
accman := accounts.NewPlaintextManager(filepath.Join(tmp, "keystore"))
|
||||
db, _ := ethdb.NewMemDatabase()
|
||||
core.WriteGenesisBlockForTesting(db, core.GenesisAccount{
|
||||
Address: common.HexToAddress(testAddress),
|
||||
Balance: common.String2Big(testBalance),
|
||||
})
|
||||
ethConf := ð.Config{
|
||||
ChainConfig: &core.ChainConfig{HomesteadBlock: new(big.Int)},
|
||||
TestGenesisState: db,
|
||||
AccountManager: accman,
|
||||
DocRoot: "/",
|
||||
SolcPath: testSolcPath,
|
||||
PowTest: true,
|
||||
}
|
||||
if config != nil {
|
||||
config(ethConf)
|
||||
}
|
||||
if err := stack.Register(func(ctx *node.ServiceContext) (node.Service, error) {
|
||||
return eth.New(ctx, ethConf)
|
||||
}); err != nil {
|
||||
t.Fatalf("failed to register ethereum protocol: %v", err)
|
||||
}
|
||||
// Initialize all the keys for testing
|
||||
a, err := accman.ImportECDSA(testAccount, "")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := accman.Unlock(a, ""); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
// Start the node and assemble the REPL tester
|
||||
if err := stack.Start(); err != nil {
|
||||
t.Fatalf("failed to start test stack: %v", err)
|
||||
}
|
||||
var ethereum *eth.Ethereum
|
||||
stack.Service(ðereum)
|
||||
|
||||
assetPath := filepath.Join(os.Getenv("GOPATH"), "src", "github.com", "ethereum", "go-ethereum", "cmd", "mist", "assets", "ext")
|
||||
client, err := stack.Attach()
|
||||
if err != nil {
|
||||
t.Fatalf("failed to attach to node: %v", err)
|
||||
}
|
||||
tf := &testjethre{client: ethereum.HTTPClient()}
|
||||
repl := newJSRE(stack, assetPath, "", client, false)
|
||||
tf.jsre = repl
|
||||
return tmp, tf, stack
|
||||
}
|
||||
|
||||
func TestNodeInfo(t *testing.T) {
|
||||
t.Skip("broken after p2p update")
|
||||
tmp, repl, ethereum := testJEthRE(t)
|
||||
defer ethereum.Stop()
|
||||
defer os.RemoveAll(tmp)
|
||||
|
||||
want := `{"DiscPort":0,"IP":"0.0.0.0","ListenAddr":"","Name":"test","NodeID":"4cb2fc32924e94277bf94b5e4c983beedb2eabd5a0bc941db32202735c6625d020ca14a5963d1738af43b6ac0a711d61b1a06de931a499fe2aa0b1a132a902b5","NodeUrl":"enode://4cb2fc32924e94277bf94b5e4c983beedb2eabd5a0bc941db32202735c6625d020ca14a5963d1738af43b6ac0a711d61b1a06de931a499fe2aa0b1a132a902b5@0.0.0.0:0","TCPPort":0,"Td":"131072"}`
|
||||
checkEvalJSON(t, repl, `admin.nodeInfo`, want)
|
||||
}
|
||||
|
||||
func TestAccounts(t *testing.T) {
|
||||
tmp, repl, node := testJEthRE(t)
|
||||
defer node.Stop()
|
||||
defer os.RemoveAll(tmp)
|
||||
|
||||
checkEvalJSON(t, repl, `eth.accounts`, `["`+testAddress+`"]`)
|
||||
checkEvalJSON(t, repl, `eth.coinbase`, `"`+testAddress+`"`)
|
||||
val, err := repl.re.Run(`jeth.newAccount("password")`)
|
||||
if err != nil {
|
||||
t.Errorf("expected no error, got %v", err)
|
||||
}
|
||||
addr := val.String()
|
||||
if !regexp.MustCompile(`0x[0-9a-f]{40}`).MatchString(addr) {
|
||||
t.Errorf("address not hex: %q", addr)
|
||||
}
|
||||
|
||||
checkEvalJSON(t, repl, `eth.accounts`, `["`+testAddress+`","`+addr+`"]`)
|
||||
|
||||
}
|
||||
|
||||
func TestBlockChain(t *testing.T) {
|
||||
tmp, repl, node := testJEthRE(t)
|
||||
defer node.Stop()
|
||||
defer os.RemoveAll(tmp)
|
||||
// get current block dump before export/import.
|
||||
val, err := repl.re.Run("JSON.stringify(debug.dumpBlock(eth.blockNumber))")
|
||||
if err != nil {
|
||||
t.Errorf("expected no error, got %v", err)
|
||||
}
|
||||
beforeExport := val.String()
|
||||
|
||||
// do the export
|
||||
extmp, err := ioutil.TempDir("", "geth-test-export")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer os.RemoveAll(extmp)
|
||||
tmpfile := filepath.Join(extmp, "export.chain")
|
||||
tmpfileq := strconv.Quote(tmpfile)
|
||||
|
||||
var ethereum *eth.Ethereum
|
||||
node.Service(ðereum)
|
||||
ethereum.BlockChain().Reset()
|
||||
|
||||
checkEvalJSON(t, repl, `admin.exportChain(`+tmpfileq+`)`, `true`)
|
||||
if _, err := os.Stat(tmpfile); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// check import, verify that dumpBlock gives the same result.
|
||||
checkEvalJSON(t, repl, `admin.importChain(`+tmpfileq+`)`, `true`)
|
||||
checkEvalJSON(t, repl, `debug.dumpBlock(eth.blockNumber)`, beforeExport)
|
||||
}
|
||||
|
||||
func TestMining(t *testing.T) {
|
||||
tmp, repl, node := testJEthRE(t)
|
||||
defer node.Stop()
|
||||
defer os.RemoveAll(tmp)
|
||||
checkEvalJSON(t, repl, `eth.mining`, `false`)
|
||||
}
|
||||
|
||||
func TestRPC(t *testing.T) {
|
||||
tmp, repl, node := testJEthRE(t)
|
||||
defer node.Stop()
|
||||
defer os.RemoveAll(tmp)
|
||||
|
||||
checkEvalJSON(t, repl, `admin.startRPC("127.0.0.1", 5004, "*", "web3,eth,net")`, `true`)
|
||||
}
|
||||
|
||||
func TestCheckTestAccountBalance(t *testing.T) {
|
||||
t.Skip() // i don't think it tests the correct behaviour here. it's actually testing
|
||||
// internals which shouldn't be tested. This now fails because of a change in the core
|
||||
// and i have no means to fix this, sorry - @obscuren
|
||||
tmp, repl, node := testJEthRE(t)
|
||||
defer node.Stop()
|
||||
defer os.RemoveAll(tmp)
|
||||
|
||||
repl.re.Run(`primary = "` + testAddress + `"`)
|
||||
checkEvalJSON(t, repl, `eth.getBalance(primary)`, `"`+testBalance+`"`)
|
||||
}
|
||||
|
||||
func TestSignature(t *testing.T) {
|
||||
tmp, repl, node := testJEthRE(t)
|
||||
defer node.Stop()
|
||||
defer os.RemoveAll(tmp)
|
||||
|
||||
val, err := repl.re.Run(`eth.sign("` + testAddress + `", "` + testHash + `")`)
|
||||
|
||||
// This is a very preliminary test, lacking actual signature verification
|
||||
if err != nil {
|
||||
t.Errorf("Error running js: %v", err)
|
||||
return
|
||||
}
|
||||
output := val.String()
|
||||
t.Logf("Output: %v", output)
|
||||
|
||||
regex := regexp.MustCompile(`^0x[0-9a-f]{130}$`)
|
||||
if !regex.MatchString(output) {
|
||||
t.Errorf("Signature is not 65 bytes represented in hexadecimal.")
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
func TestContract(t *testing.T) {
|
||||
t.Skip("contract testing is implemented with mining in ethash test mode. This takes about 7seconds to run. Unskip and run on demand")
|
||||
coinbase := common.HexToAddress(testAddress)
|
||||
tmp, repl, ethereum := testREPL(t, func(conf *eth.Config) {
|
||||
conf.Etherbase = coinbase
|
||||
conf.PowTest = true
|
||||
})
|
||||
if err := ethereum.Start(); err != nil {
|
||||
t.Errorf("error starting ethereum: %v", err)
|
||||
return
|
||||
}
|
||||
defer ethereum.Stop()
|
||||
defer os.RemoveAll(tmp)
|
||||
|
||||
// Temporarily disabled while registrar isn't migrated
|
||||
//reg := registrar.New(repl.xeth)
|
||||
//_, err := reg.SetGlobalRegistrar("", coinbase)
|
||||
//if err != nil {
|
||||
// t.Errorf("error setting HashReg: %v", err)
|
||||
//}
|
||||
//_, err = reg.SetHashReg("", coinbase)
|
||||
//if err != nil {
|
||||
// t.Errorf("error setting HashReg: %v", err)
|
||||
//}
|
||||
//_, err = reg.SetUrlHint("", coinbase)
|
||||
//if err != nil {
|
||||
// t.Errorf("error setting HashReg: %v", err)
|
||||
//}
|
||||
/* TODO:
|
||||
* lookup receipt and contract addresses by tx hash
|
||||
* name registration for HashReg and UrlHint addresses
|
||||
* mine those transactions
|
||||
* then set once more SetHashReg SetUrlHint
|
||||
*/
|
||||
|
||||
source := `contract test {\n` +
|
||||
" /// @notice Will multiply `a` by 7." + `\n` +
|
||||
` function multiply(uint a) returns(uint d) {\n` +
|
||||
` return a * 7;\n` +
|
||||
` }\n` +
|
||||
`}\n`
|
||||
|
||||
if checkEvalJSON(t, repl, `admin.stopNatSpec()`, `true`) != nil {
|
||||
return
|
||||
}
|
||||
|
||||
contractInfo, err := ioutil.ReadFile("info_test.json")
|
||||
if err != nil {
|
||||
t.Fatalf("%v", err)
|
||||
}
|
||||
if checkEvalJSON(t, repl, `primary = eth.accounts[0]`, `"`+testAddress+`"`) != nil {
|
||||
return
|
||||
}
|
||||
if checkEvalJSON(t, repl, `source = "`+source+`"`, `"`+source+`"`) != nil {
|
||||
return
|
||||
}
|
||||
|
||||
// if solc is found with right version, test it, otherwise read from file
|
||||
sol, err := compiler.New("")
|
||||
if err != nil {
|
||||
t.Logf("solc not found: mocking contract compilation step")
|
||||
} else if sol.Version() != solcVersion {
|
||||
t.Logf("WARNING: solc different version found (%v, test written for %v, may need to update)", sol.Version(), solcVersion)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
info, err := ioutil.ReadFile("info_test.json")
|
||||
if err != nil {
|
||||
t.Fatalf("%v", err)
|
||||
}
|
||||
_, err = repl.re.Run(`contract = JSON.parse(` + strconv.Quote(string(info)) + `)`)
|
||||
if err != nil {
|
||||
t.Errorf("%v", err)
|
||||
}
|
||||
} else {
|
||||
if checkEvalJSON(t, repl, `contract = eth.compile.solidity(source).test`, string(contractInfo)) != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
if checkEvalJSON(t, repl, `contract.code`, `"0x605880600c6000396000f3006000357c010000000000000000000000000000000000000000000000000000000090048063c6888fa114602e57005b603d6004803590602001506047565b8060005260206000f35b60006007820290506053565b91905056"`) != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if checkEvalJSON(
|
||||
t, repl,
|
||||
`contractaddress = eth.sendTransaction({from: primary, data: contract.code})`,
|
||||
`"0x46d69d55c3c4b86a924a92c9fc4720bb7bce1d74"`,
|
||||
) != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if !processTxs(repl, t, 8) {
|
||||
return
|
||||
}
|
||||
|
||||
callSetup := `abiDef = JSON.parse('[{"constant":false,"inputs":[{"name":"a","type":"uint256"}],"name":"multiply","outputs":[{"name":"d","type":"uint256"}],"type":"function"}]');
|
||||
Multiply7 = eth.contract(abiDef);
|
||||
multiply7 = Multiply7.at(contractaddress);
|
||||
`
|
||||
_, err = repl.re.Run(callSetup)
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error setting up contract, got %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
expNotice := ""
|
||||
if repl.lastConfirm != expNotice {
|
||||
t.Errorf("incorrect confirmation message: expected %v, got %v", expNotice, repl.lastConfirm)
|
||||
return
|
||||
}
|
||||
|
||||
if checkEvalJSON(t, repl, `admin.startNatSpec()`, `true`) != nil {
|
||||
return
|
||||
}
|
||||
if checkEvalJSON(t, repl, `multiply7.multiply.sendTransaction(6, { from: primary })`, `"0x4ef9088431a8033e4580d00e4eb2487275e031ff4163c7529df0ef45af17857b"`) != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if !processTxs(repl, t, 1) {
|
||||
return
|
||||
}
|
||||
|
||||
expNotice = `About to submit transaction (no NatSpec info found for contract: content hash not found for '0x87e2802265838c7f14bb69eecd2112911af6767907a702eeaa445239fb20711b'): {"params":[{"to":"0x46d69d55c3c4b86a924a92c9fc4720bb7bce1d74","data": "0xc6888fa10000000000000000000000000000000000000000000000000000000000000006"}]}`
|
||||
if repl.lastConfirm != expNotice {
|
||||
t.Errorf("incorrect confirmation message: expected\n%v, got\n%v", expNotice, repl.lastConfirm)
|
||||
return
|
||||
}
|
||||
|
||||
var contentHash = `"0x86d2b7cf1e72e9a7a3f8d96601f0151742a2f780f1526414304fbe413dc7f9bd"`
|
||||
if sol != nil && solcVersion != sol.Version() {
|
||||
modContractInfo := versionRE.ReplaceAll(contractInfo, []byte(`"compilerVersion":"`+sol.Version()+`"`))
|
||||
fmt.Printf("modified contractinfo:\n%s\n", modContractInfo)
|
||||
contentHash = `"` + common.ToHex(crypto.Keccak256([]byte(modContractInfo))) + `"`
|
||||
}
|
||||
if checkEvalJSON(t, repl, `filename = "/tmp/info.json"`, `"/tmp/info.json"`) != nil {
|
||||
return
|
||||
}
|
||||
if checkEvalJSON(t, repl, `contentHash = admin.saveInfo(contract.info, filename)`, contentHash) != nil {
|
||||
return
|
||||
}
|
||||
if checkEvalJSON(t, repl, `admin.register(primary, contractaddress, contentHash)`, `true`) != nil {
|
||||
return
|
||||
}
|
||||
if checkEvalJSON(t, repl, `admin.registerUrl(primary, contentHash, "file://"+filename)`, `true`) != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if checkEvalJSON(t, repl, `admin.startNatSpec()`, `true`) != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if !processTxs(repl, t, 3) {
|
||||
return
|
||||
}
|
||||
|
||||
if checkEvalJSON(t, repl, `multiply7.multiply.sendTransaction(6, { from: primary })`, `"0x66d7635c12ad0b231e66da2f987ca3dfdca58ffe49c6442aa55960858103fd0c"`) != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if !processTxs(repl, t, 1) {
|
||||
return
|
||||
}
|
||||
|
||||
expNotice = "Will multiply 6 by 7."
|
||||
if repl.lastConfirm != expNotice {
|
||||
t.Errorf("incorrect confirmation message: expected\n%v, got\n%v", expNotice, repl.lastConfirm)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
func pendingTransactions(repl *testjethre, t *testing.T) (txc int64, err error) {
|
||||
var ethereum *eth.Ethereum
|
||||
repl.stack.Service(ðereum)
|
||||
|
||||
txs := ethereum.TxPool().GetTransactions()
|
||||
return int64(len(txs)), nil
|
||||
}
|
||||
|
||||
func processTxs(repl *testjethre, t *testing.T, expTxc int) bool {
|
||||
var txc int64
|
||||
var err error
|
||||
for i := 0; i < 50; i++ {
|
||||
txc, err = pendingTransactions(repl, t)
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error checking pending transactions: %v", err)
|
||||
return false
|
||||
}
|
||||
if expTxc < int(txc) {
|
||||
t.Errorf("too many pending transactions: expected %v, got %v", expTxc, txc)
|
||||
return false
|
||||
} else if expTxc == int(txc) {
|
||||
break
|
||||
}
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
}
|
||||
if int(txc) != expTxc {
|
||||
t.Errorf("incorrect number of pending transactions, expected %v, got %v", expTxc, txc)
|
||||
return false
|
||||
}
|
||||
var ethereum *eth.Ethereum
|
||||
repl.stack.Service(ðereum)
|
||||
|
||||
err = ethereum.StartMining(runtime.NumCPU(), "")
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error mining: %v", err)
|
||||
return false
|
||||
}
|
||||
defer ethereum.StopMining()
|
||||
|
||||
timer := time.NewTimer(100 * time.Second)
|
||||
blockNr := ethereum.BlockChain().CurrentBlock().Number()
|
||||
height := new(big.Int).Add(blockNr, big.NewInt(1))
|
||||
repl.wait <- height
|
||||
select {
|
||||
case <-timer.C:
|
||||
// if times out make sure the xeth loop does not block
|
||||
go func() {
|
||||
select {
|
||||
case repl.wait <- nil:
|
||||
case <-repl.wait:
|
||||
}
|
||||
}()
|
||||
case <-repl.wait:
|
||||
}
|
||||
txc, err = pendingTransactions(repl, t)
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error checking pending transactions: %v", err)
|
||||
return false
|
||||
}
|
||||
if txc != 0 {
|
||||
t.Errorf("%d trasactions were not mined", txc)
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func checkEvalJSON(t *testing.T, re *testjethre, expr, want string) error {
|
||||
val, err := re.re.Run("JSON.stringify(" + expr + ")")
|
||||
if err == nil && val.String() != want {
|
||||
err = fmt.Errorf("Output mismatch for `%s`:\ngot: %s\nwant: %s", expr, val.String(), want)
|
||||
}
|
||||
if err != nil {
|
||||
_, file, line, _ := runtime.Caller(1)
|
||||
file = filepath.Base(file)
|
||||
fmt.Printf("\t%s:%d: %v\n", file, line, err)
|
||||
t.Fail()
|
||||
}
|
||||
return err
|
||||
}
|
156
cmd/geth/main.go
@ -22,7 +22,6 @@ import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"os/signal"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strconv"
|
||||
@ -33,6 +32,7 @@ import (
|
||||
"github.com/ethereum/ethash"
|
||||
"github.com/ethereum/go-ethereum/cmd/utils"
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/console"
|
||||
"github.com/ethereum/go-ethereum/core"
|
||||
"github.com/ethereum/go-ethereum/eth"
|
||||
"github.com/ethereum/go-ethereum/ethdb"
|
||||
@ -47,11 +47,11 @@ import (
|
||||
)
|
||||
|
||||
const (
|
||||
clientIdentifier = "Geth" // Client identifier to advertise over the network
|
||||
versionMajor = 1 // Major version component of the current release
|
||||
versionMinor = 5 // Minor version component of the current release
|
||||
versionPatch = 0 // Patch version component of the current release
|
||||
versionMeta = "unstable" // Version metadata to append to the version string
|
||||
clientIdentifier = "Geth" // Client identifier to advertise over the network
|
||||
versionMajor = 1 // Major version component of the current release
|
||||
versionMinor = 4 // Minor version component of the current release
|
||||
versionPatch = 6 // Patch version component of the current release
|
||||
versionMeta = "stable" // Version metadata to append to the version string
|
||||
|
||||
versionOracle = "0xfa7b9770ca4cb04296cac84f37736d4041251cdf" // Ethereum address of the Geth release oracle
|
||||
)
|
||||
@ -95,6 +95,9 @@ func init() {
|
||||
monitorCommand,
|
||||
accountCommand,
|
||||
walletCommand,
|
||||
consoleCommand,
|
||||
attachCommand,
|
||||
javascriptCommand,
|
||||
{
|
||||
Action: makedag,
|
||||
Name: "makedag",
|
||||
@ -138,36 +141,6 @@ The output of this command is supposed to be machine-readable.
|
||||
The init command initialises a new genesis block and definition for the network.
|
||||
This is a destructive action and changes the network in which you will be
|
||||
participating.
|
||||
`,
|
||||
},
|
||||
{
|
||||
Action: console,
|
||||
Name: "console",
|
||||
Usage: `Geth Console: interactive JavaScript environment`,
|
||||
Description: `
|
||||
The Geth console is an interactive shell for the JavaScript runtime environment
|
||||
which exposes a node admin interface as well as the Ðapp JavaScript API.
|
||||
See https://github.com/ethereum/go-ethereum/wiki/Javascipt-Console
|
||||
`,
|
||||
},
|
||||
{
|
||||
Action: attach,
|
||||
Name: "attach",
|
||||
Usage: `Geth Console: interactive JavaScript environment (connect to node)`,
|
||||
Description: `
|
||||
The Geth console is an interactive shell for the JavaScript runtime environment
|
||||
which exposes a node admin interface as well as the Ðapp JavaScript API.
|
||||
See https://github.com/ethereum/go-ethereum/wiki/Javascipt-Console.
|
||||
This command allows you to open a console on a running geth node.
|
||||
`,
|
||||
},
|
||||
{
|
||||
Action: execScripts,
|
||||
Name: "js",
|
||||
Usage: `executes the given JavaScript files in the Geth JavaScript VM`,
|
||||
Description: `
|
||||
The JavaScript VM exposes a node admin interface as well as the Ðapp
|
||||
JavaScript API. See https://github.com/ethereum/go-ethereum/wiki/Javascipt-Console
|
||||
`,
|
||||
},
|
||||
}
|
||||
@ -214,7 +187,7 @@ JavaScript API. See https://github.com/ethereum/go-ethereum/wiki/Javascipt-Conso
|
||||
utils.IPCApiFlag,
|
||||
utils.IPCPathFlag,
|
||||
utils.ExecFlag,
|
||||
utils.PreLoadJSFlag,
|
||||
utils.PreloadJSFlag,
|
||||
utils.WhisperEnabledFlag,
|
||||
utils.DevModeFlag,
|
||||
utils.TestNetFlag,
|
||||
@ -244,6 +217,12 @@ JavaScript API. See https://github.com/ethereum/go-ethereum/wiki/Javascipt-Conso
|
||||
// Start system runtime metrics collection
|
||||
go metrics.CollectProcessMetrics(3 * time.Second)
|
||||
|
||||
// This should be the only place where reporting is enabled
|
||||
// because it is not intended to run while testing.
|
||||
// In addition to this check, bad block reports are sent only
|
||||
// for chains with the main network genesis block and network id 1.
|
||||
eth.EnableBadBlockReporting = true
|
||||
|
||||
utils.SetupNetwork(ctx)
|
||||
|
||||
// Deprecation warning.
|
||||
@ -257,7 +236,7 @@ JavaScript API. See https://github.com/ethereum/go-ethereum/wiki/Javascipt-Conso
|
||||
app.After = func(ctx *cli.Context) error {
|
||||
logger.Flush()
|
||||
debug.Exit()
|
||||
utils.Stdin.Close() // Resets terminal mode.
|
||||
console.Stdin.Close() // Resets terminal mode.
|
||||
return nil
|
||||
}
|
||||
}
|
||||
@ -298,36 +277,6 @@ func geth(ctx *cli.Context) {
|
||||
node.Wait()
|
||||
}
|
||||
|
||||
// attach will connect to a running geth instance, attaching a JavaScript console to it.
|
||||
func attach(ctx *cli.Context) {
|
||||
// attach to a running geth instance
|
||||
client, err := utils.NewRemoteRPCClient(ctx)
|
||||
if err != nil {
|
||||
utils.Fatalf("Unable to attach to geth: %v", err)
|
||||
}
|
||||
|
||||
repl := newLightweightJSRE(
|
||||
ctx.GlobalString(utils.JSpathFlag.Name),
|
||||
client,
|
||||
ctx.GlobalString(utils.DataDirFlag.Name),
|
||||
true,
|
||||
)
|
||||
|
||||
// preload user defined JS files into the console
|
||||
err = repl.preloadJSFiles(ctx)
|
||||
if err != nil {
|
||||
utils.Fatalf("unable to preload JS file %v", err)
|
||||
}
|
||||
|
||||
// in case the exec flag holds a JS statement execute it and return
|
||||
if ctx.GlobalString(utils.ExecFlag.Name) != "" {
|
||||
repl.batch(ctx.GlobalString(utils.ExecFlag.Name))
|
||||
} else {
|
||||
repl.welcome()
|
||||
repl.interactive()
|
||||
}
|
||||
}
|
||||
|
||||
// initGenesis will initialise the given JSON format genesis file and writes it as
|
||||
// the zero'd block (i.e. genesis) or will fail hard if it can't succeed.
|
||||
func initGenesis(ctx *cli.Context) {
|
||||
@ -353,77 +302,6 @@ func initGenesis(ctx *cli.Context) {
|
||||
glog.V(logger.Info).Infof("successfully wrote genesis block and/or chain rule set: %x", block.Hash())
|
||||
}
|
||||
|
||||
// console starts a new geth node, attaching a JavaScript console to it at the
|
||||
// same time.
|
||||
func console(ctx *cli.Context) {
|
||||
// Create and start the node based on the CLI flags
|
||||
node := utils.MakeSystemNode(clientIdentifier, verString, relConfig, makeDefaultExtra(), ctx)
|
||||
startNode(ctx, node)
|
||||
|
||||
// Attach to the newly started node, and either execute script or become interactive
|
||||
client, err := node.Attach()
|
||||
if err != nil {
|
||||
utils.Fatalf("Failed to attach to the inproc geth: %v", err)
|
||||
}
|
||||
repl := newJSRE(node,
|
||||
ctx.GlobalString(utils.JSpathFlag.Name),
|
||||
ctx.GlobalString(utils.RPCCORSDomainFlag.Name),
|
||||
client, true)
|
||||
|
||||
// preload user defined JS files into the console
|
||||
err = repl.preloadJSFiles(ctx)
|
||||
if err != nil {
|
||||
utils.Fatalf("%v", err)
|
||||
}
|
||||
|
||||
// in case the exec flag holds a JS statement execute it and return
|
||||
if script := ctx.GlobalString(utils.ExecFlag.Name); script != "" {
|
||||
repl.batch(script)
|
||||
} else {
|
||||
repl.welcome()
|
||||
repl.interactive()
|
||||
}
|
||||
node.Stop()
|
||||
}
|
||||
|
||||
// execScripts starts a new geth node based on the CLI flags, and executes each
|
||||
// of the JavaScript files specified as command arguments.
|
||||
func execScripts(ctx *cli.Context) {
|
||||
// Create and start the node based on the CLI flags
|
||||
node := utils.MakeSystemNode(clientIdentifier, verString, relConfig, makeDefaultExtra(), ctx)
|
||||
startNode(ctx, node)
|
||||
defer node.Stop()
|
||||
|
||||
// Attach to the newly started node and execute the given scripts
|
||||
client, err := node.Attach()
|
||||
if err != nil {
|
||||
utils.Fatalf("Failed to attach to the inproc geth: %v", err)
|
||||
}
|
||||
repl := newJSRE(node,
|
||||
ctx.GlobalString(utils.JSpathFlag.Name),
|
||||
ctx.GlobalString(utils.RPCCORSDomainFlag.Name),
|
||||
client, false)
|
||||
|
||||
// Run all given files.
|
||||
for _, file := range ctx.Args() {
|
||||
if err = repl.re.Exec(file); err != nil {
|
||||
break
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
utils.Fatalf("JavaScript Error: %v", jsErrorString(err))
|
||||
}
|
||||
// JS files loaded successfully.
|
||||
// Wait for pending callbacks, but stop for Ctrl-C.
|
||||
abort := make(chan os.Signal, 1)
|
||||
signal.Notify(abort, os.Interrupt)
|
||||
go func() {
|
||||
<-abort
|
||||
repl.re.Stop(false)
|
||||
}()
|
||||
repl.re.Stop(true)
|
||||
}
|
||||
|
||||
// startNode boots up the system node and all registered protocols, after which
|
||||
// it unlocks any requested accounts, and starts the RPC/IPC interfaces and the
|
||||
// miner.
|
||||
|
@ -20,7 +20,6 @@ import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"fmt"
|
||||
"html/template"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
@ -28,6 +27,7 @@ import (
|
||||
"regexp"
|
||||
"sync"
|
||||
"testing"
|
||||
"text/template"
|
||||
"time"
|
||||
)
|
||||
|
||||
@ -45,6 +45,7 @@ type testgeth struct {
|
||||
// template variables for expect
|
||||
Datadir string
|
||||
Executable string
|
||||
Etherbase string
|
||||
Func template.FuncMap
|
||||
|
||||
removeDatadir bool
|
||||
@ -67,11 +68,15 @@ func init() {
|
||||
func runGeth(t *testing.T, args ...string) *testgeth {
|
||||
tt := &testgeth{T: t, Executable: os.Args[0]}
|
||||
for i, arg := range args {
|
||||
if arg == "-datadir" || arg == "--datadir" {
|
||||
switch {
|
||||
case arg == "-datadir" || arg == "--datadir":
|
||||
if i < len(args)-1 {
|
||||
tt.Datadir = args[i+1]
|
||||
}
|
||||
break
|
||||
case arg == "-etherbase" || arg == "--etherbase":
|
||||
if i < len(args)-1 {
|
||||
tt.Etherbase = args[i+1]
|
||||
}
|
||||
}
|
||||
}
|
||||
if tt.Datadir == "" {
|
||||
|
@ -101,7 +101,7 @@ var AppHelpFlagGroups = []flagGroup{
|
||||
utils.RPCCORSDomainFlag,
|
||||
utils.JSpathFlag,
|
||||
utils.ExecFlag,
|
||||
utils.PreLoadJSFlag,
|
||||
utils.PreloadJSFlag,
|
||||
},
|
||||
},
|
||||
{
|
||||
|
@ -302,7 +302,7 @@ var (
|
||||
Name: "exec",
|
||||
Usage: "Execute JavaScript statement (only in combination with console/attach)",
|
||||
}
|
||||
PreLoadJSFlag = cli.StringFlag{
|
||||
PreloadJSFlag = cli.StringFlag{
|
||||
Name: "preload",
|
||||
Usage: "Comma separated list of JavaScript files to preload into the console",
|
||||
}
|
||||
@ -864,3 +864,20 @@ func MakeChain(ctx *cli.Context) (chain *core.BlockChain, chainDb ethdb.Database
|
||||
}
|
||||
return chain, chainDb
|
||||
}
|
||||
|
||||
// MakeConsolePreloads retrieves the absolute paths for the console JavaScript
|
||||
// scripts to preload before starting.
|
||||
func MakeConsolePreloads(ctx *cli.Context) []string {
|
||||
// Skip preloading if there's nothing to preload
|
||||
if ctx.GlobalString(PreloadJSFlag.Name) == "" {
|
||||
return nil
|
||||
}
|
||||
// Otherwise resolve absolute paths and return them
|
||||
preloads := []string{}
|
||||
|
||||
assets := ctx.GlobalString(JSpathFlag.Name)
|
||||
for _, file := range strings.Split(ctx.GlobalString(PreloadJSFlag.Name), ",") {
|
||||
preloads = append(preloads, common.AbsolutePath(assets, strings.TrimSpace(file)))
|
||||
}
|
||||
return preloads
|
||||
}
|
||||
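For illustration, here is a hedged, self-contained sketch of the path resolution MakeConsolePreloads performs on the --preload flag. The helper name resolvePreloads and the sample paths are assumptions, and common.AbsolutePath is only approximated by the filepath.Join fallback:

```go
package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

// resolvePreloads splits a comma separated preload value and resolves each
// entry against the JS assets directory, mirroring MakeConsolePreloads above.
func resolvePreloads(assets, preloadFlag string) []string {
	if preloadFlag == "" {
		return nil
	}
	var preloads []string
	for _, file := range strings.Split(preloadFlag, ",") {
		file = strings.TrimSpace(file)
		if !filepath.IsAbs(file) {
			file = filepath.Join(assets, file) // common.AbsolutePath behaves similarly
		}
		preloads = append(preloads, file)
	}
	return preloads
}

func main() {
	fmt.Println(resolvePreloads("/opt/geth/js", "utils.js, contracts/token.js"))
	// [/opt/geth/js/utils.js /opt/geth/js/contracts/token.js]
}
```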
|
@ -1,98 +0,0 @@
|
||||
// Copyright 2016 The go-ethereum Authors
|
||||
// This file is part of go-ethereum.
|
||||
//
|
||||
// go-ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// go-ethereum is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package utils
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/peterh/liner"
|
||||
)
|
||||
|
||||
// Holds the stdin line reader.
|
||||
// Only this reader may be used for input because it keeps
|
||||
// an internal buffer.
|
||||
var Stdin = newUserInputReader()
|
||||
|
||||
type userInputReader struct {
|
||||
*liner.State
|
||||
warned bool
|
||||
supported bool
|
||||
normalMode liner.ModeApplier
|
||||
rawMode liner.ModeApplier
|
||||
}
|
||||
|
||||
func newUserInputReader() *userInputReader {
|
||||
r := new(userInputReader)
|
||||
// Get the original mode before calling NewLiner.
|
||||
// This is usually regular "cooked" mode where characters echo.
|
||||
normalMode, _ := liner.TerminalMode()
|
||||
// Turn on liner. It switches to raw mode.
|
||||
r.State = liner.NewLiner()
|
||||
rawMode, err := liner.TerminalMode()
|
||||
if err != nil || !liner.TerminalSupported() {
|
||||
r.supported = false
|
||||
} else {
|
||||
r.supported = true
|
||||
r.normalMode = normalMode
|
||||
r.rawMode = rawMode
|
||||
// Switch back to normal mode while we're not prompting.
|
||||
normalMode.ApplyMode()
|
||||
}
|
||||
return r
|
||||
}
|
||||
|
||||
func (r *userInputReader) Prompt(prompt string) (string, error) {
|
||||
if r.supported {
|
||||
r.rawMode.ApplyMode()
|
||||
defer r.normalMode.ApplyMode()
|
||||
} else {
|
||||
// liner tries to be smart about printing the prompt
|
||||
// and doesn't print anything if input is redirected.
|
||||
// Un-smart it by printing the prompt always.
|
||||
fmt.Print(prompt)
|
||||
prompt = ""
|
||||
defer fmt.Println()
|
||||
}
|
||||
return r.State.Prompt(prompt)
|
||||
}
|
||||
|
||||
func (r *userInputReader) PasswordPrompt(prompt string) (passwd string, err error) {
|
||||
if r.supported {
|
||||
r.rawMode.ApplyMode()
|
||||
defer r.normalMode.ApplyMode()
|
||||
return r.State.PasswordPrompt(prompt)
|
||||
}
|
||||
if !r.warned {
|
||||
fmt.Println("!! Unsupported terminal, password will be echoed.")
|
||||
r.warned = true
|
||||
}
|
||||
// Just as in Prompt, handle printing the prompt here instead of relying on liner.
|
||||
fmt.Print(prompt)
|
||||
passwd, err = r.State.Prompt("")
|
||||
fmt.Println()
|
||||
return passwd, err
|
||||
}
|
||||
|
||||
func (r *userInputReader) ConfirmPrompt(prompt string) (bool, error) {
|
||||
prompt = prompt + " [y/N] "
|
||||
input, err := r.Prompt(prompt)
|
||||
if len(input) > 0 && strings.ToUpper(input[:1]) == "Y" {
|
||||
return true, nil
|
||||
}
|
||||
return false, err
|
||||
}
|
@ -1,301 +0,0 @@
|
||||
// Copyright 2015 The go-ethereum Authors
|
||||
// This file is part of go-ethereum.
|
||||
//
|
||||
// go-ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// go-ethereum is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package utils
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/jsre"
|
||||
"github.com/ethereum/go-ethereum/rpc"
|
||||
|
||||
"github.com/robertkrimen/otto"
|
||||
)
|
||||
|
||||
type Jeth struct {
|
||||
re *jsre.JSRE
|
||||
client rpc.Client
|
||||
}
|
||||
|
||||
// NewJeth creates a new backend for the JSRE console
|
||||
func NewJeth(re *jsre.JSRE, client rpc.Client) *Jeth {
|
||||
return &Jeth{re, client}
|
||||
}
|
||||
|
||||
// err returns an error object for the given error code and message.
|
||||
func (self *Jeth) err(call otto.FunctionCall, code int, msg string, id interface{}) (response otto.Value) {
|
||||
m := rpc.JSONErrResponse{
|
||||
Version: "2.0",
|
||||
Id: id,
|
||||
Error: rpc.JSONError{
|
||||
Code: code,
|
||||
Message: msg,
|
||||
},
|
||||
}
|
||||
|
||||
errObj, _ := json.Marshal(m.Error)
|
||||
errRes, _ := json.Marshal(m)
|
||||
|
||||
call.Otto.Run("ret_error = " + string(errObj))
|
||||
res, _ := call.Otto.Run("ret_response = " + string(errRes))
|
||||
|
||||
return res
|
||||
}
|
||||
|
||||
// UnlockAccount asks the user for the password and then executes the jeth.UnlockAccount callback in the jsre.
|
||||
// It will need the public address for the account to unlock as first argument.
|
||||
// The second argument is an optional string with the password. If not given the user is prompted for the password.
|
||||
// The third argument is an optional integer which specifies for how long the account will be unlocked (in seconds).
|
||||
func (self *Jeth) UnlockAccount(call otto.FunctionCall) (response otto.Value) {
|
||||
var account, passwd otto.Value
|
||||
duration := otto.NullValue()
|
||||
|
||||
if !call.Argument(0).IsString() {
|
||||
fmt.Println("first argument must be the account to unlock")
|
||||
return otto.FalseValue()
|
||||
}
|
||||
|
||||
account = call.Argument(0)
|
||||
|
||||
// if password is not given or as null value -> ask user for password
|
||||
if call.Argument(1).IsUndefined() || call.Argument(1).IsNull() {
|
||||
fmt.Printf("Unlock account %s\n", account)
|
||||
if input, err := Stdin.PasswordPrompt("Passphrase: "); err != nil {
|
||||
throwJSExeception(err.Error())
|
||||
} else {
|
||||
passwd, _ = otto.ToValue(input)
|
||||
}
|
||||
} else {
|
||||
if !call.Argument(1).IsString() {
|
||||
throwJSExeception("password must be a string")
|
||||
}
|
||||
passwd = call.Argument(1)
|
||||
}
|
||||
|
||||
// third argument is the duration how long the account must be unlocked.
|
||||
// verify that its a number.
|
||||
if call.Argument(2).IsDefined() && !call.Argument(2).IsNull() {
|
||||
if !call.Argument(2).IsNumber() {
|
||||
throwJSExeception("unlock duration must be a number")
|
||||
}
|
||||
duration = call.Argument(2)
|
||||
}
|
||||
|
||||
// jeth.unlockAccount will send the request to the backend.
|
||||
if val, err := call.Otto.Call("jeth.unlockAccount", nil, account, passwd, duration); err == nil {
|
||||
return val
|
||||
} else {
|
||||
throwJSExeception(err.Error())
|
||||
}
|
||||
|
||||
return otto.FalseValue()
|
||||
}
|
||||
|
||||
// NewAccount asks the user for the password and then executes the jeth.newAccount callback in the jsre
|
||||
func (self *Jeth) NewAccount(call otto.FunctionCall) (response otto.Value) {
|
||||
var passwd string
|
||||
if len(call.ArgumentList) == 0 {
|
||||
var err error
|
||||
passwd, err = Stdin.PasswordPrompt("Passphrase: ")
|
||||
if err != nil {
|
||||
return otto.FalseValue()
|
||||
}
|
||||
passwd2, err := Stdin.PasswordPrompt("Repeat passphrase: ")
|
||||
if err != nil {
|
||||
return otto.FalseValue()
|
||||
}
|
||||
|
||||
if passwd != passwd2 {
|
||||
fmt.Println("Passphrases don't match")
|
||||
return otto.FalseValue()
|
||||
}
|
||||
} else if len(call.ArgumentList) == 1 && call.Argument(0).IsString() {
|
||||
passwd, _ = call.Argument(0).ToString()
|
||||
} else {
|
||||
fmt.Println("expected 0 or 1 string argument")
|
||||
return otto.FalseValue()
|
||||
}
|
||||
|
||||
ret, err := call.Otto.Call("jeth.newAccount", nil, passwd)
|
||||
if err == nil {
|
||||
return ret
|
||||
}
|
||||
fmt.Println(err)
|
||||
return otto.FalseValue()
|
||||
}
|
||||
|
||||
// Send will serialize the first argument, send it to the node and returns the response.
|
||||
func (self *Jeth) Send(call otto.FunctionCall) (response otto.Value) {
|
||||
// verify we got a batch request (array) or a single request (object)
|
||||
ro := call.Argument(0).Object()
|
||||
if ro == nil || (ro.Class() != "Array" && ro.Class() != "Object") {
|
||||
throwJSExeception("Internal Error: request must be an object or array")
|
||||
}
|
||||
|
||||
// convert otto vm arguments to go values by JSON serialising and parsing.
|
||||
data, err := call.Otto.Call("JSON.stringify", nil, ro)
|
||||
if err != nil {
|
||||
throwJSExeception(err.Error())
|
||||
}
|
||||
|
||||
jsonreq, _ := data.ToString()
|
||||
|
||||
// parse arguments to JSON rpc requests, either to an array (batch) or to a single request.
|
||||
var reqs []rpc.JSONRequest
|
||||
batch := true
|
||||
if err = json.Unmarshal([]byte(jsonreq), &reqs); err != nil {
|
||||
// single request?
|
||||
reqs = make([]rpc.JSONRequest, 1)
|
||||
if err = json.Unmarshal([]byte(jsonreq), &reqs[0]); err != nil {
|
||||
throwJSExeception("invalid request")
|
||||
}
|
||||
batch = false
|
||||
}
|
||||
|
||||
call.Otto.Set("response_len", len(reqs))
|
||||
call.Otto.Run("var ret_response = new Array(response_len);")
|
||||
|
||||
for i, req := range reqs {
|
||||
if err := self.client.Send(&req); err != nil {
|
||||
return self.err(call, -32603, err.Error(), req.Id)
|
||||
}
|
||||
|
||||
result := make(map[string]interface{})
|
||||
if err = self.client.Recv(&result); err != nil {
|
||||
return self.err(call, -32603, err.Error(), req.Id)
|
||||
}
|
||||
|
||||
id, _ := result["id"]
|
||||
jsonver, _ := result["jsonrpc"]
|
||||
|
||||
call.Otto.Set("ret_id", id)
|
||||
call.Otto.Set("ret_jsonrpc", jsonver)
|
||||
call.Otto.Set("response_idx", i)
|
||||
|
||||
// call was successful
|
||||
if res, ok := result["result"]; ok {
|
||||
payload, _ := json.Marshal(res)
|
||||
call.Otto.Set("ret_result", string(payload))
|
||||
response, err = call.Otto.Run(`
|
||||
ret_response[response_idx] = { jsonrpc: ret_jsonrpc, id: ret_id, result: JSON.parse(ret_result) };
|
||||
`)
|
||||
continue
|
||||
}
|
||||
|
||||
// request returned an error
|
||||
if res, ok := result["error"]; ok {
|
||||
payload, _ := json.Marshal(res)
|
||||
call.Otto.Set("ret_result", string(payload))
|
||||
response, err = call.Otto.Run(`
|
||||
ret_response[response_idx] = { jsonrpc: ret_jsonrpc, id: ret_id, error: JSON.parse(ret_result) };
|
||||
`)
|
||||
continue
|
||||
}
|
||||
|
||||
return self.err(call, -32603, fmt.Sprintf("Invalid response"), new(int64))
|
||||
}
|
||||
|
||||
if !batch {
|
||||
call.Otto.Run("ret_response = ret_response[0];")
|
||||
}
|
||||
|
||||
// if a callback was given execute it.
|
||||
if call.Argument(1).IsObject() {
|
||||
call.Otto.Set("callback", call.Argument(1))
|
||||
call.Otto.Run(`
|
||||
if (Object.prototype.toString.call(callback) == '[object Function]') {
|
||||
callback(null, ret_response);
|
||||
}
|
||||
`)
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// throwJSExeception panics on an otto value; the Otto VM will then throw msg as a JavaScript error.
|
||||
func throwJSExeception(msg interface{}) otto.Value {
|
||||
p, _ := otto.ToValue(msg)
|
||||
panic(p)
|
||||
}
|
||||
|
||||
// Sleep will halt the console for arg[0] seconds.
|
||||
func (self *Jeth) Sleep(call otto.FunctionCall) (response otto.Value) {
|
||||
if len(call.ArgumentList) >= 1 {
|
||||
if call.Argument(0).IsNumber() {
|
||||
sleep, _ := call.Argument(0).ToInteger()
|
||||
time.Sleep(time.Duration(sleep) * time.Second)
|
||||
return otto.TrueValue()
|
||||
}
|
||||
}
|
||||
return throwJSExeception("usage: sleep(<sleep in seconds>)")
|
||||
}
|
||||
|
||||
// SleepBlocks will wait for a specified number of new blocks or at most for a
// given number of seconds. sleepBlocks(nBlocks[, maxSleep]).
|
||||
func (self *Jeth) SleepBlocks(call otto.FunctionCall) (response otto.Value) {
|
||||
nBlocks := int64(0)
|
||||
maxSleep := int64(9999999999999999) // indefinitely
|
||||
|
||||
nArgs := len(call.ArgumentList)
|
||||
|
||||
if nArgs == 0 {
|
||||
throwJSExeception("usage: sleepBlocks(<n blocks>[, max sleep in seconds])")
|
||||
}
|
||||
|
||||
if nArgs >= 1 {
|
||||
if call.Argument(0).IsNumber() {
|
||||
nBlocks, _ = call.Argument(0).ToInteger()
|
||||
} else {
|
||||
throwJSExeception("expected number as first argument")
|
||||
}
|
||||
}
|
||||
|
||||
if nArgs >= 2 {
|
||||
if call.Argument(1).IsNumber() {
|
||||
maxSleep, _ = call.Argument(1).ToInteger()
|
||||
} else {
|
||||
throwJSExeception("expected number as second argument")
|
||||
}
|
||||
}
|
||||
|
||||
// go through the console, this will allow web3 to call the appropriate
|
||||
// callbacks if a delayed response or notification is received.
|
||||
currentBlockNr := func() int64 {
|
||||
result, err := call.Otto.Run("eth.blockNumber")
|
||||
if err != nil {
|
||||
throwJSExeception(err.Error())
|
||||
}
|
||||
blockNr, err := result.ToInteger()
|
||||
if err != nil {
|
||||
throwJSExeception(err.Error())
|
||||
}
|
||||
return blockNr
|
||||
}
|
||||
|
||||
targetBlockNr := currentBlockNr() + nBlocks
|
||||
deadline := time.Now().Add(time.Duration(maxSleep) * time.Second)
|
||||
|
||||
for time.Now().Before(deadline) {
|
||||
if currentBlockNr() >= targetBlockNr {
|
||||
return otto.TrueValue()
|
||||
}
|
||||
time.Sleep(time.Second)
|
||||
}
|
||||
|
||||
return otto.FalseValue()
|
||||
}
|
@ -149,7 +149,6 @@ func (sol *Solidity) Compile(source string) (map[string]*Contract, error) {
|
||||
compilerOptions := strings.Join(params, " ")
|
||||
|
||||
cmd := exec.Command(sol.solcPath, params...)
|
||||
cmd.Dir = wd
|
||||
cmd.Stdin = strings.NewReader(source)
|
||||
cmd.Stderr = stderr
|
||||
|
||||
|
317
console/bridge.go
Normal file
@ -0,0 +1,317 @@
|
||||
// Copyright 2015 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package console
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/logger"
|
||||
"github.com/ethereum/go-ethereum/logger/glog"
|
||||
"github.com/ethereum/go-ethereum/rpc"
|
||||
"github.com/robertkrimen/otto"
|
||||
)
|
||||
|
||||
// bridge is a collection of JavaScript utility methods to bridge the .js runtime
|
||||
// environment and the Go RPC connection backing the remote method calls.
|
||||
type bridge struct {
|
||||
client rpc.Client // RPC client to execute Ethereum requests through
|
||||
prompter UserPrompter // Input prompter to allow interactive user feedback
|
||||
printer io.Writer // Output writer to serialize any display strings to
|
||||
}
|
||||
|
||||
// newBridge creates a new JavaScript wrapper around an RPC client.
|
||||
func newBridge(client rpc.Client, prompter UserPrompter, printer io.Writer) *bridge {
|
||||
return &bridge{
|
||||
client: client,
|
||||
prompter: prompter,
|
||||
printer: printer,
|
||||
}
|
||||
}
|
||||
|
||||
// NewAccount is a wrapper around the personal.newAccount RPC method that uses a
|
||||
// non-echoing password prompt to acquire the passphrase and executes the original
|
||||
// RPC method (saved in jeth.newAccount) with it to actually execute the RPC call.
|
||||
func (b *bridge) NewAccount(call otto.FunctionCall) (response otto.Value) {
|
||||
var (
|
||||
password string
|
||||
confirm string
|
||||
err error
|
||||
)
|
||||
switch {
|
||||
// No password was specified, prompt the user for it
|
||||
case len(call.ArgumentList) == 0:
|
||||
if password, err = b.prompter.PromptPassword("Passphrase: "); err != nil {
|
||||
throwJSException(err.Error())
|
||||
}
|
||||
if confirm, err = b.prompter.PromptPassword("Repeat passphrase: "); err != nil {
|
||||
throwJSException(err.Error())
|
||||
}
|
||||
if password != confirm {
|
||||
throwJSException("passphrases don't match!")
|
||||
}
|
||||
|
||||
// A single string password was specified, use that
|
||||
case len(call.ArgumentList) == 1 && call.Argument(0).IsString():
|
||||
password, _ = call.Argument(0).ToString()
|
||||
|
||||
// Otherwise fail with some error
|
||||
default:
|
||||
throwJSException("expected 0 or 1 string argument")
|
||||
}
|
||||
// Password acquired, execute the call and return
|
||||
ret, err := call.Otto.Call("jeth.newAccount", nil, password)
|
||||
if err != nil {
|
||||
throwJSException(err.Error())
|
||||
}
|
||||
return ret
|
||||
}
|
||||
|
||||
// UnlockAccount is a wrapper around the personal.unlockAccount RPC method that
|
||||
// uses a non-echoing password prompt to acquire the passphrase and executes the
|
||||
// original RPC method (saved in jeth.unlockAccount) with it to actually execute
|
||||
// the RPC call.
|
||||
func (b *bridge) UnlockAccount(call otto.FunctionCall) (response otto.Value) {
|
||||
// Make sure we have an account specified to unlock
|
||||
if !call.Argument(0).IsString() {
|
||||
throwJSException("first argument must be the account to unlock")
|
||||
}
|
||||
account := call.Argument(0)
|
||||
|
||||
// If password is not given or is the null value, prompt the user for it
|
||||
var passwd otto.Value
|
||||
|
||||
if call.Argument(1).IsUndefined() || call.Argument(1).IsNull() {
|
||||
fmt.Fprintf(b.printer, "Unlock account %s\n", account)
|
||||
if input, err := b.prompter.PromptPassword("Passphrase: "); err != nil {
|
||||
throwJSException(err.Error())
|
||||
} else {
|
||||
passwd, _ = otto.ToValue(input)
|
||||
}
|
||||
} else {
|
||||
if !call.Argument(1).IsString() {
|
||||
throwJSException("password must be a string")
|
||||
}
|
||||
passwd = call.Argument(1)
|
||||
}
|
||||
// The third argument is the duration for which the account must be unlocked.
|
||||
duration := otto.NullValue()
|
||||
if call.Argument(2).IsDefined() && !call.Argument(2).IsNull() {
|
||||
if !call.Argument(2).IsNumber() {
|
||||
throwJSException("unlock duration must be a number")
|
||||
}
|
||||
duration = call.Argument(2)
|
||||
}
|
||||
// Send the request to the backend and return
|
||||
val, err := call.Otto.Call("jeth.unlockAccount", nil, account, passwd, duration)
|
||||
if err != nil {
|
||||
throwJSException(err.Error())
|
||||
}
|
||||
return val
|
||||
}
|
||||
|
||||
// Sleep will block the console for the specified number of seconds.
|
||||
func (b *bridge) Sleep(call otto.FunctionCall) (response otto.Value) {
|
||||
if call.Argument(0).IsNumber() {
|
||||
sleep, _ := call.Argument(0).ToInteger()
|
||||
time.Sleep(time.Duration(sleep) * time.Second)
|
||||
return otto.TrueValue()
|
||||
}
|
||||
return throwJSException("usage: sleep(<number of seconds>)")
|
||||
}
|
||||
|
||||
// SleepBlocks will block the console for a specified number of new blocks optionally
|
||||
// until the given timeout is reached.
|
||||
func (b *bridge) SleepBlocks(call otto.FunctionCall) (response otto.Value) {
|
||||
var (
|
||||
blocks = int64(0)
|
||||
sleep = int64(9999999999999999) // indefinitely
|
||||
)
|
||||
// Parse the input parameters for the sleep
|
||||
nArgs := len(call.ArgumentList)
|
||||
if nArgs == 0 {
|
||||
throwJSException("usage: sleepBlocks(<n blocks>[, max sleep in seconds])")
|
||||
}
|
||||
if nArgs >= 1 {
|
||||
if call.Argument(0).IsNumber() {
|
||||
blocks, _ = call.Argument(0).ToInteger()
|
||||
} else {
|
||||
throwJSException("expected number as first argument")
|
||||
}
|
||||
}
|
||||
if nArgs >= 2 {
|
||||
if call.Argument(1).IsNumber() {
|
||||
sleep, _ = call.Argument(1).ToInteger()
|
||||
} else {
|
||||
throwJSException("expected number as second argument")
|
||||
}
|
||||
}
|
||||
// go through the console, this will allow web3 to call the appropriate
|
||||
// callbacks if a delayed response or notification is received.
|
||||
blockNumber := func() int64 {
|
||||
result, err := call.Otto.Run("eth.blockNumber")
|
||||
if err != nil {
|
||||
throwJSException(err.Error())
|
||||
}
|
||||
block, err := result.ToInteger()
|
||||
if err != nil {
|
||||
throwJSException(err.Error())
|
||||
}
|
||||
return block
|
||||
}
|
||||
// Poll the current block number until either it or a timeout is reached
|
||||
targetBlockNr := blockNumber() + blocks
|
||||
deadline := time.Now().Add(time.Duration(sleep) * time.Second)
|
||||
|
||||
for time.Now().Before(deadline) {
|
||||
if blockNumber() >= targetBlockNr {
|
||||
return otto.TrueValue()
|
||||
}
|
||||
time.Sleep(time.Second)
|
||||
}
|
||||
return otto.FalseValue()
|
||||
}
|
||||
|
||||
// Send will serialize the first argument, send it to the node and returns the response.
|
||||
func (b *bridge) Send(call otto.FunctionCall) (response otto.Value) {
|
||||
// Ensure that we've got a batch request (array) or a single request (object)
|
||||
arg := call.Argument(0).Object()
|
||||
if arg == nil || (arg.Class() != "Array" && arg.Class() != "Object") {
|
||||
throwJSException("request must be an object or array")
|
||||
}
|
||||
// Convert the otto VM arguments to Go values
|
||||
data, err := call.Otto.Call("JSON.stringify", nil, arg)
|
||||
if err != nil {
|
||||
throwJSException(err.Error())
|
||||
}
|
||||
reqjson, err := data.ToString()
|
||||
if err != nil {
|
||||
throwJSException(err.Error())
|
||||
}
|
||||
|
||||
var (
|
||||
reqs []rpc.JSONRequest
|
||||
batch = true
|
||||
)
|
||||
if err = json.Unmarshal([]byte(reqjson), &reqs); err != nil {
|
||||
// single request?
|
||||
reqs = make([]rpc.JSONRequest, 1)
|
||||
if err = json.Unmarshal([]byte(reqjson), &reqs[0]); err != nil {
|
||||
throwJSException("invalid request")
|
||||
}
|
||||
batch = false
|
||||
}
|
||||
// Iteratively execute the requests
|
||||
call.Otto.Set("response_len", len(reqs))
|
||||
call.Otto.Run("var ret_response = new Array(response_len);")
|
||||
|
||||
for i, req := range reqs {
|
||||
// Execute the RPC request and parse the reply
|
||||
if err = b.client.Send(&req); err != nil {
|
||||
return newErrorResponse(call, -32603, err.Error(), req.Id)
|
||||
}
|
||||
result := make(map[string]interface{})
|
||||
if err = b.client.Recv(&result); err != nil {
|
||||
return newErrorResponse(call, -32603, err.Error(), req.Id)
|
||||
}
|
||||
// Feed the reply back into the JavaScript runtime environment
|
||||
id, _ := result["id"]
|
||||
jsonver, _ := result["jsonrpc"]
|
||||
|
||||
call.Otto.Set("ret_id", id)
|
||||
call.Otto.Set("ret_jsonrpc", jsonver)
|
||||
call.Otto.Set("response_idx", i)
|
||||
|
||||
if res, ok := result["result"]; ok {
|
||||
payload, _ := json.Marshal(res)
|
||||
call.Otto.Set("ret_result", string(payload))
|
||||
response, err = call.Otto.Run(`
|
||||
ret_response[response_idx] = { jsonrpc: ret_jsonrpc, id: ret_id, result: JSON.parse(ret_result) };
|
||||
`)
|
||||
continue
|
||||
}
|
||||
if res, ok := result["error"]; ok {
|
||||
payload, _ := json.Marshal(res)
|
||||
call.Otto.Set("ret_result", string(payload))
|
||||
response, err = call.Otto.Run(`
|
||||
ret_response[response_idx] = { jsonrpc: ret_jsonrpc, id: ret_id, error: JSON.parse(ret_result) };
|
||||
`)
|
||||
continue
|
||||
}
|
||||
return newErrorResponse(call, -32603, fmt.Sprintf("Invalid response"), new(int64))
|
||||
}
|
||||
// Convert single requests back from batch ones
|
||||
if !batch {
|
||||
call.Otto.Run("ret_response = ret_response[0];")
|
||||
}
|
||||
// Execute any registered callbacks
|
||||
if call.Argument(1).IsObject() {
|
||||
call.Otto.Set("callback", call.Argument(1))
|
||||
call.Otto.Run(`
|
||||
if (Object.prototype.toString.call(callback) == '[object Function]') {
|
||||
callback(null, ret_response);
|
||||
}
|
||||
`)
|
||||
}
|
||||
return
|
||||
}
|
||||
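The decode step in Send above first tries to parse the payload as a batch and only then falls back to a single request. A hedged standalone sketch of that pattern, using a simplified request type in place of rpc.JSONRequest:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// request is a stand-in for rpc.JSONRequest, kept minimal for illustration.
type request struct {
	Method string        `json:"method"`
	Params []interface{} `json:"params"`
	Id     interface{}   `json:"id"`
}

// decodeRequests returns the parsed requests and whether the input was a batch.
func decodeRequests(raw string) ([]request, bool, error) {
	var reqs []request
	if err := json.Unmarshal([]byte(raw), &reqs); err == nil {
		return reqs, true, nil // batch (JSON array)
	}
	var single request
	if err := json.Unmarshal([]byte(raw), &single); err != nil {
		return nil, false, fmt.Errorf("invalid request: %v", err)
	}
	return []request{single}, false, nil // single request (JSON object)
}

func main() {
	reqs, batch, _ := decodeRequests(`{"method":"eth_blockNumber","params":[],"id":1}`)
	fmt.Println(len(reqs), batch) // 1 false

	reqs, batch, _ = decodeRequests(`[{"method":"eth_accounts","id":2},{"method":"net_version","id":3}]`)
	fmt.Println(len(reqs), batch) // 2 true
}
```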
|
||||
// throwJSException panics on an otto.Value. The Otto VM will recover from the
|
||||
// Go panic and throw msg as a JavaScript error.
|
||||
func throwJSException(msg interface{}) otto.Value {
|
||||
val, err := otto.ToValue(msg)
|
||||
if err != nil {
|
||||
glog.V(logger.Error).Infof("Failed to serialize JavaScript exception %v: %v", msg, err)
|
||||
}
|
||||
panic(val)
|
||||
}
|
||||
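throwJSException relies on otto recovering a Go panic that carries an otto.Value and rethrowing it as a JavaScript exception. A minimal sketch of that mechanism in isolation (illustrative only, not geth code):

```go
package main

import (
	"fmt"

	"github.com/robertkrimen/otto"
)

func main() {
	vm := otto.New()
	vm.Set("boom", func(call otto.FunctionCall) otto.Value {
		val, _ := otto.ToValue("something went wrong")
		panic(val) // otto recovers this and throws it as a JS exception
	})
	result, _ := vm.Run(`
		var msg = "no error";
		try { boom(); } catch (e) { msg = "caught: " + e; }
		msg;
	`)
	fmt.Println(result) // caught: something went wrong
}
```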
|
||||
// newErrorResponse creates a JSON RPC error response for a specific request id,
|
||||
// containing the specified error code and error message. Beside returning the
|
||||
// error to the caller, it also sets the ret_error and ret_response JavaScript
|
||||
// variables.
|
||||
func newErrorResponse(call otto.FunctionCall, code int, msg string, id interface{}) (response otto.Value) {
|
||||
// Bundle the error into a JSON RPC call response
|
||||
res := rpc.JSONErrResponse{
|
||||
Version: rpc.JSONRPCVersion,
|
||||
Id: id,
|
||||
Error: rpc.JSONError{
|
||||
Code: code,
|
||||
Message: msg,
|
||||
},
|
||||
}
|
||||
// Serialize the error response into JavaScript variables
|
||||
errObj, err := json.Marshal(res.Error)
|
||||
if err != nil {
|
||||
glog.V(logger.Error).Infof("Failed to serialize JSON RPC error: %v", err)
|
||||
}
|
||||
resObj, err := json.Marshal(res)
|
||||
if err != nil {
|
||||
glog.V(logger.Error).Infof("Failed to serialize JSON RPC error response: %v", err)
|
||||
}
|
||||
|
||||
if _, err = call.Otto.Run("ret_error = " + string(errObj)); err != nil {
|
||||
glog.V(logger.Error).Infof("Failed to set `ret_error` to the occurred error: %v", err)
|
||||
}
|
||||
resVal, err := call.Otto.Run("ret_response = " + string(resObj))
|
||||
if err != nil {
|
||||
glog.V(logger.Error).Infof("Failed to set `ret_response` to the JSON RPC response: %v", err)
|
||||
}
|
||||
return resVal
|
||||
}
|
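For reference, the ret_response value produced by newErrorResponse is JSON of roughly the following shape; the sketch below only mirrors rpc.JSONErrResponse with local stand-in types:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// jsonError and errResponse approximate rpc.JSONError / rpc.JSONErrResponse.
type jsonError struct {
	Code    int    `json:"code"`
	Message string `json:"message"`
}

type errResponse struct {
	Version string      `json:"jsonrpc"`
	Id      interface{} `json:"id"`
	Error   jsonError   `json:"error"`
}

func main() {
	res := errResponse{
		Version: "2.0",
		Id:      1,
		Error:   jsonError{Code: -32603, Message: "connection refused"},
	}
	out, _ := json.Marshal(res)
	fmt.Println(string(out))
	// {"jsonrpc":"2.0","id":1,"error":{"code":-32603,"message":"connection refused"}}
}
```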
371
console/console.go
Normal file
@ -0,0 +1,371 @@
|
||||
// Copyright 2015 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package console
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"os/signal"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/ethereum/go-ethereum/internal/jsre"
|
||||
"github.com/ethereum/go-ethereum/internal/web3ext"
|
||||
"github.com/ethereum/go-ethereum/rpc"
|
||||
"github.com/peterh/liner"
|
||||
"github.com/robertkrimen/otto"
|
||||
)
|
||||
|
||||
var (
|
||||
passwordRegexp = regexp.MustCompile("personal.[nus]")
|
||||
onlyWhitespace = regexp.MustCompile("^\\s*$")
|
||||
exit = regexp.MustCompile("^\\s*exit\\s*;*\\s*$")
|
||||
)
|
||||
|
||||
// HistoryFile is the file within the data directory to store input scrollback.
|
||||
const HistoryFile = "history"
|
||||
|
||||
// DefaultPrompt is the default prompt line prefix to use for user input querying.
|
||||
const DefaultPrompt = "> "
|
||||
|
||||
// Config is the collection of configurations to fine tune the behavior of the
|
||||
// JavaScript console.
|
||||
type Config struct {
|
||||
DataDir string // Data directory to store the console history at
|
||||
DocRoot string // Filesystem path from where to load JavaScript files from
|
||||
Client rpc.Client // RPC client to execute Ethereum requests through
|
||||
Prompt string // Input prompt prefix string (defaults to DefaultPrompt)
|
||||
Prompter UserPrompter // Input prompter to allow interactive user feedback (defaults to TerminalPrompter)
|
||||
Printer io.Writer // Output writer to serialize any display strings to (defaults to os.Stdout)
|
||||
Preload []string // Absolute paths to JavaScript files to preload
|
||||
}
|
||||
|
||||
// Console is a JavaScript interpreted runtime environment. It is a fully fledged
|
||||
// JavaScript console attached to a running node via an external or in-process RPC
|
||||
// client.
|
||||
type Console struct {
|
||||
client rpc.Client // RPC client to execute Ethereum requests through
|
||||
jsre *jsre.JSRE // JavaScript runtime environment running the interpreter
|
||||
prompt string // Input prompt prefix string
|
||||
prompter UserPrompter // Input prompter to allow interactive user feedback
|
||||
histPath string // Absolute path to the console scrollback history
|
||||
history []string // Scroll history maintained by the console
|
||||
printer io.Writer // Output writer to serialize any display strings to
|
||||
}
|
||||
|
||||
func New(config Config) (*Console, error) {
|
||||
// Handle unset config values gracefully
|
||||
if config.Prompter == nil {
|
||||
config.Prompter = Stdin
|
||||
}
|
||||
if config.Prompt == "" {
|
||||
config.Prompt = DefaultPrompt
|
||||
}
|
||||
if config.Printer == nil {
|
||||
config.Printer = os.Stdout
|
||||
}
|
||||
// Initialize the console and return
|
||||
console := &Console{
|
||||
client: config.Client,
|
||||
jsre: jsre.New(config.DocRoot, config.Printer),
|
||||
prompt: config.Prompt,
|
||||
prompter: config.Prompter,
|
||||
printer: config.Printer,
|
||||
histPath: filepath.Join(config.DataDir, HistoryFile),
|
||||
}
|
||||
if err := console.init(config.Preload); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return console, nil
|
||||
}
|
||||
|
||||
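For reference, a minimal sketch (not part of this change set) of how a caller might construct a console, leaning on New to fill in the documented defaults; the data directory, doc root and the way the rpc.Client is obtained are assumptions:

    // client is an rpc.Client, e.g. obtained from an attached node as in the tests below.
    cfg := console.Config{
        DataDir: "/tmp/console-demo", // hypothetical history location
        DocRoot: "scripts",           // hypothetical JavaScript asset path
        Client:  client,
        // Prompt, Prompter and Printer are deliberately left unset: New falls
        // back to DefaultPrompt, Stdin and os.Stdout respectively.
    }
    c, err := console.New(cfg)
    if err != nil {
        log.Fatalf("failed to start console: %v", err)
    }
    defer c.Stop(false)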
// init retrieves the available APIs from the remote RPC provider and initializes
|
||||
// the console's JavaScript namespaces based on the exposed modules.
|
||||
func (c *Console) init(preload []string) error {
|
||||
// Initialize the JavaScript <-> Go RPC bridge
|
||||
bridge := newBridge(c.client, c.prompter, c.printer)
|
||||
c.jsre.Set("jeth", struct{}{})
|
||||
|
||||
jethObj, _ := c.jsre.Get("jeth")
|
||||
jethObj.Object().Set("send", bridge.Send)
|
||||
jethObj.Object().Set("sendAsync", bridge.Send)
|
||||
|
||||
consoleObj, _ := c.jsre.Get("console")
|
||||
consoleObj.Object().Set("log", c.consoleOutput)
|
||||
consoleObj.Object().Set("error", c.consoleOutput)
|
||||
|
||||
// Load all the internal utility JavaScript libraries
|
||||
if err := c.jsre.Compile("bignumber.js", jsre.BigNumber_JS); err != nil {
|
||||
return fmt.Errorf("bignumber.js: %v", err)
|
||||
}
|
||||
if err := c.jsre.Compile("web3.js", jsre.Web3_JS); err != nil {
|
||||
return fmt.Errorf("web3.js: %v", err)
|
||||
}
|
||||
if _, err := c.jsre.Run("var Web3 = require('web3');"); err != nil {
|
||||
return fmt.Errorf("web3 require: %v", err)
|
||||
}
|
||||
if _, err := c.jsre.Run("var web3 = new Web3(jeth);"); err != nil {
|
||||
return fmt.Errorf("web3 provider: %v", err)
|
||||
}
|
||||
// Load the supported APIs into the JavaScript runtime environment
|
||||
apis, err := c.client.SupportedModules()
|
||||
if err != nil {
|
||||
return fmt.Errorf("api modules: %v", err)
|
||||
}
|
||||
flatten := "var eth = web3.eth; var personal = web3.personal; "
|
||||
for api := range apis {
|
||||
if api == "web3" {
|
||||
continue // manually mapped or ignore
|
||||
}
|
||||
if file, ok := web3ext.Modules[api]; ok {
|
||||
if err = c.jsre.Compile(fmt.Sprintf("%s.js", api), file); err != nil {
|
||||
return fmt.Errorf("%s.js: %v", api, err)
|
||||
}
|
||||
flatten += fmt.Sprintf("var %s = web3.%s; ", api, api)
|
||||
}
|
||||
}
|
||||
if _, err = c.jsre.Run(flatten); err != nil {
|
||||
return fmt.Errorf("namespace flattening: %v", err)
|
||||
}
|
||||
// Initialize the global name register (disabled for now)
|
||||
//c.jsre.Run(`var GlobalRegistrar = eth.contract(` + registrar.GlobalRegistrarAbi + `); registrar = GlobalRegistrar.at("` + registrar.GlobalRegistrarAddr + `");`)
|
||||
|
||||
// If the console is in interactive mode, instrument password related methods to query the user
|
||||
if c.prompter != nil {
|
||||
// Retrieve the account management object to instrument
|
||||
personal, err := c.jsre.Get("personal")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// Override the unlockAccount and newAccount methods since these require user interaction.
|
||||
// Assign the original web3 callbacks to jeth.unlockAccount and jeth.newAccount in the Console.
|
||||
// These will be called by the jeth.* methods after they have obtained the password from the user and will send
|
||||
// the original web3 request to the backend.
|
||||
if obj := personal.Object(); obj != nil { // make sure the personal api is enabled over the interface
|
||||
if _, err = c.jsre.Run(`jeth.unlockAccount = personal.unlockAccount;`); err != nil {
|
||||
return fmt.Errorf("personal.unlockAccount: %v", err)
|
||||
}
|
||||
if _, err = c.jsre.Run(`jeth.newAccount = personal.newAccount;`); err != nil {
|
||||
return fmt.Errorf("personal.newAccount: %v", err)
|
||||
}
|
||||
obj.Set("unlockAccount", bridge.UnlockAccount)
|
||||
obj.Set("newAccount", bridge.NewAccount)
|
||||
}
|
||||
}
|
||||
// The admin.sleep and admin.sleepBlocks are offered by the console and not by the RPC layer.
|
||||
admin, err := c.jsre.Get("admin")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if obj := admin.Object(); obj != nil { // make sure the admin api is enabled over the interface
|
||||
obj.Set("sleepBlocks", bridge.SleepBlocks)
|
||||
obj.Set("sleep", bridge.Sleep)
|
||||
}
|
||||
// Preload any JavaScript files before starting the console
|
||||
for _, path := range preload {
|
||||
if err := c.jsre.Exec(path); err != nil {
|
||||
failure := err.Error()
|
||||
if ottoErr, ok := err.(*otto.Error); ok {
|
||||
failure = ottoErr.String()
|
||||
}
|
||||
return fmt.Errorf("%s: %v", path, failure)
|
||||
}
|
||||
}
|
||||
// Configure the console's input prompter for scrollback and tab completion
|
||||
if c.prompter != nil {
|
||||
if content, err := ioutil.ReadFile(c.histPath); err != nil {
|
||||
c.prompter.SetHistory(nil)
|
||||
} else {
|
||||
c.history = strings.Split(string(content), "\n")
|
||||
c.prompter.SetHistory(c.history)
|
||||
}
|
||||
c.prompter.SetWordCompleter(c.AutoCompleteInput)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
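As a worked example (module set assumed for illustration): if SupportedModules reported the admin, net and txpool APIs, the flatten statement assembled above would promote each of them to a top-level JavaScript variable:

    flatten := "var eth = web3.eth; var personal = web3.personal; "
    for _, api := range []string{"admin", "net", "txpool"} { // hypothetical module set
        flatten += fmt.Sprintf("var %s = web3.%s; ", api, api)
    }
    // flatten == "var eth = web3.eth; var personal = web3.personal; var admin = web3.admin; var net = web3.net; var txpool = web3.txpool; "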
// consoleOutput is an override for the console.log and console.error methods to
|
||||
// stream the output into the configured output stream instead of stdout.
|
||||
func (c *Console) consoleOutput(call otto.FunctionCall) otto.Value {
|
||||
output := []string{}
|
||||
for _, argument := range call.ArgumentList {
|
||||
output = append(output, fmt.Sprintf("%v", argument))
|
||||
}
|
||||
fmt.Fprintln(c.printer, strings.Join(output, " "))
|
||||
return otto.Value{}
|
||||
}
|
||||
|
||||
// AutoCompleteInput is a pre-assembled word completer to be used by the user
|
||||
// input prompter to provide hints to the user about the methods available.
|
||||
func (c *Console) AutoCompleteInput(line string, pos int) (string, []string, string) {
|
||||
// No completions can be provided for empty inputs
|
||||
if len(line) == 0 || pos == 0 {
|
||||
return "", nil, ""
|
||||
}
|
||||
// Chunk the input data to the relevant part for autocompletion
|
||||
// E.g. in case of nested lines eth.getBalance(eth.coinb<tab><tab>
|
||||
start := 0
|
||||
for start = pos - 1; start > 0; start-- {
|
||||
// Skip all methods and namespaces (i.e. including the dot)
|
||||
if line[start] == '.' || (line[start] >= 'a' && line[start] <= 'z') || (line[start] >= 'A' && line[start] <= 'Z') {
|
||||
continue
|
||||
}
|
||||
// Handle web3 in a special way (i.e. other numbers aren't auto completed)
|
||||
if start >= 3 && line[start-3:start] == "web3" {
|
||||
start -= 3
|
||||
continue
|
||||
}
|
||||
// We've hit an unexpected character, autocomplete from here
|
||||
start++
|
||||
break
|
||||
}
|
||||
return line[:start], c.jsre.CompleteKeywords(line[start:pos]), line[pos:]
|
||||
}
|
||||
|
||||
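To make the chunking above concrete, a sketch of the values AutoCompleteInput returns for a nested expression; the actual candidates depend on the keywords registered in the JS runtime and are only assumed here:

    line := "eth.getBalance(eth.coinb"
    prefix, candidates, suffix := c.AutoCompleteInput(line, len(line)) // c is a *Console
    // prefix holds everything before the partial word: "eth.getBalance("
    // candidates are the completions of "eth.coinb", e.g. []string{"eth.coinbase"} (assumed)
    // suffix is whatever follows the cursor, here ""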
// Welcome shows a summary of the current Geth instance and some metadata about the
|
||||
// console's available modules.
|
||||
func (c *Console) Welcome() {
|
||||
// Print some generic Geth metadata
|
||||
fmt.Fprintf(c.printer, "Welcome to the Geth JavaScript console!\n\n")
|
||||
c.jsre.Run(`
|
||||
console.log("instance: " + web3.version.node);
|
||||
console.log("coinbase: " + eth.coinbase);
|
||||
console.log("at block: " + eth.blockNumber + " (" + new Date(1000 * eth.getBlock(eth.blockNumber).timestamp) + ")");
|
||||
console.log(" datadir: " + admin.datadir);
|
||||
`)
|
||||
// List all the supported modules for the user to call
|
||||
if apis, err := c.client.SupportedModules(); err == nil {
|
||||
modules := make([]string, 0, len(apis))
|
||||
for api, version := range apis {
|
||||
modules = append(modules, fmt.Sprintf("%s:%s", api, version))
|
||||
}
|
||||
sort.Strings(modules)
|
||||
fmt.Fprintln(c.printer, " modules:", strings.Join(modules, " "))
|
||||
}
|
||||
fmt.Fprintln(c.printer)
|
||||
}
|
||||
|
||||
// Evaluate executes code and pretty prints the result to the specified output
|
||||
// stream.
|
||||
func (c *Console) Evaluate(statement string) error {
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
fmt.Fprintf(c.printer, "[native] error: %v\n", r)
|
||||
}
|
||||
}()
|
||||
if err := c.jsre.Evaluate(statement, c.printer); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Interactive starts an interactive user session, where input is prompted from
|
||||
// the configured user prompter.
|
||||
func (c *Console) Interactive() {
|
||||
var (
|
||||
prompt = c.prompt // Current prompt line (used for multi-line inputs)
|
||||
indents = 0 // Current number of input indents (used for multi-line inputs)
|
||||
input = "" // Current user input
|
||||
scheduler = make(chan string) // Channel to send the next prompt on and receive the input
|
||||
)
|
||||
// Start a goroutine to listen for prompt requests and send back inputs
|
||||
go func() {
|
||||
for {
|
||||
// Read the next user input
|
||||
line, err := c.prompter.PromptInput(<-scheduler)
|
||||
if err != nil {
|
||||
// In case of an error, either clear the prompt or fail
|
||||
if err == liner.ErrPromptAborted { // ctrl-C
|
||||
prompt, indents, input = c.prompt, 0, ""
|
||||
scheduler <- ""
|
||||
continue
|
||||
}
|
||||
close(scheduler)
|
||||
return
|
||||
}
|
||||
// User input retrieved, send for interpretation and loop
|
||||
scheduler <- line
|
||||
}
|
||||
}()
|
||||
// Monitor Ctrl-C too in case the input is empty and we need to bail
|
||||
abort := make(chan os.Signal, 1)
|
||||
signal.Notify(abort, os.Interrupt)
|
||||
|
||||
// Start sending prompts to the user and reading back inputs
|
||||
for {
|
||||
// Send the next prompt, triggering an input read and process the result
|
||||
scheduler <- prompt
|
||||
select {
|
||||
case <-abort:
|
||||
// User forcefully quit the console
|
||||
fmt.Fprintln(c.printer, "caught interrupt, exiting")
|
||||
return
|
||||
|
||||
case line, ok := <-scheduler:
|
||||
// User input was returned by the prompter, handle special cases
|
||||
if !ok || (indents <= 0 && exit.MatchString(line)) {
|
||||
return
|
||||
}
|
||||
if onlyWhitespace.MatchString(line) {
|
||||
continue
|
||||
}
|
||||
// Append the line to the input and check for multi-line interpretation
|
||||
input += line + "\n"
|
||||
|
||||
indents = strings.Count(input, "{") + strings.Count(input, "(") - strings.Count(input, "}") - strings.Count(input, ")")
|
||||
if indents <= 0 {
|
||||
prompt = c.prompt
|
||||
} else {
|
||||
prompt = strings.Repeat("..", indents*2) + " "
|
||||
}
|
||||
// If all the needed lines are present, save the command and run
|
||||
if indents <= 0 {
|
||||
if len(input) > 0 && input[0] != ' ' && !passwordRegexp.MatchString(input) {
|
||||
if command := strings.TrimSpace(input); len(c.history) == 0 || command != c.history[len(c.history)-1] {
|
||||
c.history = append(c.history, command)
|
||||
if c.prompter != nil {
|
||||
c.prompter.AppendHistory(command)
|
||||
}
|
||||
}
|
||||
}
|
||||
c.Evaluate(input)
|
||||
input = ""
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
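A short worked example (statement assumed) of how the brace and parenthesis counter above drives multi-line input:

    input := "function add(a, b) {\n"
    indents := strings.Count(input, "{") + strings.Count(input, "(") -
        strings.Count(input, "}") - strings.Count(input, ")")
    // indents == 1: the '(' is balanced by ')' but the '{' is still open, so the
    // console switches the prompt to ".... " and keeps reading lines until the
    // closing '}' brings the counter back to zero and Evaluate runs the input.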
// Execute runs the JavaScript file specified as the argument.
|
||||
func (c *Console) Execute(path string) error {
|
||||
return c.jsre.Exec(path)
|
||||
}
|
||||
|
||||
// Stop cleans up the console and terminates the runtime environment.
|
||||
func (c *Console) Stop(graceful bool) error {
|
||||
if err := ioutil.WriteFile(c.histPath, []byte(strings.Join(c.history, "\n")), 0600); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := os.Chmod(c.histPath, 0600); err != nil { // Force 0600, even if it was different previously
|
||||
return err
|
||||
}
|
||||
c.jsre.Stop(graceful)
|
||||
return nil
|
||||
}
|
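Putting the pieces of this file together, a minimal sketch of the intended lifecycle (how datadir and the rpc.Client are obtained is left as an assumption):

    c, err := console.New(console.Config{DataDir: datadir, Client: client})
    if err != nil {
        log.Fatal(err)
    }
    c.Welcome()     // print instance metadata and the available modules
    c.Interactive() // block, prompting the user until "exit" or Ctrl-C
    c.Stop(false)   // persist the scrollback history and stop the JS runtime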
296
console/console_test.go
Normal file
@ -0,0 +1,296 @@
|
||||
// Copyright 2015 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package console
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"math/big"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/accounts"
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/core"
|
||||
"github.com/ethereum/go-ethereum/eth"
|
||||
"github.com/ethereum/go-ethereum/internal/jsre"
|
||||
"github.com/ethereum/go-ethereum/node"
|
||||
)
|
||||
|
||||
const (
|
||||
testInstance = "console-tester"
|
||||
testAddress = "0x8605cdbbdb6d264aa742e77020dcbc58fcdce182"
|
||||
)
|
||||
|
||||
// hookedPrompter implements UserPrompter to simulate user input via channels.
|
||||
type hookedPrompter struct {
|
||||
scheduler chan string
|
||||
}
|
||||
|
||||
func (p *hookedPrompter) PromptInput(prompt string) (string, error) {
|
||||
// Send the prompt to the tester
|
||||
select {
|
||||
case p.scheduler <- prompt:
|
||||
case <-time.After(time.Second):
|
||||
return "", errors.New("prompt timeout")
|
||||
}
|
||||
// Retrieve the response and feed to the console
|
||||
select {
|
||||
case input := <-p.scheduler:
|
||||
return input, nil
|
||||
case <-time.After(time.Second):
|
||||
return "", errors.New("input timeout")
|
||||
}
|
||||
}
|
||||
|
||||
func (p *hookedPrompter) PromptPassword(prompt string) (string, error) {
|
||||
return "", errors.New("not implemented")
|
||||
}
|
||||
func (p *hookedPrompter) PromptConfirm(prompt string) (bool, error) {
|
||||
return false, errors.New("not implemented")
|
||||
}
|
||||
func (p *hookedPrompter) SetHistory(history []string) {}
|
||||
func (p *hookedPrompter) AppendHistory(command string) {}
|
||||
func (p *hookedPrompter) SetWordCompleter(completer WordCompleter) {}
|
||||
|
||||
// tester is a console test environment for the console tests to operate on.
|
||||
type tester struct {
|
||||
workspace string
|
||||
stack *node.Node
|
||||
ethereum *eth.Ethereum
|
||||
console *Console
|
||||
input *hookedPrompter
|
||||
output *bytes.Buffer
|
||||
|
||||
lastConfirm string
|
||||
}
|
||||
|
||||
// newTester creates a test environment based on which the console can operate.
|
||||
// Please ensure you call Close() on the returned tester to avoid leaks.
|
||||
func newTester(t *testing.T, confOverride func(*eth.Config)) *tester {
|
||||
// Create a temporary storage for the node keys and initialize it
|
||||
workspace, err := ioutil.TempDir("", "console-tester-")
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create temporary keystore: %v", err)
|
||||
}
|
||||
accman := accounts.NewPlaintextManager(filepath.Join(workspace, "keystore"))
|
||||
|
||||
// Create a networkless protocol stack and start an Ethereum service within
|
||||
stack, err := node.New(&node.Config{DataDir: workspace, Name: testInstance, NoDiscovery: true})
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create node: %v", err)
|
||||
}
|
||||
ethConf := ð.Config{
|
||||
ChainConfig: &core.ChainConfig{HomesteadBlock: new(big.Int)},
|
||||
Etherbase: common.HexToAddress(testAddress),
|
||||
AccountManager: accman,
|
||||
PowTest: true,
|
||||
}
|
||||
if confOverride != nil {
|
||||
confOverride(ethConf)
|
||||
}
|
||||
if err = stack.Register(func(ctx *node.ServiceContext) (node.Service, error) { return eth.New(ctx, ethConf) }); err != nil {
|
||||
t.Fatalf("failed to register Ethereum protocol: %v", err)
|
||||
}
|
||||
// Start the node and assemble the JavaScript console around it
|
||||
if err = stack.Start(); err != nil {
|
||||
t.Fatalf("failed to start test stack: %v", err)
|
||||
}
|
||||
client, err := stack.Attach()
|
||||
if err != nil {
|
||||
t.Fatalf("failed to attach to node: %v", err)
|
||||
}
|
||||
prompter := &hookedPrompter{scheduler: make(chan string)}
|
||||
printer := new(bytes.Buffer)
|
||||
|
||||
console, err := New(Config{
|
||||
DataDir: stack.DataDir(),
|
||||
DocRoot: "testdata",
|
||||
Client: client,
|
||||
Prompter: prompter,
|
||||
Printer: printer,
|
||||
Preload: []string{"preload.js"},
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create JavaScript console: %v", err)
|
||||
}
|
||||
// Create the final tester and return
|
||||
var ethereum *eth.Ethereum
|
||||
stack.Service(&ethereum)
|
||||
|
||||
return &tester{
|
||||
workspace: workspace,
|
||||
stack: stack,
|
||||
ethereum: ethereum,
|
||||
console: console,
|
||||
input: prompter,
|
||||
output: printer,
|
||||
}
|
||||
}
|
||||
|
||||
// Close cleans up any temporary data folders and held resources.
|
||||
func (env *tester) Close(t *testing.T) {
|
||||
if err := env.console.Stop(false); err != nil {
|
||||
t.Errorf("failed to stop embedded console: %v", err)
|
||||
}
|
||||
if err := env.stack.Stop(); err != nil {
|
||||
t.Errorf("failed to stop embedded node: %v", err)
|
||||
}
|
||||
os.RemoveAll(env.workspace)
|
||||
}
|
||||
|
||||
// Tests that the node lists the correct welcome message, notably that it contains
|
||||
// the instance name, coinbase account, block number, data directory and supported
|
||||
// console modules.
|
||||
func TestWelcome(t *testing.T) {
|
||||
tester := newTester(t, nil)
|
||||
defer tester.Close(t)
|
||||
|
||||
tester.console.Welcome()
|
||||
|
||||
output := string(tester.output.Bytes())
|
||||
if want := "Welcome"; !strings.Contains(output, want) {
|
||||
t.Fatalf("console output missing welcome message: have\n%s\nwant also %s", output, want)
|
||||
}
|
||||
if want := fmt.Sprintf("instance: %s", testInstance); !strings.Contains(output, want) {
|
||||
t.Fatalf("console output missing instance: have\n%s\nwant also %s", output, want)
|
||||
}
|
||||
if want := fmt.Sprintf("coinbase: %s", testAddress); !strings.Contains(output, want) {
|
||||
t.Fatalf("console output missing coinbase: have\n%s\nwant also %s", output, want)
|
||||
}
|
||||
if want := "at block: 0"; !strings.Contains(output, want) {
|
||||
t.Fatalf("console output missing sync status: have\n%s\nwant also %s", output, want)
|
||||
}
|
||||
if want := fmt.Sprintf("datadir: %s", tester.workspace); !strings.Contains(output, want) {
|
||||
t.Fatalf("console output missing datadir: have\n%s\nwant also %s", output, want)
|
||||
}
|
||||
}
|
||||
|
||||
// Tests that JavaScript statement evaluation works as intended.
|
||||
func TestEvaluate(t *testing.T) {
|
||||
tester := newTester(t, nil)
|
||||
defer tester.Close(t)
|
||||
|
||||
tester.console.Evaluate("2 + 2")
|
||||
if output := string(tester.output.Bytes()); !strings.Contains(output, "4") {
|
||||
t.Fatalf("statement evaluation failed: have %s, want %s", output, "4")
|
||||
}
|
||||
}
|
||||
|
||||
// Tests that the console can be used in interactive mode.
|
||||
func TestInteractive(t *testing.T) {
|
||||
// Create a tester and run an interactive console in the background
|
||||
tester := newTester(t, nil)
|
||||
defer tester.Close(t)
|
||||
|
||||
go tester.console.Interactive()
|
||||
|
||||
// Wait for a prompt and send a statement back
|
||||
select {
|
||||
case <-tester.input.scheduler:
|
||||
case <-time.After(time.Second):
|
||||
t.Fatalf("initial prompt timeout")
|
||||
}
|
||||
select {
|
||||
case tester.input.scheduler <- "2+2":
|
||||
case <-time.After(time.Second):
|
||||
t.Fatalf("input feedback timeout")
|
||||
}
|
||||
// Wait for the second prompt and ensure the first statement was evaluated
|
||||
select {
|
||||
case <-tester.input.scheduler:
|
||||
case <-time.After(time.Second):
|
||||
t.Fatalf("secondary prompt timeout")
|
||||
}
|
||||
if output := string(tester.output.Bytes()); !strings.Contains(output, "4") {
|
||||
t.Fatalf("statement evaluation failed: have %s, want %s", output, "4")
|
||||
}
|
||||
}
|
||||
|
||||
// Tests that preloaded JavaScript files have been executed before user is given
|
||||
// input.
|
||||
func TestPreload(t *testing.T) {
|
||||
tester := newTester(t, nil)
|
||||
defer tester.Close(t)
|
||||
|
||||
tester.console.Evaluate("preloaded")
|
||||
if output := string(tester.output.Bytes()); !strings.Contains(output, "some-preloaded-string") {
|
||||
t.Fatalf("preloaded variable missing: have %s, want %s", output, "some-preloaded-string")
|
||||
}
|
||||
}
|
||||
|
||||
// Tests that JavaScript scripts can be executed from the configured asset path.
|
||||
func TestExecute(t *testing.T) {
|
||||
tester := newTester(t, nil)
|
||||
defer tester.Close(t)
|
||||
|
||||
tester.console.Execute("exec.js")
|
||||
|
||||
tester.console.Evaluate("execed")
|
||||
if output := string(tester.output.Bytes()); !strings.Contains(output, "some-executed-string") {
|
||||
t.Fatalf("execed variable missing: have %s, want %s", output, "some-executed-string")
|
||||
}
|
||||
}
|
||||
|
||||
// Tests that the JavaScript objects returned by statement executions are properly
|
||||
// pretty printed instead of just displaying "[object]".
|
||||
func TestPrettyPrint(t *testing.T) {
|
||||
tester := newTester(t, nil)
|
||||
defer tester.Close(t)
|
||||
|
||||
tester.console.Evaluate("obj = {int: 1, string: 'two', list: [3, 3, 3], obj: {null: null, func: function(){}}}")
|
||||
|
||||
// Define some specially formatted fields
|
||||
var (
|
||||
one = jsre.NumberColor("1")
|
||||
two = jsre.StringColor("\"two\"")
|
||||
three = jsre.NumberColor("3")
|
||||
null = jsre.SpecialColor("null")
|
||||
fun = jsre.FunctionColor("function()")
|
||||
)
|
||||
// Assemble the actual output we're after and verify
|
||||
want := `{
|
||||
int: ` + one + `,
|
||||
list: [` + three + `, ` + three + `, ` + three + `],
|
||||
obj: {
|
||||
null: ` + null + `,
|
||||
func: ` + fun + `
|
||||
},
|
||||
string: ` + two + `
|
||||
}
|
||||
`
|
||||
if output := string(tester.output.Bytes()); output != want {
|
||||
t.Fatalf("pretty print mismatch: have %s, want %s", output, want)
|
||||
}
|
||||
}
|
||||
|
||||
// Tests that the JavaScript exceptions are properly formatted and colored.
|
||||
func TestPrettyError(t *testing.T) {
|
||||
tester := newTester(t, nil)
|
||||
defer tester.Close(t)
|
||||
tester.console.Evaluate("throw 'hello'")
|
||||
|
||||
want := jsre.ErrorColor("hello") + "\n"
|
||||
if output := string(tester.output.Bytes()); output != want {
|
||||
t.Fatalf("pretty error mismatch: have %s, want %s", output, want)
|
||||
}
|
||||
}
|
165
console/prompter.go
Normal file
@ -0,0 +1,165 @@
|
||||
// Copyright 2016 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package console
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/peterh/liner"
|
||||
)
|
||||
|
||||
// Stdin holds the stdin line reader (also using stdout for printing prompts).
|
||||
// Only this reader may be used for input because it keeps an internal buffer.
|
||||
var Stdin = newTerminalPrompter()
|
||||
|
||||
// UserPrompter defines the methods needed by the console to prompt the user for
|
||||
// various types of inputs.
|
||||
type UserPrompter interface {
|
||||
// PromptInput displays the given prompt to the user and requests some textual
|
||||
// data to be entered, returning the input of the user.
|
||||
PromptInput(prompt string) (string, error)
|
||||
|
||||
// PromptPassword displays the given prompt to the user and requests some textual
|
||||
// data to be entered, but one which must not be echoed out into the terminal.
|
||||
// The method returns the input provided by the user.
|
||||
PromptPassword(prompt string) (string, error)
|
||||
|
||||
// PromptConfirm displays the given prompt to the user and requests a boolean
|
||||
// choice to be made, returning that choice.
|
||||
PromptConfirm(prompt string) (bool, error)
|
||||
|
||||
// SetHistory sets the input scrollback history that the prompter will allow
|
||||
// the user to scroll back to.
|
||||
SetHistory(history []string)
|
||||
|
||||
// AppendHistory appends an entry to the scrollback history. It should be called
|
||||
// if and only if the prompt to append was a valid command.
|
||||
AppendHistory(command string)
|
||||
|
||||
// SetWordCompleter sets the completion function that the prompter will call to
|
||||
// fetch completion candidates when the user presses tab.
|
||||
SetWordCompleter(completer WordCompleter)
|
||||
}
|
||||
|
||||
// WordCompleter takes the currently edited line with the cursor position and
|
||||
// returns the completion candidates for the partial word to be completed. If
|
||||
// the line is "Hello, wo!!!" and the cursor is before the first '!', ("Hello,
|
||||
// wo!!!", 9) is passed to the completer which may return ("Hello, ", {"world",
|
||||
// "Word"}, "!!!") to have "Hello, world!!!".
|
||||
type WordCompleter func(line string, pos int) (string, []string, string)
|
||||
|
||||
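A minimal sketch of a WordCompleter satisfying the ("head", candidates, "tail") contract described above; the helper name and keyword list are assumptions, not part of this package:

    func keywordCompleter(keywords []string) WordCompleter {
        return func(line string, pos int) (string, []string, string) {
            // Walk back from the cursor to the start of the partial word.
            start := pos
            for start > 0 && line[start-1] != ' ' && line[start-1] != '(' {
                start--
            }
            var candidates []string
            for _, kw := range keywords {
                if strings.HasPrefix(kw, line[start:pos]) {
                    candidates = append(candidates, kw)
                }
            }
            return line[:start], candidates, line[pos:]
        }
    }

Such a completer could be handed to SetWordCompleter on any UserPrompter implementation.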
// terminalPrompter is a UserPrompter backed by the liner package. It supports
|
||||
// prompting the user for various input, among others for non-echoing password
|
||||
// input.
|
||||
type terminalPrompter struct {
|
||||
*liner.State
|
||||
warned bool
|
||||
supported bool
|
||||
normalMode liner.ModeApplier
|
||||
rawMode liner.ModeApplier
|
||||
}
|
||||
|
||||
// newTerminalPrompter creates a liner based user input prompter working off the
|
||||
// standard input and output streams.
|
||||
func newTerminalPrompter() *terminalPrompter {
|
||||
p := new(terminalPrompter)
|
||||
// Get the original mode before calling NewLiner.
|
||||
// This is usually regular "cooked" mode where characters echo.
|
||||
normalMode, _ := liner.TerminalMode()
|
||||
// Turn on liner. It switches to raw mode.
|
||||
p.State = liner.NewLiner()
|
||||
rawMode, err := liner.TerminalMode()
|
||||
if err != nil || !liner.TerminalSupported() {
|
||||
p.supported = false
|
||||
} else {
|
||||
p.supported = true
|
||||
p.normalMode = normalMode
|
||||
p.rawMode = rawMode
|
||||
// Switch back to normal mode while we're not prompting.
|
||||
normalMode.ApplyMode()
|
||||
}
|
||||
p.SetCtrlCAborts(true)
|
||||
p.SetTabCompletionStyle(liner.TabPrints)
|
||||
|
||||
return p
|
||||
}
|
||||
|
||||
// PromptInput displays the given prompt to the user and requests some textual
|
||||
// data to be entered, returning the input of the user.
|
||||
func (p *terminalPrompter) PromptInput(prompt string) (string, error) {
|
||||
if p.supported {
|
||||
p.rawMode.ApplyMode()
|
||||
defer p.normalMode.ApplyMode()
|
||||
} else {
|
||||
// liner tries to be smart about printing the prompt
|
||||
// and doesn't print anything if input is redirected.
|
||||
// Un-smart it by printing the prompt always.
|
||||
fmt.Print(prompt)
|
||||
prompt = ""
|
||||
defer fmt.Println()
|
||||
}
|
||||
return p.State.Prompt(prompt)
|
||||
}
|
||||
|
||||
// PromptPassword displays the given prompt to the user and requests some textual
|
||||
// data to be entered, but one which must not be echoed out into the terminal.
|
||||
// The method returns the input provided by the user.
|
||||
func (p *terminalPrompter) PromptPassword(prompt string) (passwd string, err error) {
|
||||
if p.supported {
|
||||
p.rawMode.ApplyMode()
|
||||
defer p.normalMode.ApplyMode()
|
||||
return p.State.PasswordPrompt(prompt)
|
||||
}
|
||||
if !p.warned {
|
||||
fmt.Println("!! Unsupported terminal, password will be echoed.")
|
||||
p.warned = true
|
||||
}
|
||||
// Just as in Prompt, handle printing the prompt here instead of relying on liner.
|
||||
fmt.Print(prompt)
|
||||
passwd, err = p.State.Prompt("")
|
||||
fmt.Println()
|
||||
return passwd, err
|
||||
}
|
||||
|
||||
// PromptConfirm displays the given prompt to the user and requests a boolean
|
||||
// choice to be made, returning that choice.
|
||||
func (p *terminalPrompter) PromptConfirm(prompt string) (bool, error) {
|
||||
input, err := p.Prompt(prompt + " [y/N] ")
|
||||
if len(input) > 0 && strings.ToUpper(input[:1]) == "Y" {
|
||||
return true, nil
|
||||
}
|
||||
return false, err
|
||||
}
|
||||
|
||||
// SetHistory sets the input scrollback history that the prompter will allow
|
||||
// the user to scroll back to.
|
||||
func (p *terminalPrompter) SetHistory(history []string) {
|
||||
p.State.ReadHistory(strings.NewReader(strings.Join(history, "\n")))
|
||||
}
|
||||
|
||||
// AppendHistory appends an entry to the scrollback history. It should be called
|
||||
// if and only if the prompt to append was a valid command.
|
||||
func (p *terminalPrompter) AppendHistory(command string) {
|
||||
p.State.AppendHistory(command)
|
||||
}
|
||||
|
||||
// SetWordCompleter sets the completion function that the prompter will call to
|
||||
// fetch completion candidates when the user presses tab.
|
||||
func (p *terminalPrompter) SetWordCompleter(completer WordCompleter) {
|
||||
p.State.SetWordCompleter(liner.WordCompleter(completer))
|
||||
}
|
1
console/testdata/exec.js
vendored
Normal file
@ -0,0 +1 @@
|
||||
var execed = "some-executed-string";
|
1
console/testdata/preload.js
vendored
Normal file
@ -0,0 +1 @@
|
||||
var preloaded = "some-preloaded-string";
|
@ -1,72 +0,0 @@
|
||||
// Copyright 2015 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package core
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/logger"
|
||||
"github.com/ethereum/go-ethereum/logger/glog"
|
||||
"github.com/ethereum/go-ethereum/rlp"
|
||||
)
|
||||
|
||||
// DisabledBadBlockReporting can be set to prevent blocks being reported.
|
||||
var DisableBadBlockReporting = true
|
||||
|
||||
// ReportBlock reports the block to the block reporting tool found at
|
||||
// badblocks.ethdev.com
|
||||
func ReportBlock(block *types.Block, err error) {
|
||||
if DisableBadBlockReporting {
|
||||
return
|
||||
}
|
||||
|
||||
const url = "https://badblocks.ethdev.com"
|
||||
|
||||
blockRlp, _ := rlp.EncodeToBytes(block)
|
||||
data := map[string]interface{}{
|
||||
"block": common.Bytes2Hex(blockRlp),
|
||||
"errortype": err.Error(),
|
||||
"hints": map[string]interface{}{
|
||||
"receipts": "NYI",
|
||||
"vmtrace": "NYI",
|
||||
},
|
||||
}
|
||||
jsonStr, _ := json.Marshal(map[string]interface{}{"method": "eth_badBlock", "params": []interface{}{data}, "id": "1", "jsonrpc": "2.0"})
|
||||
|
||||
req, err := http.NewRequest("POST", url, bytes.NewBuffer(jsonStr))
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
|
||||
client := &http.Client{}
|
||||
resp, err := client.Do(req)
|
||||
if err != nil {
|
||||
glog.V(logger.Error).Infoln("POST err:", err)
|
||||
return
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if glog.V(logger.Debug) {
|
||||
glog.Infoln("response Status:", resp.Status)
|
||||
glog.Infoln("response Headers:", resp.Header)
|
||||
body, _ := ioutil.ReadAll(resp.Body)
|
||||
glog.Infoln("response Body:", string(body))
|
||||
}
|
||||
}
|
@ -292,7 +292,7 @@ func calcDifficultyHomestead(time, parentTime uint64, parentNumber, parentDiff *
|
||||
|
||||
// minimum difficulty can ever be (before exponential factor)
|
||||
if x.Cmp(params.MinimumDifficulty) < 0 {
|
||||
x = params.MinimumDifficulty
|
||||
x.Set(params.MinimumDifficulty)
|
||||
}
|
||||
|
||||
// for the exponential factor
|
||||
@ -325,7 +325,7 @@ func calcDifficultyFrontier(time, parentTime uint64, parentNumber, parentDiff *b
|
||||
diff.Sub(parentDiff, adjust)
|
||||
}
|
||||
if diff.Cmp(params.MinimumDifficulty) < 0 {
|
||||
diff = params.MinimumDifficulty
|
||||
diff.Set(params.MinimumDifficulty)
|
||||
}
|
||||
|
||||
periodCount := new(big.Int).Add(parentNumber, common.Big1)
|
||||
|
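The two difficulty hunks above replace an assignment with Set for a subtle reason: params.MinimumDifficulty is a shared *big.Int, so assigning it aliases the package-level value, and any later in-place arithmetic would corrupt it for every subsequent caller. A minimal sketch of the difference:

    x := new(big.Int)
    x = params.MinimumDifficulty    // x now points at the shared constant
    x.Add(x, common.Big1)           // this would silently mutate params.MinimumDifficulty

    x = new(big.Int)
    x.Set(params.MinimumDifficulty) // copies the value; the constant stays intact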
@ -819,6 +819,7 @@ func (self *BlockChain) InsertChain(chain types.Blocks) (int, error) {
|
||||
tstart = time.Now()
|
||||
|
||||
nonceChecked = make([]bool, len(chain))
|
||||
statedb *state.StateDB
|
||||
)
|
||||
|
||||
// Start the parallel nonce verifier.
|
||||
@ -885,7 +886,11 @@ func (self *BlockChain) InsertChain(chain types.Blocks) (int, error) {
|
||||
|
||||
// Create a new statedb using the parent block and report an
|
||||
// error if it fails.
|
||||
statedb, err := state.New(self.GetBlock(block.ParentHash()).Root(), self.chainDb)
|
||||
if statedb == nil {
|
||||
statedb, err = state.New(self.GetBlock(block.ParentHash()).Root(), self.chainDb)
|
||||
} else {
|
||||
err = statedb.Reset(chain[i-1].Root())
|
||||
}
|
||||
if err != nil {
|
||||
reportBlock(block, err)
|
||||
return i, err
|
||||
@ -1117,15 +1122,12 @@ func (self *BlockChain) update() {
|
||||
}
|
||||
}
|
||||
|
||||
// reportBlock reports the given block and error using the canonical block
|
||||
// reporting tool. Reporting the block to the service is handled in a separate
|
||||
// goroutine.
|
||||
// reportBlock logs a bad block error.
|
||||
func reportBlock(block *types.Block, err error) {
|
||||
if glog.V(logger.Error) {
|
||||
glog.Errorf("Bad block #%v (%s)\n", block.Number(), block.Hash().Hex())
|
||||
glog.Errorf(" %v", err)
|
||||
}
|
||||
go ReportBlock(block, err)
|
||||
}
|
||||
|
||||
// InsertHeaderChain attempts to insert the given header chain in to the local
|
||||
|
@ -68,6 +68,28 @@ func New(root common.Hash, db ethdb.Database) (*StateDB, error) {
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Reset clears out all ephemeral state objects from the state db, but keeps
|
||||
// the underlying state trie to avoid reloading data for the next operations.
|
||||
func (self *StateDB) Reset(root common.Hash) error {
|
||||
var (
|
||||
err error
|
||||
tr = self.trie
|
||||
)
|
||||
if self.trie.Hash() != root {
|
||||
if tr, err = trie.NewSecure(root, self.db); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
*self = StateDB{
|
||||
db: self.db,
|
||||
trie: tr,
|
||||
stateObjects: make(map[string]*StateObject),
|
||||
refund: new(big.Int),
|
||||
logs: make(map[common.Hash]vm.Logs),
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
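Reset lets InsertChain (see the hunk above) recycle a single StateDB across consecutive blocks instead of rebuilding it for each one; a sketch of that pattern, where the loop, the parentRootOf helper and the chainDb handle are placeholders:

    var statedb *state.StateDB
    for _, block := range blocks { // hypothetical sequential blocks
        var err error
        if statedb == nil {
            statedb, err = state.New(parentRootOf(block), chainDb) // first block: full construction
        } else {
            err = statedb.Reset(parentRootOf(block)) // later blocks: drop ephemeral objects, keep the trie
        }
        if err != nil {
            return err
        }
        // ... process block against statedb ...
    }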
func (self *StateDB) StartRecord(thash, bhash common.Hash, ti int) {
|
||||
self.thash = thash
|
||||
self.bhash = bhash
|
||||
@ -127,7 +149,7 @@ func (self *StateDB) GetNonce(addr common.Address) uint64 {
|
||||
return stateObject.nonce
|
||||
}
|
||||
|
||||
return 0
|
||||
return StartingNonce
|
||||
}
|
||||
|
||||
func (self *StateDB) GetCode(addr common.Address) []byte {
|
||||
|
@ -368,6 +368,9 @@ func (self *TxPool) AddTransactions(txs []*types.Transaction) {
|
||||
// GetTransaction returns a transaction if it is contained in the pool
|
||||
// and nil otherwise.
|
||||
func (tp *TxPool) GetTransaction(hash common.Hash) *types.Transaction {
|
||||
tp.mu.RLock()
|
||||
defer tp.mu.RUnlock()
|
||||
|
||||
// check the txs first
|
||||
if tx, ok := tp.pending[hash]; ok {
|
||||
return tx
|
||||
@ -421,12 +424,18 @@ func (self *TxPool) RemoveTransactions(txs types.Transactions) {
|
||||
self.mu.Lock()
|
||||
defer self.mu.Unlock()
|
||||
for _, tx := range txs {
|
||||
self.RemoveTx(tx.Hash())
|
||||
self.removeTx(tx.Hash())
|
||||
}
|
||||
}
|
||||
|
||||
// RemoveTx removes the transaction with the given hash from the pool.
|
||||
func (pool *TxPool) RemoveTx(hash common.Hash) {
|
||||
pool.mu.Lock()
|
||||
defer pool.mu.Unlock()
|
||||
pool.removeTx(hash)
|
||||
}
|
||||
|
||||
func (pool *TxPool) removeTx(hash common.Hash) {
|
||||
// delete from pending pool
|
||||
delete(pool.pending, hash)
|
||||
// delete from queue
|
||||
|
@ -141,8 +141,10 @@ type Block struct {
|
||||
// of the chain up to and including the block.
|
||||
td *big.Int
|
||||
|
||||
// ReceivedAt is used by package eth to track block propagation time.
|
||||
ReceivedAt time.Time
|
||||
// These fields are used by package eth to track
|
||||
// inter-peer block relay.
|
||||
ReceivedAt time.Time
|
||||
ReceivedFrom interface{}
|
||||
}
|
||||
|
||||
// DeprecatedTd is an old relic for extracting the TD of a block. It is in the
|
||||
|
@ -39,12 +39,12 @@ func TestBloom(t *testing.T) {
|
||||
}
|
||||
|
||||
for _, data := range positive {
|
||||
if !bloom.Test(new(big.Int).SetBytes([]byte(data))) {
|
||||
if !bloom.TestBytes([]byte(data)) {
|
||||
t.Error("expected", data, "to test true")
|
||||
}
|
||||
}
|
||||
for _, data := range negative {
|
||||
if bloom.Test(new(big.Int).SetBytes([]byte(data))) {
|
||||
if bloom.TestBytes([]byte(data)) {
|
||||
t.Error("did not expect", data, "to test true")
|
||||
}
|
||||
}
|
||||
|
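The bloom test hunk above swaps the manual big.Int conversion for TestBytes; assuming (consistently with this change) that TestBytes is a byte-slice convenience wrapper, the two forms are interchangeable:

    data := []byte("topic")
    viaBigInt := bloom.Test(new(big.Int).SetBytes(data)) // previous form
    viaBytes := bloom.TestBytes(data)                    // wrapper used after this change
    // viaBigInt == viaBytes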
199
eth/api.go
@ -52,15 +52,6 @@ import (
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
// errNoCode is returned by call and transact operations for which the requested
|
||||
// recipient contract to operate on does not exist in the state db or does not
|
||||
// have any code associated with it (i.e. suicided).
|
||||
//
|
||||
// Please note, this error string is part of the RPC API and is expected by the
|
||||
// native contract bindings to signal this particular error. Do not change this
|
||||
// as it will break all dependent code!
|
||||
var errNoCode = errors.New("no contract code at given address")
|
||||
|
||||
const defaultGas = uint64(90000)
|
||||
|
||||
// blockByNumber is a commonly used helper function which retrieves and returns
|
||||
@ -122,7 +113,7 @@ func (s *PublicEthereumAPI) GasPrice() *big.Int {
|
||||
// GetCompilers returns the collection of available smart contract compilers
|
||||
func (s *PublicEthereumAPI) GetCompilers() ([]string, error) {
|
||||
solc, err := s.e.Solc()
|
||||
if err != nil && solc != nil {
|
||||
if err == nil && solc != nil {
|
||||
return []string{"Solidity"}, nil
|
||||
}
|
||||
|
||||
@ -148,7 +139,7 @@ func (s *PublicEthereumAPI) Etherbase() (common.Address, error) {
|
||||
return s.e.Etherbase()
|
||||
}
|
||||
|
||||
// see Etherbase
|
||||
// Coinbase is the address that mining rewards will be sent to (alias for Etherbase)
|
||||
func (s *PublicEthereumAPI) Coinbase() (common.Address, error) {
|
||||
return s.Etherbase()
|
||||
}
|
||||
@ -217,18 +208,17 @@ func (s *PublicMinerAPI) SubmitWork(nonce rpc.HexNumber, solution, digest common
|
||||
// result[0], 32 bytes hex encoded current block header pow-hash
|
||||
// result[1], 32 bytes hex encoded seed hash used for DAG
|
||||
// result[2], 32 bytes hex encoded boundary condition ("target"), 2^256/difficulty
|
||||
func (s *PublicMinerAPI) GetWork() ([]string, error) {
|
||||
func (s *PublicMinerAPI) GetWork() (work [3]string, err error) {
|
||||
if !s.e.IsMining() {
|
||||
if err := s.e.StartMining(0, ""); err != nil {
|
||||
return nil, err
|
||||
return work, err
|
||||
}
|
||||
}
|
||||
if work, err := s.agent.GetWork(); err == nil {
|
||||
return work[:], nil
|
||||
} else {
|
||||
glog.Infof("%v\n", err)
|
||||
if work, err = s.agent.GetWork(); err == nil {
|
||||
return
|
||||
}
|
||||
return nil, fmt.Errorf("mining not ready")
|
||||
glog.V(logger.Debug).Infof("%v", err)
|
||||
return work, fmt.Errorf("mining not ready")
|
||||
}
|
||||
|
||||
// SubmitHashrate can be used for remote miners to submit their hash rate. This enables the node to report the combined
|
||||
@ -423,14 +413,23 @@ func (s *PublicAccountAPI) Accounts() []accounts.Account {
|
||||
}
|
||||
|
||||
// PrivateAccountAPI provides an API to access accounts managed by this node.
|
||||
// It offers methods to create, (un)lock and list accounts.
|
||||
// It offers methods to create, (un)lock and list accounts. Some methods accept
|
||||
// passwords and are therefore considered private by default.
|
||||
type PrivateAccountAPI struct {
|
||||
am *accounts.Manager
|
||||
am *accounts.Manager
|
||||
txPool *core.TxPool
|
||||
txMu *sync.Mutex
|
||||
gpo *GasPriceOracle
|
||||
}
|
||||
|
||||
// NewPrivateAccountAPI create a new PrivateAccountAPI.
|
||||
func NewPrivateAccountAPI(am *accounts.Manager) *PrivateAccountAPI {
|
||||
return &PrivateAccountAPI{am}
|
||||
func NewPrivateAccountAPI(e *Ethereum) *PrivateAccountAPI {
|
||||
return &PrivateAccountAPI{
|
||||
am: e.accountManager,
|
||||
txPool: e.txPool,
|
||||
txMu: &e.txMu,
|
||||
gpo: e.gpo,
|
||||
}
|
||||
}
|
||||
|
||||
// ListAccounts will return a list of addresses for accounts this node manages.
|
||||
@ -452,6 +451,8 @@ func (s *PrivateAccountAPI) NewAccount(password string) (common.Address, error)
|
||||
return common.Address{}, err
|
||||
}
|
||||
|
||||
// ImportRawKey stores the given hex encoded ECDSA key into the key directory,
|
||||
// encrypting it with the passphrase.
|
||||
func (s *PrivateAccountAPI) ImportRawKey(privkey string, password string) (common.Address, error) {
|
||||
hexkey, err := hex.DecodeString(privkey)
|
||||
if err != nil {
|
||||
@ -482,6 +483,34 @@ func (s *PrivateAccountAPI) LockAccount(addr common.Address) bool {
|
||||
return s.am.Lock(addr) == nil
|
||||
}
|
||||
|
||||
// SignAndSendTransaction will create a transaction from the given arguments and
|
||||
// try to sign it with the key associated with args.From. If the given passwd isn't
|
||||
// able to decrypt the key it fails.
|
||||
func (s *PrivateAccountAPI) SignAndSendTransaction(args SendTxArgs, passwd string) (common.Hash, error) {
|
||||
args = prepareSendTxArgs(args, s.gpo)
|
||||
|
||||
s.txMu.Lock()
|
||||
defer s.txMu.Unlock()
|
||||
|
||||
if args.Nonce == nil {
|
||||
args.Nonce = rpc.NewHexNumber(s.txPool.State().GetNonce(args.From))
|
||||
}
|
||||
|
||||
var tx *types.Transaction
|
||||
if args.To == nil {
|
||||
tx = types.NewContractCreation(args.Nonce.Uint64(), args.Value.BigInt(), args.Gas.BigInt(), args.GasPrice.BigInt(), common.FromHex(args.Data))
|
||||
} else {
|
||||
tx = types.NewTransaction(args.Nonce.Uint64(), *args.To, args.Value.BigInt(), args.Gas.BigInt(), args.GasPrice.BigInt(), common.FromHex(args.Data))
|
||||
}
|
||||
|
||||
signature, err := s.am.SignWithPassphrase(args.From, passwd, tx.SigHash().Bytes())
|
||||
if err != nil {
|
||||
return common.Hash{}, err
|
||||
}
|
||||
|
||||
return submitTransaction(s.txPool, tx, signature)
|
||||
}
|
||||
|
||||
// PublicBlockChainAPI provides an API to access the Ethereum blockchain.
|
||||
// It offers only methods that operate on public data that is freely available to anyone.
|
||||
type PublicBlockChainAPI struct {
|
||||
@ -645,15 +674,14 @@ func (s *PublicBlockChainAPI) NewBlocks(ctx context.Context, args NewBlocksArgs)
|
||||
// add a callback that is called on chain events which will format the block and notify the client
|
||||
s.muNewBlockSubscriptions.Lock()
|
||||
s.newBlockSubscriptions[subscription.ID()] = func(e core.ChainEvent) error {
|
||||
if notification, err := s.rpcOutputBlock(e.Block, args.IncludeTransactions, args.TransactionDetails); err == nil {
|
||||
notification, err := s.rpcOutputBlock(e.Block, args.IncludeTransactions, args.TransactionDetails)
|
||||
if err == nil {
|
||||
return subscription.Notify(notification)
|
||||
} else {
|
||||
glog.V(logger.Warn).Info("unable to format block %v\n", err)
|
||||
}
|
||||
glog.V(logger.Warn).Info("unable to format block %v\n", err)
|
||||
return nil
|
||||
}
|
||||
s.muNewBlockSubscriptions.Unlock()
|
||||
|
||||
return subscription, nil
|
||||
}
|
||||
|
||||
@ -700,6 +728,7 @@ func (m callmsg) Gas() *big.Int { return m.gas }
|
||||
func (m callmsg) Value() *big.Int { return m.value }
|
||||
func (m callmsg) Data() []byte { return m.data }
|
||||
|
||||
// CallArgs represents the arguments for a call.
|
||||
type CallArgs struct {
|
||||
From common.Address `json:"from"`
|
||||
To *common.Address `json:"to"`
|
||||
@ -717,12 +746,6 @@ func (s *PublicBlockChainAPI) doCall(args CallArgs, blockNr rpc.BlockNumber) (st
|
||||
}
|
||||
stateDb = stateDb.Copy()
|
||||
|
||||
// If there's no code to interact with, respond with an appropriate error
|
||||
if args.To != nil {
|
||||
if code := stateDb.GetCode(*args.To); len(code) == 0 {
|
||||
return "0x", nil, errNoCode
|
||||
}
|
||||
}
|
||||
// Retrieve the account state object to interact with
|
||||
var from *state.StateObject
|
||||
if args.From == (common.Address{}) {
|
||||
@ -911,7 +934,7 @@ type PublicTransactionPoolAPI struct {
|
||||
miner *miner.Miner
|
||||
am *accounts.Manager
|
||||
txPool *core.TxPool
|
||||
txMu sync.Mutex
|
||||
txMu *sync.Mutex
|
||||
muPendingTxSubs sync.Mutex
|
||||
pendingTxSubs map[string]rpc.Subscription
|
||||
}
|
||||
@ -925,6 +948,7 @@ func NewPublicTransactionPoolAPI(e *Ethereum) *PublicTransactionPoolAPI {
|
||||
bc: e.blockchain,
|
||||
am: e.accountManager,
|
||||
txPool: e.txPool,
|
||||
txMu: &e.txMu,
|
||||
miner: e.miner,
|
||||
pendingTxSubs: make(map[string]rpc.Subscription),
|
||||
}
|
||||
@ -1124,6 +1148,7 @@ func (s *PublicTransactionPoolAPI) sign(addr common.Address, tx *types.Transacti
|
||||
return tx.WithSignature(signature)
|
||||
}
|
||||
|
||||
// SendTxArgs represents the arguments to submit a new transaction into the transaction pool.
|
||||
type SendTxArgs struct {
|
||||
From common.Address `json:"from"`
|
||||
To *common.Address `json:"to"`
|
||||
@ -1134,18 +1159,47 @@ type SendTxArgs struct {
|
||||
Nonce *rpc.HexNumber `json:"nonce"`
|
||||
}
|
||||
|
||||
// SendTransaction will create a transaction for the given transaction argument, sign it and submit it to the
|
||||
// transaction pool.
|
||||
func (s *PublicTransactionPoolAPI) SendTransaction(args SendTxArgs) (common.Hash, error) {
|
||||
// prepareSendTxArgs is a helper function that fills in default values for unspecified tx fields.
|
||||
func prepareSendTxArgs(args SendTxArgs, gpo *GasPriceOracle) SendTxArgs {
|
||||
if args.Gas == nil {
|
||||
args.Gas = rpc.NewHexNumber(defaultGas)
|
||||
}
|
||||
if args.GasPrice == nil {
|
||||
args.GasPrice = rpc.NewHexNumber(s.gpo.SuggestPrice())
|
||||
args.GasPrice = rpc.NewHexNumber(gpo.SuggestPrice())
|
||||
}
|
||||
if args.Value == nil {
|
||||
args.Value = rpc.NewHexNumber(0)
|
||||
}
|
||||
return args
|
||||
}
|
||||
|
||||
// submitTransaction is a helper function that submits tx to txPool and creates a log entry.
|
||||
func submitTransaction(txPool *core.TxPool, tx *types.Transaction, signature []byte) (common.Hash, error) {
|
||||
signedTx, err := tx.WithSignature(signature)
|
||||
if err != nil {
|
||||
return common.Hash{}, err
|
||||
}
|
||||
|
||||
txPool.SetLocal(signedTx)
|
||||
if err := txPool.Add(signedTx); err != nil {
|
||||
return common.Hash{}, err
|
||||
}
|
||||
|
||||
if signedTx.To() == nil {
|
||||
from, _ := signedTx.From()
|
||||
addr := crypto.CreateAddress(from, signedTx.Nonce())
|
||||
glog.V(logger.Info).Infof("Tx(%s) created: %s\n", signedTx.Hash().Hex(), addr.Hex())
|
||||
} else {
|
||||
glog.V(logger.Info).Infof("Tx(%s) to: %s\n", signedTx.Hash().Hex(), tx.To().Hex())
|
||||
}
|
||||
|
||||
return signedTx.Hash(), nil
|
||||
}
|
||||
|
||||
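With the two helpers above factored out, the public SendTransaction and the new private SignAndSendTransaction share a single pipeline; a condensed sketch with error handling elided (NewContractCreation is used instead when args.To is nil):

    args = prepareSendTxArgs(args, s.gpo) // fill in gas, gas price and value defaults
    if args.Nonce == nil {
        args.Nonce = rpc.NewHexNumber(s.txPool.State().GetNonce(args.From))
    }
    tx := types.NewTransaction(args.Nonce.Uint64(), *args.To, args.Value.BigInt(),
        args.Gas.BigInt(), args.GasPrice.BigInt(), common.FromHex(args.Data))
    signature, _ := s.am.Sign(args.From, tx.SigHash().Bytes()) // or SignWithPassphrase for the private API
    return submitTransaction(s.txPool, tx, signature)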
// SendTransaction creates a transaction for the given argument, signs it and submits it to the
|
||||
// transaction pool.
|
||||
func (s *PublicTransactionPoolAPI) SendTransaction(args SendTxArgs) (common.Hash, error) {
|
||||
args = prepareSendTxArgs(args, s.gpo)
|
||||
|
||||
s.txMu.Lock()
|
||||
defer s.txMu.Unlock()
|
||||
@ -1155,32 +1209,18 @@ func (s *PublicTransactionPoolAPI) SendTransaction(args SendTxArgs) (common.Hash
|
||||
}
|
||||
|
||||
var tx *types.Transaction
|
||||
contractCreation := (args.To == nil)
|
||||
|
||||
if contractCreation {
|
||||
if args.To == nil {
|
||||
tx = types.NewContractCreation(args.Nonce.Uint64(), args.Value.BigInt(), args.Gas.BigInt(), args.GasPrice.BigInt(), common.FromHex(args.Data))
|
||||
} else {
|
||||
tx = types.NewTransaction(args.Nonce.Uint64(), *args.To, args.Value.BigInt(), args.Gas.BigInt(), args.GasPrice.BigInt(), common.FromHex(args.Data))
|
||||
}
|
||||
|
||||
signedTx, err := s.sign(args.From, tx)
|
||||
signature, err := s.am.Sign(args.From, tx.SigHash().Bytes())
|
||||
if err != nil {
|
||||
return common.Hash{}, err
|
||||
}
|
||||
|
||||
s.txPool.SetLocal(signedTx)
|
||||
if err := s.txPool.Add(signedTx); err != nil {
|
||||
return common.Hash{}, err
|
||||
}
|
||||
|
||||
if contractCreation {
|
||||
addr := crypto.CreateAddress(args.From, args.Nonce.Uint64())
|
||||
glog.V(logger.Info).Infof("Tx(%s) created: %s\n", signedTx.Hash().Hex(), addr.Hex())
|
||||
} else {
|
||||
glog.V(logger.Info).Infof("Tx(%s) to: %s\n", signedTx.Hash().Hex(), tx.To().Hex())
|
||||
}
|
||||
|
||||
return signedTx.Hash(), nil
|
||||
return submitTransaction(s.txPool, tx, signature)
|
||||
}
|
||||
|
||||
// SendRawTransaction will add the signed transaction to the transaction pool.
|
||||
@ -1217,6 +1257,7 @@ func (s *PublicTransactionPoolAPI) Sign(addr common.Address, hash common.Hash) (
|
||||
return common.ToHex(signature), error
|
||||
}
|
||||
|
||||
// SignTransactionArgs represents the arguments to sign a transaction.
|
||||
type SignTransactionArgs struct {
|
||||
From common.Address
|
||||
To *common.Address
|
||||
@ -1243,6 +1284,7 @@ type Tx struct {
|
||||
Hash common.Hash `json:"hash"`
|
||||
}
|
||||
|
||||
// UnmarshalJSON parses JSON data into tx.
|
||||
func (tx *Tx) UnmarshalJSON(b []byte) (err error) {
|
||||
req := struct {
|
||||
To *common.Address `json:"to"`
|
||||
@ -1283,8 +1325,7 @@ func (tx *Tx) UnmarshalJSON(b []byte) (err error) {
|
||||
tx.GasPrice = rpc.NewHexNumber(int64(50000000000))
|
||||
}
|
||||
|
||||
contractCreation := (req.To == nil)
|
||||
if contractCreation {
|
||||
if req.To == nil {
|
||||
tx.tx = types.NewContractCreation(tx.Nonce.Uint64(), tx.Value.BigInt(), tx.GasLimit.BigInt(), tx.GasPrice.BigInt(), data)
|
||||
} else {
|
||||
tx.tx = types.NewTransaction(tx.Nonce.Uint64(), *tx.To, tx.Value.BigInt(), tx.GasLimit.BigInt(), tx.GasPrice.BigInt(), data)
|
||||
@ -1293,6 +1334,7 @@ func (tx *Tx) UnmarshalJSON(b []byte) (err error) {
|
||||
return nil
|
||||
}
|
||||
|
||||
// SignTransactionResult represents an RLP encoded signed transaction.
|
||||
type SignTransactionResult struct {
|
||||
Raw string `json:"raw"`
|
||||
Tx *Tx `json:"tx"`
|
||||
@ -1335,9 +1377,7 @@ func (s *PublicTransactionPoolAPI) SignTransaction(args SignTransactionArgs) (*S
|
||||
}
|
||||
|
||||
var tx *types.Transaction
|
||||
contractCreation := (args.To == nil)
|
||||
|
||||
if contractCreation {
|
||||
if args.To == nil {
|
||||
tx = types.NewContractCreation(args.Nonce.Uint64(), args.Value.BigInt(), args.Gas.BigInt(), args.GasPrice.BigInt(), common.FromHex(args.Data))
|
||||
} else {
|
||||
tx = types.NewTransaction(args.Nonce.Uint64(), *args.To, args.Value.BigInt(), args.Gas.BigInt(), args.GasPrice.BigInt(), common.FromHex(args.Data))
|
||||
@ -1353,14 +1393,14 @@ func (s *PublicTransactionPoolAPI) SignTransaction(args SignTransactionArgs) (*S
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &SignTransactionResult{"0x" + common.Bytes2Hex(data), newTx(tx)}, nil
|
||||
return &SignTransactionResult{"0x" + common.Bytes2Hex(data), newTx(signedTx)}, nil
|
||||
}
|
||||
|
||||
// PendingTransactions returns the transactions that are in the transaction pool and have a from address that is one of
|
||||
// the accounts this node manages.
|
||||
func (s *PublicTransactionPoolAPI) PendingTransactions() []*RPCTransaction {
|
||||
pending := s.txPool.GetTransactions()
|
||||
transactions := make([]*RPCTransaction, 0)
|
||||
transactions := make([]*RPCTransaction, 0, len(pending))
|
||||
for _, tx := range pending {
|
||||
from, _ := tx.FromFrontier()
|
||||
if s.am.HasAddress(from) {
|
||||
@ -1370,7 +1410,7 @@ func (s *PublicTransactionPoolAPI) PendingTransactions() []*RPCTransaction {
|
||||
return transactions
|
||||
}
|
||||
|
||||
// NewPendingTransaction creates a subscription that is triggered each time a transaction enters the transaction pool
|
||||
// NewPendingTransactions creates a subscription that is triggered each time a transaction enters the transaction pool
|
||||
// and is sent from one of the accounts this node manages.
|
||||
func (s *PublicTransactionPoolAPI) NewPendingTransactions(ctx context.Context) (rpc.Subscription, error) {
|
||||
notifier, supported := rpc.NotifierFromContext(ctx)
|
||||
@ -1410,8 +1450,7 @@ func (s *PublicTransactionPoolAPI) Resend(tx Tx, gasPrice, gasLimit *rpc.HexNumb
|
||||
}
|
||||
|
||||
var newTx *types.Transaction
|
||||
contractCreation := (tx.tx.To() == nil)
|
||||
if contractCreation {
|
||||
if tx.tx.To() == nil {
|
||||
newTx = types.NewContractCreation(tx.tx.Nonce(), tx.tx.Value(), gasPrice.BigInt(), gasLimit.BigInt(), tx.tx.Data())
|
||||
} else {
|
||||
newTx = types.NewTransaction(tx.tx.Nonce(), *tx.tx.To(), tx.tx.Value(), gasPrice.BigInt(), gasLimit.BigInt(), tx.tx.Data())
|
||||
@ -1612,7 +1651,7 @@ func (api *PrivateDebugAPI) ChaindbProperty(property string) (string, error) {
|
||||
return ldb.LDB().GetProperty(property)
|
||||
}
|
||||
|
||||
// BlockTraceResults is the returned value when replaying a block to check for
|
||||
// BlockTraceResult is the returned value when replaying a block to check for
|
||||
// consensus results and full VM trace logs for all included transactions.
|
||||
type BlockTraceResult struct {
|
||||
Validated bool `json:"validated"`
|
||||
@ -1647,7 +1686,7 @@ func (api *PrivateDebugAPI) TraceBlockFromFile(file string, config *vm.Config) B
|
||||
return api.TraceBlock(blockRlp, config)
|
||||
}
|
||||
|
||||
// TraceProcessBlock processes the block by canonical block number.
|
||||
// TraceBlockByNumber processes the block by canonical block number.
|
||||
func (api *PrivateDebugAPI) TraceBlockByNumber(number uint64, config *vm.Config) BlockTraceResult {
|
||||
// Fetch the block that we aim to reprocess
|
||||
block := api.eth.BlockChain().GetBlockByNumber(number)
|
||||
@ -1752,15 +1791,6 @@ type structLogRes struct {
|
||||
Storage map[string]string `json:"storage"`
|
||||
}
|
||||
|
||||
// VmLoggerOptions are the options used for debugging transactions and capturing
|
||||
// specific data.
|
||||
type VmLoggerOptions struct {
|
||||
DisableMemory bool // disable memory capture
|
||||
DisableStack bool // disable stack capture
|
||||
DisableStorage bool // disable storage capture
|
||||
FullStorage bool // show full storage (slow)
|
||||
}
|
||||
|
||||
// formatLogs formats EVM returned structured logs for json output
|
||||
func formatLogs(structLogs []vm.StructLog) []structLogRes {
|
||||
formattedStructLogs := make([]structLogRes, len(structLogs))
|
||||
@ -1802,25 +1832,25 @@ func formatError(err error) string {
|
||||
|
||||
// TraceTransaction returns the structured logs created during the execution of EVM
|
||||
// and returns them as a JSON object.
|
||||
func (s *PrivateDebugAPI) TraceTransaction(txHash common.Hash, logger *vm.LogConfig) (*ExecutionResult, error) {
|
||||
func (api *PrivateDebugAPI) TraceTransaction(txHash common.Hash, logger *vm.LogConfig) (*ExecutionResult, error) {
|
||||
if logger == nil {
|
||||
logger = new(vm.LogConfig)
|
||||
}
|
||||
// Retrieve the tx from the chain and the containing block
|
||||
tx, blockHash, _, txIndex := core.GetTransaction(s.eth.ChainDb(), txHash)
|
||||
tx, blockHash, _, txIndex := core.GetTransaction(api.eth.ChainDb(), txHash)
|
||||
if tx == nil {
|
||||
return nil, fmt.Errorf("transaction %x not found", txHash)
|
||||
}
|
||||
block := s.eth.BlockChain().GetBlock(blockHash)
|
||||
block := api.eth.BlockChain().GetBlock(blockHash)
|
||||
if block == nil {
|
||||
return nil, fmt.Errorf("block %x not found", blockHash)
|
||||
}
|
||||
// Create the state database to mutate and eventually trace
|
||||
parent := s.eth.BlockChain().GetBlock(block.ParentHash())
|
||||
parent := api.eth.BlockChain().GetBlock(block.ParentHash())
|
||||
if parent == nil {
|
||||
return nil, fmt.Errorf("block parent %x not found", block.ParentHash())
|
||||
}
|
||||
stateDb, err := state.New(parent.Root(), s.eth.ChainDb())
|
||||
stateDb, err := state.New(parent.Root(), api.eth.ChainDb())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -1841,7 +1871,7 @@ func (s *PrivateDebugAPI) TraceTransaction(txHash common.Hash, logger *vm.LogCon
|
||||
}
|
||||
// Mutate the state if we haven't reached the tracing transaction yet
|
||||
if uint64(idx) < txIndex {
|
||||
vmenv := core.NewEnv(stateDb, s.config, s.eth.BlockChain(), msg, block.Header(), vm.Config{})
|
||||
vmenv := core.NewEnv(stateDb, api.config, api.eth.BlockChain(), msg, block.Header(), vm.Config{})
|
||||
_, _, err := core.ApplyMessage(vmenv, msg, new(core.GasPool).AddGas(tx.Gas()))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("mutation failed: %v", err)
|
||||
@ -1849,7 +1879,7 @@ func (s *PrivateDebugAPI) TraceTransaction(txHash common.Hash, logger *vm.LogCon
|
||||
continue
|
||||
}
|
||||
// Otherwise trace the transaction and return
|
||||
vmenv := core.NewEnv(stateDb, s.config, s.eth.BlockChain(), msg, block.Header(), vm.Config{Debug: true, Logger: *logger})
|
||||
vmenv := core.NewEnv(stateDb, api.config, api.eth.BlockChain(), msg, block.Header(), vm.Config{Debug: true, Logger: *logger})
|
||||
ret, gas, err := core.ApplyMessage(vmenv, msg, new(core.GasPool).AddGas(tx.Gas()))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("tracing failed: %v", err)
|
||||
@ -1863,6 +1893,7 @@ func (s *PrivateDebugAPI) TraceTransaction(txHash common.Hash, logger *vm.LogCon
|
||||
return nil, errors.New("database inconsistency")
|
||||
}
|
||||
|
||||
// TraceCall executes a call and returns the amount of gas, created logs and optionally returned values.
|
||||
func (s *PublicBlockChainAPI) TraceCall(args CallArgs, blockNr rpc.BlockNumber) (*ExecutionResult, error) {
|
||||
// Fetch the state associated with the block number
|
||||
stateDb, block, err := stateAndBlockByNumber(s.miner, s.bc, blockNr, s.chainDb)
|
||||
@ -1931,12 +1962,12 @@ func (s *PublicNetAPI) Listening() bool {
|
||||
return true // always listening
|
||||
}
|
||||
|
||||
// Peercount returns the number of connected peers
|
||||
// PeerCount returns the number of connected peers
|
||||
func (s *PublicNetAPI) PeerCount() *rpc.HexNumber {
|
||||
return rpc.NewHexNumber(s.net.PeerCount())
|
||||
}
|
||||
|
||||
// ProtocolVersion returns the current ethereum protocol version.
|
||||
// Version returns the current ethereum protocol version.
|
||||
func (s *PublicNetAPI) Version() string {
|
||||
return fmt.Sprintf("%d", s.networkVersion)
|
||||
}
|
||||
|
@ -26,6 +26,7 @@ import (
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/ethash"
|
||||
@ -113,6 +114,7 @@ type Ethereum struct {
|
||||
|
||||
// Handlers
|
||||
txPool *core.TxPool
|
||||
txMu sync.Mutex
|
||||
blockchain *core.BlockChain
|
||||
accountManager *accounts.Manager
|
||||
pow *ethash.Ethash
|
||||
@ -293,7 +295,7 @@ func (s *Ethereum) APIs() []rpc.API {
|
||||
}, {
|
||||
Namespace: "personal",
|
||||
Version: "1.0",
|
||||
Service: NewPrivateAccountAPI(s.accountManager),
|
||||
Service: NewPrivateAccountAPI(s),
|
||||
Public: false,
|
||||
}, {
|
||||
Namespace: "eth",
|
||||
|
74
eth/bad_block.go
Normal file
@ -0,0 +1,74 @@
|
||||
// Copyright 2015 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package eth
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/logger"
|
||||
"github.com/ethereum/go-ethereum/logger/glog"
|
||||
"github.com/ethereum/go-ethereum/rlp"
|
||||
)
|
||||
|
||||
const (
|
||||
// The Ethereum main network genesis block.
|
||||
defaultGenesisHash = "0xd4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3"
|
||||
badBlocksURL = "https://badblocks.ethdev.com"
|
||||
)
|
||||
|
||||
var EnableBadBlockReporting = false
|
||||
|
||||
func sendBadBlockReport(block *types.Block, err error) {
|
||||
if !EnableBadBlockReporting {
|
||||
return
|
||||
}
|
||||
|
||||
var (
|
||||
blockRLP, _ = rlp.EncodeToBytes(block)
|
||||
params = map[string]interface{}{
|
||||
"block": common.Bytes2Hex(blockRLP),
|
||||
"blockHash": block.Hash().Hex(),
|
||||
"errortype": err.Error(),
|
||||
"client": "go",
|
||||
}
|
||||
)
|
||||
if !block.ReceivedAt.IsZero() {
|
||||
params["receivedAt"] = block.ReceivedAt.UTC().String()
|
||||
}
|
||||
if p, ok := block.ReceivedFrom.(*peer); ok {
|
||||
params["receivedFrom"] = map[string]interface{}{
|
||||
"enode": fmt.Sprintf("enode://%x@%v", p.ID(), p.RemoteAddr()),
|
||||
"name": p.Name(),
|
||||
"protocolVersion": p.version,
|
||||
}
|
||||
}
|
||||
jsonStr, _ := json.Marshal(map[string]interface{}{"method": "eth_badBlock", "id": "1", "jsonrpc": "2.0", "params": []interface{}{params}})
|
||||
client := http.Client{Timeout: 8 * time.Second}
|
||||
resp, err := client.Post(badBlocksURL, "application/json", bytes.NewReader(jsonStr))
|
||||
if err != nil {
|
||||
glog.V(logger.Debug).Infoln(err)
|
||||
return
|
||||
}
|
||||
glog.V(logger.Debug).Infof("Bad Block Report posted (%d)", resp.StatusCode)
|
||||
resp.Body.Close()
|
||||
}
|
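For reference, sendBadBlockReport above wraps the params map in a standard JSON-RPC 2.0 envelope with method eth_badBlock before POSTing it to the bad-blocks endpoint. A rough sketch of what that payload looks like, with placeholder hex strings standing in for the RLP-encoded block and its hash:

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Placeholder values standing in for the hex-encoded block RLP and hash.
	params := map[string]interface{}{
		"block":     "f90218...",
		"blockHash": "0x2f1c...",
		"errortype": "invalid merkle root",
		"client":    "go",
	}
	payload := map[string]interface{}{
		"method":  "eth_badBlock",
		"id":      "1",
		"jsonrpc": "2.0",
		"params":  []interface{}{params},
	}
	out, _ := json.MarshalIndent(payload, "", "  ")
	fmt.Println(string(out))
}
```

The optional receivedAt and receivedFrom fields are attached in the same way whenever the block carries that metadata.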
18
eth/bind.go
@ -19,7 +19,6 @@ package eth
|
||||
import (
|
||||
"math/big"
|
||||
|
||||
"github.com/ethereum/go-ethereum/accounts/abi/bind"
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/rlp"
|
||||
@ -49,6 +48,17 @@ func NewContractBackend(eth *Ethereum) *ContractBackend {
|
||||
}
|
||||
}
|
||||
|
||||
// HasCode implements bind.ContractVerifier.HasCode by retrieving any code associated
|
||||
// with the contract from the local API, and checking its size.
|
||||
func (b *ContractBackend) HasCode(contract common.Address, pending bool) (bool, error) {
|
||||
block := rpc.LatestBlockNumber
|
||||
if pending {
|
||||
block = rpc.PendingBlockNumber
|
||||
}
|
||||
out, err := b.bcapi.GetCode(contract, block)
|
||||
return len(common.FromHex(out)) > 0, err
|
||||
}
|
||||
|
||||
// ContractCall implements bind.ContractCaller executing an Ethereum contract
|
||||
// call with the specified data as the input. The pending flag requests execution
|
||||
// against the pending block, not the stable head of the chain.
|
||||
@ -64,9 +74,6 @@ func (b *ContractBackend) ContractCall(contract common.Address, data []byte, pen
|
||||
}
|
||||
// Execute the call and convert the output back to Go types
|
||||
out, err := b.bcapi.Call(args, block)
|
||||
if err == errNoCode {
|
||||
err = bind.ErrNoCode
|
||||
}
|
||||
return common.FromHex(out), err
|
||||
}
|
||||
|
||||
@ -95,9 +102,6 @@ func (b *ContractBackend) EstimateGasLimit(sender common.Address, contract *comm
|
||||
Value: *rpc.NewHexNumber(value),
|
||||
Data: common.ToHex(data),
|
||||
})
|
||||
if err == errNoCode {
|
||||
err = bind.ErrNoCode
|
||||
}
|
||||
return out.BigInt(), err
|
||||
}
|
||||
|
||||
|
File diff suppressed because it is too large
@ -43,8 +43,9 @@ var (
|
||||
genesis = core.GenesisBlockForTesting(testdb, testAddress, big.NewInt(1000000000))
|
||||
)
|
||||
|
||||
// Reduce the block cache limit, otherwise the tests will be very heavy.
|
||||
// Reduce some of the parameters to make the tester faster.
|
||||
func init() {
|
||||
MaxForkAncestry = uint64(10000)
|
||||
blockCacheLimit = 1024
|
||||
}
|
||||
|
||||
@ -52,11 +53,15 @@ func init() {
|
||||
// the returned hash chain is ordered head->parent. In addition, every 3rd block
|
||||
// contains a transaction and every 5th an uncle to allow testing correct block
|
||||
// reassembly.
|
||||
func makeChain(n int, seed byte, parent *types.Block, parentReceipts types.Receipts) ([]common.Hash, map[common.Hash]*types.Header, map[common.Hash]*types.Block, map[common.Hash]types.Receipts) {
|
||||
func makeChain(n int, seed byte, parent *types.Block, parentReceipts types.Receipts, heavy bool) ([]common.Hash, map[common.Hash]*types.Header, map[common.Hash]*types.Block, map[common.Hash]types.Receipts) {
|
||||
// Generate the block chain
|
||||
blocks, receipts := core.GenerateChain(parent, testdb, n, func(i int, block *core.BlockGen) {
|
||||
block.SetCoinbase(common.Address{seed})
|
||||
|
||||
// If a heavy chain is requested, delay blocks to raise difficulty
|
||||
if heavy {
|
||||
block.OffsetTime(-1)
|
||||
}
|
||||
// If the block number is multiple of 3, send a bonus transaction to the miner
|
||||
if parent == genesis && i%3 == 0 {
|
||||
tx, err := types.NewTransaction(block.TxNonce(testAddress), common.Address{seed}, big.NewInt(1000), params.TxGas, nil, nil).SignECDSA(testKey)
|
||||
@ -97,15 +102,19 @@ func makeChain(n int, seed byte, parent *types.Block, parentReceipts types.Recei
|
||||
|
||||
// makeChainFork creates two chains of length n, such that h1[:f] and
|
||||
// h2[:f] are different but have a common suffix of length n-f.
|
||||
func makeChainFork(n, f int, parent *types.Block, parentReceipts types.Receipts) ([]common.Hash, []common.Hash, map[common.Hash]*types.Header, map[common.Hash]*types.Header, map[common.Hash]*types.Block, map[common.Hash]*types.Block, map[common.Hash]types.Receipts, map[common.Hash]types.Receipts) {
|
||||
func makeChainFork(n, f int, parent *types.Block, parentReceipts types.Receipts, balanced bool) ([]common.Hash, []common.Hash, map[common.Hash]*types.Header, map[common.Hash]*types.Header, map[common.Hash]*types.Block, map[common.Hash]*types.Block, map[common.Hash]types.Receipts, map[common.Hash]types.Receipts) {
|
||||
// Create the common suffix
|
||||
hashes, headers, blocks, receipts := makeChain(n-f, 0, parent, parentReceipts)
|
||||
hashes, headers, blocks, receipts := makeChain(n-f, 0, parent, parentReceipts, false)
|
||||
|
||||
// Create the forks
|
||||
hashes1, headers1, blocks1, receipts1 := makeChain(f, 1, blocks[hashes[0]], receipts[hashes[0]])
|
||||
// Create the forks, making the second heavier if non-balanced forks were requested
|
||||
hashes1, headers1, blocks1, receipts1 := makeChain(f, 1, blocks[hashes[0]], receipts[hashes[0]], false)
|
||||
hashes1 = append(hashes1, hashes[1:]...)
|
||||
|
||||
hashes2, headers2, blocks2, receipts2 := makeChain(f, 2, blocks[hashes[0]], receipts[hashes[0]])
|
||||
heavy := false
|
||||
if !balanced {
|
||||
heavy = true
|
||||
}
|
||||
hashes2, headers2, blocks2, receipts2 := makeChain(f, 2, blocks[hashes[0]], receipts[hashes[0]], heavy)
|
||||
hashes2 = append(hashes2, hashes[1:]...)
|
||||
|
||||
for hash, header := range headers {
|
||||
@ -140,22 +149,25 @@ type downloadTester struct {
|
||||
peerReceipts map[string]map[common.Hash]types.Receipts // Receipts belonging to different test peers
|
||||
peerChainTds map[string]map[common.Hash]*big.Int // Total difficulties of the blocks in the peer chains
|
||||
|
||||
peerMissingStates map[string]map[common.Hash]bool // State entries that fast sync should not return
|
||||
|
||||
lock sync.RWMutex
|
||||
}
|
||||
|
||||
// newTester creates a new downloader test mocker.
|
||||
func newTester() *downloadTester {
|
||||
tester := &downloadTester{
|
||||
ownHashes: []common.Hash{genesis.Hash()},
|
||||
ownHeaders: map[common.Hash]*types.Header{genesis.Hash(): genesis.Header()},
|
||||
ownBlocks: map[common.Hash]*types.Block{genesis.Hash(): genesis},
|
||||
ownReceipts: map[common.Hash]types.Receipts{genesis.Hash(): nil},
|
||||
ownChainTd: map[common.Hash]*big.Int{genesis.Hash(): genesis.Difficulty()},
|
||||
peerHashes: make(map[string][]common.Hash),
|
||||
peerHeaders: make(map[string]map[common.Hash]*types.Header),
|
||||
peerBlocks: make(map[string]map[common.Hash]*types.Block),
|
||||
peerReceipts: make(map[string]map[common.Hash]types.Receipts),
|
||||
peerChainTds: make(map[string]map[common.Hash]*big.Int),
|
||||
ownHashes: []common.Hash{genesis.Hash()},
|
||||
ownHeaders: map[common.Hash]*types.Header{genesis.Hash(): genesis.Header()},
|
||||
ownBlocks: map[common.Hash]*types.Block{genesis.Hash(): genesis},
|
||||
ownReceipts: map[common.Hash]types.Receipts{genesis.Hash(): nil},
|
||||
ownChainTd: map[common.Hash]*big.Int{genesis.Hash(): genesis.Difficulty()},
|
||||
peerHashes: make(map[string][]common.Hash),
|
||||
peerHeaders: make(map[string]map[common.Hash]*types.Header),
|
||||
peerBlocks: make(map[string]map[common.Hash]*types.Block),
|
||||
peerReceipts: make(map[string]map[common.Hash]types.Receipts),
|
||||
peerChainTds: make(map[string]map[common.Hash]*big.Int),
|
||||
peerMissingStates: make(map[string]map[common.Hash]bool),
|
||||
}
|
||||
tester.stateDb, _ = ethdb.NewMemDatabase()
|
||||
tester.stateDb.Put(genesis.Root().Bytes(), []byte{0x00})
|
||||
@ -167,6 +179,12 @@ func newTester() *downloadTester {
|
||||
return tester
|
||||
}
|
||||
|
||||
// terminate aborts any operations on the embedded downloader and releases all
|
||||
// held resources.
|
||||
func (dl *downloadTester) terminate() {
|
||||
dl.downloader.Terminate()
|
||||
}
|
||||
|
||||
// sync starts synchronizing with a remote peer, blocking until it completes.
|
||||
func (dl *downloadTester) sync(id string, td *big.Int, mode SyncMode) error {
|
||||
dl.lock.RLock()
|
||||
@ -179,7 +197,17 @@ func (dl *downloadTester) sync(id string, td *big.Int, mode SyncMode) error {
|
||||
}
|
||||
}
|
||||
dl.lock.RUnlock()
|
||||
return dl.downloader.synchronise(id, hash, td, mode)
|
||||
|
||||
// Synchronise with the chosen peer and ensure proper cleanup afterwards
|
||||
err := dl.downloader.synchronise(id, hash, td, mode)
|
||||
select {
|
||||
case <-dl.downloader.cancelCh:
|
||||
// Ok, downloader fully cancelled after sync cycle
|
||||
default:
|
||||
// Downloader is still accepting packets, which can block up a peer
|
||||
panic("downloader active post sync cycle") // panic will be caught by tester
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
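The cleanup check added to sync above relies on a non-blocking select against the downloader's cancel channel: once the channel is closed the receive case fires immediately, while an open channel falls through to the default branch and the tester panics. A standalone illustration of that idiom:

```go
package main

import "fmt"

// cancelled reports whether ch has been closed, without ever blocking.
func cancelled(ch chan struct{}) bool {
	select {
	case <-ch:
		return true
	default:
		return false
	}
}

func main() {
	ch := make(chan struct{})
	fmt.Println(cancelled(ch)) // false: channel still open, default case wins

	close(ch)
	fmt.Println(cancelled(ch)) // true: receive from a closed channel succeeds immediately
}
```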
// hasHeader checks if a header is present in the tester's canonical chain.
|
||||
@ -389,6 +417,7 @@ func (dl *downloadTester) newSlowPeer(id string, version int, hashes []common.Ha
|
||||
dl.peerBlocks[id] = make(map[common.Hash]*types.Block)
|
||||
dl.peerReceipts[id] = make(map[common.Hash]types.Receipts)
|
||||
dl.peerChainTds[id] = make(map[common.Hash]*big.Int)
|
||||
dl.peerMissingStates[id] = make(map[common.Hash]bool)
|
||||
|
||||
genesis := hashes[len(hashes)-1]
|
||||
if header := headers[genesis]; header != nil {
|
||||
@ -551,8 +580,8 @@ func (dl *downloadTester) peerGetAbsHeadersFn(id string, delay time.Duration) fu
|
||||
hashes := dl.peerHashes[id]
|
||||
headers := dl.peerHeaders[id]
|
||||
result := make([]*types.Header, 0, amount)
|
||||
for i := 0; i < amount && len(hashes)-int(origin)-1-i >= 0; i++ {
|
||||
if header, ok := headers[hashes[len(hashes)-int(origin)-1-i]]; ok {
|
||||
for i := 0; i < amount && len(hashes)-int(origin)-1-i*(skip+1) >= 0; i++ {
|
||||
if header, ok := headers[hashes[len(hashes)-int(origin)-1-i*(skip+1)]]; ok {
|
||||
result = append(result, header)
|
||||
}
|
||||
}
|
||||
@ -629,7 +658,9 @@ func (dl *downloadTester) peerGetNodeDataFn(id string, delay time.Duration) func
|
||||
results := make([][]byte, 0, len(hashes))
|
||||
for _, hash := range hashes {
|
||||
if data, err := testdb.Get(hash.Bytes()); err == nil {
|
||||
results = append(results, data)
|
||||
if !dl.peerMissingStates[id][hash] {
|
||||
results = append(results, data)
|
||||
}
|
||||
}
|
||||
}
|
||||
go dl.downloader.DeliverNodeData(id, results)
|
||||
@ -712,9 +743,11 @@ func testCanonicalSynchronisation(t *testing.T, protocol int, mode SyncMode) {
|
||||
|
||||
// Create a small enough block chain to download
|
||||
targetBlocks := blockCacheLimit - 15
|
||||
hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil)
|
||||
hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil, false)
|
||||
|
||||
tester := newTester()
|
||||
defer tester.terminate()
|
||||
|
||||
tester.newPeer("peer", protocol, hashes, headers, blocks, receipts)
|
||||
|
||||
// Synchronise with the peer and make sure all relevant data was retrieved
|
||||
@ -736,9 +769,11 @@ func TestThrottling64Fast(t *testing.T) { testThrottling(t, 64, FastSync) }
|
||||
func testThrottling(t *testing.T, protocol int, mode SyncMode) {
|
||||
// Create a long block chain to download and the tester
|
||||
targetBlocks := 8 * blockCacheLimit
|
||||
hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil)
|
||||
hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil, false)
|
||||
|
||||
tester := newTester()
|
||||
defer tester.terminate()
|
||||
|
||||
tester.newPeer("peer", protocol, hashes, headers, blocks, receipts)
|
||||
|
||||
// Wrap the importer to allow stepping
|
||||
@ -810,22 +845,24 @@ func testThrottling(t *testing.T, protocol int, mode SyncMode) {
|
||||
// Tests that simple synchronization against a forked chain works correctly. In
|
||||
// this test common ancestor lookup should *not* be short circuited, and a full
|
||||
// binary search should be executed.
|
||||
func TestForkedSynchronisation61(t *testing.T) { testForkedSynchronisation(t, 61, FullSync) }
|
||||
func TestForkedSynchronisation62(t *testing.T) { testForkedSynchronisation(t, 62, FullSync) }
|
||||
func TestForkedSynchronisation63Full(t *testing.T) { testForkedSynchronisation(t, 63, FullSync) }
|
||||
func TestForkedSynchronisation63Fast(t *testing.T) { testForkedSynchronisation(t, 63, FastSync) }
|
||||
func TestForkedSynchronisation64Full(t *testing.T) { testForkedSynchronisation(t, 64, FullSync) }
|
||||
func TestForkedSynchronisation64Fast(t *testing.T) { testForkedSynchronisation(t, 64, FastSync) }
|
||||
func TestForkedSynchronisation64Light(t *testing.T) { testForkedSynchronisation(t, 64, LightSync) }
|
||||
func TestForkedSync61(t *testing.T) { testForkedSync(t, 61, FullSync) }
|
||||
func TestForkedSync62(t *testing.T) { testForkedSync(t, 62, FullSync) }
|
||||
func TestForkedSync63Full(t *testing.T) { testForkedSync(t, 63, FullSync) }
|
||||
func TestForkedSync63Fast(t *testing.T) { testForkedSync(t, 63, FastSync) }
|
||||
func TestForkedSync64Full(t *testing.T) { testForkedSync(t, 64, FullSync) }
|
||||
func TestForkedSync64Fast(t *testing.T) { testForkedSync(t, 64, FastSync) }
|
||||
func TestForkedSync64Light(t *testing.T) { testForkedSync(t, 64, LightSync) }
|
||||
|
||||
func testForkedSynchronisation(t *testing.T, protocol int, mode SyncMode) {
|
||||
func testForkedSync(t *testing.T, protocol int, mode SyncMode) {
|
||||
t.Parallel()
|
||||
|
||||
// Create a long enough forked chain
|
||||
common, fork := MaxHashFetch, 2*MaxHashFetch
|
||||
hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB := makeChainFork(common+fork, fork, genesis, nil)
|
||||
hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB := makeChainFork(common+fork, fork, genesis, nil, true)
|
||||
|
||||
tester := newTester()
|
||||
defer tester.terminate()
|
||||
|
||||
tester.newPeer("fork A", protocol, hashesA, headersA, blocksA, receiptsA)
|
||||
tester.newPeer("fork B", protocol, hashesB, headersB, blocksB, receiptsB)
|
||||
|
||||
@ -842,6 +879,42 @@ func testForkedSynchronisation(t *testing.T, protocol int, mode SyncMode) {
|
||||
assertOwnForkedChain(t, tester, common+1, []int{common + fork + 1, common + fork + 1})
|
||||
}
|
||||
|
||||
// Tests that synchronising against a much shorter but much heavier fork works
|
||||
// correctly and is not dropped.
|
||||
func TestHeavyForkedSync61(t *testing.T) { testHeavyForkedSync(t, 61, FullSync) }
|
||||
func TestHeavyForkedSync62(t *testing.T) { testHeavyForkedSync(t, 62, FullSync) }
|
||||
func TestHeavyForkedSync63Full(t *testing.T) { testHeavyForkedSync(t, 63, FullSync) }
|
||||
func TestHeavyForkedSync63Fast(t *testing.T) { testHeavyForkedSync(t, 63, FastSync) }
|
||||
func TestHeavyForkedSync64Full(t *testing.T) { testHeavyForkedSync(t, 64, FullSync) }
|
||||
func TestHeavyForkedSync64Fast(t *testing.T) { testHeavyForkedSync(t, 64, FastSync) }
|
||||
func TestHeavyForkedSync64Light(t *testing.T) { testHeavyForkedSync(t, 64, LightSync) }
|
||||
|
||||
func testHeavyForkedSync(t *testing.T, protocol int, mode SyncMode) {
|
||||
t.Parallel()
|
||||
|
||||
// Create a long enough forked chain
|
||||
common, fork := MaxHashFetch, 4*MaxHashFetch
|
||||
hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB := makeChainFork(common+fork, fork, genesis, nil, false)
|
||||
|
||||
tester := newTester()
|
||||
defer tester.terminate()
|
||||
|
||||
tester.newPeer("light", protocol, hashesA, headersA, blocksA, receiptsA)
|
||||
tester.newPeer("heavy", protocol, hashesB[fork/2:], headersB, blocksB, receiptsB)
|
||||
|
||||
// Synchronise with the peer and make sure all blocks were retrieved
|
||||
if err := tester.sync("light", nil, mode); err != nil {
|
||||
t.Fatalf("failed to synchronise blocks: %v", err)
|
||||
}
|
||||
assertOwnChain(t, tester, common+fork+1)
|
||||
|
||||
// Synchronise with the second peer and make sure that fork is pulled too
|
||||
if err := tester.sync("heavy", nil, mode); err != nil {
|
||||
t.Fatalf("failed to synchronise blocks: %v", err)
|
||||
}
|
||||
assertOwnForkedChain(t, tester, common+1, []int{common + fork + 1, common + fork/2 + 1})
|
||||
}
|
||||
|
||||
// Tests that an inactive downloader will not accept incoming hashes and blocks.
|
||||
func TestInactiveDownloader61(t *testing.T) {
|
||||
t.Parallel()
|
||||
@ -856,11 +929,85 @@ func TestInactiveDownloader61(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
// Tests that chain forks are contained within a certain interval of the current
|
||||
// chain head, ensuring that malicious peers cannot waste resources by feeding
|
||||
// long dead chains.
|
||||
func TestBoundedForkedSync61(t *testing.T) { testBoundedForkedSync(t, 61, FullSync) }
|
||||
func TestBoundedForkedSync62(t *testing.T) { testBoundedForkedSync(t, 62, FullSync) }
|
||||
func TestBoundedForkedSync63Full(t *testing.T) { testBoundedForkedSync(t, 63, FullSync) }
|
||||
func TestBoundedForkedSync63Fast(t *testing.T) { testBoundedForkedSync(t, 63, FastSync) }
|
||||
func TestBoundedForkedSync64Full(t *testing.T) { testBoundedForkedSync(t, 64, FullSync) }
|
||||
func TestBoundedForkedSync64Fast(t *testing.T) { testBoundedForkedSync(t, 64, FastSync) }
|
||||
func TestBoundedForkedSync64Light(t *testing.T) { testBoundedForkedSync(t, 64, LightSync) }
|
||||
|
||||
func testBoundedForkedSync(t *testing.T, protocol int, mode SyncMode) {
|
||||
t.Parallel()
|
||||
|
||||
// Create a long enough forked chain
|
||||
common, fork := 13, int(MaxForkAncestry+17)
|
||||
hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB := makeChainFork(common+fork, fork, genesis, nil, true)
|
||||
|
||||
tester := newTester()
|
||||
defer tester.terminate()
|
||||
|
||||
tester.newPeer("original", protocol, hashesA, headersA, blocksA, receiptsA)
|
||||
tester.newPeer("rewriter", protocol, hashesB, headersB, blocksB, receiptsB)
|
||||
|
||||
// Synchronise with the peer and make sure all blocks were retrieved
|
||||
if err := tester.sync("original", nil, mode); err != nil {
|
||||
t.Fatalf("failed to synchronise blocks: %v", err)
|
||||
}
|
||||
assertOwnChain(t, tester, common+fork+1)
|
||||
|
||||
// Synchronise with the second peer and ensure that the fork is rejected for being too old
|
||||
if err := tester.sync("rewriter", nil, mode); err != errInvalidAncestor {
|
||||
t.Fatalf("sync failure mismatch: have %v, want %v", err, errInvalidAncestor)
|
||||
}
|
||||
}
|
||||
|
||||
// Tests that chain forks are contained within a certain interval of the current
|
||||
// chain head for short but heavy forks too. These are a bit special because they
|
||||
// take different ancestor lookup paths.
|
||||
func TestBoundedHeavyForkedSync61(t *testing.T) { testBoundedHeavyForkedSync(t, 61, FullSync) }
|
||||
func TestBoundedHeavyForkedSync62(t *testing.T) { testBoundedHeavyForkedSync(t, 62, FullSync) }
|
||||
func TestBoundedHeavyForkedSync63Full(t *testing.T) { testBoundedHeavyForkedSync(t, 63, FullSync) }
|
||||
func TestBoundedHeavyForkedSync63Fast(t *testing.T) { testBoundedHeavyForkedSync(t, 63, FastSync) }
|
||||
func TestBoundedHeavyForkedSync64Full(t *testing.T) { testBoundedHeavyForkedSync(t, 64, FullSync) }
|
||||
func TestBoundedHeavyForkedSync64Fast(t *testing.T) { testBoundedHeavyForkedSync(t, 64, FastSync) }
|
||||
func TestBoundedHeavyForkedSync64Light(t *testing.T) { testBoundedHeavyForkedSync(t, 64, LightSync) }
|
||||
|
||||
func testBoundedHeavyForkedSync(t *testing.T, protocol int, mode SyncMode) {
|
||||
t.Parallel()
|
||||
|
||||
// Create a long enough forked chain
|
||||
common, fork := 13, int(MaxForkAncestry+17)
|
||||
hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB := makeChainFork(common+fork, fork, genesis, nil, false)
|
||||
|
||||
tester := newTester()
|
||||
defer tester.terminate()
|
||||
|
||||
tester.newPeer("original", protocol, hashesA, headersA, blocksA, receiptsA)
|
||||
tester.newPeer("heavy-rewriter", protocol, hashesB[MaxForkAncestry-17:], headersB, blocksB, receiptsB) // Root the fork below the ancestor limit
|
||||
|
||||
// Synchronise with the peer and make sure all blocks were retrieved
|
||||
if err := tester.sync("original", nil, mode); err != nil {
|
||||
t.Fatalf("failed to synchronise blocks: %v", err)
|
||||
}
|
||||
assertOwnChain(t, tester, common+fork+1)
|
||||
|
||||
// Synchronise with the second peer and ensure that the fork is rejected for being too old
|
||||
if err := tester.sync("heavy-rewriter", nil, mode); err != errInvalidAncestor {
|
||||
t.Fatalf("sync failure mismatch: have %v, want %v", err, errInvalidAncestor)
|
||||
}
|
||||
}
|
||||
|
||||
// Tests that an inactive downloader will not accept incoming block headers and
|
||||
// bodies.
|
||||
func TestInactiveDownloader62(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
tester := newTester()
|
||||
defer tester.terminate()
|
||||
|
||||
// Check that neither block headers nor bodies are accepted
|
||||
if err := tester.downloader.DeliverHeaders("bad peer", []*types.Header{}); err != errNoSyncActive {
|
||||
@ -875,7 +1022,9 @@ func TestInactiveDownloader62(t *testing.T) {
|
||||
// bodies and receipts.
|
||||
func TestInactiveDownloader63(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
tester := newTester()
|
||||
defer tester.terminate()
|
||||
|
||||
// Check that neither block headers nor bodies are accepted
|
||||
if err := tester.downloader.DeliverHeaders("bad peer", []*types.Header{}); err != errNoSyncActive {
|
||||
@ -909,9 +1058,11 @@ func testCancel(t *testing.T, protocol int, mode SyncMode) {
|
||||
if targetBlocks >= MaxHeaderFetch {
|
||||
targetBlocks = MaxHeaderFetch - 15
|
||||
}
|
||||
hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil)
|
||||
hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil, false)
|
||||
|
||||
tester := newTester()
|
||||
defer tester.terminate()
|
||||
|
||||
tester.newPeer("peer", protocol, hashes, headers, blocks, receipts)
|
||||
|
||||
// Make sure canceling works with a pristine downloader
|
||||
@ -944,9 +1095,11 @@ func testMultiSynchronisation(t *testing.T, protocol int, mode SyncMode) {
|
||||
// Create various peers with various parts of the chain
|
||||
targetPeers := 8
|
||||
targetBlocks := targetPeers*blockCacheLimit - 15
|
||||
hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil)
|
||||
hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil, false)
|
||||
|
||||
tester := newTester()
|
||||
defer tester.terminate()
|
||||
|
||||
for i := 0; i < targetPeers; i++ {
|
||||
id := fmt.Sprintf("peer #%d", i)
|
||||
tester.newPeer(id, protocol, hashes[i*blockCacheLimit:], headers, blocks, receipts)
|
||||
@ -972,10 +1125,12 @@ func testMultiProtoSync(t *testing.T, protocol int, mode SyncMode) {
|
||||
|
||||
// Create a small enough block chain to download
|
||||
targetBlocks := blockCacheLimit - 15
|
||||
hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil)
|
||||
hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil, false)
|
||||
|
||||
// Create peers of every type
|
||||
tester := newTester()
|
||||
defer tester.terminate()
|
||||
|
||||
tester.newPeer("peer 61", 61, hashes, nil, blocks, nil)
|
||||
tester.newPeer("peer 62", 62, hashes, headers, blocks, nil)
|
||||
tester.newPeer("peer 63", 63, hashes, headers, blocks, receipts)
|
||||
@ -1010,9 +1165,11 @@ func testEmptyShortCircuit(t *testing.T, protocol int, mode SyncMode) {
|
||||
|
||||
// Create a block chain to download
|
||||
targetBlocks := 2*blockCacheLimit - 15
|
||||
hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil)
|
||||
hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil, false)
|
||||
|
||||
tester := newTester()
|
||||
defer tester.terminate()
|
||||
|
||||
tester.newPeer("peer", protocol, hashes, headers, blocks, receipts)
|
||||
|
||||
// Instrument the downloader to signal body requests
|
||||
@ -1063,9 +1220,10 @@ func testMissingHeaderAttack(t *testing.T, protocol int, mode SyncMode) {
|
||||
|
||||
// Create a small enough block chain to download
|
||||
targetBlocks := blockCacheLimit - 15
|
||||
hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil)
|
||||
hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil, false)
|
||||
|
||||
tester := newTester()
|
||||
defer tester.terminate()
|
||||
|
||||
// Attempt a full sync with an attacker feeding gapped headers
|
||||
tester.newPeer("attack", protocol, hashes, headers, blocks, receipts)
|
||||
@ -1095,9 +1253,10 @@ func TestShiftedHeaderAttack64Light(t *testing.T) { testShiftedHeaderAttack(t, 6
|
||||
func testShiftedHeaderAttack(t *testing.T, protocol int, mode SyncMode) {
|
||||
// Create a small enough block chain to download
|
||||
targetBlocks := blockCacheLimit - 15
|
||||
hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil)
|
||||
hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil, false)
|
||||
|
||||
tester := newTester()
|
||||
defer tester.terminate()
|
||||
|
||||
// Attempt a full sync with an attacker feeding shifted headers
|
||||
tester.newPeer("attack", protocol, hashes, headers, blocks, receipts)
|
||||
@ -1126,9 +1285,10 @@ func TestInvalidHeaderRollback64Light(t *testing.T) { testInvalidHeaderRollback(
|
||||
func testInvalidHeaderRollback(t *testing.T, protocol int, mode SyncMode) {
|
||||
// Create a small enough block chain to download
|
||||
targetBlocks := 3*fsHeaderSafetyNet + fsMinFullBlocks
|
||||
hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil)
|
||||
hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil, false)
|
||||
|
||||
tester := newTester()
|
||||
defer tester.terminate()
|
||||
|
||||
// Attempt to sync with an attacker that feeds junk during the fast sync phase.
|
||||
// This should result in the last fsHeaderSafetyNet headers being rolled back.
|
||||
@ -1147,6 +1307,7 @@ func testInvalidHeaderRollback(t *testing.T, protocol int, mode SyncMode) {
|
||||
// rolled back, and also the pivot point being reverted to a non-block status.
|
||||
tester.newPeer("block-attack", protocol, hashes, headers, blocks, receipts)
|
||||
missing = 3*fsHeaderSafetyNet + MaxHeaderFetch + 1
|
||||
delete(tester.peerHeaders["fast-attack"], hashes[len(hashes)-missing]) // Make sure the fast-attacker doesn't fill in
|
||||
delete(tester.peerHeaders["block-attack"], hashes[len(hashes)-missing])
|
||||
|
||||
if err := tester.sync("block-attack", nil, mode); err == nil {
|
||||
@ -1166,7 +1327,7 @@ func testInvalidHeaderRollback(t *testing.T, protocol int, mode SyncMode) {
|
||||
tester.newPeer("withhold-attack", protocol, hashes, headers, blocks, receipts)
|
||||
missing = 3*fsHeaderSafetyNet + MaxHeaderFetch + 1
|
||||
|
||||
tester.downloader.noFast = false
|
||||
tester.downloader.fsPivotFails = 0
|
||||
tester.downloader.syncInitHook = func(uint64, uint64) {
|
||||
for i := missing; i <= len(hashes); i++ {
|
||||
delete(tester.peerHeaders["withhold-attack"], hashes[len(hashes)-i])
|
||||
@ -1185,6 +1346,8 @@ func testInvalidHeaderRollback(t *testing.T, protocol int, mode SyncMode) {
|
||||
t.Errorf("fast sync pivot block #%d not rolled back", head)
|
||||
}
|
||||
}
|
||||
tester.downloader.fsPivotFails = fsCriticalTrials
|
||||
|
||||
// Synchronise with the valid peer and make sure sync succeeds. Since the last
|
||||
// rollback should also disable fast syncing for this process, verify that we
|
||||
// did a fresh full sync. Note, we can't assert anything about the receipts
|
||||
@ -1217,9 +1380,11 @@ func testHighTDStarvationAttack(t *testing.T, protocol int, mode SyncMode) {
|
||||
t.Parallel()
|
||||
|
||||
tester := newTester()
|
||||
hashes, headers, blocks, receipts := makeChain(0, 0, genesis, nil)
|
||||
defer tester.terminate()
|
||||
|
||||
hashes, headers, blocks, receipts := makeChain(0, 0, genesis, nil, false)
|
||||
tester.newPeer("attack", protocol, []common.Hash{hashes[0]}, headers, blocks, receipts)
|
||||
|
||||
if err := tester.sync("attack", big.NewInt(1000000), mode); err != errStallingPeer {
|
||||
t.Fatalf("synchronisation error mismatch: have %v, want %v", err, errStallingPeer)
|
||||
}
|
||||
@ -1237,29 +1402,33 @@ func testBlockHeaderAttackerDropping(t *testing.T, protocol int) {
|
||||
result error
|
||||
drop bool
|
||||
}{
|
||||
{nil, false}, // Sync succeeded, all is well
|
||||
{errBusy, false}, // Sync is already in progress, no problem
|
||||
{errUnknownPeer, false}, // Peer is unknown, was already dropped, don't double drop
|
||||
{errBadPeer, true}, // Peer was deemed bad for some reason, drop it
|
||||
{errStallingPeer, true}, // Peer was detected to be stalling, drop it
|
||||
{errNoPeers, false}, // No peers to download from, soft race, no issue
|
||||
{errTimeout, true}, // No hashes received in due time, drop the peer
|
||||
{errEmptyHashSet, true}, // No hashes were returned as a response, drop as it's a dead end
|
||||
{errEmptyHeaderSet, true}, // No headers were returned as a response, drop as it's a dead end
|
||||
{errPeersUnavailable, true}, // Nobody had the advertised blocks, drop the advertiser
|
||||
{errInvalidChain, true}, // Hash chain was detected as invalid, definitely drop
|
||||
{errInvalidBlock, false}, // A bad peer was detected, but not the sync origin
|
||||
{errInvalidBody, false}, // A bad peer was detected, but not the sync origin
|
||||
{errInvalidReceipt, false}, // A bad peer was detected, but not the sync origin
|
||||
{errCancelHashFetch, false}, // Synchronisation was canceled, origin may be innocent, don't drop
|
||||
{errCancelBlockFetch, false}, // Synchronisation was canceled, origin may be innocent, don't drop
|
||||
{errCancelHeaderFetch, false}, // Synchronisation was canceled, origin may be innocent, don't drop
|
||||
{errCancelBodyFetch, false}, // Synchronisation was canceled, origin may be innocent, don't drop
|
||||
{errCancelReceiptFetch, false}, // Synchronisation was canceled, origin may be innocent, don't drop
|
||||
{errCancelProcessing, false}, // Synchronisation was canceled, origin may be innocent, don't drop
|
||||
{nil, false}, // Sync succeeded, all is well
|
||||
{errBusy, false}, // Sync is already in progress, no problem
|
||||
{errUnknownPeer, false}, // Peer is unknown, was already dropped, don't double drop
|
||||
{errBadPeer, true}, // Peer was deemed bad for some reason, drop it
|
||||
{errStallingPeer, true}, // Peer was detected to be stalling, drop it
|
||||
{errNoPeers, false}, // No peers to download from, soft race, no issue
|
||||
{errTimeout, true}, // No hashes received in due time, drop the peer
|
||||
{errEmptyHashSet, true}, // No hashes were returned as a response, drop as it's a dead end
|
||||
{errEmptyHeaderSet, true}, // No headers were returned as a response, drop as it's a dead end
|
||||
{errPeersUnavailable, true}, // Nobody had the advertised blocks, drop the advertiser
|
||||
{errInvalidAncestor, true}, // Agreed upon ancestor is not acceptable, drop the chain rewriter
|
||||
{errInvalidChain, true}, // Hash chain was detected as invalid, definitely drop
|
||||
{errInvalidBlock, false}, // A bad peer was detected, but not the sync origin
|
||||
{errInvalidBody, false}, // A bad peer was detected, but not the sync origin
|
||||
{errInvalidReceipt, false}, // A bad peer was detected, but not the sync origin
|
||||
{errCancelHashFetch, false}, // Synchronisation was canceled, origin may be innocent, don't drop
|
||||
{errCancelBlockFetch, false}, // Synchronisation was canceled, origin may be innocent, don't drop
|
||||
{errCancelHeaderFetch, false}, // Synchronisation was canceled, origin may be innocent, don't drop
|
||||
{errCancelBodyFetch, false}, // Synchronisation was canceled, origin may be innocent, don't drop
|
||||
{errCancelReceiptFetch, false}, // Synchronisation was canceled, origin may be innocent, don't drop
|
||||
{errCancelHeaderProcessing, false}, // Synchronisation was canceled, origin may be innocent, don't drop
|
||||
{errCancelContentProcessing, false}, // Synchronisation was canceled, origin may be innocent, don't drop
|
||||
}
|
||||
// Run the tests and check disconnection status
|
||||
tester := newTester()
|
||||
defer tester.terminate()
|
||||
|
||||
for i, tt := range tests {
|
||||
// Register a new peer and ensure its presence
|
||||
id := fmt.Sprintf("test %d", i)
|
||||
@ -1294,13 +1463,15 @@ func testSyncProgress(t *testing.T, protocol int, mode SyncMode) {
|
||||
|
||||
// Create a small enough block chain to download
|
||||
targetBlocks := blockCacheLimit - 15
|
||||
hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil)
|
||||
hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil, false)
|
||||
|
||||
// Set a sync init hook to catch progress changes
|
||||
starting := make(chan struct{})
|
||||
progress := make(chan struct{})
|
||||
|
||||
tester := newTester()
|
||||
defer tester.terminate()
|
||||
|
||||
tester.downloader.syncInitHook = func(origin, latest uint64) {
|
||||
starting <- struct{}{}
|
||||
<-progress
|
||||
@ -1366,13 +1537,15 @@ func testForkedSyncProgress(t *testing.T, protocol int, mode SyncMode) {
|
||||
|
||||
// Create a forked chain to simulate origin reversal
|
||||
common, fork := MaxHashFetch, 2*MaxHashFetch
|
||||
hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB := makeChainFork(common+fork, fork, genesis, nil)
|
||||
hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB := makeChainFork(common+fork, fork, genesis, nil, true)
|
||||
|
||||
// Set a sync init hook to catch progress changes
|
||||
starting := make(chan struct{})
|
||||
progress := make(chan struct{})
|
||||
|
||||
tester := newTester()
|
||||
defer tester.terminate()
|
||||
|
||||
tester.downloader.syncInitHook = func(origin, latest uint64) {
|
||||
starting <- struct{}{}
|
||||
<-progress
|
||||
@ -1441,13 +1614,15 @@ func testFailedSyncProgress(t *testing.T, protocol int, mode SyncMode) {
|
||||
|
||||
// Create a small enough block chain to download
|
||||
targetBlocks := blockCacheLimit - 15
|
||||
hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil)
|
||||
hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil, false)
|
||||
|
||||
// Set a sync init hook to catch progress changes
|
||||
starting := make(chan struct{})
|
||||
progress := make(chan struct{})
|
||||
|
||||
tester := newTester()
|
||||
defer tester.terminate()
|
||||
|
||||
tester.downloader.syncInitHook = func(origin, latest uint64) {
|
||||
starting <- struct{}{}
|
||||
<-progress
|
||||
@ -1517,13 +1692,15 @@ func testFakedSyncProgress(t *testing.T, protocol int, mode SyncMode) {
|
||||
|
||||
// Create a small block chain
|
||||
targetBlocks := blockCacheLimit - 15
|
||||
hashes, headers, blocks, receipts := makeChain(targetBlocks+3, 0, genesis, nil)
|
||||
hashes, headers, blocks, receipts := makeChain(targetBlocks+3, 0, genesis, nil, false)
|
||||
|
||||
// Set a sync init hook to catch progress changes
|
||||
starting := make(chan struct{})
|
||||
progress := make(chan struct{})
|
||||
|
||||
tester := newTester()
|
||||
defer tester.terminate()
|
||||
|
||||
tester.downloader.syncInitHook = func(origin, latest uint64) {
|
||||
starting <- struct{}{}
|
||||
<-progress
|
||||
@ -1590,7 +1767,7 @@ func TestDeliverHeadersHang64Light(t *testing.T) { testDeliverHeadersHang(t, 64,
|
||||
|
||||
func testDeliverHeadersHang(t *testing.T, protocol int, mode SyncMode) {
|
||||
t.Parallel()
|
||||
hashes, headers, blocks, receipts := makeChain(5, 0, genesis, nil)
|
||||
hashes, headers, blocks, receipts := makeChain(5, 0, genesis, nil, false)
|
||||
fakeHeads := []*types.Header{{}, {}, {}, {}}
|
||||
for i := 0; i < 200; i++ {
|
||||
tester := newTester()
|
||||
@ -1610,7 +1787,7 @@ func testDeliverHeadersHang(t *testing.T, protocol int, mode SyncMode) {
|
||||
impl := tester.peerGetAbsHeadersFn("peer", 0)
|
||||
go impl(from, count, skip, reverse)
|
||||
// None of the extra deliveries should block.
|
||||
timeout := time.After(5 * time.Second)
|
||||
timeout := time.After(15 * time.Second)
|
||||
for i := 0; i < cap(deliveriesDone); i++ {
|
||||
select {
|
||||
case <-deliveriesDone:
|
||||
@ -1623,5 +1800,48 @@ func testDeliverHeadersHang(t *testing.T, protocol int, mode SyncMode) {
|
||||
if err := tester.sync("peer", nil, mode); err != nil {
|
||||
t.Errorf("sync failed: %v", err)
|
||||
}
|
||||
tester.terminate()
|
||||
}
|
||||
}
|
||||
|
||||
// Tests that if fast sync aborts in the critical section, it can restart a few
|
||||
// times before giving up.
|
||||
func TestFastCriticalRestarts63(t *testing.T) { testFastCriticalRestarts(t, 63) }
|
||||
func TestFastCriticalRestarts64(t *testing.T) { testFastCriticalRestarts(t, 64) }
|
||||
|
||||
func testFastCriticalRestarts(t *testing.T, protocol int) {
|
||||
t.Parallel()
|
||||
|
||||
// Create a large enough blockchain to actually fast sync on
|
||||
targetBlocks := fsMinFullBlocks + 2*fsPivotInterval - 15
|
||||
hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil, false)
|
||||
|
||||
// Create a tester peer with the critical section state roots missing (force failures)
|
||||
tester := newTester()
|
||||
defer tester.terminate()
|
||||
|
||||
tester.newPeer("peer", protocol, hashes, headers, blocks, receipts)
|
||||
for i := 0; i < fsPivotInterval; i++ {
|
||||
tester.peerMissingStates["peer"][headers[hashes[fsMinFullBlocks+i]].Root] = true
|
||||
}
|
||||
// Synchronise with the peer a few times and make sure each attempt fails until the retry limit is hit
|
||||
for i := 0; i < fsCriticalTrials; i++ {
|
||||
// Attempt a sync and ensure it fails properly
|
||||
if err := tester.sync("peer", nil, FastSync); err == nil {
|
||||
t.Fatalf("failing fast sync succeeded: %v", err)
|
||||
}
|
||||
time.Sleep(500 * time.Millisecond) // Make sure no in-flight requests remain
|
||||
|
||||
// If it's the first failure, pivot should be locked => reenable all others to detect pivot changes
|
||||
if i == 0 {
|
||||
tester.lock.Lock()
|
||||
tester.peerMissingStates["peer"] = map[common.Hash]bool{tester.downloader.fsPivotLock.Root: true}
|
||||
tester.lock.Unlock()
|
||||
}
|
||||
}
|
||||
// Retry limit exhausted, downloader will switch to full sync, should succeed
|
||||
if err := tester.sync("peer", nil, FastSync); err != nil {
|
||||
t.Fatalf("failed to synchronise blocks in slow sync: %v", err)
|
||||
}
|
||||
assertOwnChain(t, tester, targetBlocks+1)
|
||||
}
|
||||
|
@ -23,6 +23,8 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"math"
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
@ -31,8 +33,8 @@ import (
|
||||
)
|
||||
|
||||
const (
|
||||
maxLackingHashes = 4096 // Maximum number of entries allowed on the list of lacking items
|
||||
throughputImpact = 0.1 // The impact a single measurement has on a peer's final throughput value.
|
||||
maxLackingHashes = 4096 // Maximum number of entries allowed on the list of lacking items
|
||||
measurementImpact = 0.1 // The impact a single measurement has on a peer's final throughput value.
|
||||
)
|
||||
|
||||
// Hash and block fetchers belonging to eth/61 and below
|
||||
@ -58,15 +60,20 @@ type peer struct {
|
||||
id string // Unique identifier of the peer
|
||||
head common.Hash // Hash of the peer's latest known block
|
||||
|
||||
headerIdle int32 // Current header activity state of the peer (idle = 0, active = 1)
|
||||
blockIdle int32 // Current block activity state of the peer (idle = 0, active = 1)
|
||||
receiptIdle int32 // Current receipt activity state of the peer (idle = 0, active = 1)
|
||||
stateIdle int32 // Current node data activity state of the peer (idle = 0, active = 1)
|
||||
|
||||
headerThroughput float64 // Number of headers measured to be retrievable per second
|
||||
blockThroughput float64 // Number of blocks (bodies) measured to be retrievable per second
|
||||
receiptThroughput float64 // Number of receipts measured to be retrievable per second
|
||||
stateThroughput float64 // Number of node data pieces measured to be retrievable per second
|
||||
|
||||
blockStarted time.Time // Time instance when the last block (body)fetch was started
|
||||
rtt time.Duration // Request round trip time to track responsiveness (QoS)
|
||||
|
||||
headerStarted time.Time // Time instance when the last header fetch was started
|
||||
blockStarted time.Time // Time instance when the last block (body) fetch was started
|
||||
receiptStarted time.Time // Time instance when the last receipt fetch was started
|
||||
stateStarted time.Time // Time instance when the last node data fetch was started
|
||||
|
||||
@ -118,10 +125,12 @@ func (p *peer) Reset() {
|
||||
p.lock.Lock()
|
||||
defer p.lock.Unlock()
|
||||
|
||||
atomic.StoreInt32(&p.headerIdle, 0)
|
||||
atomic.StoreInt32(&p.blockIdle, 0)
|
||||
atomic.StoreInt32(&p.receiptIdle, 0)
|
||||
atomic.StoreInt32(&p.stateIdle, 0)
|
||||
|
||||
p.headerThroughput = 0
|
||||
p.blockThroughput = 0
|
||||
p.receiptThroughput = 0
|
||||
p.stateThroughput = 0
|
||||
@ -151,6 +160,24 @@ func (p *peer) Fetch61(request *fetchRequest) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// FetchHeaders sends a header retrieval request to the remote peer.
|
||||
func (p *peer) FetchHeaders(from uint64, count int) error {
|
||||
// Sanity check the protocol version
|
||||
if p.version < 62 {
|
||||
panic(fmt.Sprintf("header fetch [eth/62+] requested on eth/%d", p.version))
|
||||
}
|
||||
// Short circuit if the peer is already fetching
|
||||
if !atomic.CompareAndSwapInt32(&p.headerIdle, 0, 1) {
|
||||
return errAlreadyFetching
|
||||
}
|
||||
p.headerStarted = time.Now()
|
||||
|
||||
// Issue the header retrieval request (absolute upwards, without gaps)
|
||||
go p.getAbsHeaders(from, count, 0, false)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// FetchBodies sends a block body retrieval request to the remote peer.
|
||||
func (p *peer) FetchBodies(request *fetchRequest) error {
|
||||
// Sanity check the protocol version
|
||||
@ -217,6 +244,13 @@ func (p *peer) FetchNodeData(request *fetchRequest) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetHeadersIdle sets the peer to idle, allowing it to execute new header retrieval
|
||||
// requests. Its estimated header retrieval throughput is updated with that measured
|
||||
// just now.
|
||||
func (p *peer) SetHeadersIdle(delivered int) {
|
||||
p.setIdle(p.headerStarted, delivered, &p.headerThroughput, &p.headerIdle)
|
||||
}
|
||||
|
||||
// SetBlocksIdle sets the peer to idle, allowing it to execute new block retrieval
|
||||
// requests. Its estimated block retrieval throughput is updated with that measured
|
||||
// just now.
|
||||
@ -260,35 +294,47 @@ func (p *peer) setIdle(started time.Time, delivered int, throughput *float64, id
|
||||
return
|
||||
}
|
||||
// Otherwise update the throughput with a new measurement
|
||||
measured := float64(delivered) / (float64(time.Since(started)+1) / float64(time.Second)) // +1 (ns) to ensure non-zero divisor
|
||||
*throughput = (1-throughputImpact)*(*throughput) + throughputImpact*measured
|
||||
elapsed := time.Since(started) + 1 // +1 (ns) to ensure non-zero divisor
|
||||
measured := float64(delivered) / (float64(elapsed) / float64(time.Second))
|
||||
|
||||
*throughput = (1-measurementImpact)*(*throughput) + measurementImpact*measured
|
||||
p.rtt = time.Duration((1-measurementImpact)*float64(p.rtt) + measurementImpact*float64(elapsed))
|
||||
}
|
||||
|
||||
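Both the throughput estimate and the new rtt field are folded in as exponential moving averages weighted by measurementImpact, so one unusually fast or slow response only nudges the running value by 10%. A small sketch of the same update rule with made-up numbers:

```go
package main

import (
	"fmt"
	"time"
)

const measurementImpact = 0.1 // weight of a single new measurement, mirroring the constant above

// ema folds one new sample into a running estimate.
func ema(old, sample float64) float64 {
	return (1-measurementImpact)*old + measurementImpact*sample
}

func main() {
	// Throughput: previously ~100 items/s, latest burst measured at 200 items/s.
	fmt.Println(ema(100, 200)) // 110 -- moves only 10% of the way

	// RTT: previously 800ms, latest request took 300ms.
	oldRTT, elapsed := 800*time.Millisecond, 300*time.Millisecond
	newRTT := time.Duration(ema(float64(oldRTT), float64(elapsed)))
	fmt.Println(newRTT) // 750ms
}
```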
// HeaderCapacity retrieves the peer's header download allowance based on its
|
||||
// previously discovered throughput.
|
||||
func (p *peer) HeaderCapacity(targetRTT time.Duration) int {
|
||||
p.lock.RLock()
|
||||
defer p.lock.RUnlock()
|
||||
|
||||
return int(math.Min(1+math.Max(1, p.headerThroughput*float64(targetRTT)/float64(time.Second)), float64(MaxHeaderFetch)))
|
||||
}
|
||||
|
||||
// BlockCapacity retrieves the peer's block download allowance based on its
|
||||
// previously discovered throughput.
|
||||
func (p *peer) BlockCapacity() int {
|
||||
func (p *peer) BlockCapacity(targetRTT time.Duration) int {
|
||||
p.lock.RLock()
|
||||
defer p.lock.RUnlock()
|
||||
|
||||
return int(math.Max(1, math.Min(p.blockThroughput*float64(blockTargetRTT)/float64(time.Second), float64(MaxBlockFetch))))
|
||||
return int(math.Min(1+math.Max(1, p.blockThroughput*float64(targetRTT)/float64(time.Second)), float64(MaxBlockFetch)))
|
||||
}
|
||||
|
||||
// ReceiptCapacity retrieves the peer's receipt download allowance based on its
|
||||
// previously discovered throughput.
|
||||
func (p *peer) ReceiptCapacity() int {
|
||||
func (p *peer) ReceiptCapacity(targetRTT time.Duration) int {
|
||||
p.lock.RLock()
|
||||
defer p.lock.RUnlock()
|
||||
|
||||
return int(math.Max(1, math.Min(p.receiptThroughput*float64(receiptTargetRTT)/float64(time.Second), float64(MaxReceiptFetch))))
|
||||
return int(math.Min(1+math.Max(1, p.receiptThroughput*float64(targetRTT)/float64(time.Second)), float64(MaxReceiptFetch)))
|
||||
}
|
||||
|
||||
// NodeDataCapacity retrieves the peer's state download allowance based on its
|
||||
// previously discovered throughput.
|
||||
func (p *peer) NodeDataCapacity() int {
|
||||
func (p *peer) NodeDataCapacity(targetRTT time.Duration) int {
|
||||
p.lock.RLock()
|
||||
defer p.lock.RUnlock()
|
||||
|
||||
return int(math.Max(1, math.Min(p.stateThroughput*float64(stateTargetRTT)/float64(time.Second), float64(MaxStateFetch))))
|
||||
return int(math.Min(1+math.Max(1, p.stateThroughput*float64(targetRTT)/float64(time.Second)), float64(MaxStateFetch)))
|
||||
}
|
||||
|
||||
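The four capacity helpers now share one shape: scale the measured throughput by the dynamic target round-trip time, add one so a freshly reset peer still receives a minimal request, and clamp at the per-request protocol maximum. A hedged sketch of that formula, using an assumed cap of 192 items in place of the real MaxHeaderFetch/MaxBlockFetch constants defined elsewhere in the package:

```go
package main

import (
	"fmt"
	"math"
	"time"
)

// capacity mirrors the pattern used by the capacity helpers above:
// min(1 + max(1, throughput*targetRTT), maxFetch).
func capacity(throughput float64, targetRTT time.Duration, maxFetch int) int {
	scaled := throughput * float64(targetRTT) / float64(time.Second)
	return int(math.Min(1+math.Max(1, scaled), float64(maxFetch)))
}

func main() {
	const maxFetch = 192 // assumed per-request cap, for the example only

	fmt.Println(capacity(500, 400*time.Millisecond, maxFetch)) // 192: 1+200 clamped to the cap
	fmt.Println(capacity(50, 400*time.Millisecond, maxFetch))  // 21: 1 + 50*0.4
	fmt.Println(capacity(0, 400*time.Millisecond, maxFetch))   // 2: idle peers still get a minimal request
}
```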
// MarkLacking appends a new entity to the set of items (blocks, receipts, states)
|
||||
@ -322,15 +368,17 @@ func (p *peer) String() string {
|
||||
p.lock.RLock()
|
||||
defer p.lock.RUnlock()
|
||||
|
||||
return fmt.Sprintf("Peer %s [%s]", p.id,
|
||||
fmt.Sprintf("blocks %3.2f/s, ", p.blockThroughput)+
|
||||
fmt.Sprintf("receipts %3.2f/s, ", p.receiptThroughput)+
|
||||
fmt.Sprintf("states %3.2f/s, ", p.stateThroughput)+
|
||||
fmt.Sprintf("lacking %4d", len(p.lacking)),
|
||||
)
|
||||
return fmt.Sprintf("Peer %s [%s]", p.id, strings.Join([]string{
|
||||
fmt.Sprintf("hs %3.2f/s", p.headerThroughput),
|
||||
fmt.Sprintf("bs %3.2f/s", p.blockThroughput),
|
||||
fmt.Sprintf("rs %3.2f/s", p.receiptThroughput),
|
||||
fmt.Sprintf("ss %3.2f/s", p.stateThroughput),
|
||||
fmt.Sprintf("miss %4d", len(p.lacking)),
|
||||
fmt.Sprintf("rtt %v", p.rtt),
|
||||
}, ", "))
|
||||
}
|
||||
|
||||
// peerSet represents the collection of active peer participating in the block
|
||||
// peerSet represents the collection of active peers participating in the chain
|
||||
// download procedure.
|
||||
type peerSet struct {
|
||||
peers map[string]*peer
|
||||
@ -359,9 +407,13 @@ func (ps *peerSet) Reset() {
|
||||
// peer is already known.
|
||||
//
|
||||
// The method also sets the starting throughput values of the new peer to the
|
||||
// average of all existing peers, to give it a realistic change of being used
|
||||
// average of all existing peers, to give it a realistic chance of being used
|
||||
// for data retrievals.
|
||||
func (ps *peerSet) Register(p *peer) error {
|
||||
// Retrieve the current median RTT as a sane default
|
||||
p.rtt = ps.medianRTT()
|
||||
|
||||
// Register the new peer with some meaningful defaults
|
||||
ps.lock.Lock()
|
||||
defer ps.lock.Unlock()
|
||||
|
||||
@ -369,15 +421,17 @@ func (ps *peerSet) Register(p *peer) error {
|
||||
return errAlreadyRegistered
|
||||
}
|
||||
if len(ps.peers) > 0 {
|
||||
p.blockThroughput, p.receiptThroughput, p.stateThroughput = 0, 0, 0
|
||||
p.headerThroughput, p.blockThroughput, p.receiptThroughput, p.stateThroughput = 0, 0, 0, 0
|
||||
|
||||
for _, peer := range ps.peers {
|
||||
peer.lock.RLock()
|
||||
p.headerThroughput += peer.headerThroughput
|
||||
p.blockThroughput += peer.blockThroughput
|
||||
p.receiptThroughput += peer.receiptThroughput
|
||||
p.stateThroughput += peer.stateThroughput
|
||||
peer.lock.RUnlock()
|
||||
}
|
||||
p.headerThroughput /= float64(len(ps.peers))
|
||||
p.blockThroughput /= float64(len(ps.peers))
|
||||
p.receiptThroughput /= float64(len(ps.peers))
|
||||
p.stateThroughput /= float64(len(ps.peers))
|
||||
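Register now seeds a newcomer's header, block, receipt and state throughput with the average of the peers already in the set (and its RTT with the current median), so it is neither starved of work nor flooded before any real measurements exist. Roughly, the averaging amounts to:

```go
package main

import "fmt"

// seed returns the mean of the existing peers' throughput estimates, which is
// what a freshly registered peer starts out with (0 if it is the first peer).
func seed(existing []float64) float64 {
	if len(existing) == 0 {
		return 0
	}
	var sum float64
	for _, t := range existing {
		sum += t
	}
	return sum / float64(len(existing))
}

func main() {
	fmt.Println(seed([]float64{120, 80, 100})) // 100
	fmt.Println(seed(nil))                     // 0: first peer starts from scratch
}
```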
@ -441,6 +495,20 @@ func (ps *peerSet) BlockIdlePeers() ([]*peer, int) {
|
||||
return ps.idlePeers(61, 61, idle, throughput)
|
||||
}
|
||||
|
||||
// HeaderIdlePeers retrieves a flat list of all the currently header-idle peers
|
||||
// within the active peer set, ordered by their reputation.
|
||||
func (ps *peerSet) HeaderIdlePeers() ([]*peer, int) {
|
||||
idle := func(p *peer) bool {
|
||||
return atomic.LoadInt32(&p.headerIdle) == 0
|
||||
}
|
||||
throughput := func(p *peer) float64 {
|
||||
p.lock.RLock()
|
||||
defer p.lock.RUnlock()
|
||||
return p.headerThroughput
|
||||
}
|
||||
return ps.idlePeers(62, 64, idle, throughput)
|
||||
}
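
Each of the XxxIdlePeers helpers differs only in which atomic idle flag it checks and which throughput field it orders by, so both are passed as closures into a shared idlePeers selector. A hedged sketch of the same pattern on simplified data (peerInfo and selectIdle are illustrative names; sort.Slice from newer Go is used for brevity):

package main

import (
	"fmt"
	"sort"
	"sync/atomic"
)

type peerInfo struct {
	id               string
	headerIdle       int32
	headerThroughput float64
}

// selectIdle returns the peers that pass the idle check, sorted by the
// throughput metric in descending order (best peers first).
func selectIdle(peers []*peerInfo, idle func(*peerInfo) bool, metric func(*peerInfo) float64) []*peerInfo {
	var out []*peerInfo
	for _, p := range peers {
		if idle(p) {
			out = append(out, p)
		}
	}
	sort.Slice(out, func(i, j int) bool { return metric(out[i]) > metric(out[j]) })
	return out
}

func main() {
	peers := []*peerInfo{
		{id: "a", headerThroughput: 10},
		{id: "b", headerIdle: 1, headerThroughput: 50},
		{id: "c", headerThroughput: 30},
	}
	idle := func(p *peerInfo) bool { return atomic.LoadInt32(&p.headerIdle) == 0 }
	metric := func(p *peerInfo) float64 { return p.headerThroughput }
	for _, p := range selectIdle(peers, idle, metric) {
		fmt.Println(p.id, p.headerThroughput)
	}
}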
|
||||
|
||||
// BodyIdlePeers retrieves a flat list of all the currently body-idle peers within
|
||||
// the active peer set, ordered by their reputation.
|
||||
func (ps *peerSet) BodyIdlePeers() ([]*peer, int) {
|
||||
@ -508,3 +576,34 @@ func (ps *peerSet) idlePeers(minProtocol, maxProtocol int, idleCheck func(*peer)
|
||||
}
|
||||
return idle, total
|
||||
}
|
||||
|
||||
// medianRTT returns the median RTT of the peerset, considering only the tuning
|
||||
// peers if there are more peers available.
|
||||
func (ps *peerSet) medianRTT() time.Duration {
|
||||
// Gather all the currently measured round trip times
|
||||
ps.lock.RLock()
|
||||
defer ps.lock.RUnlock()
|
||||
|
||||
rtts := make([]float64, 0, len(ps.peers))
|
||||
for _, p := range ps.peers {
|
||||
p.lock.RLock()
|
||||
rtts = append(rtts, float64(p.rtt))
|
||||
p.lock.RUnlock()
|
||||
}
|
||||
sort.Float64s(rtts)
|
||||
|
||||
median := rttMaxEstimate
|
||||
if qosTuningPeers <= len(rtts) {
|
||||
median = time.Duration(rtts[qosTuningPeers/2]) // Median of our tuning peers
|
||||
} else if len(rtts) > 0 {
|
||||
median = time.Duration(rtts[len(rtts)/2]) // Median of our connected peers (even so, maintain some baseline QoS)
|
||||
}
|
||||
// Restrict the RTT into some QoS defaults, irrespective of the true RTT
|
||||
if median < rttMinEstimate {
|
||||
median = rttMinEstimate
|
||||
}
|
||||
if median > rttMaxEstimate {
|
||||
median = rttMaxEstimate
|
||||
}
|
||||
return median
|
||||
}
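
medianRTT sorts the measured round trips, picks the middle sample (preferring only the best tuning peers when enough are connected), and clamps the result into the [min, max] estimate band. A standalone sketch of the same select-and-clamp logic; the constants here are placeholders for illustration, not the downloader's values:

package main

import (
	"fmt"
	"sort"
	"time"
)

const (
	rttMin      = 2 * time.Second  // assumed lower bound, for illustration
	rttMax      = 20 * time.Second // assumed upper bound, for illustration
	tuningPeers = 5                // number of best peers used for tuning
)

// medianRTT picks a representative RTT from the samples and clamps it so a
// few extreme peers cannot drag the QoS targets too far in either direction.
func medianRTT(samples []time.Duration) time.Duration {
	rtts := make([]float64, len(samples))
	for i, d := range samples {
		rtts[i] = float64(d)
	}
	sort.Float64s(rtts)

	median := rttMax
	if tuningPeers <= len(rtts) {
		median = time.Duration(rtts[tuningPeers/2]) // median of the tuning peers
	} else if len(rtts) > 0 {
		median = time.Duration(rtts[len(rtts)/2]) // median of whatever is connected
	}
	if median < rttMin {
		median = rttMin
	}
	if median > rttMax {
		median = rttMax
	}
	return median
}

func main() {
	fmt.Println(medianRTT([]time.Duration{time.Second, 3 * time.Second, 40 * time.Second}))
}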
|
||||
|
@ -40,7 +40,7 @@ import (
|
||||
|
||||
var (
|
||||
blockCacheLimit = 8192 // Maximum number of blocks to cache before throttling the download
|
||||
maxInFlightStates = 4096 // Maximum number of state downloads to allow concurrently
|
||||
maxInFlightStates = 8192 // Maximum number of state downloads to allow concurrently
|
||||
)
|
||||
|
||||
var (
|
||||
@ -52,6 +52,7 @@ var (
|
||||
// fetchRequest is a currently running data retrieval operation.
|
||||
type fetchRequest struct {
|
||||
Peer *peer // Peer to which the request was sent
|
||||
From uint64 // [eth/62] Requested chain element index (used for skeleton fills only)
|
||||
Hashes map[common.Hash]int // [eth/61] Requested hashes with their insertion index (priority)
|
||||
Headers []*types.Header // [eth/62] Requested headers, sorted by request order
|
||||
Time time.Time // Time when the request was made
|
||||
@ -79,6 +80,18 @@ type queue struct {
|
||||
|
||||
headerHead common.Hash // [eth/62] Hash of the last queued header to verify order
|
||||
|
||||
// Headers are "special", they're downloaded in batches, supported by a skeleton chain
|
||||
headerTaskPool map[uint64]*types.Header // [eth/62] Pending header retrieval tasks, mapping starting indexes to skeleton headers
|
||||
headerTaskQueue *prque.Prque // [eth/62] Priority queue of the skeleton indexes to fetch the filling headers for
|
||||
headerPeerMiss map[string]map[uint64]struct{} // [eth/62] Set of per-peer header batches known to be unavailable
|
||||
headerPendPool map[string]*fetchRequest // [eth/62] Currently pending header retrieval operations
|
||||
headerDonePool map[uint64]struct{} // [eth/62] Set of the completed header fetches
|
||||
headerResults []*types.Header // [eth/62] Result cache accumulating the completed headers
|
||||
headerProced int // [eth/62] Number of headers already processed from the results
|
||||
headerOffset uint64 // [eth/62] Number of the first header in the result cache
|
||||
headerContCh chan bool // [eth/62] Channel to notify when header download finishes
|
||||
|
||||
// All data retrievals below are based on an already assembled header chain
|
||||
blockTaskPool map[common.Hash]*types.Header // [eth/62] Pending block (body) retrieval tasks, mapping hashes to headers
|
||||
blockTaskQueue *prque.Prque // [eth/62] Priority queue of the headers to fetch the blocks (bodies) for
|
||||
blockPendPool map[string]*fetchRequest // [eth/62] Currently pending block (body) retrieval operations
|
||||
@ -113,6 +126,8 @@ func newQueue(stateDb ethdb.Database) *queue {
|
||||
return &queue{
|
||||
hashPool: make(map[common.Hash]int),
|
||||
hashQueue: prque.New(),
|
||||
headerPendPool: make(map[string]*fetchRequest),
|
||||
headerContCh: make(chan bool),
|
||||
blockTaskPool: make(map[common.Hash]*types.Header),
|
||||
blockTaskQueue: prque.New(),
|
||||
blockPendPool: make(map[string]*fetchRequest),
|
||||
@ -149,6 +164,8 @@ func (q *queue) Reset() {
|
||||
|
||||
q.headerHead = common.Hash{}
|
||||
|
||||
q.headerPendPool = make(map[string]*fetchRequest)
|
||||
|
||||
q.blockTaskPool = make(map[common.Hash]*types.Header)
|
||||
q.blockTaskQueue.Reset()
|
||||
q.blockPendPool = make(map[string]*fetchRequest)
|
||||
@ -178,6 +195,14 @@ func (q *queue) Close() {
|
||||
q.active.Broadcast()
|
||||
}
|
||||
|
||||
// PendingHeaders retrieves the number of header requests pending for retrieval.
|
||||
func (q *queue) PendingHeaders() int {
|
||||
q.lock.Lock()
|
||||
defer q.lock.Unlock()
|
||||
|
||||
return q.headerTaskQueue.Size()
|
||||
}
|
||||
|
||||
// PendingBlocks retrieves the number of block (body) requests pending for retrieval.
|
||||
func (q *queue) PendingBlocks() int {
|
||||
q.lock.Lock()
|
||||
@ -205,6 +230,15 @@ func (q *queue) PendingNodeData() int {
|
||||
return 0
|
||||
}
|
||||
|
||||
// InFlightHeaders retrieves whether there are header fetch requests currently
|
||||
// in flight.
|
||||
func (q *queue) InFlightHeaders() bool {
|
||||
q.lock.Lock()
|
||||
defer q.lock.Unlock()
|
||||
|
||||
return len(q.headerPendPool) > 0
|
||||
}
|
||||
|
||||
// InFlightBlocks retrieves whether there are block fetch requests currently in
|
||||
// flight.
|
||||
func (q *queue) InFlightBlocks() bool {
|
||||
@ -317,6 +351,45 @@ func (q *queue) Schedule61(hashes []common.Hash, fifo bool) []common.Hash {
|
||||
return inserts
|
||||
}
|
||||
|
||||
// ScheduleSkeleton adds a batch of header retrieval tasks to the queue to fill
|
||||
// up an already retrieved header skeleton.
|
||||
func (q *queue) ScheduleSkeleton(from uint64, skeleton []*types.Header) {
|
||||
q.lock.Lock()
|
||||
defer q.lock.Unlock()
|
||||
|
||||
// No skeleton retrieval can be in progress, fail hard if so (huge implementation bug)
|
||||
if q.headerResults != nil {
|
||||
panic("skeleton assembly already in progress")
|
||||
}
|
||||
// Schedule all the header retrieval tasks for the skeleton assembly
|
||||
q.headerTaskPool = make(map[uint64]*types.Header)
|
||||
q.headerTaskQueue = prque.New()
|
||||
q.headerPeerMiss = make(map[string]map[uint64]struct{}) // Reset availability to correct invalid chains
|
||||
q.headerResults = make([]*types.Header, len(skeleton)*MaxHeaderFetch)
|
||||
q.headerProced = 0
|
||||
q.headerOffset = from
|
||||
q.headerContCh = make(chan bool, 1)
|
||||
|
||||
for i, header := range skeleton {
|
||||
index := from + uint64(i*MaxHeaderFetch)
|
||||
|
||||
q.headerTaskPool[index] = header
|
||||
q.headerTaskQueue.Push(index, -float32(index))
|
||||
}
|
||||
}
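
The skeleton schedule maps each skeleton header i to the fill batch starting at from + i*MaxHeaderFetch, and the result cache is pre-sized to len(skeleton)*MaxHeaderFetch so a delivered batch can later be copied to offset request.From - headerOffset. A tiny sketch of that index arithmetic (maxHeaderFetch is an assumed batch size, for illustration only):

package main

import "fmt"

const maxHeaderFetch = 192 // batch size assumed for illustration

func main() {
	from := uint64(1000) // first header number covered by the skeleton
	skeletonLen := 4     // number of skeleton headers received

	// Each skeleton entry anchors one fill batch of maxHeaderFetch headers.
	for i := 0; i < skeletonLen; i++ {
		start := from + uint64(i*maxHeaderFetch)
		offset := start - from // index of the batch inside the result cache
		fmt.Printf("batch %d: headers %d..%d copied to results[%d:]\n",
			i, start, start+maxHeaderFetch-1, offset)
	}
	fmt.Println("result cache size:", skeletonLen*maxHeaderFetch)
}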
|
||||
|
||||
// RetrieveHeaders retrieves the header chain assembled based on the scheduled
|
||||
// skeleton.
|
||||
func (q *queue) RetrieveHeaders() ([]*types.Header, int) {
|
||||
q.lock.Lock()
|
||||
defer q.lock.Unlock()
|
||||
|
||||
headers, proced := q.headerResults, q.headerProced
|
||||
q.headerResults, q.headerProced = nil, 0
|
||||
|
||||
return headers, proced
|
||||
}
|
||||
|
||||
// Schedule adds a set of headers for the download queue for scheduling, returning
|
||||
// the new headers encountered.
|
||||
func (q *queue) Schedule(headers []*types.Header, from uint64) []*types.Header {
|
||||
@ -437,6 +510,46 @@ func (q *queue) countProcessableItems() int {
|
||||
return len(q.resultCache)
|
||||
}
|
||||
|
||||
// ReserveHeaders reserves a set of headers for the given peer, skipping any
|
||||
// previously failed batches.
|
||||
func (q *queue) ReserveHeaders(p *peer, count int) *fetchRequest {
|
||||
q.lock.Lock()
|
||||
defer q.lock.Unlock()
|
||||
|
||||
// Short circuit if the peer's already downloading something (sanity check to
|
||||
// not corrupt state)
|
||||
if _, ok := q.headerPendPool[p.id]; ok {
|
||||
return nil
|
||||
}
|
||||
// Retrieve a batch of hashes, skipping previously failed ones
|
||||
send, skip := uint64(0), []uint64{}
|
||||
for send == 0 && !q.headerTaskQueue.Empty() {
|
||||
from, _ := q.headerTaskQueue.Pop()
|
||||
if q.headerPeerMiss[p.id] != nil {
|
||||
if _, ok := q.headerPeerMiss[p.id][from.(uint64)]; ok {
|
||||
skip = append(skip, from.(uint64))
|
||||
continue
|
||||
}
|
||||
}
|
||||
send = from.(uint64)
|
||||
}
|
||||
// Merge all the skipped batches back
|
||||
for _, from := range skip {
|
||||
q.headerTaskQueue.Push(from, -float32(from))
|
||||
}
|
||||
// Assemble and return the block download request
|
||||
if send == 0 {
|
||||
return nil
|
||||
}
|
||||
request := &fetchRequest{
|
||||
Peer: p,
|
||||
From: send,
|
||||
Time: time.Now(),
|
||||
}
|
||||
q.headerPendPool[p.id] = request
|
||||
return request
|
||||
}
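
ReserveHeaders pops candidate batch starts from the priority queue, skips any the peer has already failed to serve, and pushes the skipped ones back before handing out the first usable start. A hedged standalone sketch of that pop-skip-requeue loop, using a plain slice in place of the prque priority queue and 0 as the "nothing assigned" sentinel:

package main

import "fmt"

// reserve hands out the first task the peer is not known to have failed,
// returning the remaining queue with the skipped tasks preserved.
func reserve(queue []uint64, missed map[uint64]bool) (uint64, []uint64) {
	var skipped []uint64
	var send uint64
	for send == 0 && len(queue) > 0 {
		head := queue[0]
		queue = queue[1:]
		if missed[head] {
			skipped = append(skipped, head) // peer already failed this batch
			continue
		}
		send = head
	}
	// Put the skipped batches back so other peers can still pick them up.
	return send, append(skipped, queue...)
}

func main() {
	queue := []uint64{1000, 1192, 1384}
	missed := map[uint64]bool{1000: true} // this peer failed batch 1000 before
	send, rest := reserve(queue, missed)
	fmt.Println("assigned:", send, "requeued:", rest)
}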
|
||||
|
||||
// ReserveBlocks reserves a set of block hashes for the given peer, skipping any
|
||||
// previously failed download.
|
||||
func (q *queue) ReserveBlocks(p *peer, count int) *fetchRequest {
|
||||
@ -635,6 +748,11 @@ func (q *queue) reserveHeaders(p *peer, count int, taskPool map[common.Hash]*typ
|
||||
return request, progress, nil
|
||||
}
|
||||
|
||||
// CancelHeaders aborts a fetch request, returning all pending skeleton indexes to the queue.
|
||||
func (q *queue) CancelHeaders(request *fetchRequest) {
|
||||
q.cancel(request, q.headerTaskQueue, q.headerPendPool)
|
||||
}
|
||||
|
||||
// CancelBlocks aborts a fetch request, returning all pending hashes to the queue.
|
||||
func (q *queue) CancelBlocks(request *fetchRequest) {
|
||||
q.cancel(request, q.hashQueue, q.blockPendPool)
|
||||
@ -663,6 +781,9 @@ func (q *queue) cancel(request *fetchRequest, taskQueue *prque.Prque, pendPool m
|
||||
q.lock.Lock()
|
||||
defer q.lock.Unlock()
|
||||
|
||||
if request.From > 0 {
|
||||
taskQueue.Push(request.From, -float32(request.From))
|
||||
}
|
||||
for hash, index := range request.Hashes {
|
||||
taskQueue.Push(hash, float32(index))
|
||||
}
|
||||
@ -702,6 +823,15 @@ func (q *queue) Revoke(peerId string) {
|
||||
}
|
||||
}
|
||||
|
||||
// ExpireHeaders checks for in flight requests that exceeded a timeout allowance,
|
||||
// canceling them and returning the responsible peers for penalisation.
|
||||
func (q *queue) ExpireHeaders(timeout time.Duration) map[string]int {
|
||||
q.lock.Lock()
|
||||
defer q.lock.Unlock()
|
||||
|
||||
return q.expire(timeout, q.headerPendPool, q.headerTaskQueue, headerTimeoutMeter)
|
||||
}
|
||||
|
||||
// ExpireBlocks checks for in flight requests that exceeded a timeout allowance,
|
||||
// canceling them and returning the responsible peers for penalisation.
|
||||
func (q *queue) ExpireBlocks(timeout time.Duration) map[string]int {
|
||||
@ -753,6 +883,9 @@ func (q *queue) expire(timeout time.Duration, pendPool map[string]*fetchRequest,
|
||||
timeoutMeter.Mark(1)
|
||||
|
||||
// Return any unsatisfied requests to the pool
|
||||
if request.From > 0 {
|
||||
taskQueue.Push(request.From, -float32(request.From))
|
||||
}
|
||||
for hash, index := range request.Hashes {
|
||||
taskQueue.Push(hash, float32(index))
|
||||
}
|
||||
@ -842,6 +975,94 @@ func (q *queue) DeliverBlocks(id string, blocks []*types.Block) (int, error) {
|
||||
}
|
||||
}
|
||||
|
||||
// DeliverHeaders injects a header retrieval response into the header results
|
||||
// cache. This method either accepts all headers it received, or none of them
|
||||
// if they do not map correctly to the skeleton.
|
||||
//
|
||||
// If the headers are accepted, the method makes an attempt to deliver the set
|
||||
// of ready headers to the processor to keep the pipeline full. However it will
|
||||
// not block to prevent stalling other pending deliveries.
|
||||
func (q *queue) DeliverHeaders(id string, headers []*types.Header, headerProcCh chan []*types.Header) (int, error) {
|
||||
q.lock.Lock()
|
||||
defer q.lock.Unlock()
|
||||
|
||||
// Short circuit if the data was never requested
|
||||
request := q.headerPendPool[id]
|
||||
if request == nil {
|
||||
return 0, errNoFetchesPending
|
||||
}
|
||||
headerReqTimer.UpdateSince(request.Time)
|
||||
delete(q.headerPendPool, id)
|
||||
|
||||
// Ensure headers can be mapped onto the skeleton chain
|
||||
target := q.headerTaskPool[request.From].Hash()
|
||||
|
||||
accepted := len(headers) == MaxHeaderFetch
|
||||
if accepted {
|
||||
if headers[0].Number.Uint64() != request.From {
|
||||
glog.V(logger.Detail).Infof("Peer %s: first header #%v [%x] broke chain ordering, expected %d", id, headers[0].Number, headers[0].Hash().Bytes()[:4], request.From)
|
||||
accepted = false
|
||||
} else if headers[len(headers)-1].Hash() != target {
|
||||
glog.V(logger.Detail).Infof("Peer %s: last header #%v [%x] broke skeleton structure, expected %x", id, headers[len(headers)-1].Number, headers[len(headers)-1].Hash().Bytes()[:4], target[:4])
|
||||
accepted = false
|
||||
}
|
||||
}
|
||||
if accepted {
|
||||
for i, header := range headers[1:] {
|
||||
hash := header.Hash()
|
||||
if want := request.From + 1 + uint64(i); header.Number.Uint64() != want {
|
||||
glog.V(logger.Warn).Infof("Peer %s: header #%v [%x] broke chain ordering, expected %d", id, header.Number, hash[:4], want)
|
||||
accepted = false
|
||||
break
|
||||
}
|
||||
if headers[i].Hash() != header.ParentHash {
|
||||
glog.V(logger.Warn).Infof("Peer %s: header #%v [%x] broke chain ancestry", id, header.Number, hash[:4])
|
||||
accepted = false
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
// If the batch of headers wasn't accepted, mark as unavailable
|
||||
if !accepted {
|
||||
glog.V(logger.Detail).Infof("Peer %s: skeleton filling from header #%d not accepted", id, request.From)
|
||||
|
||||
miss := q.headerPeerMiss[id]
|
||||
if miss == nil {
|
||||
q.headerPeerMiss[id] = make(map[uint64]struct{})
|
||||
miss = q.headerPeerMiss[id]
|
||||
}
|
||||
miss[request.From] = struct{}{}
|
||||
|
||||
q.headerTaskQueue.Push(request.From, -float32(request.From))
|
||||
return 0, errors.New("delivery not accepted")
|
||||
}
|
||||
// Clean up a successful fetch and try to deliver any sub-results
|
||||
copy(q.headerResults[request.From-q.headerOffset:], headers)
|
||||
delete(q.headerTaskPool, request.From)
|
||||
|
||||
ready := 0
|
||||
for q.headerProced+ready < len(q.headerResults) && q.headerResults[q.headerProced+ready] != nil {
|
||||
ready += MaxHeaderFetch
|
||||
}
|
||||
if ready > 0 {
|
||||
// Headers are ready for delivery, gather them and push forward (non blocking)
|
||||
process := make([]*types.Header, ready)
|
||||
copy(process, q.headerResults[q.headerProced:q.headerProced+ready])
|
||||
|
||||
select {
|
||||
case headerProcCh <- process:
|
||||
glog.V(logger.Detail).Infof("%s: pre-scheduled %d headers from #%v", id, len(process), process[0].Number)
|
||||
q.headerProced += len(process)
|
||||
default:
|
||||
}
|
||||
}
|
||||
// Check for termination and return
|
||||
if len(q.headerTaskPool) == 0 {
|
||||
q.headerContCh <- false
|
||||
}
|
||||
return len(headers), nil
|
||||
}
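
A delivered fill batch is only accepted if it has the full requested length, starts at the requested number, ends on the skeleton anchor hash, and every header's ParentHash matches the previous header's hash. A hedged sketch of that contiguity check on simplified values (the header type here is illustrative, not types.Header):

package main

import "fmt"

type header struct {
	Number     uint64
	Hash       string
	ParentHash string
}

// validBatch reports whether headers form a contiguous chain that starts at
// `from`, has exactly `want` entries and ends on the expected anchor hash.
func validBatch(headers []header, from uint64, want int, target string) bool {
	if len(headers) != want {
		return false
	}
	if headers[0].Number != from {
		return false
	}
	if headers[len(headers)-1].Hash != target {
		return false
	}
	for i := 1; i < len(headers); i++ {
		if headers[i].Number != from+uint64(i) || headers[i].ParentHash != headers[i-1].Hash {
			return false
		}
	}
	return true
}

func main() {
	batch := []header{
		{Number: 10, Hash: "a"},
		{Number: 11, Hash: "b", ParentHash: "a"},
		{Number: 12, Hash: "c", ParentHash: "b"},
	}
	fmt.Println(validBatch(batch, 10, 3, "c")) // true
}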
|
||||
|
||||
// DeliverBodies injects a block body retrieval response into the results queue.
|
||||
// The method returns the number of blocks bodies accepted from the delivery and
|
||||
// also wakes any threads waiting for data delivery.
|
||||
@ -1041,13 +1262,19 @@ func (q *queue) deliverNodeData(results []trie.SyncResult, callback func(error,
|
||||
|
||||
// Prepare configures the result cache to allow accepting and caching inbound
|
||||
// fetch results.
|
||||
func (q *queue) Prepare(offset uint64, mode SyncMode, pivot uint64) {
|
||||
func (q *queue) Prepare(offset uint64, mode SyncMode, pivot uint64, head *types.Header) {
|
||||
q.lock.Lock()
|
||||
defer q.lock.Unlock()
|
||||
|
||||
// Prepare the queue for sync results
|
||||
if q.resultOffset < offset {
|
||||
q.resultOffset = offset
|
||||
}
|
||||
q.fastSyncPivot = pivot
|
||||
q.mode = mode
|
||||
|
||||
// If long running fast sync, also start up a head state retrieval immediately
|
||||
if mode == FastSync && pivot > 0 {
|
||||
q.stateScheduler = state.NewStateSync(head.Root, q.stateDatabase)
|
||||
}
|
||||
}
|
||||
|
@ -233,6 +233,7 @@ func (s *PublicFilterAPI) newLogFilter(earliest, latest int64, addresses []commo
|
||||
return id, nil
|
||||
}
|
||||
|
||||
// Logs creates a subscription that fires for all new logs that match the given filter criteria.
|
||||
func (s *PublicFilterAPI) Logs(ctx context.Context, args NewFilterArgs) (rpc.Subscription, error) {
|
||||
notifier, supported := rpc.NotifierFromContext(ctx)
|
||||
if !supported {
|
||||
@ -291,12 +292,13 @@ type NewFilterArgs struct {
|
||||
Topics [][]common.Hash
|
||||
}
|
||||
|
||||
// UnmarshalJSON sets *args fields with given data.
|
||||
func (args *NewFilterArgs) UnmarshalJSON(data []byte) error {
|
||||
type input struct {
|
||||
From *rpc.BlockNumber `json:"fromBlock"`
|
||||
ToBlock *rpc.BlockNumber `json:"toBlock"`
|
||||
Addresses interface{} `json:"address"`
|
||||
Topics interface{} `json:"topics"`
|
||||
Topics []interface{} `json:"topics"`
|
||||
}
|
||||
|
||||
var raw input
|
||||
@ -321,7 +323,6 @@ func (args *NewFilterArgs) UnmarshalJSON(data []byte) error {
|
||||
if raw.Addresses != nil {
|
||||
// raw.Addresses can contain a single address or an array of addresses
|
||||
var addresses []common.Address
|
||||
|
||||
if strAddrs, ok := raw.Addresses.([]interface{}); ok {
|
||||
for i, addr := range strAddrs {
|
||||
if strAddr, ok := addr.(string); ok {
|
||||
@ -352,56 +353,53 @@ func (args *NewFilterArgs) UnmarshalJSON(data []byte) error {
|
||||
args.Addresses = addresses
|
||||
}
|
||||
|
||||
// helper function which parses a string to a topic hash
|
||||
topicConverter := func(raw string) (common.Hash, error) {
|
||||
if len(raw) == 0 {
|
||||
return common.Hash{}, nil
|
||||
}
|
||||
|
||||
if len(raw) >= 2 && raw[0] == '0' && (raw[1] == 'x' || raw[1] == 'X') {
|
||||
raw = raw[2:]
|
||||
}
|
||||
|
||||
if len(raw) != 2*common.HashLength {
|
||||
return common.Hash{}, errors.New("invalid topic(s)")
|
||||
}
|
||||
if decAddr, err := hex.DecodeString(raw); err == nil {
|
||||
return common.BytesToHash(decAddr), nil
|
||||
}
|
||||
|
||||
return common.Hash{}, errors.New("invalid topic given")
|
||||
return common.Hash{}, errors.New("invalid topic(s)")
|
||||
}
|
||||
|
||||
// topics is an array consisting of strings or arrays of strings
|
||||
if raw.Topics != nil {
|
||||
topics, ok := raw.Topics.([]interface{})
|
||||
if ok {
|
||||
parsedTopics := make([][]common.Hash, len(topics))
|
||||
for i, topic := range topics {
|
||||
if topic == nil {
|
||||
parsedTopics[i] = []common.Hash{common.StringToHash("")}
|
||||
} else if strTopic, ok := topic.(string); ok {
|
||||
if t, err := topicConverter(strTopic); err != nil {
|
||||
return fmt.Errorf("invalid topic on index %d", i)
|
||||
} else {
|
||||
parsedTopics[i] = []common.Hash{t}
|
||||
}
|
||||
} else if arrTopic, ok := topic.([]interface{}); ok {
|
||||
parsedTopics[i] = make([]common.Hash, len(arrTopic))
|
||||
for j := 0; j < len(parsedTopics[i]); i++ {
|
||||
if arrTopic[j] == nil {
|
||||
parsedTopics[i][j] = common.StringToHash("")
|
||||
} else if str, ok := arrTopic[j].(string); ok {
|
||||
if t, err := topicConverter(str); err != nil {
|
||||
return fmt.Errorf("invalid topic on index %d", i)
|
||||
} else {
|
||||
parsedTopics[i] = []common.Hash{t}
|
||||
}
|
||||
} else {
|
||||
return fmt.Errorf("topic[%d][%d] not a string", i, j)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
return fmt.Errorf("topic[%d] invalid", i)
|
||||
// topics is an array consisting of strings and/or arrays of strings.
|
||||
// JSON null values are converted to common.Hash{} and ignored by the filter manager.
|
||||
if len(raw.Topics) > 0 {
|
||||
args.Topics = make([][]common.Hash, len(raw.Topics))
|
||||
for i, t := range raw.Topics {
|
||||
if t == nil { // ignore topic when matching logs
|
||||
args.Topics[i] = []common.Hash{common.Hash{}}
|
||||
} else if topic, ok := t.(string); ok { // match specific topic
|
||||
top, err := topicConverter(topic)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
args.Topics[i] = []common.Hash{top}
|
||||
} else if topics, ok := t.([]interface{}); ok { // or case e.g. [null, "topic0", "topic1"]
|
||||
for _, rawTopic := range topics {
|
||||
if rawTopic == nil {
|
||||
args.Topics[i] = append(args.Topics[i], common.Hash{})
|
||||
} else if topic, ok := rawTopic.(string); ok {
|
||||
parsed, err := topicConverter(topic)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
args.Topics[i] = append(args.Topics[i], parsed)
|
||||
} else {
|
||||
return fmt.Errorf("invalid topic(s)")
|
||||
}
|
||||
}
|
||||
} else {
|
||||
return fmt.Errorf("invalid topic(s)")
|
||||
}
|
||||
args.Topics = parsedTopics
|
||||
}
|
||||
}
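
The parsed filter is a [][]common.Hash where the outer entries are ANDed by position and the inner entries are ORed; an empty hash acts as a wildcard for that position. A standalone sketch of matching a log's topics against such a filter, simplified to strings with "" standing in for the null topic (the names here are illustrative, not the filter package's API):

package main

import "fmt"

// matches reports whether logTopics satisfy the filter: every filter position
// must be matched by the log topic at the same index, where an empty list or
// an empty string entry acts as a wildcard.
func matches(logTopics []string, filter [][]string) bool {
	if len(filter) > len(logTopics) {
		return false
	}
	for i, alternatives := range filter {
		if len(alternatives) == 0 {
			continue // no constraint on this position
		}
		ok := false
		for _, want := range alternatives {
			if want == "" || want == logTopics[i] {
				ok = true
				break
			}
		}
		if !ok {
			return false
		}
	}
	return true
}

func main() {
	log := []string{"Transfer", "alice", "bob"}
	filter := [][]string{{"Transfer"}, {""}, {"bob", "carol"}} // topic0 AND any AND (bob OR carol)
	fmt.Println(matches(log, filter)) // true
}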
|
||||
|
||||
|
198
eth/filters/api_test.go
Normal file
@ -0,0 +1,198 @@
|
||||
// Copyright 2016 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package filters_test
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/eth/filters"
|
||||
"github.com/ethereum/go-ethereum/rpc"
|
||||
)
|
||||
|
||||
func TestUnmarshalJSONNewFilterArgs(t *testing.T) {
|
||||
var (
|
||||
fromBlock rpc.BlockNumber = 0x123435
|
||||
toBlock rpc.BlockNumber = 0xabcdef
|
||||
address0 = common.StringToAddress("70c87d191324e6712a591f304b4eedef6ad9bb9d")
|
||||
address1 = common.StringToAddress("9b2055d370f73ec7d8a03e965129118dc8f5bf83")
|
||||
topic0 = common.HexToHash("3ac225168df54212a25c1c01fd35bebfea408fdac2e31ddd6f80a4bbf9a5f1ca")
|
||||
topic1 = common.HexToHash("9084a792d2f8b16a62b882fd56f7860c07bf5fa91dd8a2ae7e809e5180fef0b3")
|
||||
topic2 = common.HexToHash("6ccae1c4af4152f460ff510e573399795dfab5dcf1fa60d1f33ac8fdc1e480ce")
|
||||
nullTopic = common.Hash{}
|
||||
)
|
||||
|
||||
// default values
|
||||
var test0 filters.NewFilterArgs
|
||||
if err := json.Unmarshal([]byte("{}"), &test0); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if test0.FromBlock != rpc.LatestBlockNumber {
|
||||
t.Fatalf("expected %d, got %d", rpc.LatestBlockNumber, test0.FromBlock)
|
||||
}
|
||||
if test0.ToBlock != rpc.LatestBlockNumber {
|
||||
t.Fatalf("expected %d, got %d", rpc.LatestBlockNumber, test0.ToBlock)
|
||||
}
|
||||
if len(test0.Addresses) != 0 {
|
||||
t.Fatalf("expected 0 addresses, got %d", len(test0.Addresses))
|
||||
}
|
||||
if len(test0.Topics) != 0 {
|
||||
t.Fatalf("expected 0 topics, got %d topics", len(test0.Topics))
|
||||
}
|
||||
|
||||
// from, to block number
|
||||
var test1 filters.NewFilterArgs
|
||||
vector := fmt.Sprintf(`{"fromBlock":"0x%x","toBlock":"0x%x"}`, fromBlock, toBlock)
|
||||
if err := json.Unmarshal([]byte(vector), &test1); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if test1.FromBlock != fromBlock {
|
||||
t.Fatalf("expected FromBlock %d, got %d", fromBlock, test1.FromBlock)
|
||||
}
|
||||
if test1.ToBlock != toBlock {
|
||||
t.Fatalf("expected ToBlock %d, got %d", toBlock, test1.ToBlock)
|
||||
}
|
||||
|
||||
// single address
|
||||
var test2 filters.NewFilterArgs
|
||||
vector = fmt.Sprintf(`{"address": "%s"}`, address0.Hex())
|
||||
if err := json.Unmarshal([]byte(vector), &test2); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if len(test2.Addresses) != 1 {
|
||||
t.Fatalf("expected 1 address, got %d address(es)", len(test2.Addresses))
|
||||
}
|
||||
if test2.Addresses[0] != address0 {
|
||||
t.Fatalf("expected address %x, got %x", address0, test2.Addresses[0])
|
||||
}
|
||||
|
||||
// multiple address
|
||||
var test3 filters.NewFilterArgs
|
||||
vector = fmt.Sprintf(`{"address": ["%s", "%s"]}`, address0.Hex(), address1.Hex())
|
||||
if err := json.Unmarshal([]byte(vector), &test3); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if len(test3.Addresses) != 2 {
|
||||
t.Fatalf("expected 2 addresses, got %d address(es)", len(test3.Addresses))
|
||||
}
|
||||
if test3.Addresses[0] != address0 {
|
||||
t.Fatalf("expected address %x, got %x", address0, test3.Addresses[0])
|
||||
}
|
||||
if test3.Addresses[1] != address1 {
|
||||
t.Fatalf("expected address %x, got %x", address1, test3.Addresses[1])
|
||||
}
|
||||
|
||||
// single topic
|
||||
var test4 filters.NewFilterArgs
|
||||
vector = fmt.Sprintf(`{"topics": ["%s"]}`, topic0.Hex())
|
||||
if err := json.Unmarshal([]byte(vector), &test4); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if len(test4.Topics) != 1 {
|
||||
t.Fatalf("expected 1 topic, got %d", len(test4.Topics))
|
||||
}
|
||||
if len(test4.Topics[0]) != 1 {
|
||||
t.Fatalf("expected len(topics[0]) to be 1, got %d", len(test4.Topics[0]))
|
||||
}
|
||||
if test4.Topics[0][0] != topic0 {
|
||||
t.Fatalf("got %x, expected %x", test4.Topics[0][0], topic0)
|
||||
}
|
||||
|
||||
// test multiple "AND" topics
|
||||
var test5 filters.NewFilterArgs
|
||||
vector = fmt.Sprintf(`{"topics": ["%s", "%s"]}`, topic0.Hex(), topic1.Hex())
|
||||
if err := json.Unmarshal([]byte(vector), &test5); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if len(test5.Topics) != 2 {
|
||||
t.Fatalf("expected 2 topics, got %d", len(test5.Topics))
|
||||
}
|
||||
if len(test5.Topics[0]) != 1 {
|
||||
t.Fatalf("expected 1 topic, got %d", len(test5.Topics[0]))
|
||||
}
|
||||
if test5.Topics[0][0] != topic0 {
|
||||
t.Fatalf("got %x, expected %x", test5.Topics[0][0], topic0)
|
||||
}
|
||||
if len(test5.Topics[1]) != 1 {
|
||||
t.Fatalf("expected 1 topic, got %d", len(test5.Topics[1]))
|
||||
}
|
||||
if test5.Topics[1][0] != topic1 {
|
||||
t.Fatalf("got %x, expected %x", test5.Topics[1][0], topic1)
|
||||
}
|
||||
|
||||
// test optional topic
|
||||
var test6 filters.NewFilterArgs
|
||||
vector = fmt.Sprintf(`{"topics": ["%s", null, "%s"]}`, topic0.Hex(), topic2.Hex())
|
||||
if err := json.Unmarshal([]byte(vector), &test6); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if len(test6.Topics) != 3 {
|
||||
t.Fatalf("expected 3 topics, got %d", len(test6.Topics))
|
||||
}
|
||||
if len(test6.Topics[0]) != 1 {
|
||||
t.Fatalf("expected 1 topic, got %d", len(test6.Topics[0]))
|
||||
}
|
||||
if test6.Topics[0][0] != topic0 {
|
||||
t.Fatalf("got %x, expected %x", test6.Topics[0][0], topic0)
|
||||
}
|
||||
if len(test6.Topics[1]) != 1 {
|
||||
t.Fatalf("expected 1 topic, got %d", len(test6.Topics[1]))
|
||||
}
|
||||
if test6.Topics[1][0] != nullTopic {
|
||||
t.Fatalf("got %x, expected empty hash", test6.Topics[1][0])
|
||||
}
|
||||
if len(test6.Topics[2]) != 1 {
|
||||
t.Fatalf("expected 1 topic, got %d", len(test6.Topics[2]))
|
||||
}
|
||||
if test6.Topics[2][0] != topic2 {
|
||||
t.Fatalf("got %x, expected %x", test6.Topics[2][0], topic2)
|
||||
}
|
||||
|
||||
// test OR topics
|
||||
var test7 filters.NewFilterArgs
|
||||
vector = fmt.Sprintf(`{"topics": [["%s", "%s"], null, ["%s", null]]}`, topic0.Hex(), topic1.Hex(), topic2.Hex())
|
||||
if err := json.Unmarshal([]byte(vector), &test7); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if len(test7.Topics) != 3 {
|
||||
t.Fatalf("expected 3 topics, got %d topics", len(test7.Topics))
|
||||
}
|
||||
if len(test7.Topics[0]) != 2 {
|
||||
t.Fatalf("expected 2 topics, got %d topics", len(test7.Topics[0]))
|
||||
}
|
||||
if test7.Topics[0][0] != topic0 || test7.Topics[0][1] != topic1 {
|
||||
t.Fatalf("invalid topics expected [%x,%x], got [%x,%x]",
|
||||
topic0, topic1, test7.Topics[0][0], test7.Topics[0][1],
|
||||
)
|
||||
}
|
||||
if len(test7.Topics[1]) != 1 {
|
||||
t.Fatalf("expected 1 topic, got %d topics", len(test7.Topics[1]))
|
||||
}
|
||||
if test7.Topics[1][0] != nullTopic {
|
||||
t.Fatalf("expected empty hash, got %x", test7.Topics[1][0])
|
||||
}
|
||||
if len(test7.Topics[2]) != 2 {
|
||||
t.Fatalf("expected 2 topics, got %d topics", len(test7.Topics[2]))
|
||||
}
|
||||
if test7.Topics[2][0] != topic2 || test7.Topics[2][1] != nullTopic {
|
||||
t.Fatalf("invalid topics expected [%x,%x], got [%x,%x]",
|
||||
topic2, nullTopic, test7.Topics[2][0], test7.Topics[2][1],
|
||||
)
|
||||
}
|
||||
}
|
@ -22,6 +22,7 @@ import (
|
||||
"math"
|
||||
"math/big"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
@ -58,7 +59,9 @@ type blockFetcherFn func([]common.Hash) error
|
||||
type ProtocolManager struct {
|
||||
networkId int
|
||||
|
||||
fastSync bool
|
||||
fastSync uint32 // Flag whether fast sync is enabled (gets disabled if we already have blocks)
|
||||
synced uint32 // Flag whether we're considered synchronised (enables transaction processing)
|
||||
|
||||
txpool txPool
|
||||
blockchain *core.BlockChain
|
||||
chaindb ethdb.Database
|
||||
@ -82,20 +85,16 @@ type ProtocolManager struct {
|
||||
// wait group is used for graceful shutdowns during downloading
|
||||
// and processing
|
||||
wg sync.WaitGroup
|
||||
|
||||
badBlockReportingEnabled bool
|
||||
}
|
||||
|
||||
// NewProtocolManager returns a new Ethereum sub-protocol manager. The Ethereum sub-protocol manages peers capable
|
||||
// of communicating with the Ethereum network.
|
||||
func NewProtocolManager(config *core.ChainConfig, fastSync bool, networkId int, mux *event.TypeMux, txpool txPool, pow pow.PoW, blockchain *core.BlockChain, chaindb ethdb.Database) (*ProtocolManager, error) {
|
||||
// Figure out whether to allow fast sync or not
|
||||
if fastSync && blockchain.CurrentBlock().NumberU64() > 0 {
|
||||
glog.V(logger.Info).Infof("blockchain not empty, fast sync disabled")
|
||||
fastSync = false
|
||||
}
|
||||
// Create the protocol manager with the base fields
|
||||
manager := &ProtocolManager{
|
||||
networkId: networkId,
|
||||
fastSync: fastSync,
|
||||
eventMux: mux,
|
||||
txpool: txpool,
|
||||
blockchain: blockchain,
|
||||
@ -106,6 +105,14 @@ func NewProtocolManager(config *core.ChainConfig, fastSync bool, networkId int,
|
||||
txsyncCh: make(chan *txsync),
|
||||
quitSync: make(chan struct{}),
|
||||
}
|
||||
// Figure out whether to allow fast sync or not
|
||||
if fastSync && blockchain.CurrentBlock().NumberU64() > 0 {
|
||||
glog.V(logger.Info).Infof("blockchain not empty, fast sync disabled")
|
||||
fastSync = false
|
||||
}
|
||||
if fastSync {
|
||||
manager.fastSync = uint32(1)
|
||||
}
|
||||
// Initiate a sub-protocol for every implemented version we can handle
|
||||
manager.SubProtocols = make([]p2p.Protocol, 0, len(ProtocolVersions))
|
||||
for i, version := range ProtocolVersions {
|
||||
@ -147,7 +154,7 @@ func NewProtocolManager(config *core.ChainConfig, fastSync bool, networkId int,
|
||||
// Construct the different synchronisation mechanisms
|
||||
manager.downloader = downloader.New(chaindb, manager.eventMux, blockchain.HasHeader, blockchain.HasBlockAndState, blockchain.GetHeader,
|
||||
blockchain.GetBlock, blockchain.CurrentHeader, blockchain.CurrentBlock, blockchain.CurrentFastBlock, blockchain.FastSyncCommitHead,
|
||||
blockchain.GetTd, blockchain.InsertHeaderChain, blockchain.InsertChain, blockchain.InsertReceiptChain, blockchain.Rollback,
|
||||
blockchain.GetTd, blockchain.InsertHeaderChain, manager.insertChain, blockchain.InsertReceiptChain, blockchain.Rollback,
|
||||
manager.removePeer)
|
||||
|
||||
validator := func(block *types.Block, parent *types.Block) error {
|
||||
@ -156,11 +163,28 @@ func NewProtocolManager(config *core.ChainConfig, fastSync bool, networkId int,
|
||||
heighter := func() uint64 {
|
||||
return blockchain.CurrentBlock().NumberU64()
|
||||
}
|
||||
manager.fetcher = fetcher.New(blockchain.GetBlock, validator, manager.BroadcastBlock, heighter, blockchain.InsertChain, manager.removePeer)
|
||||
inserter := func(blocks types.Blocks) (int, error) {
|
||||
atomic.StoreUint32(&manager.synced, 1) // Mark initial sync done on any fetcher import
|
||||
return manager.insertChain(blocks)
|
||||
}
|
||||
manager.fetcher = fetcher.New(blockchain.GetBlock, validator, manager.BroadcastBlock, heighter, inserter, manager.removePeer)
|
||||
|
||||
if blockchain.Genesis().Hash().Hex() == defaultGenesisHash && networkId == 1 {
|
||||
glog.V(logger.Debug).Infoln("Bad Block Reporting is enabled")
|
||||
manager.badBlockReportingEnabled = true
|
||||
}
|
||||
|
||||
return manager, nil
|
||||
}
|
||||
|
||||
func (pm *ProtocolManager) insertChain(blocks types.Blocks) (i int, err error) {
|
||||
i, err = pm.blockchain.InsertChain(blocks)
|
||||
if pm.badBlockReportingEnabled && core.IsValidationErr(err) && i < len(blocks) {
|
||||
go sendBadBlockReport(blocks[i], err)
|
||||
}
|
||||
return i, err
|
||||
}
|
||||
|
||||
func (pm *ProtocolManager) removePeer(id string) {
|
||||
// Short circuit if the peer was already removed
|
||||
peer := pm.peers.Peer(id)
|
||||
@ -375,6 +399,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
|
||||
// Update the receive timestamp of each block
|
||||
for _, block := range blocks {
|
||||
block.ReceivedAt = msg.ReceivedAt
|
||||
block.ReceivedFrom = p
|
||||
}
|
||||
// Filter out any explicitly requested blocks, deliver the rest to the downloader
|
||||
if blocks := pm.fetcher.FilterBlocks(blocks); len(blocks) > 0 {
|
||||
@ -661,6 +686,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
|
||||
return errResp(ErrDecode, "block validation %v: %v", msg, err)
|
||||
}
|
||||
request.Block.ReceivedAt = msg.ReceivedAt
|
||||
request.Block.ReceivedFrom = p
|
||||
|
||||
// Mark the peer as owning the block and schedule it for import
|
||||
p.MarkBlock(request.Block.Hash())
|
||||
@ -678,7 +704,11 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
|
||||
}
|
||||
|
||||
case msg.Code == TxMsg:
|
||||
// Transactions arrived, parse all of them and deliver to the pool
|
||||
// Transactions arrived, make sure we have a valid and fresh chain to handle them
|
||||
if atomic.LoadUint32(&pm.synced) == 0 {
|
||||
break
|
||||
}
|
||||
// Transactions can be processed, parse all of them and deliver to the pool
|
||||
var txs []*types.Transaction
|
||||
if err := msg.Decode(&txs); err != nil {
|
||||
return errResp(ErrDecode, "msg %v: %v", msg, err)
|
||||
|
@ -97,6 +97,7 @@ func TestRecvTransactions63(t *testing.T) { testRecvTransactions(t, 63) }
|
||||
func testRecvTransactions(t *testing.T, protocol int) {
|
||||
txAdded := make(chan []*types.Transaction)
|
||||
pm := newTestProtocolManagerMust(t, false, 0, nil, txAdded)
|
||||
pm.synced = 1 // mark synced to accept transactions
|
||||
p, _ := newTestPeer("peer", protocol, pm, true)
|
||||
defer pm.Stop()
|
||||
defer p.close()
|
||||
|
@ -18,6 +18,7 @@ package eth
|
||||
|
||||
import (
|
||||
"math/rand"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
@ -167,18 +168,20 @@ func (pm *ProtocolManager) synchronise(peer *peer) {
|
||||
}
|
||||
// Otherwise try to sync with the downloader
|
||||
mode := downloader.FullSync
|
||||
if pm.fastSync {
|
||||
if atomic.LoadUint32(&pm.fastSync) == 1 {
|
||||
mode = downloader.FastSync
|
||||
}
|
||||
if err := pm.downloader.Synchronise(peer.id, peer.Head(), peer.Td(), mode); err != nil {
|
||||
return
|
||||
}
|
||||
atomic.StoreUint32(&pm.synced, 1) // Mark initial sync done
|
||||
|
||||
// If fast sync was enabled, and we synced up, disable it
|
||||
if pm.fastSync {
|
||||
if atomic.LoadUint32(&pm.fastSync) == 1 {
|
||||
// Disable fast sync if we indeed have something in our chain
|
||||
if pm.blockchain.CurrentBlock().NumberU64() > 0 {
|
||||
glog.V(logger.Info).Infof("fast sync complete, auto disabling")
|
||||
pm.fastSync = false
|
||||
atomic.StoreUint32(&pm.fastSync, 0)
|
||||
}
|
||||
}
|
||||
}
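
Replacing the fastSync bool with a uint32 lets the sync loop and the message handlers read and clear the flag concurrently without taking a lock. A minimal sketch of that atomic-flag pattern; the names mirror the diff but the type is illustrative:

package main

import (
	"fmt"
	"sync/atomic"
)

type manager struct {
	fastSync uint32 // 1 while fast sync is allowed, 0 once disabled
	synced   uint32 // 1 once the initial sync completed
}

func (m *manager) synchronise(haveBlocks bool) {
	atomic.StoreUint32(&m.synced, 1) // mark initial sync done
	if atomic.LoadUint32(&m.fastSync) == 1 && haveBlocks {
		atomic.StoreUint32(&m.fastSync, 0) // fast sync finished, fall back to full sync
	}
}

func main() {
	m := &manager{fastSync: 1}
	m.synchronise(true)
	fmt.Println("fastSync:", atomic.LoadUint32(&m.fastSync), "synced:", atomic.LoadUint32(&m.synced))
}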
|
||||
|
@ -17,6 +17,7 @@
|
||||
package eth
|
||||
|
||||
import (
|
||||
"sync/atomic"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
@ -29,12 +30,12 @@ import (
|
||||
func TestFastSyncDisabling(t *testing.T) {
|
||||
// Create a pristine protocol manager, check that fast sync is left enabled
|
||||
pmEmpty := newTestProtocolManagerMust(t, true, 0, nil, nil)
|
||||
if !pmEmpty.fastSync {
|
||||
if atomic.LoadUint32(&pmEmpty.fastSync) == 0 {
|
||||
t.Fatalf("fast sync disabled on pristine blockchain")
|
||||
}
|
||||
// Create a full protocol manager, check that fast sync gets disabled
|
||||
pmFull := newTestProtocolManagerMust(t, true, 1024, nil, nil)
|
||||
if pmFull.fastSync {
|
||||
if atomic.LoadUint32(&pmFull.fastSync) == 1 {
|
||||
t.Fatalf("fast sync not disabled on non-empty blockchain")
|
||||
}
|
||||
// Sync up the two peers
|
||||
@ -47,7 +48,7 @@ func TestFastSyncDisabling(t *testing.T) {
|
||||
pmEmpty.synchronise(pmEmpty.peers.BestPeer())
|
||||
|
||||
// Check that fast sync was disabled
|
||||
if pmEmpty.fastSync {
|
||||
if atomic.LoadUint32(&pmEmpty.fastSync) == 1 {
|
||||
t.Fatalf("fast sync not disabled after successful synchronisation")
|
||||
}
|
||||
}
|
||||
|
@ -66,6 +66,9 @@ func (mux *TypeMux) Subscribe(types ...interface{}) Subscription {
|
||||
mux.mutex.Lock()
|
||||
defer mux.mutex.Unlock()
|
||||
if mux.stopped {
|
||||
// set the status to closed so that calling Unsubscribe after this
|
||||
// call will short circuit
|
||||
sub.closed = true
|
||||
close(sub.postC)
|
||||
} else {
|
||||
if mux.subm == nil {
|
||||
|
@ -25,6 +25,14 @@ import (
|
||||
|
||||
type testEvent int
|
||||
|
||||
func TestSubCloseUnsub(t *testing.T) {
|
||||
// the point of this test is **not** to panic
|
||||
var mux TypeMux
|
||||
mux.Stop()
|
||||
sub := mux.Subscribe(int(0))
|
||||
sub.Unsubscribe()
|
||||
}
|
||||
|
||||
func TestSub(t *testing.T) {
|
||||
mux := new(TypeMux)
|
||||
defer mux.Stop()
|
||||
|
@ -17,12 +17,13 @@
|
||||
package jsre
|
||||
|
||||
import (
|
||||
"os"
|
||||
"reflect"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestCompleteKeywords(t *testing.T) {
|
||||
re := New("")
|
||||
re := New("", os.Stdout)
|
||||
re.Run(`
|
||||
function theClass() {
|
||||
this.foo = 3;
|
@ -21,9 +21,9 @@ import (
|
||||
crand "crypto/rand"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"math/rand"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
@ -40,9 +40,10 @@ It provides some helper functions to
|
||||
*/
|
||||
type JSRE struct {
|
||||
assetPath string
|
||||
output io.Writer
|
||||
evalQueue chan *evalReq
|
||||
stopEventLoop chan bool
|
||||
loopWg sync.WaitGroup
|
||||
closed chan struct{}
|
||||
}
|
||||
|
||||
// jsTimer is a single timer instance with a callback function
|
||||
@ -60,13 +61,14 @@ type evalReq struct {
|
||||
}
|
||||
|
||||
// runtime must be stopped with Stop() after use and cannot be used after stopping
|
||||
func New(assetPath string) *JSRE {
|
||||
func New(assetPath string, output io.Writer) *JSRE {
|
||||
re := &JSRE{
|
||||
assetPath: assetPath,
|
||||
output: output,
|
||||
closed: make(chan struct{}),
|
||||
evalQueue: make(chan *evalReq),
|
||||
stopEventLoop: make(chan bool),
|
||||
}
|
||||
re.loopWg.Add(1)
|
||||
go re.runEventLoop()
|
||||
re.Set("loadScript", re.loadScript)
|
||||
re.Set("inspect", prettyPrintJS)
|
||||
@ -95,6 +97,8 @@ func randomSource() *rand.Rand {
|
||||
// functions should be used if and only if running a routine that was already
|
||||
// called from JS through an RPC call.
|
||||
func (self *JSRE) runEventLoop() {
|
||||
defer close(self.closed)
|
||||
|
||||
vm := otto.New()
|
||||
r := randomSource()
|
||||
vm.SetRandomSource(r.Float64)
|
||||
@ -210,8 +214,6 @@ loop:
|
||||
timer.timer.Stop()
|
||||
delete(registry, timer)
|
||||
}
|
||||
|
||||
self.loopWg.Done()
|
||||
}
|
||||
|
||||
// Do executes the given function on the JS event loop.
|
||||
@ -224,8 +226,11 @@ func (self *JSRE) Do(fn func(*otto.Otto)) {
|
||||
|
||||
// stops the event loop before exit, optionally waits for all timers to expire
|
||||
func (self *JSRE) Stop(waitForCallbacks bool) {
|
||||
self.stopEventLoop <- waitForCallbacks
|
||||
self.loopWg.Wait()
|
||||
select {
|
||||
case <-self.closed:
|
||||
case self.stopEventLoop <- waitForCallbacks:
|
||||
<-self.closed
|
||||
}
|
||||
}
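
The reworked Stop either delivers the stop request or, if the event loop has already exited (signalled by the closed channel), returns immediately, which makes Stop safe to call more than once. A standalone sketch of the same select-on-closed-channel pattern (service, loop and Stop are illustrative names):

package main

import "fmt"

type service struct {
	stopCh chan bool     // carries the "wait for callbacks" flag to the loop
	closed chan struct{} // closed by the loop when it exits
}

func newService() *service {
	s := &service{stopCh: make(chan bool), closed: make(chan struct{})}
	go s.loop()
	return s
}

func (s *service) loop() {
	defer close(s.closed) // signal that the loop has fully exited
	<-s.stopCh            // wait for a stop request (a real loop does work here)
}

// Stop is safe to call multiple times: once the loop has exited, the
// closed channel is selectable and no send is attempted.
func (s *service) Stop(wait bool) {
	select {
	case <-s.closed:
	case s.stopCh <- wait:
		<-s.closed
	}
}

func main() {
	s := newService()
	s.Stop(false)
	s.Stop(false) // second call returns immediately instead of blocking
	fmt.Println("stopped")
}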
|
||||
|
||||
// Exec(file) loads and runs the contents of a file
|
||||
@ -292,19 +297,21 @@ func (self *JSRE) loadScript(call otto.FunctionCall) otto.Value {
|
||||
return otto.TrueValue()
|
||||
}
|
||||
|
||||
// EvalAndPrettyPrint evaluates code and pretty prints the result to
|
||||
// standard output.
|
||||
func (self *JSRE) EvalAndPrettyPrint(code string) (err error) {
|
||||
// Evaluate executes code and pretty prints the result to the specified output
|
||||
// stream.
|
||||
func (self *JSRE) Evaluate(code string, w io.Writer) error {
|
||||
var fail error
|
||||
|
||||
self.Do(func(vm *otto.Otto) {
|
||||
var val otto.Value
|
||||
val, err = vm.Run(code)
|
||||
val, err := vm.Run(code)
|
||||
if err != nil {
|
||||
return
|
||||
prettyError(vm, err, w)
|
||||
} else {
|
||||
prettyPrint(vm, val, w)
|
||||
}
|
||||
prettyPrint(vm, val)
|
||||
fmt.Println()
|
||||
fmt.Fprintln(w)
|
||||
})
|
||||
return err
|
||||
return fail
|
||||
}
|
||||
|
||||
// Compile compiles and then runs a piece of JS code.
|
@ -51,7 +51,7 @@ func newWithTestJS(t *testing.T, testjs string) (*JSRE, string) {
|
||||
t.Fatal("cannot create test.js:", err)
|
||||
}
|
||||
}
|
||||
return New(dir), dir
|
||||
return New(dir, os.Stdout), dir
|
||||
}
|
||||
|
||||
func TestExec(t *testing.T) {
|
||||
@ -102,7 +102,7 @@ func TestNatto(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestBind(t *testing.T) {
|
||||
jsre := New("")
|
||||
jsre := New("", os.Stdout)
|
||||
defer jsre.Stop(false)
|
||||
|
||||
jsre.Bind("no", &testNativeObjectBinding{})
|
@ -18,6 +18,7 @@ package jsre
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
@ -32,10 +33,11 @@ const (
|
||||
)
|
||||
|
||||
var (
|
||||
functionColor = color.New(color.FgMagenta)
|
||||
specialColor = color.New(color.Bold)
|
||||
numberColor = color.New(color.FgRed)
|
||||
stringColor = color.New(color.FgGreen)
|
||||
FunctionColor = color.New(color.FgMagenta).SprintfFunc()
|
||||
SpecialColor = color.New(color.Bold).SprintfFunc()
|
||||
NumberColor = color.New(color.FgRed).SprintfFunc()
|
||||
StringColor = color.New(color.FgGreen).SprintfFunc()
|
||||
ErrorColor = color.New(color.FgHiRed).SprintfFunc()
|
||||
)
|
||||
|
||||
// these fields are hidden when printing objects.
|
||||
@ -50,19 +52,39 @@ var boringKeys = map[string]bool{
|
||||
}
|
||||
|
||||
// prettyPrint writes value to the given writer.
|
||||
func prettyPrint(vm *otto.Otto, value otto.Value) {
|
||||
ppctx{vm}.printValue(value, 0, false)
|
||||
func prettyPrint(vm *otto.Otto, value otto.Value, w io.Writer) {
|
||||
ppctx{vm: vm, w: w}.printValue(value, 0, false)
|
||||
}
|
||||
|
||||
func prettyPrintJS(call otto.FunctionCall) otto.Value {
|
||||
// prettyError writes err to the given writer.
|
||||
func prettyError(vm *otto.Otto, err error, w io.Writer) {
|
||||
failure := err.Error()
|
||||
if ottoErr, ok := err.(*otto.Error); ok {
|
||||
failure = ottoErr.String()
|
||||
}
|
||||
fmt.Fprint(w, ErrorColor("%s", failure))
|
||||
}
|
||||
|
||||
// jsErrorString adds a backtrace to errors generated by otto.
|
||||
func jsErrorString(err error) string {
|
||||
if ottoErr, ok := err.(*otto.Error); ok {
|
||||
return ottoErr.String()
|
||||
}
|
||||
return err.Error()
|
||||
}
|
||||
|
||||
func prettyPrintJS(call otto.FunctionCall, w io.Writer) otto.Value {
|
||||
for _, v := range call.ArgumentList {
|
||||
prettyPrint(call.Otto, v)
|
||||
fmt.Println()
|
||||
prettyPrint(call.Otto, v, w)
|
||||
fmt.Fprintln(w)
|
||||
}
|
||||
return otto.UndefinedValue()
|
||||
}
|
||||
|
||||
type ppctx struct{ vm *otto.Otto }
|
||||
type ppctx struct {
|
||||
vm *otto.Otto
|
||||
w io.Writer
|
||||
}
|
||||
|
||||
func (ctx ppctx) indent(level int) string {
|
||||
return strings.Repeat(indentString, level)
|
||||
@ -73,22 +95,22 @@ func (ctx ppctx) printValue(v otto.Value, level int, inArray bool) {
|
||||
case v.IsObject():
|
||||
ctx.printObject(v.Object(), level, inArray)
|
||||
case v.IsNull():
|
||||
specialColor.Print("null")
|
||||
fmt.Fprint(ctx.w, SpecialColor("null"))
|
||||
case v.IsUndefined():
|
||||
specialColor.Print("undefined")
|
||||
fmt.Fprint(ctx.w, SpecialColor("undefined"))
|
||||
case v.IsString():
|
||||
s, _ := v.ToString()
|
||||
stringColor.Printf("%q", s)
|
||||
fmt.Fprint(ctx.w, StringColor("%q", s))
|
||||
case v.IsBoolean():
|
||||
b, _ := v.ToBoolean()
|
||||
specialColor.Printf("%t", b)
|
||||
fmt.Fprint(ctx.w, SpecialColor("%t", b))
|
||||
case v.IsNaN():
|
||||
numberColor.Printf("NaN")
|
||||
fmt.Fprint(ctx.w, NumberColor("NaN"))
|
||||
case v.IsNumber():
|
||||
s, _ := v.ToString()
|
||||
numberColor.Printf("%s", s)
|
||||
fmt.Fprint(ctx.w, NumberColor("%s", s))
|
||||
default:
|
||||
fmt.Printf("<unprintable>")
|
||||
fmt.Fprint(ctx.w, "<unprintable>")
|
||||
}
|
||||
}
|
||||
|
||||
@ -98,75 +120,75 @@ func (ctx ppctx) printObject(obj *otto.Object, level int, inArray bool) {
|
||||
lv, _ := obj.Get("length")
|
||||
len, _ := lv.ToInteger()
|
||||
if len == 0 {
|
||||
fmt.Printf("[]")
|
||||
fmt.Fprintf(ctx.w, "[]")
|
||||
return
|
||||
}
|
||||
if level > maxPrettyPrintLevel {
|
||||
fmt.Print("[...]")
|
||||
fmt.Fprint(ctx.w, "[...]")
|
||||
return
|
||||
}
|
||||
fmt.Print("[")
|
||||
fmt.Fprint(ctx.w, "[")
|
||||
for i := int64(0); i < len; i++ {
|
||||
el, err := obj.Get(strconv.FormatInt(i, 10))
|
||||
if err == nil {
|
||||
ctx.printValue(el, level+1, true)
|
||||
}
|
||||
if i < len-1 {
|
||||
fmt.Printf(", ")
|
||||
fmt.Fprintf(ctx.w, ", ")
|
||||
}
|
||||
}
|
||||
fmt.Print("]")
|
||||
fmt.Fprint(ctx.w, "]")
|
||||
|
||||
case "Object":
|
||||
// Print values from bignumber.js as regular numbers.
|
||||
if ctx.isBigNumber(obj) {
|
||||
numberColor.Print(toString(obj))
|
||||
fmt.Fprint(ctx.w, NumberColor("%s", toString(obj)))
|
||||
return
|
||||
}
|
||||
// Otherwise, print all fields indented, but stop if we're too deep.
|
||||
keys := ctx.fields(obj)
|
||||
if len(keys) == 0 {
|
||||
fmt.Print("{}")
|
||||
fmt.Fprint(ctx.w, "{}")
|
||||
return
|
||||
}
|
||||
if level > maxPrettyPrintLevel {
|
||||
fmt.Print("{...}")
|
||||
fmt.Fprint(ctx.w, "{...}")
|
||||
return
|
||||
}
|
||||
fmt.Println("{")
|
||||
fmt.Fprintln(ctx.w, "{")
|
||||
for i, k := range keys {
|
||||
v, _ := obj.Get(k)
|
||||
fmt.Printf("%s%s: ", ctx.indent(level+1), k)
|
||||
fmt.Fprintf(ctx.w, "%s%s: ", ctx.indent(level+1), k)
|
||||
ctx.printValue(v, level+1, false)
|
||||
if i < len(keys)-1 {
|
||||
fmt.Printf(",")
|
||||
fmt.Fprintf(ctx.w, ",")
|
||||
}
|
||||
fmt.Println()
|
||||
fmt.Fprintln(ctx.w)
|
||||
}
|
||||
if inArray {
|
||||
level--
|
||||
}
|
||||
fmt.Printf("%s}", ctx.indent(level))
|
||||
fmt.Fprintf(ctx.w, "%s}", ctx.indent(level))
|
||||
|
||||
case "Function":
|
||||
// Use toString() to display the argument list if possible.
|
||||
if robj, err := obj.Call("toString"); err != nil {
|
||||
functionColor.Print("function()")
|
||||
fmt.Fprint(ctx.w, FunctionColor("function()"))
|
||||
} else {
|
||||
desc := strings.Trim(strings.Split(robj.String(), "{")[0], " \t\n")
|
||||
desc = strings.Replace(desc, " (", "(", 1)
|
||||
functionColor.Print(desc)
|
||||
fmt.Fprint(ctx.w, FunctionColor("%s", desc))
|
||||
}
|
||||
|
||||
case "RegExp":
|
||||
stringColor.Print(toString(obj))
|
||||
fmt.Fprint(ctx.w, StringColor("%s", toString(obj)))
|
||||
|
||||
default:
|
||||
if v, _ := obj.Get("toString"); v.IsFunction() && level <= maxPrettyPrintLevel {
|
||||
s, _ := obj.Call("toString")
|
||||
fmt.Printf("<%s %s>", obj.Class(), s.String())
|
||||
fmt.Fprintf(ctx.w, "<%s %s>", obj.Class(), s.String())
|
||||
} else {
|
||||
fmt.Printf("<%s>", obj.Class())
|
||||
fmt.Fprintf(ctx.w, "<%s>", obj.Class())
|
||||
}
|
||||
}
|
||||
}
|
@ -18,44 +18,17 @@
|
||||
package web3ext
|
||||
|
||||
var Modules = map[string]string{
|
||||
"txpool": TxPool_JS,
|
||||
"admin": Admin_JS,
|
||||
"personal": Personal_JS,
|
||||
"debug": Debug_JS,
|
||||
"eth": Eth_JS,
|
||||
"miner": Miner_JS,
|
||||
"debug": Debug_JS,
|
||||
"net": Net_JS,
|
||||
"personal": Personal_JS,
|
||||
"rpc": RPC_JS,
|
||||
"shh": Shh_JS,
|
||||
"txpool": TxPool_JS,
|
||||
}
|
||||
|
||||
const TxPool_JS = `
|
||||
web3._extend({
|
||||
property: 'txpool',
|
||||
methods:
|
||||
[
|
||||
],
|
||||
properties:
|
||||
[
|
||||
new web3._extend.Property({
|
||||
name: 'content',
|
||||
getter: 'txpool_content'
|
||||
}),
|
||||
new web3._extend.Property({
|
||||
name: 'inspect',
|
||||
getter: 'txpool_inspect'
|
||||
}),
|
||||
new web3._extend.Property({
|
||||
name: 'status',
|
||||
getter: 'txpool_status',
|
||||
outputFormatter: function(status) {
|
||||
status.pending = web3._extend.utils.toDecimal(status.pending);
|
||||
status.queued = web3._extend.utils.toDecimal(status.queued);
|
||||
return status;
|
||||
}
|
||||
})
|
||||
]
|
||||
});
|
||||
`
|
||||
|
||||
const Admin_JS = `
|
||||
web3._extend({
|
||||
property: 'admin',
|
||||
@ -176,88 +149,6 @@ web3._extend({
|
||||
});
|
||||
`
|
||||
|
||||
const Personal_JS = `
|
||||
web3._extend({
|
||||
property: 'personal',
|
||||
methods:
|
||||
[
|
||||
new web3._extend.Method({
|
||||
name: 'importRawKey',
|
||||
call: 'personal_importRawKey',
|
||||
params: 2
|
||||
})
|
||||
]
|
||||
});
|
||||
`
|
||||
|
||||
const Eth_JS = `
|
||||
web3._extend({
|
||||
property: 'eth',
|
||||
methods:
|
||||
[
|
||||
new web3._extend.Method({
|
||||
name: 'sign',
|
||||
call: 'eth_sign',
|
||||
params: 2,
|
||||
inputFormatter: [web3._extend.formatters.inputAddressFormatter, null]
|
||||
}),
|
||||
new web3._extend.Method({
|
||||
name: 'resend',
|
||||
call: 'eth_resend',
|
||||
params: 3,
|
||||
inputFormatter: [web3._extend.formatters.inputTransactionFormatter, web3._extend.utils.fromDecimal, web3._extend.utils.fromDecimal]
|
||||
}),
|
||||
new web3._extend.Method({
|
||||
name: 'getNatSpec',
|
||||
call: 'eth_getNatSpec',
|
||||
params: 1,
|
||||
inputFormatter: [web3._extend.formatters.inputTransactionFormatter]
|
||||
}),
|
||||
new web3._extend.Method({
|
||||
name: 'signTransaction',
|
||||
call: 'eth_signTransaction',
|
||||
params: 1,
|
||||
inputFormatter: [web3._extend.formatters.inputTransactionFormatter]
|
||||
}),
|
||||
new web3._extend.Method({
|
||||
name: 'submitTransaction',
|
||||
call: 'eth_submitTransaction',
|
||||
params: 1,
|
||||
inputFormatter: [web3._extend.formatters.inputTransactionFormatter]
|
||||
})
|
||||
],
|
||||
properties:
|
||||
[
|
||||
new web3._extend.Property({
|
||||
name: 'pendingTransactions',
|
||||
getter: 'eth_pendingTransactions',
|
||||
outputFormatter: function(txs) {
|
||||
var formatted = [];
|
||||
for (var i = 0; i < txs.length; i++) {
|
||||
formatted.push(web3._extend.formatters.outputTransactionFormatter(txs[i]));
|
||||
formatted[i].blockHash = null;
|
||||
}
|
||||
return formatted;
|
||||
}
|
||||
})
|
||||
]
|
||||
});
|
||||
`
|
||||
|
||||
const Net_JS = `
|
||||
web3._extend({
|
||||
property: 'net',
|
||||
methods: [],
|
||||
properties:
|
||||
[
|
||||
new web3._extend.Property({
|
||||
name: 'version',
|
||||
getter: 'net_version'
|
||||
})
|
||||
]
|
||||
});
|
||||
`
|
||||
|
||||
const Debug_JS = `
|
||||
web3._extend({
|
||||
property: 'debug',
|
||||
@ -410,6 +301,60 @@ web3._extend({
|
||||
});
|
||||
`
|
||||
|
||||
const Eth_JS = `
|
||||
web3._extend({
|
||||
property: 'eth',
|
||||
methods:
|
||||
[
|
||||
new web3._extend.Method({
|
||||
name: 'sign',
|
||||
call: 'eth_sign',
|
||||
params: 2,
|
||||
inputFormatter: [web3._extend.formatters.inputAddressFormatter, null]
|
||||
}),
|
||||
new web3._extend.Method({
|
||||
name: 'resend',
|
||||
call: 'eth_resend',
|
||||
params: 3,
|
||||
inputFormatter: [web3._extend.formatters.inputTransactionFormatter, web3._extend.utils.fromDecimal, web3._extend.utils.fromDecimal]
|
||||
}),
|
||||
new web3._extend.Method({
|
||||
name: 'getNatSpec',
|
||||
call: 'eth_getNatSpec',
|
||||
params: 1,
|
||||
inputFormatter: [web3._extend.formatters.inputTransactionFormatter]
|
||||
}),
|
||||
new web3._extend.Method({
|
||||
name: 'signTransaction',
|
||||
call: 'eth_signTransaction',
|
||||
params: 1,
|
||||
inputFormatter: [web3._extend.formatters.inputTransactionFormatter]
|
||||
}),
|
||||
new web3._extend.Method({
|
||||
name: 'submitTransaction',
|
||||
call: 'eth_submitTransaction',
|
||||
params: 1,
|
||||
inputFormatter: [web3._extend.formatters.inputTransactionFormatter]
|
||||
})
|
||||
],
|
||||
properties:
|
||||
[
|
||||
new web3._extend.Property({
|
||||
name: 'pendingTransactions',
|
||||
getter: 'eth_pendingTransactions',
|
||||
outputFormatter: function(txs) {
|
||||
var formatted = [];
|
||||
for (var i = 0; i < txs.length; i++) {
|
||||
formatted.push(web3._extend.formatters.outputTransactionFormatter(txs[i]));
|
||||
formatted[i].blockHash = null;
|
||||
}
|
||||
return formatted;
|
||||
}
|
||||
})
|
||||
]
|
||||
});
|
||||
`
|
||||
|
||||
const Miner_JS = `
|
||||
web3._extend({
|
||||
property: 'miner',
|
||||
@ -440,7 +385,7 @@ web3._extend({
|
||||
name: 'setGasPrice',
|
||||
call: 'miner_setGasPrice',
|
||||
params: 1,
|
||||
inputFormatter: [web3._extend.utils.fromDecial]
|
||||
inputFormatter: [web3._extend.utils.fromDecimal]
|
||||
}),
|
||||
new web3._extend.Method({
|
||||
name: 'startAutoDAG',
|
||||
@ -463,6 +408,54 @@ web3._extend({
|
||||
});
|
||||
`
|
||||
|
||||
const Net_JS = `
|
||||
web3._extend({
|
||||
property: 'net',
|
||||
methods: [],
|
||||
properties:
|
||||
[
|
||||
new web3._extend.Property({
|
||||
name: 'version',
|
||||
getter: 'net_version'
|
||||
})
|
||||
]
|
||||
});
|
||||
`
|
||||
|
||||
const Personal_JS = `
|
||||
web3._extend({
|
||||
property: 'personal',
|
||||
methods:
|
||||
[
|
||||
new web3._extend.Method({
|
||||
name: 'importRawKey',
|
||||
call: 'personal_importRawKey',
|
||||
params: 2
|
||||
}),
|
||||
new web3._extend.Method({
|
||||
name: 'signAndSendTransaction',
|
||||
call: 'personal_signAndSendTransaction',
|
||||
params: 2,
|
||||
inputFormatter: [web3._extend.formatters.inputTransactionFormatter, null]
|
||||
})
|
||||
]
|
||||
});
|
||||
`
|
||||
|
||||
const RPC_JS = `
|
||||
web3._extend({
|
||||
property: 'rpc',
|
||||
methods: [],
|
||||
properties:
|
||||
[
|
||||
new web3._extend.Property({
|
||||
name: 'modules',
|
||||
getter: 'rpc_modules'
|
||||
})
|
||||
]
|
||||
});
|
||||
`
|
||||
|
||||
const Shh_JS = `
|
||||
web3._extend({
|
||||
property: 'shh',
|
||||
@ -471,7 +464,35 @@ web3._extend({
|
||||
[
|
||||
new web3._extend.Property({
|
||||
name: 'version',
|
||||
getter: 'shh_version'
|
||||
getter: 'shh_version',
|
||||
outputFormatter: web3._extend.utils.toDecimal
|
||||
})
|
||||
]
|
||||
});
|
||||
`
|
||||
|
||||
const TxPool_JS = `
|
||||
web3._extend({
|
||||
property: 'txpool',
|
||||
methods: [],
|
||||
properties:
|
||||
[
|
||||
new web3._extend.Property({
|
||||
name: 'content',
|
||||
getter: 'txpool_content'
|
||||
}),
|
||||
new web3._extend.Property({
|
||||
name: 'inspect',
|
||||
getter: 'txpool_inspect'
|
||||
}),
|
||||
new web3._extend.Property({
|
||||
name: 'status',
|
||||
getter: 'txpool_status',
|
||||
outputFormatter: function(status) {
|
||||
status.pending = web3._extend.utils.toDecimal(status.pending);
|
||||
status.queued = web3._extend.utils.toDecimal(status.queued);
|
||||
return status;
|
||||
}
|
||||
})
|
||||
]
|
||||
});
|
||||
|
@ -145,7 +145,6 @@ func newWorker(config *core.ChainConfig, coinbase common.Address, eth core.Backe
fullValidation: false,
}
worker.events = worker.mux.Subscribe(core.ChainHeadEvent{}, core.ChainSideEvent{}, core.TxPreEvent{})
worker.wg.Add(1)
go worker.update()

go worker.wait()
@ -188,8 +187,6 @@ func (self *worker) start() {
}

func (self *worker) stop() {
// Quit update.
self.events.Unsubscribe()
self.wg.Wait()

self.mu.Lock()
@ -224,7 +221,6 @@ func (self *worker) unregister(agent Agent) {
}

func (self *worker) update() {
defer self.wg.Done()
for event := range self.events.Chan() {
// A real event arrived, process interesting content
switch ev := event.Data.(type) {
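The worker change moves to a subscribe-once / drain-in-one-goroutine / unsubscribe-and-wait lifecycle. A minimal self-contained sketch of that lifecycle using a plain channel and a WaitGroup (a stand-in for the event mux, not the actual go-ethereum event API):

```go
package main

import (
	"fmt"
	"sync"
)

// worker mirrors the shape of the diff: one event loop, shut down by
// closing the subscription and waiting for the loop to finish.
type worker struct {
	events chan interface{}
	wg     sync.WaitGroup
}

func newWorker() *worker {
	w := &worker{events: make(chan interface{}, 16)}
	w.wg.Add(1)
	go w.update()
	return w
}

func (w *worker) update() {
	defer w.wg.Done()
	for ev := range w.events { // exits when stop() closes the channel
		fmt.Printf("got event: %v\n", ev)
	}
}

func (w *worker) stop() {
	close(w.events) // plays the role of events.Unsubscribe()
	w.wg.Wait()     // mirrors wg.Wait() in the diff
}

func main() {
	w := newWorker()
	w.events <- "ChainHeadEvent"
	w.stop()
}
```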
@ -49,7 +49,7 @@ type Node struct {
datadir string // Path to the currently used data directory
eventmux *event.TypeMux // Event multiplexer used between the services of a stack

serverConfig *p2p.Server // Configuration of the underlying P2P networking layer
serverConfig p2p.Config
server *p2p.Server // Currently running P2P networking layer

serviceFuncs []ServiceConstructor // Service constructors (in dependency order)
@ -97,7 +97,7 @@ func New(conf *Config) (*Node, error) {
}
return &Node{
datadir: conf.DataDir,
serverConfig: &p2p.Server{
serverConfig: p2p.Config{
PrivateKey: conf.NodeKey(),
Name: conf.Name,
Discovery: !conf.NoDiscovery,
@ -151,9 +151,7 @@ func (n *Node) Start() error {
return ErrNodeRunning
}
// Otherwise copy and specialize the P2P configuration
running := new(p2p.Server)
*running = *n.serverConfig

running := &p2p.Server{Config: n.serverConfig}
services := make(map[reflect.Type]Service)
for _, constructor := range n.serviceFuncs {
// Create a new context for the particular service
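The node now holds a plain configuration value and builds the running server only when Start is called. A small self-contained sketch of the pattern, using hypothetical stand-in types rather than the real p2p package:

```go
package main

import "fmt"

// Stand-ins for p2p.Config and p2p.Server, to show the shape of the change:
// configuration is a value held by the node, and the server is only built on Start.
type Config struct {
	Name     string
	MaxPeers int
}

type Server struct {
	Config // configuration is embedded, as in the new p2p.Server
}

type Node struct {
	serverConfig Config
	server       *Server
}

func (n *Node) Start() error {
	n.server = &Server{Config: n.serverConfig} // mirrors &p2p.Server{Config: n.serverConfig}
	fmt.Printf("started %q with MaxPeers=%d\n", n.server.Name, n.server.MaxPeers)
	return nil
}

func main() {
	n := &Node{serverConfig: Config{Name: "demo", MaxPeers: 10}}
	if err := n.Start(); err != nil {
		panic(err)
	}
}
```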
@ -478,7 +478,8 @@ func TestDialResolve(t *testing.T) {
}

// Now run the task, it should resolve the ID once.
srv := &Server{ntab: table, Dialer: &net.Dialer{Deadline: time.Now().Add(-5 * time.Minute)}}
config := Config{Dialer: &net.Dialer{Deadline: time.Now().Add(-5 * time.Minute)}}
srv := &Server{ntab: table, Config: config}
tasks[0].Do(srv)
if !reflect.DeepEqual(table.resolveCalls, []discover.NodeID{dest.ID}) {
t.Fatalf("wrong resolve calls, got %v", table.resolveCalls)
@ -54,12 +54,8 @@ var errServerStopped = errors.New("server stopped")

var srvjslog = logger.NewJsonLogger()

// Server manages all peer connections.
//
// The fields of Server are used as configuration parameters.
// You should set them before starting the Server. Fields may not be
// modified while the server is running.
type Server struct {
// Config holds Server options.
type Config struct {
// This field must be set to a valid secp256k1 private key.
PrivateKey *ecdsa.PrivateKey

@ -120,6 +116,12 @@ type Server struct {

// If NoDial is true, the server will not dial any peers.
NoDial bool
}

// Server manages all peer connections.
type Server struct {
// Config fields may not be modified while the server is running.
Config

// Hooks for testing. These are useful because we can inhibit
// the whole protocol stack.
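With the options split out, callers wrap their settings in a Config literal and embed it into the Server. A sketch against the post-split API as it appears in this diff (field set and endpoint choices here are illustrative, and may not match later versions of the package):

```go
package main

import (
	"log"

	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/p2p"
)

func main() {
	key, err := crypto.GenerateKey()
	if err != nil {
		log.Fatal(err)
	}
	// All options now live in p2p.Config, which the Server embeds.
	srv := &p2p.Server{
		Config: p2p.Config{
			PrivateKey: key,
			Name:       "demo-node", // hypothetical node name
			MaxPeers:   10,
			ListenAddr: "127.0.0.1:0",
			NoDial:     true,
		},
	}
	if err := srv.Start(); err != nil {
		log.Fatal(err)
	}
	defer srv.Stop()
}
```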
@ -67,11 +67,14 @@ func (c *testTransport) close(err error) {
}

func startTestServer(t *testing.T, id discover.NodeID, pf func(*Peer)) *Server {
config := Config{
Name: "test",
MaxPeers: 10,
ListenAddr: "127.0.0.1:0",
PrivateKey: newkey(),
}
server := &Server{
Name: "test",
MaxPeers: 10,
ListenAddr: "127.0.0.1:0",
PrivateKey: newkey(),
Config: config,
newPeerHook: pf,
newTransport: func(fd net.Conn) transport { return newTestTransport(id, fd) },
}
@ -200,10 +203,10 @@ func TestServerTaskScheduling(t *testing.T) {
// The Server in this test isn't actually running
// because we're only interested in what run does.
srv := &Server{
MaxPeers: 10,
quit: make(chan struct{}),
ntab: fakeTable{},
running: true,
Config: Config{MaxPeers: 10},
quit: make(chan struct{}),
ntab: fakeTable{},
running: true,
}
srv.loopWG.Add(1)
go func() {
@ -314,10 +317,12 @@ func (t *testTask) Do(srv *Server) {
func TestServerAtCap(t *testing.T) {
trustedID := randomID()
srv := &Server{
PrivateKey: newkey(),
MaxPeers: 10,
NoDial: true,
TrustedNodes: []*discover.Node{{ID: trustedID}},
Config: Config{
PrivateKey: newkey(),
MaxPeers: 10,
NoDial: true,
TrustedNodes: []*discover.Node{{ID: trustedID}},
},
}
if err := srv.Start(); err != nil {
t.Fatalf("could not start: %v", err)
@ -415,10 +420,12 @@ func TestServerSetupConn(t *testing.T) {

for i, test := range tests {
srv := &Server{
PrivateKey: srvkey,
MaxPeers: 10,
NoDial: true,
Protocols: []Protocol{discard},
Config: Config{
PrivateKey: srvkey,
MaxPeers: 10,
NoDial: true,
Protocols: []Protocol{discard},
},
newTransport: func(fd net.Conn) transport { return test.tt },
}
if !test.dontstart {
14
rpc/json.go
@ -30,7 +30,7 @@ import (
)

const (
jsonRPCVersion = "2.0"
JSONRPCVersion = "2.0"
serviceMethodSeparator = "_"
subscribeMethod = "eth_subscribe"
unsubscribeMethod = "eth_unsubscribe"
@ -302,31 +302,31 @@ func parsePositionalArguments(args json.RawMessage, callbackArgs []reflect.Type)
// CreateResponse will create a JSON-RPC success response with the given id and reply as result.
func (c *jsonCodec) CreateResponse(id interface{}, reply interface{}) interface{} {
if isHexNum(reflect.TypeOf(reply)) {
return &JSONSuccessResponse{Version: jsonRPCVersion, Id: id, Result: fmt.Sprintf(`%#x`, reply)}
return &JSONSuccessResponse{Version: JSONRPCVersion, Id: id, Result: fmt.Sprintf(`%#x`, reply)}
}
return &JSONSuccessResponse{Version: jsonRPCVersion, Id: id, Result: reply}
return &JSONSuccessResponse{Version: JSONRPCVersion, Id: id, Result: reply}
}

// CreateErrorResponse will create a JSON-RPC error response with the given id and error.
func (c *jsonCodec) CreateErrorResponse(id interface{}, err RPCError) interface{} {
return &JSONErrResponse{Version: jsonRPCVersion, Id: id, Error: JSONError{Code: err.Code(), Message: err.Error()}}
return &JSONErrResponse{Version: JSONRPCVersion, Id: id, Error: JSONError{Code: err.Code(), Message: err.Error()}}
}

// CreateErrorResponseWithInfo will create a JSON-RPC error response with the given id and error.
// info is optional and contains additional information about the error. When an empty string is passed it is ignored.
func (c *jsonCodec) CreateErrorResponseWithInfo(id interface{}, err RPCError, info interface{}) interface{} {
return &JSONErrResponse{Version: jsonRPCVersion, Id: id,
return &JSONErrResponse{Version: JSONRPCVersion, Id: id,
Error: JSONError{Code: err.Code(), Message: err.Error(), Data: info}}
}

// CreateNotification will create a JSON-RPC notification with the given subscription id and event as params.
func (c *jsonCodec) CreateNotification(subid string, event interface{}) interface{} {
if isHexNum(reflect.TypeOf(event)) {
return &jsonNotification{Version: jsonRPCVersion, Method: notificationMethod,
return &jsonNotification{Version: JSONRPCVersion, Method: notificationMethod,
Params: jsonSubscription{Subscription: subid, Result: fmt.Sprintf(`%#x`, event)}}
}

return &jsonNotification{Version: jsonRPCVersion, Method: notificationMethod,
return &jsonNotification{Version: JSONRPCVersion, Method: notificationMethod,
Params: jsonSubscription{Subscription: subid, Result: event}}
}
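The codec always stamps responses with the now-exported version constant. A minimal self-contained sketch of the envelope shape it produces (local struct names are illustrative, not the package's own):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// JSONRPCVersion mirrors the exported constant in rpc/json.go.
const JSONRPCVersion = "2.0"

// successResponse approximates the success envelope the codec emits.
type successResponse struct {
	Version string      `json:"jsonrpc"`
	Id      interface{} `json:"id"`
	Result  interface{} `json:"result"`
}

func main() {
	resp := successResponse{Version: JSONRPCVersion, Id: 1, Result: "0x1"}
	out, _ := json.Marshal(resp)
	fmt.Println(string(out)) // {"jsonrpc":"2.0","id":1,"result":"0x1"}
}
```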
@ -34,7 +34,8 @@ const (

notificationBufferSize = 10000 // max buffered notifications before codec is closed

DefaultIPCApis = "admin,eth,debug,miner,net,shh,txpool,personal,web3"
MetadataApi = "rpc"
DefaultIPCApis = "admin,debug,eth,miner,net,personal,shh,txpool,web3"
DefaultHTTPApis = "eth,net,web3"
)

@ -61,7 +62,7 @@ func NewServer() *Server {
// register a default service which will provide meta information about the RPC service such as the services and
// methods it offers.
rpcService := &RPCService{server}
server.RegisterName("rpc", rpcService)
server.RegisterName(MetadataApi, rpcService)

return server
}
@ -234,7 +234,7 @@ func SupportedModules(client Client) (map[string]string, error) {
req := JSONRequest{
Id: []byte("1"),
Version: "2.0",
Method: "rpc_modules",
Method: MetadataApi + "_modules",
}
if err := client.Send(req); err != nil {
return nil, err
}
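SupportedModules issues the `rpc_modules` call to ask a node which namespaces it exposes. A hedged Go sketch of the same query done directly over HTTP, assuming an endpoint at localhost:8545:

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	req := []byte(`{"jsonrpc":"2.0","id":1,"method":"rpc_modules","params":[]}`)
	resp, err := http.Post("http://localhost:8545", "application/json", bytes.NewReader(req)) // assumed endpoint
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	var reply struct {
		Result map[string]string `json:"result"` // module name -> version
	}
	if err := json.NewDecoder(resp.Body).Decode(&reply); err != nil {
		panic(err)
	}
	fmt.Println(reply.Result)
}
```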
@ -61,22 +61,22 @@ func wsHandshakeValidator(allowedOrigins []string) func(*websocket.Config, *http
allowAllOrigins = true
}
if origin != "" {
origins.Add(origin)
origins.Add(strings.ToLower(origin))
}
}

// allow localhost if no allowedOrigins are specified
// allow localhost if no allowedOrigins are specified.
if len(origins.List()) == 0 {
origins.Add("http://localhost")
if hostname, err := os.Hostname(); err == nil {
origins.Add("http://" + hostname)
origins.Add("http://" + strings.ToLower(hostname))
}
}

glog.V(logger.Debug).Infof("Allowed origin(s) for WS RPC interface %v\n", origins.List())

f := func(cfg *websocket.Config, req *http.Request) error {
origin := req.Header.Get("Origin")
origin := strings.ToLower(req.Header.Get("Origin"))
if allowAllOrigins || origins.Has(origin) {
return nil
}
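The handshake change makes origin matching case-insensitive by lower-casing both the configured origins and the request header. A tiny self-contained sketch of that idea (own helper, not the package's code):

```go
package main

import (
	"fmt"
	"strings"
)

// originAllowed lower-cases both sides so "http://Localhost" and
// "http://localhost" compare equal, as in the websocket handshake above.
func originAllowed(allowed []string, origin string) bool {
	set := make(map[string]bool, len(allowed))
	for _, o := range allowed {
		set[strings.ToLower(o)] = true
	}
	return set[strings.ToLower(origin)]
}

func main() {
	allowed := []string{"http://Localhost", "http://MyHost"}
	fmt.Println(originAllowed(allowed, "http://localhost"))    // true
	fmt.Println(originAllowed(allowed, "http://evil.example")) // false
}
```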
@ -25,8 +25,6 @@ import (
"net/http"
"os"
"path/filepath"

"github.com/ethereum/go-ethereum/core"
)

var (
@ -59,11 +57,6 @@ var (
VmSkipTests = []string{}
)

// Disable reporting bad blocks for the tests
func init() {
core.DisableBadBlockReporting = true
}

func readJson(reader io.Reader, value interface{}) error {
data, err := ioutil.ReadAll(reader)
if err != nil {
@ -62,7 +62,7 @@ func (self *Iterator) next(node interface{}, key []byte, isIterStart bool) []byt
switch node := node.(type) {
case fullNode:
if len(key) > 0 {
k := self.next(node[key[0]], key[1:], isIterStart)
k := self.next(node.Children[key[0]], key[1:], isIterStart)
if k != nil {
return append([]byte{key[0]}, k...)
}
@ -74,7 +74,7 @@ func (self *Iterator) next(node interface{}, key []byte, isIterStart bool) []byt
}

for i := r; i < 16; i++ {
k := self.key(node[i])
k := self.key(node.Children[i])
if k != nil {
return append([]byte{i}, k...)
}
@ -130,12 +130,12 @@ func (self *Iterator) key(node interface{}) []byte {
}
return append(k, self.key(node.Val)...)
case fullNode:
if node[16] != nil {
self.Value = node[16].(valueNode)
if node.Children[16] != nil {
self.Value = node.Children[16].(valueNode)
return []byte{16}
}
for i := 0; i < 16; i++ {
k := self.key(node[i])
k := self.key(node.Children[i])
if k != nil {
return append([]byte{byte(i)}, k...)
}
@ -175,7 +175,7 @@ type NodeIterator struct {

// NewNodeIterator creates an post-order trie iterator.
func NewNodeIterator(trie *Trie) *NodeIterator {
if bytes.Compare(trie.Root(), emptyRoot.Bytes()) == 0 {
if trie.Hash() == emptyState {
return new(NodeIterator)
}
return &NodeIterator{trie: trie}
@ -205,9 +205,11 @@ func (it *NodeIterator) step() error {
}
// Initialize the iterator if we've just started, or pop off the old node otherwise
if len(it.stack) == 0 {
it.stack = append(it.stack, &nodeIteratorState{node: it.trie.root, child: -1})
// Always start with a collapsed root
root := it.trie.Hash()
it.stack = append(it.stack, &nodeIteratorState{node: hashNode(root[:]), child: -1})
if it.stack[0].node == nil {
return fmt.Errorf("root node missing: %x", it.trie.Root())
return fmt.Errorf("root node missing: %x", it.trie.Hash())
}
} else {
it.stack = it.stack[:len(it.stack)-1]
@ -225,11 +227,11 @@ func (it *NodeIterator) step() error {
}
if node, ok := parent.node.(fullNode); ok {
// Full node, traverse all children, then the node itself
if parent.child >= len(node) {
if parent.child >= len(node.Children) {
break
}
for parent.child++; parent.child < len(node); parent.child++ {
if current := node[parent.child]; current != nil {
for parent.child++; parent.child < len(node.Children); parent.child++ {
if current := node.Children[parent.child]; current != nil {
it.stack = append(it.stack, &nodeIteratorState{node: current, parent: ancestor, child: -1})
break
}
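Throughout the iterator, child access changes from indexing the node itself to indexing its Children array. A toy self-contained sketch of that traversal shape (hypothetical types, not the trie package's own):

```go
package main

import "fmt"

// fullNode models the new layout: children live in an explicit array on a
// struct, so traversal code indexes node.Children[i] instead of node[i].
type fullNode struct {
	Children [17]interface{} // 16 nibble slots + 1 value slot
}

type valueNode string

// lookup walks one nibble per level, the same shape as Iterator.next above.
func lookup(n interface{}, key []byte) (valueNode, bool) {
	switch n := n.(type) {
	case *fullNode:
		if len(key) == 0 {
			v, ok := n.Children[16].(valueNode)
			return v, ok
		}
		return lookup(n.Children[key[0]], key[1:])
	case valueNode:
		return n, len(key) == 0
	default:
		return "", false
	}
}

func main() {
	leaf := &fullNode{}
	leaf.Children[16] = valueNode("hello")
	root := &fullNode{}
	root.Children[3] = leaf
	fmt.Println(lookup(root, []byte{3})) // hello true
}
```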
60
trie/node.go
@ -29,18 +29,36 @@ var indices = []string{"0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b

type node interface {
fstring(string) string
cache() (hashNode, bool)
}

type (
fullNode [17]node
fullNode struct {
Children [17]node // Actual trie node data to encode/decode (needs custom encoder)
hash hashNode // Cached hash of the node to prevent rehashing (may be nil)
dirty bool // Cached flag whether the node's new or already stored
}
shortNode struct {
Key []byte
Val node
Key []byte
Val node
hash hashNode // Cached hash of the node to prevent rehashing (may be nil)
dirty bool // Cached flag whether the node's new or already stored
}
hashNode []byte
valueNode []byte
)

// EncodeRLP encodes a full node into the consensus RLP format.
func (n fullNode) EncodeRLP(w io.Writer) error {
return rlp.Encode(w, n.Children)
}

// Cache accessors to retrieve precalculated values (avoid lengthy type switches).
func (n fullNode) cache() (hashNode, bool) { return n.hash, n.dirty }
func (n shortNode) cache() (hashNode, bool) { return n.hash, n.dirty }
func (n hashNode) cache() (hashNode, bool) { return nil, true }
func (n valueNode) cache() (hashNode, bool) { return nil, true }

// Pretty printing.
func (n fullNode) String() string { return n.fstring("") }
func (n shortNode) String() string { return n.fstring("") }
@ -49,7 +67,7 @@ func (n valueNode) String() string { return n.fstring("") }

func (n fullNode) fstring(ind string) string {
resp := fmt.Sprintf("[\n%s ", ind)
for i, node := range n {
for i, node := range n.Children {
if node == nil {
resp += fmt.Sprintf("%s: <nil> ", indices[i])
} else {
@ -68,16 +86,16 @@ func (n valueNode) fstring(ind string) string {
return fmt.Sprintf("%x ", []byte(n))
}

func mustDecodeNode(dbkey, buf []byte) node {
n, err := decodeNode(buf)
func mustDecodeNode(hash, buf []byte) node {
n, err := decodeNode(hash, buf)
if err != nil {
panic(fmt.Sprintf("node %x: %v", dbkey, err))
panic(fmt.Sprintf("node %x: %v", hash, err))
}
return n
}

// decodeNode parses the RLP encoding of a trie node.
func decodeNode(buf []byte) (node, error) {
func decodeNode(hash, buf []byte) (node, error) {
if len(buf) == 0 {
return nil, io.ErrUnexpectedEOF
}
@ -87,18 +105,18 @@ func decodeNode(buf []byte) (node, error) {
}
switch c, _ := rlp.CountValues(elems); c {
case 2:
n, err := decodeShort(elems)
n, err := decodeShort(hash, buf, elems)
return n, wrapError(err, "short")
case 17:
n, err := decodeFull(elems)
n, err := decodeFull(hash, buf, elems)
return n, wrapError(err, "full")
default:
return nil, fmt.Errorf("invalid number of list elements: %v", c)
}
}

func decodeShort(buf []byte) (node, error) {
kbuf, rest, err := rlp.SplitString(buf)
func decodeShort(hash, buf, elems []byte) (node, error) {
kbuf, rest, err := rlp.SplitString(elems)
if err != nil {
return nil, err
}
@ -109,30 +127,30 @@ func decodeShort(buf []byte) (node, error) {
if err != nil {
return nil, fmt.Errorf("invalid value node: %v", err)
}
return shortNode{key, valueNode(val)}, nil
return shortNode{key, valueNode(val), hash, false}, nil
}
r, _, err := decodeRef(rest)
if err != nil {
return nil, wrapError(err, "val")
}
return shortNode{key, r}, nil
return shortNode{key, r, hash, false}, nil
}

func decodeFull(buf []byte) (fullNode, error) {
var n fullNode
func decodeFull(hash, buf, elems []byte) (fullNode, error) {
n := fullNode{hash: hash}
for i := 0; i < 16; i++ {
cld, rest, err := decodeRef(buf)
cld, rest, err := decodeRef(elems)
if err != nil {
return n, wrapError(err, fmt.Sprintf("[%d]", i))
}
n[i], buf = cld, rest
n.Children[i], elems = cld, rest
}
val, _, err := rlp.SplitString(buf)
val, _, err := rlp.SplitString(elems)
if err != nil {
return n, err
}
if len(val) > 0 {
n[16] = valueNode(val)
n.Children[16] = valueNode(val)
}
return n, nil
}
@ -152,7 +170,7 @@ func decodeRef(buf []byte) (node, []byte, error) {
err := fmt.Errorf("oversized embedded node (size is %d bytes, want size < %d)", size, hashLen)
return nil, buf, err
}
n, err := decodeNode(buf)
n, err := decodeNode(nil, buf)
return n, rest, err
case kind == rlp.String && len(val) == 0:
// empty node
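Turning fullNode into a struct lets it carry cached hash and dirty flags while EncodeRLP keeps the consensus encoding identical by writing only Children. A minimal sketch of that trick using the rlp package; cachedNode is a hypothetical stand-in, not the trie's actual type:

```go
package main

import (
	"fmt"
	"io"

	"github.com/ethereum/go-ethereum/rlp"
)

// cachedNode carries cache fields alongside the payload, but EncodeRLP only
// writes Children, so the wire format matches a bare array of the same values.
type cachedNode struct {
	Children [3]string // stand-in payload
	hash     []byte    // cached hash, never encoded
	dirty    bool      // cached dirty flag, never encoded
}

func (n cachedNode) EncodeRLP(w io.Writer) error {
	return rlp.Encode(w, n.Children)
}

func main() {
	n := cachedNode{Children: [3]string{"a", "b", "c"}, dirty: true}
	enc, err := rlp.EncodeToBytes(n)
	if err != nil {
		panic(err)
	}
	plain, _ := rlp.EncodeToBytes([3]string{"a", "b", "c"})
	fmt.Printf("same encoding: %v\n", string(enc) == string(plain)) // true
}
```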
@ -54,7 +54,7 @@ func (t *Trie) Prove(key []byte) []rlp.RawValue {
}
nodes = append(nodes, n)
case fullNode:
tn = n[key[0]]
tn = n.Children[key[0]]
key = key[1:]
nodes = append(nodes, n)
case hashNode:
@ -77,7 +77,7 @@ func (t *Trie) Prove(key []byte) []rlp.RawValue {
for i, n := range nodes {
// Don't bother checking for errors here since hasher panics
// if encoding doesn't work and we're not writing to any database.
n, _ = t.hasher.replaceChildren(n, nil)
n, _, _ = t.hasher.hashChildren(n, nil)
hn, _ := t.hasher.store(n, nil, false)
if _, ok := hn.(hashNode); ok || i == 0 {
// If the node's database encoding is a hash (or is the
@ -103,7 +103,7 @@ func VerifyProof(rootHash common.Hash, key []byte, proof []rlp.RawValue) (value
if !bytes.Equal(sha.Sum(nil), wantHash) {
return nil, fmt.Errorf("bad proof node %d: hash mismatch", i)
}
n, err := decodeNode(buf)
n, err := decodeNode(wantHash, buf)
if err != nil {
return nil, fmt.Errorf("bad proof node %d: %v", i, err)
}
@ -139,7 +139,7 @@ func get(tn node, key []byte) ([]byte, node) {
tn = n.Val
key = key[len(n.Key):]
case fullNode:
tn = n[key[0]]
tn = n.Children[key[0]]
key = key[1:]
case hashNode:
return key, n
@ -162,11 +162,11 @@ func (t *SecureTrie) CommitTo(db DatabaseWriter) (root common.Hash, err error) {
}
t.secKeyCache = make(map[string][]byte)
}
n, err := t.hashRoot(db)
n, clean, err := t.hashRoot(db)
if err != nil {
return (common.Hash{}), err
}
t.root = n
t.root = clean
return common.BytesToHash(n.(hashNode)), nil
}
20
trie/sync.go
@ -17,6 +17,7 @@
package trie

import (
"errors"
"fmt"

"github.com/ethereum/go-ethereum/common"
@ -24,6 +25,10 @@ import (
"gopkg.in/karalabe/cookiejar.v2/collections/prque"
)

// ErrNotRequested is returned by the trie sync when it's requested to process a
// node it did not request.
var ErrNotRequested = errors.New("not requested")

// request represents a scheduled or already in-flight state retrieval request.
type request struct {
hash common.Hash // Hash of the node data content to retrieve
@ -75,8 +80,9 @@ func (s *TrieSync) AddSubTrie(root common.Hash, depth int, parent common.Hash, c
if root == emptyRoot {
return
}
blob, _ := s.database.Get(root.Bytes())
if local, err := decodeNode(blob); local != nil && err == nil {
key := root.Bytes()
blob, _ := s.database.Get(key)
if local, err := decodeNode(key, blob); local != nil && err == nil {
return
}
// Assemble the new sub-trie sync request
@ -143,7 +149,7 @@ func (s *TrieSync) Process(results []SyncResult) (int, error) {
// If the item was not requested, bail out
request := s.requests[item.Hash]
if request == nil {
return i, fmt.Errorf("not requested: %x", item.Hash)
return i, ErrNotRequested
}
// If the item is a raw entry request, commit directly
if request.object == nil {
@ -152,7 +158,7 @@ func (s *TrieSync) Process(results []SyncResult) (int, error) {
continue
}
// Decode the node data content and update the request
node, err := decodeNode(item.Data)
node, err := decodeNode(item.Hash[:], item.Data)
if err != nil {
return i, err
}
@ -213,9 +219,9 @@ func (s *TrieSync) children(req *request) ([]*request, error) {
}}
case fullNode:
for i := 0; i < 17; i++ {
if node[i] != nil {
if node.Children[i] != nil {
children = append(children, child{
node: &node[i],
node: &node.Children[i],
depth: req.depth + 1,
})
}
@ -238,7 +244,7 @@ func (s *TrieSync) children(req *request) ([]*request, error) {
if node, ok := (*child.node).(hashNode); ok {
// Try to resolve the node from the local database
blob, _ := s.database.Get(node)
if local, err := decodeNode(blob); local != nil && err == nil {
if local, err := decodeNode(node[:], blob); local != nil && err == nil {
*child.node = local
continue
}
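Replacing the formatted error with a sentinel lets callers test for the "delivered but never scheduled" case directly. A small self-contained sketch of the sentinel-error pattern; ErrNotRequested here is a local stand-in shaped like trie.ErrNotRequested:

```go
package main

import (
	"errors"
	"fmt"
)

var ErrNotRequested = errors.New("not requested")

// process rejects hashes that were never scheduled, mirroring TrieSync.Process.
func process(requested map[string]bool, hash string) error {
	if !requested[hash] {
		return ErrNotRequested
	}
	return nil
}

func main() {
	requested := map[string]bool{"abc": true}
	if err := process(requested, "def"); err == ErrNotRequested {
		fmt.Println("delivered a node that was never scheduled")
	}
}
```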
239
trie/trie.go
@ -129,7 +129,7 @@ func (t *Trie) TryGet(key []byte) ([]byte, error) {
tn = n.Val
pos += len(n.Key)
case fullNode:
tn = n[key[pos]]
tn = n.Children[key[pos]]
pos++
case nil:
return nil, nil
@ -169,13 +169,13 @@ func (t *Trie) Update(key, value []byte) {
func (t *Trie) TryUpdate(key, value []byte) error {
k := compactHexDecode(key)
if len(value) != 0 {
n, err := t.insert(t.root, nil, k, valueNode(value))
_, n, err := t.insert(t.root, nil, k, valueNode(value))
if err != nil {
return err
}
t.root = n
} else {
n, err := t.delete(t.root, nil, k)
_, n, err := t.delete(t.root, nil, k)
if err != nil {
return err
}
@ -184,9 +184,12 @@ func (t *Trie) TryUpdate(key, value []byte) error {
return nil
}

func (t *Trie) insert(n node, prefix, key []byte, value node) (node, error) {
func (t *Trie) insert(n node, prefix, key []byte, value node) (bool, node, error) {
if len(key) == 0 {
return value, nil
if v, ok := n.(valueNode); ok {
return !bytes.Equal(v, value.(valueNode)), value, nil
}
return true, value, nil
}
switch n := n.(type) {
case shortNode:
@ -194,53 +197,63 @@ func (t *Trie) insert(n node, prefix, key []byte, value node) (node, error) {
// If the whole key matches, keep this short node as is
// and only update the value.
if matchlen == len(n.Key) {
nn, err := t.insert(n.Val, append(prefix, key[:matchlen]...), key[matchlen:], value)
dirty, nn, err := t.insert(n.Val, append(prefix, key[:matchlen]...), key[matchlen:], value)
if err != nil {
return nil, err
return false, nil, err
}
return shortNode{n.Key, nn}, nil
if !dirty {
return false, n, nil
}
return true, shortNode{n.Key, nn, nil, true}, nil
}
// Otherwise branch out at the index where they differ.
var branch fullNode
branch := fullNode{dirty: true}
var err error
branch[n.Key[matchlen]], err = t.insert(nil, append(prefix, n.Key[:matchlen+1]...), n.Key[matchlen+1:], n.Val)
_, branch.Children[n.Key[matchlen]], err = t.insert(nil, append(prefix, n.Key[:matchlen+1]...), n.Key[matchlen+1:], n.Val)
if err != nil {
return nil, err
return false, nil, err
}
branch[key[matchlen]], err = t.insert(nil, append(prefix, key[:matchlen+1]...), key[matchlen+1:], value)
_, branch.Children[key[matchlen]], err = t.insert(nil, append(prefix, key[:matchlen+1]...), key[matchlen+1:], value)
if err != nil {
return nil, err
return false, nil, err
}
// Replace this shortNode with the branch if it occurs at index 0.
if matchlen == 0 {
return branch, nil
return true, branch, nil
}
// Otherwise, replace it with a short node leading up to the branch.
return shortNode{key[:matchlen], branch}, nil
return true, shortNode{key[:matchlen], branch, nil, true}, nil

case fullNode:
nn, err := t.insert(n[key[0]], append(prefix, key[0]), key[1:], value)
dirty, nn, err := t.insert(n.Children[key[0]], append(prefix, key[0]), key[1:], value)
if err != nil {
return nil, err
return false, nil, err
}
n[key[0]] = nn
return n, nil
if !dirty {
return false, n, nil
}
n.Children[key[0]], n.hash, n.dirty = nn, nil, true
return true, n, nil

case nil:
return shortNode{key, value}, nil
return true, shortNode{key, value, nil, true}, nil

case hashNode:
// We've hit a part of the trie that isn't loaded yet. Load
// the node and insert into it. This leaves all child nodes on
// the path to the value in the trie.
//
// TODO: track whether insertion changed the value and keep
// n as a hash node if it didn't.
rn, err := t.resolveHash(n, prefix, key)
if err != nil {
return nil, err
return false, nil, err
}
return t.insert(rn, prefix, key, value)
dirty, nn, err := t.insert(rn, prefix, key, value)
if err != nil {
return false, nil, err
}
if !dirty {
return false, rn, nil
}
return true, nn, nil

default:
panic(fmt.Sprintf("%T: invalid node: %v", n, n))
@ -258,7 +271,7 @@ func (t *Trie) Delete(key []byte) {
// If a node was not found in the database, a MissingNodeError is returned.
func (t *Trie) TryDelete(key []byte) error {
k := compactHexDecode(key)
n, err := t.delete(t.root, nil, k)
_, n, err := t.delete(t.root, nil, k)
if err != nil {
return err
}
@ -269,23 +282,26 @@ func (t *Trie) TryDelete(key []byte) error {
// delete returns the new root of the trie with key deleted.
// It reduces the trie to minimal form by simplifying
// nodes on the way up after deleting recursively.
func (t *Trie) delete(n node, prefix, key []byte) (node, error) {
func (t *Trie) delete(n node, prefix, key []byte) (bool, node, error) {
switch n := n.(type) {
case shortNode:
matchlen := prefixLen(key, n.Key)
if matchlen < len(n.Key) {
return n, nil // don't replace n on mismatch
return false, n, nil // don't replace n on mismatch
}
if matchlen == len(key) {
return nil, nil // remove n entirely for whole matches
return true, nil, nil // remove n entirely for whole matches
}
// The key is longer than n.Key. Remove the remaining suffix
// from the subtrie. Child can never be nil here since the
// subtrie must contain at least two other values with keys
// longer than n.Key.
child, err := t.delete(n.Val, append(prefix, key[:len(n.Key)]...), key[len(n.Key):])
dirty, child, err := t.delete(n.Val, append(prefix, key[:len(n.Key)]...), key[len(n.Key):])
if err != nil {
return nil, err
return false, nil, err
}
if !dirty {
return false, n, nil
}
switch child := child.(type) {
case shortNode:
@ -295,17 +311,21 @@ func (t *Trie) delete(n node, prefix, key []byte) (node, error) {
// always creates a new slice) instead of append to
// avoid modifying n.Key since it might be shared with
// other nodes.
return shortNode{concat(n.Key, child.Key...), child.Val}, nil
return true, shortNode{concat(n.Key, child.Key...), child.Val, nil, true}, nil
default:
return shortNode{n.Key, child}, nil
return true, shortNode{n.Key, child, nil, true}, nil
}

case fullNode:
nn, err := t.delete(n[key[0]], append(prefix, key[0]), key[1:])
dirty, nn, err := t.delete(n.Children[key[0]], append(prefix, key[0]), key[1:])
if err != nil {
return nil, err
return false, nil, err
}
n[key[0]] = nn
if !dirty {
return false, n, nil
}
n.Children[key[0]], n.hash, n.dirty = nn, nil, true

// Check how many non-nil entries are left after deleting and
// reduce the full node to a short node if only one entry is
// left. Since n must've contained at least two children
@ -316,7 +336,7 @@ func (t *Trie) delete(n node, prefix, key []byte) (node, error) {
// value that is left in n or -2 if n contains at least two
// values.
pos := -1
for i, cld := range n {
for i, cld := range n.Children {
if cld != nil {
if pos == -1 {
pos = i
@ -334,37 +354,41 @@ func (t *Trie) delete(n node, prefix, key []byte) (node, error) {
// shortNode{..., shortNode{...}}. Since the entry
// might not be loaded yet, resolve it just for this
// check.
cnode, err := t.resolve(n[pos], prefix, []byte{byte(pos)})
cnode, err := t.resolve(n.Children[pos], prefix, []byte{byte(pos)})
if err != nil {
return nil, err
return false, nil, err
}
if cnode, ok := cnode.(shortNode); ok {
k := append([]byte{byte(pos)}, cnode.Key...)
return shortNode{k, cnode.Val}, nil
return true, shortNode{k, cnode.Val, nil, true}, nil
}
}
// Otherwise, n is replaced by a one-nibble short node
// containing the child.
return shortNode{[]byte{byte(pos)}, n[pos]}, nil
return true, shortNode{[]byte{byte(pos)}, n.Children[pos], nil, true}, nil
}
// n still contains at least two values and cannot be reduced.
return n, nil
return true, n, nil

case nil:
return nil, nil
return false, nil, nil

case hashNode:
// We've hit a part of the trie that isn't loaded yet. Load
// the node and delete from it. This leaves all child nodes on
// the path to the value in the trie.
//
// TODO: track whether deletion actually hit a key and keep
// n as a hash node if it didn't.
rn, err := t.resolveHash(n, prefix, key)
if err != nil {
return nil, err
return false, nil, err
}
return t.delete(rn, prefix, key)
dirty, nn, err := t.delete(rn, prefix, key)
if err != nil {
return false, nil, err
}
if !dirty {
return false, rn, nil
}
return true, nn, nil

default:
panic(fmt.Sprintf("%T: invalid node: %v (%v)", n, n, key))
@ -413,8 +437,9 @@ func (t *Trie) Root() []byte { return t.Hash().Bytes() }
// Hash returns the root hash of the trie. It does not write to the
// database and can be used even if the trie doesn't have one.
func (t *Trie) Hash() common.Hash {
root, _ := t.hashRoot(nil)
return common.BytesToHash(root.(hashNode))
hash, cached, _ := t.hashRoot(nil)
t.root = cached
return common.BytesToHash(hash.(hashNode))
}

// Commit writes all nodes to the trie's database.
@ -437,17 +462,17 @@ func (t *Trie) Commit() (root common.Hash, err error) {
// the changes made to db are written back to the trie's attached
// database before using the trie.
func (t *Trie) CommitTo(db DatabaseWriter) (root common.Hash, err error) {
n, err := t.hashRoot(db)
hash, cached, err := t.hashRoot(db)
if err != nil {
return (common.Hash{}), err
}
t.root = n
return common.BytesToHash(n.(hashNode)), nil
t.root = cached
return common.BytesToHash(hash.(hashNode)), nil
}

func (t *Trie) hashRoot(db DatabaseWriter) (node, error) {
func (t *Trie) hashRoot(db DatabaseWriter) (node, node, error) {
if t.root == nil {
return hashNode(emptyRoot.Bytes()), nil
return hashNode(emptyRoot.Bytes()), nil, nil
}
if t.hasher == nil {
t.hasher = newHasher()
@ -464,51 +489,87 @@ func newHasher() *hasher {
return &hasher{tmp: new(bytes.Buffer), sha: sha3.NewKeccak256()}
}

func (h *hasher) hash(n node, db DatabaseWriter, force bool) (node, error) {
hashed, err := h.replaceChildren(n, db)
// hash collapses a node down into a hash node, also returning a copy of the
// original node initialzied with the computed hash to replace the original one.
func (h *hasher) hash(n node, db DatabaseWriter, force bool) (node, node, error) {
// If we're not storing the node, just hashing, use avaialble cached data
if hash, dirty := n.cache(); hash != nil && (db == nil || !dirty) {
return hash, n, nil
}
// Trie not processed yet or needs storage, walk the children
collapsed, cached, err := h.hashChildren(n, db)
if err != nil {
return hashNode{}, err
return hashNode{}, n, err
}
if n, err = h.store(hashed, db, force); err != nil {
return hashNode{}, err
hashed, err := h.store(collapsed, db, force)
if err != nil {
return hashNode{}, n, err
}
return n, nil
// Cache the hash and RLP blob of the ndoe for later reuse
if hash, ok := hashed.(hashNode); ok && !force {
switch cached := cached.(type) {
case shortNode:
cached.hash = hash
if db != nil {
cached.dirty = false
}
return hashed, cached, nil
case fullNode:
cached.hash = hash
if db != nil {
cached.dirty = false
}
return hashed, cached, nil
}
}
return hashed, cached, nil
}

// hashChildren replaces child nodes of n with their hashes if the encoded
// size of the child is larger than a hash.
func (h *hasher) replaceChildren(n node, db DatabaseWriter) (node, error) {
// hashChildren replaces the children of a node with their hashes if the encoded
// size of the child is larger than a hash, returning the collapsed node as well
// as a replacement for the original node with the child hashes cached in.
func (h *hasher) hashChildren(original node, db DatabaseWriter) (node, node, error) {
var err error
switch n := n.(type) {

switch n := original.(type) {
case shortNode:
// Hash the short node's child, caching the newly hashed subtree
cached := n
cached.Key = common.CopyBytes(cached.Key)

n.Key = compactEncode(n.Key)
if _, ok := n.Val.(valueNode); !ok {
if n.Val, err = h.hash(n.Val, db, false); err != nil {
return n, err
if n.Val, cached.Val, err = h.hash(n.Val, db, false); err != nil {
return n, original, err
}
}
if n.Val == nil {
// Ensure that nil children are encoded as empty strings.
n.Val = valueNode(nil)
n.Val = valueNode(nil) // Ensure that nil children are encoded as empty strings.
}
return n, nil
return n, cached, nil

case fullNode:
// Hash the full node's children, caching the newly hashed subtrees
cached := fullNode{dirty: n.dirty}

for i := 0; i < 16; i++ {
if n[i] != nil {
if n[i], err = h.hash(n[i], db, false); err != nil {
return n, err
if n.Children[i] != nil {
if n.Children[i], cached.Children[i], err = h.hash(n.Children[i], db, false); err != nil {
return n, original, err
}
} else {
// Ensure that nil children are encoded as empty strings.
n[i] = valueNode(nil)
n.Children[i] = valueNode(nil) // Ensure that nil children are encoded as empty strings.
}
}
if n[16] == nil {
n[16] = valueNode(nil)
cached.Children[16] = n.Children[16]
if n.Children[16] == nil {
n.Children[16] = valueNode(nil)
}
return n, nil
return n, cached, nil

default:
return n, nil
// Value and hash nodes don't have children so they're left as were
return n, original, nil
}
}

@ -517,21 +578,23 @@ func (h *hasher) store(n node, db DatabaseWriter, force bool) (node, error) {
if _, isHash := n.(hashNode); n == nil || isHash {
return n, nil
}
// Generate the RLP encoding of the node
h.tmp.Reset()
if err := rlp.Encode(h.tmp, n); err != nil {
panic("encode error: " + err.Error())
}
if h.tmp.Len() < 32 && !force {
// Nodes smaller than 32 bytes are stored inside their parent.
return n, nil
return n, nil // Nodes smaller than 32 bytes are stored inside their parent
}
// Larger nodes are replaced by their hash and stored in the database.
h.sha.Reset()
h.sha.Write(h.tmp.Bytes())
key := hashNode(h.sha.Sum(nil))
if db != nil {
err := db.Put(key, h.tmp.Bytes())
return key, err
hash, _ := n.cache()
if hash == nil {
h.sha.Reset()
h.sha.Write(h.tmp.Bytes())
hash = hashNode(h.sha.Sum(nil))
}
return key, nil
if db != nil {
return hash, db.Put(hash, h.tmp.Bytes())
}
return hash, nil
}
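The thread running through the trie changes is dirty tracking: insert and delete now report whether anything actually changed, so untouched subtrees keep their cached hashes instead of being rehashed. A toy self-contained illustration of that idea (hypothetical leaf type, not the trie's implementation):

```go
package main

import "fmt"

// leaf carries a cached hash that is only dropped when the value really changes.
type leaf struct {
	value string
	hash  string // cached; cleared only when the value actually changes
}

// set returns (dirty, updated leaf); the caller drops its own cache only when
// dirty is true, mirroring the (bool, node, error) returns in the diff.
func set(l leaf, value string) (bool, leaf) {
	if l.value == value {
		return false, l // no change: keep the cached hash
	}
	return true, leaf{value: value} // changed: hash must be recomputed
}

func main() {
	l := leaf{value: "v1", hash: "0xcafe"}
	dirty, l := set(l, "v1")
	fmt.Println(dirty, l.hash) // false 0xcafe  (cache preserved)
	dirty, l = set(l, "v2")
	fmt.Println(dirty, l.hash) // true          (cache cleared)
}
```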
@ -295,7 +295,7 @@ func TestReplication(t *testing.T) {
for _, val := range vals2 {
updateString(trie2, val.k, val.v)
}
if trie2.Hash() != exp {
if hash := trie2.Hash(); hash != exp {
t.Errorf("root failure. expected %x got %x", exp, hash)
}
}