Compare commits
30 Commits
Author | SHA1 | Date | |
---|---|---|---|
a269a713d6 | |||
27df30f30f | |||
311f5a0ed1 | |||
68ae6b52e9 | |||
1776c717bf | |||
0f6e3e873a | |||
7a7a5acc9f | |||
66d74dfb75 | |||
b950a2977c | |||
8ea3c88e44 | |||
94ad694a26 | |||
4c6953606e | |||
fc0638f9d8 | |||
4b11f207cb | |||
7e5c49cafa | |||
efcfa2209b | |||
aa18aad7ad | |||
594328c112 | |||
2e6b9c141b | |||
b38cea6654 | |||
dd083aa34e | |||
f213a9d8e8 | |||
6826b2040f | |||
2a3657d8d9 | |||
566af6ef92 | |||
290e851f57 | |||
8f96d66241 | |||
4b9de75623 | |||
d52a693f80 | |||
8241fa5227 |
@ -82,3 +82,5 @@ included in our repository in the `COPYING.LESSER` file.
|
||||
The go-ethereum binaries (i.e. all code inside of the `cmd` directory) is licensed under the
|
||||
[GNU General Public License v3.0](http://www.gnu.org/licenses/gpl-3.0.en.html), also included
|
||||
in our repository in the `COPYING` file.
|
||||
|
||||
|
||||
|
@ -238,8 +238,16 @@ func (abi ABI) Unpack(v interface{}, name string, output []byte) error {
|
||||
return fmt.Errorf("abi: unmarshalling empty output")
|
||||
}
|
||||
|
||||
value := reflect.ValueOf(v).Elem()
|
||||
typ := value.Type()
|
||||
// make sure the passed value is a pointer
|
||||
valueOf := reflect.ValueOf(v)
|
||||
if reflect.Ptr != valueOf.Kind() {
|
||||
return fmt.Errorf("abi: Unpack(non-pointer %T)", v)
|
||||
}
|
||||
|
||||
var (
|
||||
value = valueOf.Elem()
|
||||
typ = value.Type()
|
||||
)
|
||||
|
||||
if len(method.Outputs) > 1 {
|
||||
switch value.Kind() {
|
||||
@ -268,6 +276,25 @@ func (abi ABI) Unpack(v interface{}, name string, output []byte) error {
|
||||
return fmt.Errorf("abi: cannot marshal tuple in to slice %T (only []interface{} is supported)", v)
|
||||
}
|
||||
|
||||
// if the slice already contains values, set those instead of the interface slice itself.
|
||||
if value.Len() > 0 {
|
||||
if len(method.Outputs) > value.Len() {
|
||||
return fmt.Errorf("abi: cannot marshal in to slices of unequal size (require: %v, got: %v)", len(method.Outputs), value.Len())
|
||||
}
|
||||
|
||||
for i := 0; i < len(method.Outputs); i++ {
|
||||
marshalledValue, err := toGoType(i, method.Outputs[i], output)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
reflectValue := reflect.ValueOf(marshalledValue)
|
||||
if err := set(value.Index(i).Elem(), reflectValue, method.Outputs[i]); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// create a new slice and start appending the unmarshalled
|
||||
// values to the new interface slice.
|
||||
z := reflect.MakeSlice(typ, 0, len(method.Outputs))
|
||||
@ -296,34 +323,6 @@ func (abi ABI) Unpack(v interface{}, name string, output []byte) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// set attempts to assign src to dst by either setting, copying or otherwise.
|
||||
//
|
||||
// set is a bit more lenient when it comes to assignment and doesn't force an as
|
||||
// strict ruleset as bare `reflect` does.
|
||||
func set(dst, src reflect.Value, output Argument) error {
|
||||
dstType := dst.Type()
|
||||
srcType := src.Type()
|
||||
|
||||
switch {
|
||||
case dstType.AssignableTo(src.Type()):
|
||||
dst.Set(src)
|
||||
case dstType.Kind() == reflect.Array && srcType.Kind() == reflect.Slice:
|
||||
if !dstType.Elem().AssignableTo(r_byte) {
|
||||
return fmt.Errorf("abi: cannot unmarshal %v in to array of elem %v", src.Type(), dstType.Elem())
|
||||
}
|
||||
|
||||
if dst.Len() < output.Type.SliceSize {
|
||||
return fmt.Errorf("abi: cannot unmarshal src (len=%d) in to dst (len=%d)", output.Type.SliceSize, dst.Len())
|
||||
}
|
||||
reflect.Copy(dst, src)
|
||||
case dstType.Kind() == reflect.Interface:
|
||||
dst.Set(src)
|
||||
default:
|
||||
return fmt.Errorf("abi: cannot unmarshal %v in to %v", src.Type(), dst.Type())
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (abi *ABI) UnmarshalJSON(data []byte) error {
|
||||
var fields []struct {
|
||||
Type string
|
||||
|
@ -289,6 +289,37 @@ func TestSimpleMethodUnpack(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestUnpackSetInterfaceSlice(t *testing.T) {
|
||||
var (
|
||||
var1 = new(uint8)
|
||||
var2 = new(uint8)
|
||||
)
|
||||
out := []interface{}{var1, var2}
|
||||
abi, err := JSON(strings.NewReader(`[{"type":"function", "name":"ints", "outputs":[{"type":"uint8"}, {"type":"uint8"}]}]`))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
marshalledReturn := append(pad([]byte{1}, 32, true), pad([]byte{2}, 32, true)...)
|
||||
err = abi.Unpack(&out, "ints", marshalledReturn)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if *var1 != 1 {
|
||||
t.Error("expected var1 to be 1, got", *var1)
|
||||
}
|
||||
if *var2 != 2 {
|
||||
t.Error("expected var2 to be 2, got", *var2)
|
||||
}
|
||||
|
||||
out = []interface{}{var1}
|
||||
err = abi.Unpack(&out, "ints", marshalledReturn)
|
||||
|
||||
expErr := "abi: cannot marshal in to slices of unequal size (require: 2, got: 1)"
|
||||
if err == nil || err.Error() != expErr {
|
||||
t.Error("expected err:", expErr, "Got:", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestPack(t *testing.T) {
|
||||
for i, test := range []struct {
|
||||
typ string
|
||||
|
@ -27,15 +27,16 @@ import (
|
||||
// ErrNoCode is returned by call and transact operations for which the requested
|
||||
// recipient contract to operate on does not exist in the state db or does not
|
||||
// have any code associated with it (i.e. suicided).
|
||||
//
|
||||
// Please note, this error string is part of the RPC API and is expected by the
|
||||
// native contract bindings to signal this particular error. Do not change this
|
||||
// as it will break all dependent code!
|
||||
var ErrNoCode = errors.New("no contract code at given address")
|
||||
|
||||
// ContractCaller defines the methods needed to allow operating with contract on a read
|
||||
// only basis.
|
||||
type ContractCaller interface {
|
||||
// HasCode checks if the contract at the given address has any code associated
|
||||
// with it or not. This is needed to differentiate between contract internal
|
||||
// errors and the local chain being out of sync.
|
||||
HasCode(contract common.Address, pending bool) (bool, error)
|
||||
|
||||
// ContractCall executes an Ethereum contract call with the specified data as
|
||||
// the input. The pending flag requests execution against the pending block, not
|
||||
// the stable head of the chain.
|
||||
@ -55,6 +56,11 @@ type ContractTransactor interface {
|
||||
// execution of a transaction.
|
||||
SuggestGasPrice() (*big.Int, error)
|
||||
|
||||
// HasCode checks if the contract at the given address has any code associated
|
||||
// with it or not. This is needed to differentiate between contract internal
|
||||
// errors and the local chain being out of sync.
|
||||
HasCode(contract common.Address, pending bool) (bool, error)
|
||||
|
||||
// EstimateGasLimit tries to estimate the gas needed to execute a specific
|
||||
// transaction based on the current pending state of the backend blockchain.
|
||||
// There is no guarantee that this is the true gas limit requirement as other
|
||||
@ -68,7 +74,38 @@ type ContractTransactor interface {
|
||||
|
||||
// ContractBackend defines the methods needed to allow operating with contract
|
||||
// on a read-write basis.
|
||||
//
|
||||
// This interface is essentially the union of ContractCaller and ContractTransactor
|
||||
// but due to a bug in the Go compiler (https://github.com/golang/go/issues/6977),
|
||||
// we cannot simply list it as the two interfaces. The other solution is to add a
|
||||
// third interface containing the common methods, but that convolutes the user API
|
||||
// as it introduces yet another parameter to require for initialization.
|
||||
type ContractBackend interface {
|
||||
ContractCaller
|
||||
ContractTransactor
|
||||
// HasCode checks if the contract at the given address has any code associated
|
||||
// with it or not. This is needed to differentiate between contract internal
|
||||
// errors and the local chain being out of sync.
|
||||
HasCode(contract common.Address, pending bool) (bool, error)
|
||||
|
||||
// ContractCall executes an Ethereum contract call with the specified data as
|
||||
// the input. The pending flag requests execution against the pending block, not
|
||||
// the stable head of the chain.
|
||||
ContractCall(contract common.Address, data []byte, pending bool) ([]byte, error)
|
||||
|
||||
// PendingAccountNonce retrieves the current pending nonce associated with an
|
||||
// account.
|
||||
PendingAccountNonce(account common.Address) (uint64, error)
|
||||
|
||||
// SuggestGasPrice retrieves the currently suggested gas price to allow a timely
|
||||
// execution of a transaction.
|
||||
SuggestGasPrice() (*big.Int, error)
|
||||
|
||||
// EstimateGasLimit tries to estimate the gas needed to execute a specific
|
||||
// transaction based on the current pending state of the backend blockchain.
|
||||
// There is no guarantee that this is the true gas limit requirement as other
|
||||
// transactions may be added or removed by miners, but it should provide a basis
|
||||
// for setting a reasonable default.
|
||||
EstimateGasLimit(sender common.Address, contract *common.Address, value *big.Int, data []byte) (*big.Int, error)
|
||||
|
||||
// SendTransaction injects the transaction into the pending pool for execution.
|
||||
SendTransaction(tx *types.Transaction) error
|
||||
}
|
||||
|
@ -38,6 +38,7 @@ func (*nilBackend) ContractCall(common.Address, []byte, bool) ([]byte, error) {
|
||||
func (*nilBackend) EstimateGasLimit(common.Address, *common.Address, *big.Int, []byte) (*big.Int, error) {
|
||||
panic("not implemented")
|
||||
}
|
||||
func (*nilBackend) HasCode(common.Address, bool) (bool, error) { panic("not implemented") }
|
||||
func (*nilBackend) SuggestGasPrice() (*big.Int, error) { panic("not implemented") }
|
||||
func (*nilBackend) PendingAccountNonce(common.Address) (uint64, error) { panic("not implemented") }
|
||||
func (*nilBackend) SendTransaction(*types.Transaction) error { panic("not implemented") }
|
||||
|
@ -111,6 +111,26 @@ func (b *rpcBackend) request(method string, params []interface{}) (json.RawMessa
|
||||
return res.Result, nil
|
||||
}
|
||||
|
||||
// HasCode implements ContractVerifier.HasCode by retrieving any code associated
|
||||
// with the contract from the remote node, and checking its size.
|
||||
func (b *rpcBackend) HasCode(contract common.Address, pending bool) (bool, error) {
|
||||
// Execute the RPC code retrieval
|
||||
block := "latest"
|
||||
if pending {
|
||||
block = "pending"
|
||||
}
|
||||
res, err := b.request("eth_getCode", []interface{}{contract.Hex(), block})
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
var hex string
|
||||
if err := json.Unmarshal(res, &hex); err != nil {
|
||||
return false, err
|
||||
}
|
||||
// Convert the response back to a Go byte slice and return
|
||||
return len(common.FromHex(hex)) > 0, nil
|
||||
}
|
||||
|
||||
// ContractCall implements ContractCaller.ContractCall, delegating the execution of
|
||||
// a contract call to the remote node, returning the reply to for local processing.
|
||||
func (b *rpcBackend) ContractCall(contract common.Address, data []byte, pending bool) ([]byte, error) {
|
||||
|
@ -78,6 +78,16 @@ func (b *SimulatedBackend) Rollback() {
|
||||
b.pendingState, _ = state.New(b.pendingBlock.Root(), b.database)
|
||||
}
|
||||
|
||||
// HasCode implements ContractVerifier.HasCode, checking whether there is any
|
||||
// code associated with a certain account in the blockchain.
|
||||
func (b *SimulatedBackend) HasCode(contract common.Address, pending bool) (bool, error) {
|
||||
if pending {
|
||||
return len(b.pendingState.GetCode(contract)) > 0, nil
|
||||
}
|
||||
statedb, _ := b.blockchain.State()
|
||||
return len(statedb.GetCode(contract)) > 0, nil
|
||||
}
|
||||
|
||||
// ContractCall implements ContractCaller.ContractCall, executing the specified
|
||||
// contract with the given input data.
|
||||
func (b *SimulatedBackend) ContractCall(contract common.Address, data []byte, pending bool) ([]byte, error) {
|
||||
|
@ -20,6 +20,7 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"math/big"
|
||||
"sync/atomic"
|
||||
|
||||
"github.com/ethereum/go-ethereum/accounts/abi"
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
@ -56,6 +57,9 @@ type BoundContract struct {
|
||||
abi abi.ABI // Reflect based ABI to access the correct Ethereum methods
|
||||
caller ContractCaller // Read interface to interact with the blockchain
|
||||
transactor ContractTransactor // Write interface to interact with the blockchain
|
||||
|
||||
latestHasCode uint32 // Cached verification that the latest state contains code for this contract
|
||||
pendingHasCode uint32 // Cached verification that the pending state contains code for this contract
|
||||
}
|
||||
|
||||
// NewBoundContract creates a low level contract interface through which calls
|
||||
@ -96,6 +100,19 @@ func (c *BoundContract) Call(opts *CallOpts, result interface{}, method string,
|
||||
if opts == nil {
|
||||
opts = new(CallOpts)
|
||||
}
|
||||
// Make sure we have a contract to operate on, and bail out otherwise
|
||||
if (opts.Pending && atomic.LoadUint32(&c.pendingHasCode) == 0) || (!opts.Pending && atomic.LoadUint32(&c.latestHasCode) == 0) {
|
||||
if code, err := c.caller.HasCode(c.address, opts.Pending); err != nil {
|
||||
return err
|
||||
} else if !code {
|
||||
return ErrNoCode
|
||||
}
|
||||
if opts.Pending {
|
||||
atomic.StoreUint32(&c.pendingHasCode, 1)
|
||||
} else {
|
||||
atomic.StoreUint32(&c.latestHasCode, 1)
|
||||
}
|
||||
}
|
||||
// Pack the input, call and unpack the results
|
||||
input, err := c.abi.Pack(method, params...)
|
||||
if err != nil {
|
||||
@ -153,6 +170,16 @@ func (c *BoundContract) transact(opts *TransactOpts, contract *common.Address, i
|
||||
}
|
||||
gasLimit := opts.GasLimit
|
||||
if gasLimit == nil {
|
||||
// Gas estimation cannot succeed without code for method invocations
|
||||
if contract != nil && atomic.LoadUint32(&c.pendingHasCode) == 0 {
|
||||
if code, err := c.transactor.HasCode(c.address, true); err != nil {
|
||||
return nil, err
|
||||
} else if !code {
|
||||
return nil, ErrNoCode
|
||||
}
|
||||
atomic.StoreUint32(&c.pendingHasCode, 1)
|
||||
}
|
||||
// If the contract surely has code (or code is not needed), estimate the transaction
|
||||
gasLimit, err = c.transactor.EstimateGasLimit(opts.From, contract, value, input)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to exstimate gas needed: %v", err)
|
||||
|
@ -194,12 +194,44 @@ var bindTests = []struct {
|
||||
}
|
||||
`,
|
||||
},
|
||||
// Tests that plain values can be properly returned and deserialized
|
||||
{
|
||||
`Getter`,
|
||||
`
|
||||
contract Getter {
|
||||
function getter() constant returns (string, int, bytes32) {
|
||||
return ("Hi", 1, sha3(""));
|
||||
}
|
||||
}
|
||||
`,
|
||||
`606060405260dc8060106000396000f3606060405260e060020a6000350463993a04b78114601a575b005b600060605260c0604052600260809081527f486900000000000000000000000000000000000000000000000000000000000060a05260017fc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a47060e0829052610100819052606060c0908152600261012081905281906101409060a09080838184600060046012f1505081517fffff000000000000000000000000000000000000000000000000000000000000169091525050604051610160819003945092505050f3`,
|
||||
`[{"constant":true,"inputs":[],"name":"getter","outputs":[{"name":"","type":"string"},{"name":"","type":"int256"},{"name":"","type":"bytes32"}],"type":"function"}]`,
|
||||
`
|
||||
// Generate a new random account and a funded simulator
|
||||
key, _ := crypto.GenerateKey()
|
||||
auth := bind.NewKeyedTransactor(key)
|
||||
sim := backends.NewSimulatedBackend(core.GenesisAccount{Address: auth.From, Balance: big.NewInt(10000000000)})
|
||||
|
||||
// Deploy a tuple tester contract and execute a structured call on it
|
||||
_, _, getter, err := DeployGetter(auth, sim)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to deploy getter contract: %v", err)
|
||||
}
|
||||
sim.Commit()
|
||||
|
||||
if str, num, _, err := getter.Getter(nil); err != nil {
|
||||
t.Fatalf("Failed to call anonymous field retriever: %v", err)
|
||||
} else if str != "Hi" || num.Cmp(big.NewInt(1)) != 0 {
|
||||
t.Fatalf("Retrieved value mismatch: have %v/%v, want %v/%v", str, num, "Hi", 1)
|
||||
}
|
||||
`,
|
||||
},
|
||||
// Tests that tuples can be properly returned and deserialized
|
||||
{
|
||||
`Tupler`,
|
||||
`
|
||||
contract Tupler {
|
||||
function tuple() returns (string a, int b, bytes32 c) {
|
||||
function tuple() constant returns (string a, int b, bytes32 c) {
|
||||
return ("Hi", 1, sha3(""));
|
||||
}
|
||||
}
|
||||
@ -219,8 +251,10 @@ var bindTests = []struct {
|
||||
}
|
||||
sim.Commit()
|
||||
|
||||
if _, err := tupler.Tuple(nil); err != nil {
|
||||
if res, err := tupler.Tuple(nil); err != nil {
|
||||
t.Fatalf("Failed to call structure retriever: %v", err)
|
||||
} else if res.A != "Hi" || res.B.Cmp(big.NewInt(1)) != 0 {
|
||||
t.Fatalf("Retrieved value mismatch: have %v/%v, want %v/%v", res.A, res.B, "Hi", 1)
|
||||
}
|
||||
`,
|
||||
},
|
||||
|
@ -211,7 +211,7 @@ package {{.Package}}
|
||||
{{range $i, $_ := .Normalized.Outputs}}ret{{$i}} = new({{bindtype .Type}})
|
||||
{{end}}
|
||||
){{end}}
|
||||
out := {{if .Structured}}ret{{else}}{{if eq (len .Normalized.Outputs) 1}}ret0{{else}}[]interface{}{
|
||||
out := {{if .Structured}}ret{{else}}{{if eq (len .Normalized.Outputs) 1}}ret0{{else}}&[]interface{}{
|
||||
{{range $i, $_ := .Normalized.Outputs}}ret{{$i}},
|
||||
{{end}}
|
||||
}{{end}}{{end}}
|
||||
|
@ -16,7 +16,10 @@
|
||||
|
||||
package abi
|
||||
|
||||
import "reflect"
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
)
|
||||
|
||||
// indirect recursively dereferences the value until it either gets the value
|
||||
// or finds a big.Int
|
||||
@ -62,3 +65,33 @@ func mustArrayToByteSlice(value reflect.Value) reflect.Value {
|
||||
reflect.Copy(slice, value)
|
||||
return slice
|
||||
}
|
||||
|
||||
// set attempts to assign src to dst by either setting, copying or otherwise.
|
||||
//
|
||||
// set is a bit more lenient when it comes to assignment and doesn't force an as
|
||||
// strict ruleset as bare `reflect` does.
|
||||
func set(dst, src reflect.Value, output Argument) error {
|
||||
dstType := dst.Type()
|
||||
srcType := src.Type()
|
||||
|
||||
switch {
|
||||
case dstType.AssignableTo(src.Type()):
|
||||
dst.Set(src)
|
||||
case dstType.Kind() == reflect.Array && srcType.Kind() == reflect.Slice:
|
||||
if !dstType.Elem().AssignableTo(r_byte) {
|
||||
return fmt.Errorf("abi: cannot unmarshal %v in to array of elem %v", src.Type(), dstType.Elem())
|
||||
}
|
||||
|
||||
if dst.Len() < output.Type.SliceSize {
|
||||
return fmt.Errorf("abi: cannot unmarshal src (len=%d) in to dst (len=%d)", output.Type.SliceSize, dst.Len())
|
||||
}
|
||||
reflect.Copy(dst, src)
|
||||
case dstType.Kind() == reflect.Interface:
|
||||
dst.Set(src)
|
||||
case dstType.Kind() == reflect.Ptr:
|
||||
return set(dst.Elem(), src, output)
|
||||
default:
|
||||
return fmt.Errorf("abi: cannot unmarshal %v in to %v", src.Type(), dst.Type())
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
@ -147,9 +147,21 @@ func (am *Manager) Sign(addr common.Address, hash []byte) (signature []byte, err
|
||||
return crypto.Sign(hash, unlockedKey.PrivateKey)
|
||||
}
|
||||
|
||||
// SignWithPassphrase signs hash if the private key matching the given address can be
|
||||
// decrypted with the given passphrase.
|
||||
func (am *Manager) SignWithPassphrase(addr common.Address, passphrase string, hash []byte) (signature []byte, err error) {
|
||||
_, key, err := am.getDecryptedKey(Account{Address: addr}, passphrase)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
defer zeroKey(key.PrivateKey)
|
||||
return crypto.Sign(hash, key.PrivateKey)
|
||||
}
|
||||
|
||||
// Unlock unlocks the given account indefinitely.
|
||||
func (am *Manager) Unlock(a Account, keyAuth string) error {
|
||||
return am.TimedUnlock(a, keyAuth, 0)
|
||||
func (am *Manager) Unlock(a Account, passphrase string) error {
|
||||
return am.TimedUnlock(a, passphrase, 0)
|
||||
}
|
||||
|
||||
// Lock removes the private key with the given address from memory.
|
||||
|
@ -81,6 +81,34 @@ func TestSign(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestSignWithPassphrase(t *testing.T) {
|
||||
dir, am := tmpManager(t, true)
|
||||
defer os.RemoveAll(dir)
|
||||
|
||||
pass := "passwd"
|
||||
acc, err := am.NewAccount(pass)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if _, unlocked := am.unlocked[acc.Address]; unlocked {
|
||||
t.Fatal("expected account to be locked")
|
||||
}
|
||||
|
||||
_, err = am.SignWithPassphrase(acc.Address, pass, testSigData)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if _, unlocked := am.unlocked[acc.Address]; unlocked {
|
||||
t.Fatal("expected account to be locked")
|
||||
}
|
||||
|
||||
if _, err = am.SignWithPassphrase(acc.Address, "invalid passwd", testSigData); err == nil {
|
||||
t.Fatal("expected SignHash to fail with invalid password")
|
||||
}
|
||||
}
|
||||
|
||||
func TestTimedUnlock(t *testing.T) {
|
||||
dir, am := tmpManager(t, true)
|
||||
defer os.RemoveAll(dir)
|
||||
|
@ -41,8 +41,7 @@ import (
|
||||
)
|
||||
|
||||
var (
|
||||
passwordRegexp = regexp.MustCompile("personal.[nu]")
|
||||
leadingSpace = regexp.MustCompile("^ ")
|
||||
passwordRegexp = regexp.MustCompile("personal.[nus]")
|
||||
onlyws = regexp.MustCompile("^\\s*$")
|
||||
exit = regexp.MustCompile("^\\s*exit\\s*;*\\s*$")
|
||||
)
|
||||
@ -361,7 +360,7 @@ func (self *jsre) interactive() {
|
||||
str += input + "\n"
|
||||
self.setIndent()
|
||||
if indentCount <= 0 {
|
||||
if mustLogInHistory(str) {
|
||||
if !excludeFromHistory(str) {
|
||||
utils.Stdin.AppendHistory(str[:len(str)-1])
|
||||
}
|
||||
self.parseInput(str)
|
||||
@ -371,10 +370,8 @@ func (self *jsre) interactive() {
|
||||
}
|
||||
}
|
||||
|
||||
func mustLogInHistory(input string) bool {
|
||||
return len(input) == 0 ||
|
||||
passwordRegexp.MatchString(input) ||
|
||||
!leadingSpace.MatchString(input)
|
||||
func excludeFromHistory(input string) bool {
|
||||
return len(input) == 0 || input[0] == ' ' || passwordRegexp.MatchString(input)
|
||||
}
|
||||
|
||||
func (self *jsre) withHistory(datadir string, op func(*os.File)) {
|
||||
|
@ -47,11 +47,11 @@ import (
|
||||
)
|
||||
|
||||
const (
|
||||
clientIdentifier = "Geth" // Client identifier to advertise over the network
|
||||
versionMajor = 1 // Major version component of the current release
|
||||
versionMinor = 5 // Minor version component of the current release
|
||||
versionPatch = 0 // Patch version component of the current release
|
||||
versionMeta = "unstable" // Version metadata to append to the version string
|
||||
clientIdentifier = "Geth" // Client identifier to advertise over the network
|
||||
versionMajor = 1 // Major version component of the current release
|
||||
versionMinor = 4 // Minor version component of the current release
|
||||
versionPatch = 5 // Patch version component of the current release
|
||||
versionMeta = "stable" // Version metadata to append to the version string
|
||||
|
||||
versionOracle = "0xfa7b9770ca4cb04296cac84f37736d4041251cdf" // Ethereum address of the Geth release oracle
|
||||
)
|
||||
|
@ -292,7 +292,7 @@ func calcDifficultyHomestead(time, parentTime uint64, parentNumber, parentDiff *
|
||||
|
||||
// minimum difficulty can ever be (before exponential factor)
|
||||
if x.Cmp(params.MinimumDifficulty) < 0 {
|
||||
x = params.MinimumDifficulty
|
||||
x.Set(params.MinimumDifficulty)
|
||||
}
|
||||
|
||||
// for the exponential factor
|
||||
@ -325,7 +325,7 @@ func calcDifficultyFrontier(time, parentTime uint64, parentNumber, parentDiff *b
|
||||
diff.Sub(parentDiff, adjust)
|
||||
}
|
||||
if diff.Cmp(params.MinimumDifficulty) < 0 {
|
||||
diff = params.MinimumDifficulty
|
||||
diff.Set(params.MinimumDifficulty)
|
||||
}
|
||||
|
||||
periodCount := new(big.Int).Add(parentNumber, common.Big1)
|
||||
|
197
eth/api.go
197
eth/api.go
@ -52,15 +52,6 @@ import (
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
// errNoCode is returned by call and transact operations for which the requested
|
||||
// recipient contract to operate on does not exist in the state db or does not
|
||||
// have any code associated with it (i.e. suicided).
|
||||
//
|
||||
// Please note, this error string is part of the RPC API and is expected by the
|
||||
// native contract bindings to signal this particular error. Do not change this
|
||||
// as it will break all dependent code!
|
||||
var errNoCode = errors.New("no contract code at given address")
|
||||
|
||||
const defaultGas = uint64(90000)
|
||||
|
||||
// blockByNumber is a commonly used helper function which retrieves and returns
|
||||
@ -148,7 +139,7 @@ func (s *PublicEthereumAPI) Etherbase() (common.Address, error) {
|
||||
return s.e.Etherbase()
|
||||
}
|
||||
|
||||
// see Etherbase
|
||||
// Coinbase is the address that mining rewards will be send to (alias for Etherbase)
|
||||
func (s *PublicEthereumAPI) Coinbase() (common.Address, error) {
|
||||
return s.Etherbase()
|
||||
}
|
||||
@ -217,18 +208,17 @@ func (s *PublicMinerAPI) SubmitWork(nonce rpc.HexNumber, solution, digest common
|
||||
// result[0], 32 bytes hex encoded current block header pow-hash
|
||||
// result[1], 32 bytes hex encoded seed hash used for DAG
|
||||
// result[2], 32 bytes hex encoded boundary condition ("target"), 2^256/difficulty
|
||||
func (s *PublicMinerAPI) GetWork() ([]string, error) {
|
||||
func (s *PublicMinerAPI) GetWork() (work [3]string, err error) {
|
||||
if !s.e.IsMining() {
|
||||
if err := s.e.StartMining(0, ""); err != nil {
|
||||
return nil, err
|
||||
return work, err
|
||||
}
|
||||
}
|
||||
if work, err := s.agent.GetWork(); err == nil {
|
||||
return work[:], nil
|
||||
} else {
|
||||
glog.Infof("%v\n", err)
|
||||
if work, err = s.agent.GetWork(); err == nil {
|
||||
return
|
||||
}
|
||||
return nil, fmt.Errorf("mining not ready")
|
||||
glog.V(logger.Debug).Infof("%v", err)
|
||||
return work, fmt.Errorf("mining not ready")
|
||||
}
|
||||
|
||||
// SubmitHashrate can be used for remote miners to submit their hash rate. This enables the node to report the combined
|
||||
@ -423,14 +413,23 @@ func (s *PublicAccountAPI) Accounts() []accounts.Account {
|
||||
}
|
||||
|
||||
// PrivateAccountAPI provides an API to access accounts managed by this node.
|
||||
// It offers methods to create, (un)lock en list accounts.
|
||||
// It offers methods to create, (un)lock en list accounts. Some methods accept
|
||||
// passwords and are therefore considered private by default.
|
||||
type PrivateAccountAPI struct {
|
||||
am *accounts.Manager
|
||||
am *accounts.Manager
|
||||
txPool *core.TxPool
|
||||
txMu *sync.Mutex
|
||||
gpo *GasPriceOracle
|
||||
}
|
||||
|
||||
// NewPrivateAccountAPI create a new PrivateAccountAPI.
|
||||
func NewPrivateAccountAPI(am *accounts.Manager) *PrivateAccountAPI {
|
||||
return &PrivateAccountAPI{am}
|
||||
func NewPrivateAccountAPI(e *Ethereum) *PrivateAccountAPI {
|
||||
return &PrivateAccountAPI{
|
||||
am: e.accountManager,
|
||||
txPool: e.txPool,
|
||||
txMu: &e.txMu,
|
||||
gpo: e.gpo,
|
||||
}
|
||||
}
|
||||
|
||||
// ListAccounts will return a list of addresses for accounts this node manages.
|
||||
@ -452,6 +451,8 @@ func (s *PrivateAccountAPI) NewAccount(password string) (common.Address, error)
|
||||
return common.Address{}, err
|
||||
}
|
||||
|
||||
// ImportRawKey stores the given hex encoded ECDSA key into the key directory,
|
||||
// encrypting it with the passphrase.
|
||||
func (s *PrivateAccountAPI) ImportRawKey(privkey string, password string) (common.Address, error) {
|
||||
hexkey, err := hex.DecodeString(privkey)
|
||||
if err != nil {
|
||||
@ -482,6 +483,34 @@ func (s *PrivateAccountAPI) LockAccount(addr common.Address) bool {
|
||||
return s.am.Lock(addr) == nil
|
||||
}
|
||||
|
||||
// SignAndSendTransaction will create a transaction from the given arguments and
|
||||
// tries to sign it with the key associated with args.To. If the given passwd isn't
|
||||
// able to decrypt the key it fails.
|
||||
func (s *PrivateAccountAPI) SignAndSendTransaction(args SendTxArgs, passwd string) (common.Hash, error) {
|
||||
args = prepareSendTxArgs(args, s.gpo)
|
||||
|
||||
s.txMu.Lock()
|
||||
defer s.txMu.Unlock()
|
||||
|
||||
if args.Nonce == nil {
|
||||
args.Nonce = rpc.NewHexNumber(s.txPool.State().GetNonce(args.From))
|
||||
}
|
||||
|
||||
var tx *types.Transaction
|
||||
if args.To == nil {
|
||||
tx = types.NewContractCreation(args.Nonce.Uint64(), args.Value.BigInt(), args.Gas.BigInt(), args.GasPrice.BigInt(), common.FromHex(args.Data))
|
||||
} else {
|
||||
tx = types.NewTransaction(args.Nonce.Uint64(), *args.To, args.Value.BigInt(), args.Gas.BigInt(), args.GasPrice.BigInt(), common.FromHex(args.Data))
|
||||
}
|
||||
|
||||
signature, err := s.am.SignWithPassphrase(args.From, passwd, tx.SigHash().Bytes())
|
||||
if err != nil {
|
||||
return common.Hash{}, err
|
||||
}
|
||||
|
||||
return submitTransaction(s.txPool, tx, signature)
|
||||
}
|
||||
|
||||
// PublicBlockChainAPI provides an API to access the Ethereum blockchain.
|
||||
// It offers only methods that operate on public data that is freely available to anyone.
|
||||
type PublicBlockChainAPI struct {
|
||||
@ -645,15 +674,14 @@ func (s *PublicBlockChainAPI) NewBlocks(ctx context.Context, args NewBlocksArgs)
|
||||
// add a callback that is called on chain events which will format the block and notify the client
|
||||
s.muNewBlockSubscriptions.Lock()
|
||||
s.newBlockSubscriptions[subscription.ID()] = func(e core.ChainEvent) error {
|
||||
if notification, err := s.rpcOutputBlock(e.Block, args.IncludeTransactions, args.TransactionDetails); err == nil {
|
||||
notification, err := s.rpcOutputBlock(e.Block, args.IncludeTransactions, args.TransactionDetails)
|
||||
if err == nil {
|
||||
return subscription.Notify(notification)
|
||||
} else {
|
||||
glog.V(logger.Warn).Info("unable to format block %v\n", err)
|
||||
}
|
||||
glog.V(logger.Warn).Info("unable to format block %v\n", err)
|
||||
return nil
|
||||
}
|
||||
s.muNewBlockSubscriptions.Unlock()
|
||||
|
||||
return subscription, nil
|
||||
}
|
||||
|
||||
@ -700,6 +728,7 @@ func (m callmsg) Gas() *big.Int { return m.gas }
|
||||
func (m callmsg) Value() *big.Int { return m.value }
|
||||
func (m callmsg) Data() []byte { return m.data }
|
||||
|
||||
// CallArgs represents the arguments for a call.
|
||||
type CallArgs struct {
|
||||
From common.Address `json:"from"`
|
||||
To *common.Address `json:"to"`
|
||||
@ -717,12 +746,6 @@ func (s *PublicBlockChainAPI) doCall(args CallArgs, blockNr rpc.BlockNumber) (st
|
||||
}
|
||||
stateDb = stateDb.Copy()
|
||||
|
||||
// If there's no code to interact with, respond with an appropriate error
|
||||
if args.To != nil {
|
||||
if code := stateDb.GetCode(*args.To); len(code) == 0 {
|
||||
return "0x", nil, errNoCode
|
||||
}
|
||||
}
|
||||
// Retrieve the account state object to interact with
|
||||
var from *state.StateObject
|
||||
if args.From == (common.Address{}) {
|
||||
@ -911,7 +934,7 @@ type PublicTransactionPoolAPI struct {
|
||||
miner *miner.Miner
|
||||
am *accounts.Manager
|
||||
txPool *core.TxPool
|
||||
txMu sync.Mutex
|
||||
txMu *sync.Mutex
|
||||
muPendingTxSubs sync.Mutex
|
||||
pendingTxSubs map[string]rpc.Subscription
|
||||
}
|
||||
@ -925,6 +948,7 @@ func NewPublicTransactionPoolAPI(e *Ethereum) *PublicTransactionPoolAPI {
|
||||
bc: e.blockchain,
|
||||
am: e.accountManager,
|
||||
txPool: e.txPool,
|
||||
txMu: &e.txMu,
|
||||
miner: e.miner,
|
||||
pendingTxSubs: make(map[string]rpc.Subscription),
|
||||
}
|
||||
@ -1124,6 +1148,7 @@ func (s *PublicTransactionPoolAPI) sign(addr common.Address, tx *types.Transacti
|
||||
return tx.WithSignature(signature)
|
||||
}
|
||||
|
||||
// SendTxArgs represents the arguments to sumbit a new transaction into the transaction pool.
|
||||
type SendTxArgs struct {
|
||||
From common.Address `json:"from"`
|
||||
To *common.Address `json:"to"`
|
||||
@ -1134,18 +1159,47 @@ type SendTxArgs struct {
|
||||
Nonce *rpc.HexNumber `json:"nonce"`
|
||||
}
|
||||
|
||||
// SendTransaction will create a transaction for the given transaction argument, sign it and submit it to the
|
||||
// transaction pool.
|
||||
func (s *PublicTransactionPoolAPI) SendTransaction(args SendTxArgs) (common.Hash, error) {
|
||||
// prepareSendTxArgs is a helper function that fills in default values for unspecified tx fields.
|
||||
func prepareSendTxArgs(args SendTxArgs, gpo *GasPriceOracle) SendTxArgs {
|
||||
if args.Gas == nil {
|
||||
args.Gas = rpc.NewHexNumber(defaultGas)
|
||||
}
|
||||
if args.GasPrice == nil {
|
||||
args.GasPrice = rpc.NewHexNumber(s.gpo.SuggestPrice())
|
||||
args.GasPrice = rpc.NewHexNumber(gpo.SuggestPrice())
|
||||
}
|
||||
if args.Value == nil {
|
||||
args.Value = rpc.NewHexNumber(0)
|
||||
}
|
||||
return args
|
||||
}
|
||||
|
||||
// submitTransaction is a helper function that submits tx to txPool and creates a log entry.
|
||||
func submitTransaction(txPool *core.TxPool, tx *types.Transaction, signature []byte) (common.Hash, error) {
|
||||
signedTx, err := tx.WithSignature(signature)
|
||||
if err != nil {
|
||||
return common.Hash{}, err
|
||||
}
|
||||
|
||||
txPool.SetLocal(signedTx)
|
||||
if err := txPool.Add(signedTx); err != nil {
|
||||
return common.Hash{}, err
|
||||
}
|
||||
|
||||
if signedTx.To() == nil {
|
||||
from, _ := signedTx.From()
|
||||
addr := crypto.CreateAddress(from, signedTx.Nonce())
|
||||
glog.V(logger.Info).Infof("Tx(%s) created: %s\n", signedTx.Hash().Hex(), addr.Hex())
|
||||
} else {
|
||||
glog.V(logger.Info).Infof("Tx(%s) to: %s\n", signedTx.Hash().Hex(), tx.To().Hex())
|
||||
}
|
||||
|
||||
return signedTx.Hash(), nil
|
||||
}
|
||||
|
||||
// SendTransaction creates a transaction for the given argument, sign it and submit it to the
|
||||
// transaction pool.
|
||||
func (s *PublicTransactionPoolAPI) SendTransaction(args SendTxArgs) (common.Hash, error) {
|
||||
args = prepareSendTxArgs(args, s.gpo)
|
||||
|
||||
s.txMu.Lock()
|
||||
defer s.txMu.Unlock()
|
||||
@ -1155,32 +1209,18 @@ func (s *PublicTransactionPoolAPI) SendTransaction(args SendTxArgs) (common.Hash
|
||||
}
|
||||
|
||||
var tx *types.Transaction
|
||||
contractCreation := (args.To == nil)
|
||||
|
||||
if contractCreation {
|
||||
if args.To == nil {
|
||||
tx = types.NewContractCreation(args.Nonce.Uint64(), args.Value.BigInt(), args.Gas.BigInt(), args.GasPrice.BigInt(), common.FromHex(args.Data))
|
||||
} else {
|
||||
tx = types.NewTransaction(args.Nonce.Uint64(), *args.To, args.Value.BigInt(), args.Gas.BigInt(), args.GasPrice.BigInt(), common.FromHex(args.Data))
|
||||
}
|
||||
|
||||
signedTx, err := s.sign(args.From, tx)
|
||||
signature, err := s.am.Sign(args.From, tx.SigHash().Bytes())
|
||||
if err != nil {
|
||||
return common.Hash{}, err
|
||||
}
|
||||
|
||||
s.txPool.SetLocal(signedTx)
|
||||
if err := s.txPool.Add(signedTx); err != nil {
|
||||
return common.Hash{}, err
|
||||
}
|
||||
|
||||
if contractCreation {
|
||||
addr := crypto.CreateAddress(args.From, args.Nonce.Uint64())
|
||||
glog.V(logger.Info).Infof("Tx(%s) created: %s\n", signedTx.Hash().Hex(), addr.Hex())
|
||||
} else {
|
||||
glog.V(logger.Info).Infof("Tx(%s) to: %s\n", signedTx.Hash().Hex(), tx.To().Hex())
|
||||
}
|
||||
|
||||
return signedTx.Hash(), nil
|
||||
return submitTransaction(s.txPool, tx, signature)
|
||||
}
|
||||
|
||||
// SendRawTransaction will add the signed transaction to the transaction pool.
|
||||
@ -1217,6 +1257,7 @@ func (s *PublicTransactionPoolAPI) Sign(addr common.Address, hash common.Hash) (
|
||||
return common.ToHex(signature), error
|
||||
}
|
||||
|
||||
// SignTransactionArgs represents the arguments to sign a transaction.
|
||||
type SignTransactionArgs struct {
|
||||
From common.Address
|
||||
To *common.Address
|
||||
@ -1243,6 +1284,7 @@ type Tx struct {
|
||||
Hash common.Hash `json:"hash"`
|
||||
}
|
||||
|
||||
// UnmarshalJSON parses JSON data into tx.
|
||||
func (tx *Tx) UnmarshalJSON(b []byte) (err error) {
|
||||
req := struct {
|
||||
To *common.Address `json:"to"`
|
||||
@ -1283,8 +1325,7 @@ func (tx *Tx) UnmarshalJSON(b []byte) (err error) {
|
||||
tx.GasPrice = rpc.NewHexNumber(int64(50000000000))
|
||||
}
|
||||
|
||||
contractCreation := (req.To == nil)
|
||||
if contractCreation {
|
||||
if req.To == nil {
|
||||
tx.tx = types.NewContractCreation(tx.Nonce.Uint64(), tx.Value.BigInt(), tx.GasLimit.BigInt(), tx.GasPrice.BigInt(), data)
|
||||
} else {
|
||||
tx.tx = types.NewTransaction(tx.Nonce.Uint64(), *tx.To, tx.Value.BigInt(), tx.GasLimit.BigInt(), tx.GasPrice.BigInt(), data)
|
||||
@ -1293,6 +1334,7 @@ func (tx *Tx) UnmarshalJSON(b []byte) (err error) {
|
||||
return nil
|
||||
}
|
||||
|
||||
// SignTransactionResult represents a RLP encoded signed transaction.
|
||||
type SignTransactionResult struct {
|
||||
Raw string `json:"raw"`
|
||||
Tx *Tx `json:"tx"`
|
||||
@ -1335,9 +1377,7 @@ func (s *PublicTransactionPoolAPI) SignTransaction(args SignTransactionArgs) (*S
|
||||
}
|
||||
|
||||
var tx *types.Transaction
|
||||
contractCreation := (args.To == nil)
|
||||
|
||||
if contractCreation {
|
||||
if args.To == nil {
|
||||
tx = types.NewContractCreation(args.Nonce.Uint64(), args.Value.BigInt(), args.Gas.BigInt(), args.GasPrice.BigInt(), common.FromHex(args.Data))
|
||||
} else {
|
||||
tx = types.NewTransaction(args.Nonce.Uint64(), *args.To, args.Value.BigInt(), args.Gas.BigInt(), args.GasPrice.BigInt(), common.FromHex(args.Data))
|
||||
@ -1353,14 +1393,14 @@ func (s *PublicTransactionPoolAPI) SignTransaction(args SignTransactionArgs) (*S
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &SignTransactionResult{"0x" + common.Bytes2Hex(data), newTx(tx)}, nil
|
||||
return &SignTransactionResult{"0x" + common.Bytes2Hex(data), newTx(signedTx)}, nil
|
||||
}
|
||||
|
||||
// PendingTransactions returns the transactions that are in the transaction pool and have a from address that is one of
|
||||
// the accounts this node manages.
|
||||
func (s *PublicTransactionPoolAPI) PendingTransactions() []*RPCTransaction {
|
||||
pending := s.txPool.GetTransactions()
|
||||
transactions := make([]*RPCTransaction, 0)
|
||||
transactions := make([]*RPCTransaction, 0, len(pending))
|
||||
for _, tx := range pending {
|
||||
from, _ := tx.FromFrontier()
|
||||
if s.am.HasAddress(from) {
|
||||
@ -1370,7 +1410,7 @@ func (s *PublicTransactionPoolAPI) PendingTransactions() []*RPCTransaction {
|
||||
return transactions
|
||||
}
|
||||
|
||||
// NewPendingTransaction creates a subscription that is triggered each time a transaction enters the transaction pool
|
||||
// NewPendingTransactions creates a subscription that is triggered each time a transaction enters the transaction pool
|
||||
// and is send from one of the transactions this nodes manages.
|
||||
func (s *PublicTransactionPoolAPI) NewPendingTransactions(ctx context.Context) (rpc.Subscription, error) {
|
||||
notifier, supported := rpc.NotifierFromContext(ctx)
|
||||
@ -1410,8 +1450,7 @@ func (s *PublicTransactionPoolAPI) Resend(tx Tx, gasPrice, gasLimit *rpc.HexNumb
|
||||
}
|
||||
|
||||
var newTx *types.Transaction
|
||||
contractCreation := (tx.tx.To() == nil)
|
||||
if contractCreation {
|
||||
if tx.tx.To() == nil {
|
||||
newTx = types.NewContractCreation(tx.tx.Nonce(), tx.tx.Value(), gasPrice.BigInt(), gasLimit.BigInt(), tx.tx.Data())
|
||||
} else {
|
||||
newTx = types.NewTransaction(tx.tx.Nonce(), *tx.tx.To(), tx.tx.Value(), gasPrice.BigInt(), gasLimit.BigInt(), tx.tx.Data())
|
||||
@ -1612,7 +1651,7 @@ func (api *PrivateDebugAPI) ChaindbProperty(property string) (string, error) {
|
||||
return ldb.LDB().GetProperty(property)
|
||||
}
|
||||
|
||||
// BlockTraceResults is the returned value when replaying a block to check for
|
||||
// BlockTraceResult is the returned value when replaying a block to check for
|
||||
// consensus results and full VM trace logs for all included transactions.
|
||||
type BlockTraceResult struct {
|
||||
Validated bool `json:"validated"`
|
||||
@ -1647,7 +1686,7 @@ func (api *PrivateDebugAPI) TraceBlockFromFile(file string, config *vm.Config) B
|
||||
return api.TraceBlock(blockRlp, config)
|
||||
}
|
||||
|
||||
// TraceProcessBlock processes the block by canonical block number.
|
||||
// TraceBlockByNumber processes the block by canonical block number.
|
||||
func (api *PrivateDebugAPI) TraceBlockByNumber(number uint64, config *vm.Config) BlockTraceResult {
|
||||
// Fetch the block that we aim to reprocess
|
||||
block := api.eth.BlockChain().GetBlockByNumber(number)
|
||||
@ -1752,15 +1791,6 @@ type structLogRes struct {
|
||||
Storage map[string]string `json:"storage"`
|
||||
}
|
||||
|
||||
// VmLoggerOptions are the options used for debugging transactions and capturing
|
||||
// specific data.
|
||||
type VmLoggerOptions struct {
|
||||
DisableMemory bool // disable memory capture
|
||||
DisableStack bool // disable stack capture
|
||||
DisableStorage bool // disable storage capture
|
||||
FullStorage bool // show full storage (slow)
|
||||
}
|
||||
|
||||
// formatLogs formats EVM returned structured logs for json output
|
||||
func formatLogs(structLogs []vm.StructLog) []structLogRes {
|
||||
formattedStructLogs := make([]structLogRes, len(structLogs))
|
||||
@ -1802,25 +1832,25 @@ func formatError(err error) string {
|
||||
|
||||
// TraceTransaction returns the structured logs created during the execution of EVM
|
||||
// and returns them as a JSON object.
|
||||
func (s *PrivateDebugAPI) TraceTransaction(txHash common.Hash, logger *vm.LogConfig) (*ExecutionResult, error) {
|
||||
func (api *PrivateDebugAPI) TraceTransaction(txHash common.Hash, logger *vm.LogConfig) (*ExecutionResult, error) {
|
||||
if logger == nil {
|
||||
logger = new(vm.LogConfig)
|
||||
}
|
||||
// Retrieve the tx from the chain and the containing block
|
||||
tx, blockHash, _, txIndex := core.GetTransaction(s.eth.ChainDb(), txHash)
|
||||
tx, blockHash, _, txIndex := core.GetTransaction(api.eth.ChainDb(), txHash)
|
||||
if tx == nil {
|
||||
return nil, fmt.Errorf("transaction %x not found", txHash)
|
||||
}
|
||||
block := s.eth.BlockChain().GetBlock(blockHash)
|
||||
block := api.eth.BlockChain().GetBlock(blockHash)
|
||||
if block == nil {
|
||||
return nil, fmt.Errorf("block %x not found", blockHash)
|
||||
}
|
||||
// Create the state database to mutate and eventually trace
|
||||
parent := s.eth.BlockChain().GetBlock(block.ParentHash())
|
||||
parent := api.eth.BlockChain().GetBlock(block.ParentHash())
|
||||
if parent == nil {
|
||||
return nil, fmt.Errorf("block parent %x not found", block.ParentHash())
|
||||
}
|
||||
stateDb, err := state.New(parent.Root(), s.eth.ChainDb())
|
||||
stateDb, err := state.New(parent.Root(), api.eth.ChainDb())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -1841,7 +1871,7 @@ func (s *PrivateDebugAPI) TraceTransaction(txHash common.Hash, logger *vm.LogCon
|
||||
}
|
||||
// Mutate the state if we haven't reached the tracing transaction yet
|
||||
if uint64(idx) < txIndex {
|
||||
vmenv := core.NewEnv(stateDb, s.config, s.eth.BlockChain(), msg, block.Header(), vm.Config{})
|
||||
vmenv := core.NewEnv(stateDb, api.config, api.eth.BlockChain(), msg, block.Header(), vm.Config{})
|
||||
_, _, err := core.ApplyMessage(vmenv, msg, new(core.GasPool).AddGas(tx.Gas()))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("mutation failed: %v", err)
|
||||
@ -1849,7 +1879,7 @@ func (s *PrivateDebugAPI) TraceTransaction(txHash common.Hash, logger *vm.LogCon
|
||||
continue
|
||||
}
|
||||
// Otherwise trace the transaction and return
|
||||
vmenv := core.NewEnv(stateDb, s.config, s.eth.BlockChain(), msg, block.Header(), vm.Config{Debug: true, Logger: *logger})
|
||||
vmenv := core.NewEnv(stateDb, api.config, api.eth.BlockChain(), msg, block.Header(), vm.Config{Debug: true, Logger: *logger})
|
||||
ret, gas, err := core.ApplyMessage(vmenv, msg, new(core.GasPool).AddGas(tx.Gas()))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("tracing failed: %v", err)
|
||||
@ -1863,6 +1893,7 @@ func (s *PrivateDebugAPI) TraceTransaction(txHash common.Hash, logger *vm.LogCon
|
||||
return nil, errors.New("database inconsistency")
|
||||
}
|
||||
|
||||
// TraceCall executes a call and returns the amount of gas, created logs and optionally returned values.
|
||||
func (s *PublicBlockChainAPI) TraceCall(args CallArgs, blockNr rpc.BlockNumber) (*ExecutionResult, error) {
|
||||
// Fetch the state associated with the block number
|
||||
stateDb, block, err := stateAndBlockByNumber(s.miner, s.bc, blockNr, s.chainDb)
|
||||
@ -1931,12 +1962,12 @@ func (s *PublicNetAPI) Listening() bool {
|
||||
return true // always listening
|
||||
}
|
||||
|
||||
// Peercount returns the number of connected peers
|
||||
// PeerCount returns the number of connected peers
|
||||
func (s *PublicNetAPI) PeerCount() *rpc.HexNumber {
|
||||
return rpc.NewHexNumber(s.net.PeerCount())
|
||||
}
|
||||
|
||||
// ProtocolVersion returns the current ethereum protocol version.
|
||||
// Version returns the current ethereum protocol version.
|
||||
func (s *PublicNetAPI) Version() string {
|
||||
return fmt.Sprintf("%d", s.networkVersion)
|
||||
}
|
||||
|
@ -26,6 +26,7 @@ import (
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/ethash"
|
||||
@ -113,6 +114,7 @@ type Ethereum struct {
|
||||
|
||||
// Handlers
|
||||
txPool *core.TxPool
|
||||
txMu sync.Mutex
|
||||
blockchain *core.BlockChain
|
||||
accountManager *accounts.Manager
|
||||
pow *ethash.Ethash
|
||||
@ -293,7 +295,7 @@ func (s *Ethereum) APIs() []rpc.API {
|
||||
}, {
|
||||
Namespace: "personal",
|
||||
Version: "1.0",
|
||||
Service: NewPrivateAccountAPI(s.accountManager),
|
||||
Service: NewPrivateAccountAPI(s),
|
||||
Public: false,
|
||||
}, {
|
||||
Namespace: "eth",
|
||||
|
18
eth/bind.go
18
eth/bind.go
@ -19,7 +19,6 @@ package eth
|
||||
import (
|
||||
"math/big"
|
||||
|
||||
"github.com/ethereum/go-ethereum/accounts/abi/bind"
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/rlp"
|
||||
@ -49,6 +48,17 @@ func NewContractBackend(eth *Ethereum) *ContractBackend {
|
||||
}
|
||||
}
|
||||
|
||||
// HasCode implements bind.ContractVerifier.HasCode by retrieving any code associated
|
||||
// with the contract from the local API, and checking its size.
|
||||
func (b *ContractBackend) HasCode(contract common.Address, pending bool) (bool, error) {
|
||||
block := rpc.LatestBlockNumber
|
||||
if pending {
|
||||
block = rpc.PendingBlockNumber
|
||||
}
|
||||
out, err := b.bcapi.GetCode(contract, block)
|
||||
return len(common.FromHex(out)) > 0, err
|
||||
}
|
||||
|
||||
// ContractCall implements bind.ContractCaller executing an Ethereum contract
|
||||
// call with the specified data as the input. The pending flag requests execution
|
||||
// against the pending block, not the stable head of the chain.
|
||||
@ -64,9 +74,6 @@ func (b *ContractBackend) ContractCall(contract common.Address, data []byte, pen
|
||||
}
|
||||
// Execute the call and convert the output back to Go types
|
||||
out, err := b.bcapi.Call(args, block)
|
||||
if err == errNoCode {
|
||||
err = bind.ErrNoCode
|
||||
}
|
||||
return common.FromHex(out), err
|
||||
}
|
||||
|
||||
@ -95,9 +102,6 @@ func (b *ContractBackend) EstimateGasLimit(sender common.Address, contract *comm
|
||||
Value: *rpc.NewHexNumber(value),
|
||||
Data: common.ToHex(data),
|
||||
})
|
||||
if err == errNoCode {
|
||||
err = bind.ErrNoCode
|
||||
}
|
||||
return out.BigInt(), err
|
||||
}
|
||||
|
||||
|
@ -34,6 +34,7 @@ import (
|
||||
"github.com/ethereum/go-ethereum/event"
|
||||
"github.com/ethereum/go-ethereum/logger"
|
||||
"github.com/ethereum/go-ethereum/logger/glog"
|
||||
"github.com/ethereum/go-ethereum/params"
|
||||
"github.com/rcrowley/go-metrics"
|
||||
)
|
||||
|
||||
@ -45,6 +46,8 @@ var (
|
||||
MaxReceiptFetch = 256 // Amount of transaction receipts to allow fetching per request
|
||||
MaxStateFetch = 384 // Amount of node state values to allow fetching per request
|
||||
|
||||
MaxForkAncestry = 3 * params.EpochDuration.Uint64() // Maximum chain reorganisation
|
||||
|
||||
hashTTL = 3 * time.Second // [eth/61] Time it takes for a hash request to time out
|
||||
blockTargetRTT = 3 * time.Second / 2 // [eth/61] Target time for completing a block retrieval request
|
||||
blockTTL = 3 * blockTargetRTT // [eth/61] Maximum time allowance before a block request is considered expired
|
||||
@ -79,6 +82,7 @@ var (
|
||||
errEmptyHeaderSet = errors.New("empty header set by peer")
|
||||
errPeersUnavailable = errors.New("no peers available or all tried for download")
|
||||
errAlreadyInPool = errors.New("hash already in pool")
|
||||
errInvalidAncestor = errors.New("retrieved ancestor is invalid")
|
||||
errInvalidChain = errors.New("retrieved hash chain is invalid")
|
||||
errInvalidBlock = errors.New("retrieved block is invalid")
|
||||
errInvalidBody = errors.New("retrieved block body is invalid")
|
||||
@ -266,7 +270,7 @@ func (d *Downloader) Synchronise(id string, head common.Hash, td *big.Int, mode
|
||||
case errBusy:
|
||||
glog.V(logger.Detail).Infof("Synchronisation already in progress")
|
||||
|
||||
case errTimeout, errBadPeer, errStallingPeer, errEmptyHashSet, errEmptyHeaderSet, errPeersUnavailable, errInvalidChain:
|
||||
case errTimeout, errBadPeer, errStallingPeer, errEmptyHashSet, errEmptyHeaderSet, errPeersUnavailable, errInvalidAncestor, errInvalidChain:
|
||||
glog.V(logger.Debug).Infof("Removing peer %v: %v", id, err)
|
||||
d.dropPeer(id)
|
||||
|
||||
@ -353,7 +357,7 @@ func (d *Downloader) syncWithPeer(p *peer, hash common.Hash, td *big.Int) (err e
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
origin, err := d.findAncestor61(p)
|
||||
origin, err := d.findAncestor61(p, latest)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -380,7 +384,7 @@ func (d *Downloader) syncWithPeer(p *peer, hash common.Hash, td *big.Int) (err e
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
origin, err := d.findAncestor(p)
|
||||
origin, err := d.findAncestor(p, latest)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -536,11 +540,19 @@ func (d *Downloader) fetchHeight61(p *peer) (uint64, error) {
|
||||
// on the correct chain, checking the top N blocks should already get us a match.
|
||||
// In the rare scenario when we ended up on a long reorganisation (i.e. none of
|
||||
// the head blocks match), we do a binary search to find the common ancestor.
|
||||
func (d *Downloader) findAncestor61(p *peer) (uint64, error) {
|
||||
func (d *Downloader) findAncestor61(p *peer, height uint64) (uint64, error) {
|
||||
glog.V(logger.Debug).Infof("%v: looking for common ancestor", p)
|
||||
|
||||
// Request out head blocks to short circuit ancestor location
|
||||
head := d.headBlock().NumberU64()
|
||||
// Figure out the valid ancestor range to prevent rewrite attacks
|
||||
floor, ceil := int64(-1), d.headBlock().NumberU64()
|
||||
if ceil >= MaxForkAncestry {
|
||||
floor = int64(ceil - MaxForkAncestry)
|
||||
}
|
||||
// Request the topmost blocks to short circuit binary ancestor lookup
|
||||
head := ceil
|
||||
if head > height {
|
||||
head = height
|
||||
}
|
||||
from := int64(head) - int64(MaxHashFetch) + 1
|
||||
if from < 0 {
|
||||
from = 0
|
||||
@ -600,11 +612,18 @@ func (d *Downloader) findAncestor61(p *peer) (uint64, error) {
|
||||
}
|
||||
// If the head fetch already found an ancestor, return
|
||||
if !common.EmptyHash(hash) {
|
||||
if int64(number) <= floor {
|
||||
glog.V(logger.Warn).Infof("%v: potential rewrite attack: #%d [%x…] <= #%d limit", p, number, hash[:4], floor)
|
||||
return 0, errInvalidAncestor
|
||||
}
|
||||
glog.V(logger.Debug).Infof("%v: common ancestor: #%d [%x…]", p, number, hash[:4])
|
||||
return number, nil
|
||||
}
|
||||
// Ancestor not found, we need to binary search over our chain
|
||||
start, end := uint64(0), head
|
||||
if floor > 0 {
|
||||
start = uint64(floor)
|
||||
}
|
||||
for start+1 < end {
|
||||
// Split our chain interval in two, and request the hash to cross check
|
||||
check := (start + end) / 2
|
||||
@ -660,6 +679,12 @@ func (d *Downloader) findAncestor61(p *peer) (uint64, error) {
|
||||
}
|
||||
}
|
||||
}
|
||||
// Ensure valid ancestry and return
|
||||
if int64(start) <= floor {
|
||||
glog.V(logger.Warn).Infof("%v: potential rewrite attack: #%d [%x…] <= #%d limit", p, start, hash[:4], floor)
|
||||
return 0, errInvalidAncestor
|
||||
}
|
||||
glog.V(logger.Debug).Infof("%v: common ancestor: #%d [%x…]", p, start, hash[:4])
|
||||
return start, nil
|
||||
}
|
||||
|
||||
@ -961,15 +986,23 @@ func (d *Downloader) fetchHeight(p *peer) (uint64, error) {
|
||||
// on the correct chain, checking the top N links should already get us a match.
|
||||
// In the rare scenario when we ended up on a long reorganisation (i.e. none of
|
||||
// the head links match), we do a binary search to find the common ancestor.
|
||||
func (d *Downloader) findAncestor(p *peer) (uint64, error) {
|
||||
func (d *Downloader) findAncestor(p *peer, height uint64) (uint64, error) {
|
||||
glog.V(logger.Debug).Infof("%v: looking for common ancestor", p)
|
||||
|
||||
// Request our head headers to short circuit ancestor location
|
||||
head := d.headHeader().Number.Uint64()
|
||||
// Figure out the valid ancestor range to prevent rewrite attacks
|
||||
floor, ceil := int64(-1), d.headHeader().Number.Uint64()
|
||||
if d.mode == FullSync {
|
||||
head = d.headBlock().NumberU64()
|
||||
ceil = d.headBlock().NumberU64()
|
||||
} else if d.mode == FastSync {
|
||||
head = d.headFastBlock().NumberU64()
|
||||
ceil = d.headFastBlock().NumberU64()
|
||||
}
|
||||
if ceil >= MaxForkAncestry {
|
||||
floor = int64(ceil - MaxForkAncestry)
|
||||
}
|
||||
// Request the topmost blocks to short circuit binary ancestor lookup
|
||||
head := ceil
|
||||
if head > height {
|
||||
head = height
|
||||
}
|
||||
from := int64(head) - int64(MaxHeaderFetch) + 1
|
||||
if from < 0 {
|
||||
@ -1040,11 +1073,18 @@ func (d *Downloader) findAncestor(p *peer) (uint64, error) {
|
||||
}
|
||||
// If the head fetch already found an ancestor, return
|
||||
if !common.EmptyHash(hash) {
|
||||
if int64(number) <= floor {
|
||||
glog.V(logger.Warn).Infof("%v: potential rewrite attack: #%d [%x…] <= #%d limit", p, number, hash[:4], floor)
|
||||
return 0, errInvalidAncestor
|
||||
}
|
||||
glog.V(logger.Debug).Infof("%v: common ancestor: #%d [%x…]", p, number, hash[:4])
|
||||
return number, nil
|
||||
}
|
||||
// Ancestor not found, we need to binary search over our chain
|
||||
start, end := uint64(0), head
|
||||
if floor > 0 {
|
||||
start = uint64(floor)
|
||||
}
|
||||
for start+1 < end {
|
||||
// Split our chain interval in two, and request the hash to cross check
|
||||
check := (start + end) / 2
|
||||
@ -1100,6 +1140,12 @@ func (d *Downloader) findAncestor(p *peer) (uint64, error) {
|
||||
}
|
||||
}
|
||||
}
|
||||
// Ensure valid ancestry and return
|
||||
if int64(start) <= floor {
|
||||
glog.V(logger.Warn).Infof("%v: potential rewrite attack: #%d [%x…] <= #%d limit", p, start, hash[:4], floor)
|
||||
return 0, errInvalidAncestor
|
||||
}
|
||||
glog.V(logger.Debug).Infof("%v: common ancestor: #%d [%x…]", p, start, hash[:4])
|
||||
return start, nil
|
||||
}
|
||||
|
||||
|
@ -43,8 +43,9 @@ var (
genesis = core.GenesisBlockForTesting(testdb, testAddress, big.NewInt(1000000000))
)

// Reduce the block cache limit, otherwise the tests will be very heavy.
// Reduce some of the parameters to make the tester faster.
func init() {
MaxForkAncestry = uint64(10000)
blockCacheLimit = 1024
}

@ -52,11 +53,15 @@ func init() {
// the returned hash chain is ordered head->parent. In addition, every 3rd block
// contains a transaction and every 5th an uncle to allow testing correct block
// reassembly.
func makeChain(n int, seed byte, parent *types.Block, parentReceipts types.Receipts) ([]common.Hash, map[common.Hash]*types.Header, map[common.Hash]*types.Block, map[common.Hash]types.Receipts) {
func makeChain(n int, seed byte, parent *types.Block, parentReceipts types.Receipts, heavy bool) ([]common.Hash, map[common.Hash]*types.Header, map[common.Hash]*types.Block, map[common.Hash]types.Receipts) {
// Generate the block chain
blocks, receipts := core.GenerateChain(parent, testdb, n, func(i int, block *core.BlockGen) {
block.SetCoinbase(common.Address{seed})

// If a heavy chain is requested, delay blocks to raise difficulty
if heavy {
block.OffsetTime(-1)
}
// If the block number is multiple of 3, send a bonus transaction to the miner
if parent == genesis && i%3 == 0 {
tx, err := types.NewTransaction(block.TxNonce(testAddress), common.Address{seed}, big.NewInt(1000), params.TxGas, nil, nil).SignECDSA(testKey)
@ -97,15 +102,19 @@ func makeChain(n int, seed byte, parent *types.Block, parentReceipts types.Recei

// makeChainFork creates two chains of length n, such that h1[:f] and
// h2[:f] are different but have a common suffix of length n-f.
func makeChainFork(n, f int, parent *types.Block, parentReceipts types.Receipts) ([]common.Hash, []common.Hash, map[common.Hash]*types.Header, map[common.Hash]*types.Header, map[common.Hash]*types.Block, map[common.Hash]*types.Block, map[common.Hash]types.Receipts, map[common.Hash]types.Receipts) {
func makeChainFork(n, f int, parent *types.Block, parentReceipts types.Receipts, balanced bool) ([]common.Hash, []common.Hash, map[common.Hash]*types.Header, map[common.Hash]*types.Header, map[common.Hash]*types.Block, map[common.Hash]*types.Block, map[common.Hash]types.Receipts, map[common.Hash]types.Receipts) {
// Create the common suffix
hashes, headers, blocks, receipts := makeChain(n-f, 0, parent, parentReceipts)
hashes, headers, blocks, receipts := makeChain(n-f, 0, parent, parentReceipts, false)

// Create the forks
hashes1, headers1, blocks1, receipts1 := makeChain(f, 1, blocks[hashes[0]], receipts[hashes[0]])
// Create the forks, making the second heavier if non-balanced forks were requested
hashes1, headers1, blocks1, receipts1 := makeChain(f, 1, blocks[hashes[0]], receipts[hashes[0]], false)
hashes1 = append(hashes1, hashes[1:]...)

hashes2, headers2, blocks2, receipts2 := makeChain(f, 2, blocks[hashes[0]], receipts[hashes[0]])
heavy := false
if !balanced {
heavy = true
}
hashes2, headers2, blocks2, receipts2 := makeChain(f, 2, blocks[hashes[0]], receipts[hashes[0]], heavy)
hashes2 = append(hashes2, hashes[1:]...)

for hash, header := range headers {
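
The heavy flag works because OffsetTime(-1) shrinks each block's timestamp gap, which raises its difficulty, so a shorter fork can outweigh a longer one on total difficulty. A toy illustration of that selection rule (the per-block difficulty numbers are invented for the example):

```go
package main

import "fmt"

// totalDifficulty sums per-block difficulties; chain selection in Ethereum
// follows the chain with the highest total difficulty, not the longest one.
func totalDifficulty(difficulties []uint64) uint64 {
	var td uint64
	for _, d := range difficulties {
		td += d
	}
	return td
}

func main() {
	// A long fork of 10 "light" blocks vs a short fork of 6 "heavy" blocks
	// (heavier because their timestamps were offset backwards).
	light := make([]uint64, 10)
	heavy := make([]uint64, 6)
	for i := range light {
		light[i] = 100
	}
	for i := range heavy {
		heavy[i] = 180
	}
	fmt.Println(totalDifficulty(light), totalDifficulty(heavy)) // 1000 1080
}
```
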
@ -712,7 +721,7 @@ func testCanonicalSynchronisation(t *testing.T, protocol int, mode SyncMode) {

// Create a small enough block chain to download
targetBlocks := blockCacheLimit - 15
hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil)
hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil, false)

tester := newTester()
tester.newPeer("peer", protocol, hashes, headers, blocks, receipts)
@ -736,7 +745,7 @@ func TestThrottling64Fast(t *testing.T) { testThrottling(t, 64, FastSync) }
func testThrottling(t *testing.T, protocol int, mode SyncMode) {
// Create a long block chain to download and the tester
targetBlocks := 8 * blockCacheLimit
hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil)
hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil, false)

tester := newTester()
tester.newPeer("peer", protocol, hashes, headers, blocks, receipts)
@ -810,20 +819,20 @@ func testThrottling(t *testing.T, protocol int, mode SyncMode) {
// Tests that simple synchronization against a forked chain works correctly. In
// this test common ancestor lookup should *not* be short circuited, and a full
// binary search should be executed.
func TestForkedSynchronisation61(t *testing.T) { testForkedSynchronisation(t, 61, FullSync) }
func TestForkedSynchronisation62(t *testing.T) { testForkedSynchronisation(t, 62, FullSync) }
func TestForkedSynchronisation63Full(t *testing.T) { testForkedSynchronisation(t, 63, FullSync) }
func TestForkedSynchronisation63Fast(t *testing.T) { testForkedSynchronisation(t, 63, FastSync) }
func TestForkedSynchronisation64Full(t *testing.T) { testForkedSynchronisation(t, 64, FullSync) }
func TestForkedSynchronisation64Fast(t *testing.T) { testForkedSynchronisation(t, 64, FastSync) }
func TestForkedSynchronisation64Light(t *testing.T) { testForkedSynchronisation(t, 64, LightSync) }
func TestForkedSync61(t *testing.T) { testForkedSync(t, 61, FullSync) }
func TestForkedSync62(t *testing.T) { testForkedSync(t, 62, FullSync) }
func TestForkedSync63Full(t *testing.T) { testForkedSync(t, 63, FullSync) }
func TestForkedSync63Fast(t *testing.T) { testForkedSync(t, 63, FastSync) }
func TestForkedSync64Full(t *testing.T) { testForkedSync(t, 64, FullSync) }
func TestForkedSync64Fast(t *testing.T) { testForkedSync(t, 64, FastSync) }
func TestForkedSync64Light(t *testing.T) { testForkedSync(t, 64, LightSync) }

func testForkedSynchronisation(t *testing.T, protocol int, mode SyncMode) {
func testForkedSync(t *testing.T, protocol int, mode SyncMode) {
t.Parallel()

// Create a long enough forked chain
common, fork := MaxHashFetch, 2*MaxHashFetch
hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB := makeChainFork(common+fork, fork, genesis, nil)
hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB := makeChainFork(common+fork, fork, genesis, nil, true)

tester := newTester()
tester.newPeer("fork A", protocol, hashesA, headersA, blocksA, receiptsA)
@ -842,6 +851,40 @@ func testForkedSynchronisation(t *testing.T, protocol int, mode SyncMode) {
assertOwnForkedChain(t, tester, common+1, []int{common + fork + 1, common + fork + 1})
}

// Tests that synchronising against a much shorter but much heavier fork works
// correctly and is not dropped.
func TestHeavyForkedSync61(t *testing.T) { testHeavyForkedSync(t, 61, FullSync) }
func TestHeavyForkedSync62(t *testing.T) { testHeavyForkedSync(t, 62, FullSync) }
func TestHeavyForkedSync63Full(t *testing.T) { testHeavyForkedSync(t, 63, FullSync) }
func TestHeavyForkedSync63Fast(t *testing.T) { testHeavyForkedSync(t, 63, FastSync) }
func TestHeavyForkedSync64Full(t *testing.T) { testHeavyForkedSync(t, 64, FullSync) }
func TestHeavyForkedSync64Fast(t *testing.T) { testHeavyForkedSync(t, 64, FastSync) }
func TestHeavyForkedSync64Light(t *testing.T) { testHeavyForkedSync(t, 64, LightSync) }

func testHeavyForkedSync(t *testing.T, protocol int, mode SyncMode) {
t.Parallel()

// Create a long enough forked chain
common, fork := MaxHashFetch, 4*MaxHashFetch
hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB := makeChainFork(common+fork, fork, genesis, nil, false)

tester := newTester()
tester.newPeer("light", protocol, hashesA, headersA, blocksA, receiptsA)
tester.newPeer("heavy", protocol, hashesB[fork/2:], headersB, blocksB, receiptsB)

// Synchronise with the peer and make sure all blocks were retrieved
if err := tester.sync("light", nil, mode); err != nil {
t.Fatalf("failed to synchronise blocks: %v", err)
}
assertOwnChain(t, tester, common+fork+1)

// Synchronise with the second peer and make sure that fork is pulled too
if err := tester.sync("heavy", nil, mode); err != nil {
t.Fatalf("failed to synchronise blocks: %v", err)
}
assertOwnForkedChain(t, tester, common+1, []int{common + fork + 1, common + fork/2 + 1})
}

// Tests that an inactive downloader will not accept incoming hashes and blocks.
func TestInactiveDownloader61(t *testing.T) {
t.Parallel()
@ -856,6 +899,74 @@ func TestInactiveDownloader61(t *testing.T) {
}
}

// Tests that chain forks are contained within a certain interval of the current
// chain head, ensuring that malicious peers cannot waste resources by feeding
// long dead chains.
func TestBoundedForkedSync61(t *testing.T) { testBoundedForkedSync(t, 61, FullSync) }
func TestBoundedForkedSync62(t *testing.T) { testBoundedForkedSync(t, 62, FullSync) }
func TestBoundedForkedSync63Full(t *testing.T) { testBoundedForkedSync(t, 63, FullSync) }
func TestBoundedForkedSync63Fast(t *testing.T) { testBoundedForkedSync(t, 63, FastSync) }
func TestBoundedForkedSync64Full(t *testing.T) { testBoundedForkedSync(t, 64, FullSync) }
func TestBoundedForkedSync64Fast(t *testing.T) { testBoundedForkedSync(t, 64, FastSync) }
func TestBoundedForkedSync64Light(t *testing.T) { testBoundedForkedSync(t, 64, LightSync) }

func testBoundedForkedSync(t *testing.T, protocol int, mode SyncMode) {
t.Parallel()

// Create a long enough forked chain
common, fork := 13, int(MaxForkAncestry+17)
hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB := makeChainFork(common+fork, fork, genesis, nil, true)

tester := newTester()
tester.newPeer("original", protocol, hashesA, headersA, blocksA, receiptsA)
tester.newPeer("rewriter", protocol, hashesB, headersB, blocksB, receiptsB)

// Synchronise with the peer and make sure all blocks were retrieved
if err := tester.sync("original", nil, mode); err != nil {
t.Fatalf("failed to synchronise blocks: %v", err)
}
assertOwnChain(t, tester, common+fork+1)

// Synchronise with the second peer and ensure that the fork is rejected for being too old
if err := tester.sync("rewriter", nil, mode); err != errInvalidAncestor {
t.Fatalf("sync failure mismatch: have %v, want %v", err, errInvalidAncestor)
}
}

// Tests that chain forks are contained within a certain interval of the current
// chain head for short but heavy forks too. These are a bit special because they
// take different ancestor lookup paths.
func TestBoundedHeavyForkedSync61(t *testing.T) { testBoundedHeavyForkedSync(t, 61, FullSync) }
func TestBoundedHeavyForkedSync62(t *testing.T) { testBoundedHeavyForkedSync(t, 62, FullSync) }
func TestBoundedHeavyForkedSync63Full(t *testing.T) { testBoundedHeavyForkedSync(t, 63, FullSync) }
func TestBoundedHeavyForkedSync63Fast(t *testing.T) { testBoundedHeavyForkedSync(t, 63, FastSync) }
func TestBoundedHeavyForkedSync64Full(t *testing.T) { testBoundedHeavyForkedSync(t, 64, FullSync) }
func TestBoundedHeavyForkedSync64Fast(t *testing.T) { testBoundedHeavyForkedSync(t, 64, FastSync) }
func TestBoundedHeavyForkedSync64Light(t *testing.T) { testBoundedHeavyForkedSync(t, 64, LightSync) }

func testBoundedHeavyForkedSync(t *testing.T, protocol int, mode SyncMode) {
t.Parallel()

// Create a long enough forked chain
common, fork := 13, int(MaxForkAncestry+17)
hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB := makeChainFork(common+fork, fork, genesis, nil, false)

tester := newTester()
tester.newPeer("original", protocol, hashesA, headersA, blocksA, receiptsA)
tester.newPeer("heavy-rewriter", protocol, hashesB[MaxForkAncestry-17:], headersB, blocksB, receiptsB) // Root the fork below the ancestor limit

// Synchronise with the peer and make sure all blocks were retrieved
if err := tester.sync("original", nil, mode); err != nil {
t.Fatalf("failed to synchronise blocks: %v", err)
}
assertOwnChain(t, tester, common+fork+1)

// Synchronise with the second peer and ensure that the fork is rejected for being too old
if err := tester.sync("heavy-rewriter", nil, mode); err != errInvalidAncestor {
t.Fatalf("sync failure mismatch: have %v, want %v", err, errInvalidAncestor)
}
}

// Tests that an inactive downloader will not accept incoming block headers and
// bodies.
func TestInactiveDownloader62(t *testing.T) {
@ -909,7 +1020,7 @@ func testCancel(t *testing.T, protocol int, mode SyncMode) {
if targetBlocks >= MaxHeaderFetch {
targetBlocks = MaxHeaderFetch - 15
}
hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil)
hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil, false)

tester := newTester()
tester.newPeer("peer", protocol, hashes, headers, blocks, receipts)
@ -944,7 +1055,7 @@ func testMultiSynchronisation(t *testing.T, protocol int, mode SyncMode) {
// Create various peers with various parts of the chain
targetPeers := 8
targetBlocks := targetPeers*blockCacheLimit - 15
hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil)
hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil, false)

tester := newTester()
for i := 0; i < targetPeers; i++ {
@ -972,7 +1083,7 @@ func testMultiProtoSync(t *testing.T, protocol int, mode SyncMode) {

// Create a small enough block chain to download
targetBlocks := blockCacheLimit - 15
hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil)
hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil, false)

// Create peers of every type
tester := newTester()
@ -1010,7 +1121,7 @@ func testEmptyShortCircuit(t *testing.T, protocol int, mode SyncMode) {

// Create a block chain to download
targetBlocks := 2*blockCacheLimit - 15
hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil)
hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil, false)

tester := newTester()
tester.newPeer("peer", protocol, hashes, headers, blocks, receipts)
@ -1063,7 +1174,7 @@ func testMissingHeaderAttack(t *testing.T, protocol int, mode SyncMode) {

// Create a small enough block chain to download
targetBlocks := blockCacheLimit - 15
hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil)
hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil, false)

tester := newTester()

@ -1095,7 +1206,7 @@ func TestShiftedHeaderAttack64Light(t *testing.T) { testShiftedHeaderAttack(t, 6
func testShiftedHeaderAttack(t *testing.T, protocol int, mode SyncMode) {
// Create a small enough block chain to download
targetBlocks := blockCacheLimit - 15
hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil)
hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil, false)

tester := newTester()

@ -1126,7 +1237,7 @@ func TestInvalidHeaderRollback64Light(t *testing.T) { testInvalidHeaderRollback(
func testInvalidHeaderRollback(t *testing.T, protocol int, mode SyncMode) {
// Create a small enough block chain to download
targetBlocks := 3*fsHeaderSafetyNet + fsMinFullBlocks
hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil)
hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil, false)

tester := newTester()

@ -1217,7 +1328,7 @@ func testHighTDStarvationAttack(t *testing.T, protocol int, mode SyncMode) {
t.Parallel()

tester := newTester()
hashes, headers, blocks, receipts := makeChain(0, 0, genesis, nil)
hashes, headers, blocks, receipts := makeChain(0, 0, genesis, nil, false)

tester.newPeer("attack", protocol, []common.Hash{hashes[0]}, headers, blocks, receipts)
if err := tester.sync("attack", big.NewInt(1000000), mode); err != errStallingPeer {
@ -1247,6 +1358,7 @@ func testBlockHeaderAttackerDropping(t *testing.T, protocol int) {
{errEmptyHashSet, true}, // No hashes were returned as a response, drop as it's a dead end
{errEmptyHeaderSet, true}, // No headers were returned as a response, drop as it's a dead end
{errPeersUnavailable, true}, // Nobody had the advertised blocks, drop the advertiser
{errInvalidAncestor, true}, // Agreed upon ancestor is not acceptable, drop the chain rewriter
{errInvalidChain, true}, // Hash chain was detected as invalid, definitely drop
{errInvalidBlock, false}, // A bad peer was detected, but not the sync origin
{errInvalidBody, false}, // A bad peer was detected, but not the sync origin
@ -1294,7 +1406,7 @@ func testSyncProgress(t *testing.T, protocol int, mode SyncMode) {

// Create a small enough block chain to download
targetBlocks := blockCacheLimit - 15
hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil)
hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil, false)

// Set a sync init hook to catch progress changes
starting := make(chan struct{})
@ -1366,7 +1478,7 @@ func testForkedSyncProgress(t *testing.T, protocol int, mode SyncMode) {

// Create a forked chain to simulate origin reversal
common, fork := MaxHashFetch, 2*MaxHashFetch
hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB := makeChainFork(common+fork, fork, genesis, nil)
hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB := makeChainFork(common+fork, fork, genesis, nil, true)

// Set a sync init hook to catch progress changes
starting := make(chan struct{})
@ -1441,7 +1553,7 @@ func testFailedSyncProgress(t *testing.T, protocol int, mode SyncMode) {

// Create a small enough block chain to download
targetBlocks := blockCacheLimit - 15
hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil)
hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil, false)

// Set a sync init hook to catch progress changes
starting := make(chan struct{})
@ -1517,7 +1629,7 @@ func testFakedSyncProgress(t *testing.T, protocol int, mode SyncMode) {

// Create a small block chain
targetBlocks := blockCacheLimit - 15
hashes, headers, blocks, receipts := makeChain(targetBlocks+3, 0, genesis, nil)
hashes, headers, blocks, receipts := makeChain(targetBlocks+3, 0, genesis, nil, false)

// Set a sync init hook to catch progress changes
starting := make(chan struct{})
@ -1590,7 +1702,7 @@ func TestDeliverHeadersHang64Light(t *testing.T) { testDeliverHeadersHang(t, 64,

func testDeliverHeadersHang(t *testing.T, protocol int, mode SyncMode) {
t.Parallel()
hashes, headers, blocks, receipts := makeChain(5, 0, genesis, nil)
hashes, headers, blocks, receipts := makeChain(5, 0, genesis, nil, false)
fakeHeads := []*types.Header{{}, {}, {}, {}}
for i := 0; i < 200; i++ {
tester := newTester()

@ -233,6 +233,7 @@ func (s *PublicFilterAPI) newLogFilter(earliest, latest int64, addresses []commo
return id, nil
}

// Logs creates a subscription that fires for all new logs that match the given filter criteria.
func (s *PublicFilterAPI) Logs(ctx context.Context, args NewFilterArgs) (rpc.Subscription, error) {
notifier, supported := rpc.NotifierFromContext(ctx)
if !supported {
@ -291,12 +292,13 @@ type NewFilterArgs struct {
Topics [][]common.Hash
}

// UnmarshalJSON sets *args fields with given data.
func (args *NewFilterArgs) UnmarshalJSON(data []byte) error {
type input struct {
From *rpc.BlockNumber `json:"fromBlock"`
ToBlock *rpc.BlockNumber `json:"toBlock"`
Addresses interface{} `json:"address"`
Topics interface{} `json:"topics"`
Topics []interface{} `json:"topics"`
}

var raw input
@ -321,7 +323,6 @@ func (args *NewFilterArgs) UnmarshalJSON(data []byte) error {
if raw.Addresses != nil {
// raw.Address can contain a single address or an array of addresses
var addresses []common.Address

if strAddrs, ok := raw.Addresses.([]interface{}); ok {
for i, addr := range strAddrs {
if strAddr, ok := addr.(string); ok {
@ -352,56 +353,53 @@ func (args *NewFilterArgs) UnmarshalJSON(data []byte) error {
args.Addresses = addresses
}

// helper function which parses a string to a topic hash
topicConverter := func(raw string) (common.Hash, error) {
if len(raw) == 0 {
return common.Hash{}, nil
}

if len(raw) >= 2 && raw[0] == '0' && (raw[1] == 'x' || raw[1] == 'X') {
raw = raw[2:]
}

if len(raw) != 2 * common.HashLength {
return common.Hash{}, errors.New("invalid topic(s)")
}
if decAddr, err := hex.DecodeString(raw); err == nil {
return common.BytesToHash(decAddr), nil
}

return common.Hash{}, errors.New("invalid topic given")
return common.Hash{}, errors.New("invalid topic(s)")
}

// topics is an array consisting of strings or arrays of strings
if raw.Topics != nil {
topics, ok := raw.Topics.([]interface{})
if ok {
parsedTopics := make([][]common.Hash, len(topics))
for i, topic := range topics {
if topic == nil {
parsedTopics[i] = []common.Hash{common.StringToHash("")}
} else if strTopic, ok := topic.(string); ok {
if t, err := topicConverter(strTopic); err != nil {
return fmt.Errorf("invalid topic on index %d", i)
} else {
parsedTopics[i] = []common.Hash{t}
}
} else if arrTopic, ok := topic.([]interface{}); ok {
parsedTopics[i] = make([]common.Hash, len(arrTopic))
for j := 0; j < len(parsedTopics[i]); i++ {
if arrTopic[j] == nil {
parsedTopics[i][j] = common.StringToHash("")
} else if str, ok := arrTopic[j].(string); ok {
if t, err := topicConverter(str); err != nil {
return fmt.Errorf("invalid topic on index %d", i)
} else {
parsedTopics[i] = []common.Hash{t}
}
} else {
return fmt.Errorf("topic[%d][%d] not a string", i, j)
}
}
} else {
return fmt.Errorf("topic[%d] invalid", i)
// topics is an array consisting of strings and/or arrays of strings.
// JSON null values are converted to common.Hash{} and ignored by the filter manager.
if len(raw.Topics) > 0 {
args.Topics = make([][]common.Hash, len(raw.Topics))
for i, t := range raw.Topics {
if t == nil { // ignore topic when matching logs
args.Topics[i] = []common.Hash{common.Hash{}}
} else if topic, ok := t.(string); ok { // match specific topic
top, err := topicConverter(topic)
if err != nil {
return err
}
args.Topics[i] = []common.Hash{top}
} else if topics, ok := t.([]interface{}); ok { // or case e.g. [null, "topic0", "topic1"]
for _, rawTopic := range topics {
if rawTopic == nil {
args.Topics[i] = append(args.Topics[i], common.Hash{})
} else if topic, ok := rawTopic.(string); ok {
parsed, err := topicConverter(topic)
if err != nil {
return err
}
args.Topics[i] = append(args.Topics[i], parsed)
} else {
return fmt.Errorf("invalid topic(s)")
}
}
} else {
return fmt.Errorf("invalid topic(s)")
}
args.Topics = parsedTopics
}
}

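After this rewrite args.Topics is a [][]common.Hash: entries of the outer slice are matched positionally against a log's topics (AND), each inner slice lists acceptable alternatives (OR), and the zero hash produced from a JSON null acts as a wildcard. A small illustrative matcher showing those semantics; matchTopics is not part of the filters package, just a sketch of how such a filter could be applied:

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
)

// matchTopics reports whether logTopics satisfies the filter: filter[i] must
// contain logTopics[i], be empty, or contain the zero hash (wildcard).
func matchTopics(filter [][]common.Hash, logTopics []common.Hash) bool {
	if len(filter) > len(logTopics) {
		return false
	}
	for i, alternatives := range filter {
		if len(alternatives) == 0 {
			continue // no constraint on this position
		}
		matched := false
		for _, want := range alternatives {
			if want == (common.Hash{}) || want == logTopics[i] {
				matched = true
				break
			}
		}
		if !matched {
			return false
		}
	}
	return true
}

func main() {
	t0 := common.HexToHash("0x3ac225168df54212a25c1c01fd35bebfea408fdac2e31ddd6f80a4bbf9a5f1ca")
	t1 := common.HexToHash("0x9084a792d2f8b16a62b882fd56f7860c07bf5fa91dd8a2ae7e809e5180fef0b3")

	// Filter: position 0 must be t0, position 1 is a wildcard (JSON null).
	filter := [][]common.Hash{{t0}, {common.Hash{}}}
	fmt.Println(matchTopics(filter, []common.Hash{t0, t1})) // true
}
```
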
eth/filters/api_test.go (new file, 198 lines)
@ -0,0 +1,198 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package filters_test

import (
"encoding/json"
"fmt"
"testing"

"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/eth/filters"
"github.com/ethereum/go-ethereum/rpc"
)

func TestUnmarshalJSONNewFilterArgs(t *testing.T) {
var (
fromBlock rpc.BlockNumber = 0x123435
toBlock rpc.BlockNumber = 0xabcdef
address0 = common.StringToAddress("70c87d191324e6712a591f304b4eedef6ad9bb9d")
address1 = common.StringToAddress("9b2055d370f73ec7d8a03e965129118dc8f5bf83")
topic0 = common.HexToHash("3ac225168df54212a25c1c01fd35bebfea408fdac2e31ddd6f80a4bbf9a5f1ca")
topic1 = common.HexToHash("9084a792d2f8b16a62b882fd56f7860c07bf5fa91dd8a2ae7e809e5180fef0b3")
topic2 = common.HexToHash("6ccae1c4af4152f460ff510e573399795dfab5dcf1fa60d1f33ac8fdc1e480ce")
nullTopic = common.Hash{}
)

// default values
var test0 filters.NewFilterArgs
if err := json.Unmarshal([]byte("{}"), &test0); err != nil {
t.Fatal(err)
}
if test0.FromBlock != rpc.LatestBlockNumber {
t.Fatalf("expected %d, got %d", rpc.LatestBlockNumber, test0.FromBlock)
}
if test0.ToBlock != rpc.LatestBlockNumber {
t.Fatalf("expected %d, got %d", rpc.LatestBlockNumber, test0.ToBlock)
}
if len(test0.Addresses) != 0 {
t.Fatalf("expected 0 addresses, got %d", len(test0.Addresses))
}
if len(test0.Topics) != 0 {
t.Fatalf("expected 0 topics, got %d topics", len(test0.Topics))
}

// from, to block number
var test1 filters.NewFilterArgs
vector := fmt.Sprintf(`{"fromBlock":"0x%x","toBlock":"0x%x"}`, fromBlock, toBlock)
if err := json.Unmarshal([]byte(vector), &test1); err != nil {
t.Fatal(err)
}
if test1.FromBlock != fromBlock {
t.Fatalf("expected FromBlock %d, got %d", fromBlock, test1.FromBlock)
}
if test1.ToBlock != toBlock {
t.Fatalf("expected ToBlock %d, got %d", toBlock, test1.ToBlock)
}

// single address
var test2 filters.NewFilterArgs
vector = fmt.Sprintf(`{"address": "%s"}`, address0.Hex())
if err := json.Unmarshal([]byte(vector), &test2); err != nil {
t.Fatal(err)
}
if len(test2.Addresses) != 1 {
t.Fatalf("expected 1 address, got %d address(es)", len(test2.Addresses))
}
if test2.Addresses[0] != address0 {
t.Fatalf("expected address %x, got %x", address0, test2.Addresses[0])
}

// multiple address
var test3 filters.NewFilterArgs
vector = fmt.Sprintf(`{"address": ["%s", "%s"]}`, address0.Hex(), address1.Hex())
if err := json.Unmarshal([]byte(vector), &test3); err != nil {
t.Fatal(err)
}
if len(test3.Addresses) != 2 {
t.Fatalf("expected 2 addresses, got %d address(es)", len(test3.Addresses))
}
if test3.Addresses[0] != address0 {
t.Fatalf("expected address %x, got %x", address0, test3.Addresses[0])
}
if test3.Addresses[1] != address1 {
t.Fatalf("expected address %x, got %x", address1, test3.Addresses[1])
}

// single topic
var test4 filters.NewFilterArgs
vector = fmt.Sprintf(`{"topics": ["%s"]}`, topic0.Hex())
if err := json.Unmarshal([]byte(vector), &test4); err != nil {
t.Fatal(err)
}
if len(test4.Topics) != 1 {
t.Fatalf("expected 1 topic, got %d", len(test4.Topics))
}
if len(test4.Topics[0]) != 1 {
t.Fatalf("expected len(topics[0]) to be 1, got %d", len(test4.Topics[0]))
}
if test4.Topics[0][0] != topic0 {
t.Fatalf("got %x, expected %x", test4.Topics[0][0], topic0)
}

// test multiple "AND" topics
var test5 filters.NewFilterArgs
vector = fmt.Sprintf(`{"topics": ["%s", "%s"]}`, topic0.Hex(), topic1.Hex())
if err := json.Unmarshal([]byte(vector), &test5); err != nil {
t.Fatal(err)
}
if len(test5.Topics) != 2 {
t.Fatalf("expected 2 topics, got %d", len(test5.Topics))
}
if len(test5.Topics[0]) != 1 {
t.Fatalf("expected 1 topic, got %d", len(test5.Topics[0]))
}
if test5.Topics[0][0] != topic0 {
t.Fatalf("got %x, expected %x", test5.Topics[0][0], topic0)
}
if len(test5.Topics[1]) != 1 {
t.Fatalf("expected 1 topic, got %d", len(test5.Topics[1]))
}
if test5.Topics[1][0] != topic1 {
t.Fatalf("got %x, expected %x", test5.Topics[1][0], topic1)
}

// test optional topic
var test6 filters.NewFilterArgs
vector = fmt.Sprintf(`{"topics": ["%s", null, "%s"]}`, topic0.Hex(), topic2.Hex())
if err := json.Unmarshal([]byte(vector), &test6); err != nil {
t.Fatal(err)
}
if len(test6.Topics) != 3 {
t.Fatalf("expected 3 topics, got %d", len(test6.Topics))
}
if len(test6.Topics[0]) != 1 {
t.Fatalf("expected 1 topic, got %d", len(test6.Topics[0]))
}
if test6.Topics[0][0] != topic0 {
t.Fatalf("got %x, expected %x", test6.Topics[0][0], topic0)
}
if len(test6.Topics[1]) != 1 {
t.Fatalf("expected 1 topic, got %d", len(test6.Topics[1]))
}
if test6.Topics[1][0] != nullTopic {
t.Fatalf("got %x, expected empty hash", test6.Topics[1][0])
}
if len(test6.Topics[2]) != 1 {
t.Fatalf("expected 1 topic, got %d", len(test6.Topics[2]))
}
if test6.Topics[2][0] != topic2 {
t.Fatalf("got %x, expected %x", test6.Topics[2][0], topic2)
}

// test OR topics
var test7 filters.NewFilterArgs
vector = fmt.Sprintf(`{"topics": [["%s", "%s"], null, ["%s", null]]}`, topic0.Hex(), topic1.Hex(), topic2.Hex())
if err := json.Unmarshal([]byte(vector), &test7); err != nil {
t.Fatal(err)
}
if len(test7.Topics) != 3 {
t.Fatalf("expected 3 topics, got %d topics", len(test7.Topics))
}
if len(test7.Topics[0]) != 2 {
t.Fatalf("expected 2 topics, got %d topics", len(test7.Topics[0]))
}
if test7.Topics[0][0] != topic0 || test7.Topics[0][1] != topic1 {
t.Fatalf("invalid topics expected [%x,%x], got [%x,%x]",
topic0, topic1, test7.Topics[0][0], test7.Topics[0][1],
)
}
if len(test7.Topics[1]) != 1 {
t.Fatalf("expected 1 topic, got %d topics", len(test7.Topics[1]))
}
if test7.Topics[1][0] != nullTopic {
t.Fatalf("expected empty hash, got %x", test7.Topics[1][0])
}
if len(test7.Topics[2]) != 2 {
t.Fatalf("expected 2 topics, got %d topics", len(test7.Topics[2]))
}
if test7.Topics[2][0] != topic2 || test7.Topics[2][1] != nullTopic {
t.Fatalf("invalid topics expected [%x,%x], got [%x,%x]",
topic2, nullTopic, test7.Topics[2][0], test7.Topics[2][1],
)
}
}

@ -22,6 +22,7 @@ import (
"math"
"math/big"
"sync"
"sync/atomic"
"time"

"github.com/ethereum/go-ethereum/common"
@ -58,7 +59,7 @@ type blockFetcherFn func([]common.Hash) error
type ProtocolManager struct {
networkId int

fastSync bool
fastSync uint32
txpool txPool
blockchain *core.BlockChain
chaindb ethdb.Database
@ -87,15 +88,9 @@ type ProtocolManager struct {
// NewProtocolManager returns a new ethereum sub protocol manager. The Ethereum sub protocol manages peers capable
// with the ethereum network.
func NewProtocolManager(config *core.ChainConfig, fastSync bool, networkId int, mux *event.TypeMux, txpool txPool, pow pow.PoW, blockchain *core.BlockChain, chaindb ethdb.Database) (*ProtocolManager, error) {
// Figure out whether to allow fast sync or not
if fastSync && blockchain.CurrentBlock().NumberU64() > 0 {
glog.V(logger.Info).Infof("blockchain not empty, fast sync disabled")
fastSync = false
}
// Create the protocol manager with the base fields
manager := &ProtocolManager{
networkId: networkId,
fastSync: fastSync,
eventMux: mux,
txpool: txpool,
blockchain: blockchain,
@ -106,6 +101,14 @@ func NewProtocolManager(config *core.ChainConfig, fastSync bool, networkId int,
txsyncCh: make(chan *txsync),
quitSync: make(chan struct{}),
}
// Figure out whether to allow fast sync or not
if fastSync && blockchain.CurrentBlock().NumberU64() > 0 {
glog.V(logger.Info).Infof("blockchain not empty, fast sync disabled")
fastSync = false
}
if fastSync {
manager.fastSync = uint32(1)
}
// Initiate a sub-protocol for every implemented version we can handle
manager.SubProtocols = make([]p2p.Protocol, 0, len(ProtocolVersions))
for i, version := range ProtocolVersions {
@ -678,7 +681,11 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
}

case msg.Code == TxMsg:
// Transactions arrived, parse all of them and deliver to the pool
// Transactions arrived, make sure we have a valid chain to handle them
if atomic.LoadUint32(&pm.fastSync) == 1 {
break
}
// Transactions can be processed, parse all of them and deliver to the pool
var txs []*types.Transaction
if err := msg.Decode(&txs); err != nil {
return errResp(ErrDecode, "msg %v: %v", msg, err)

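fastSync becomes a uint32 so the sync loop and the message handler can read and clear it with sync/atomic instead of holding a mutex; the TxMsg branch above simply drops incoming transactions while the flag is set. A minimal, standalone sketch of that atomic boolean-flag pattern (the syncState type is illustrative, not part of the eth package):

```go
package main

import (
	"fmt"
	"sync/atomic"
)

// syncState stores a boolean-style flag as a uint32 so it can be manipulated
// atomically from multiple goroutines without a mutex.
type syncState struct {
	fastSync uint32
}

func (s *syncState) enableFastSync()  { atomic.StoreUint32(&s.fastSync, 1) }
func (s *syncState) disableFastSync() { atomic.StoreUint32(&s.fastSync, 0) }
func (s *syncState) fastSyncEnabled() bool {
	return atomic.LoadUint32(&s.fastSync) == 1
}

func main() {
	s := &syncState{}
	s.enableFastSync()
	fmt.Println(s.fastSyncEnabled()) // true

	// Once the chain has imported something, fast sync is switched off.
	s.disableFastSync()
	fmt.Println(s.fastSyncEnabled()) // false
}
```
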
@ -18,6 +18,7 @@ package eth

import (
"math/rand"
"sync/atomic"
"time"

"github.com/ethereum/go-ethereum/common"
@ -167,18 +168,18 @@ func (pm *ProtocolManager) synchronise(peer *peer) {
}
// Otherwise try to sync with the downloader
mode := downloader.FullSync
if pm.fastSync {
if atomic.LoadUint32(&pm.fastSync) == 1 {
mode = downloader.FastSync
}
if err := pm.downloader.Synchronise(peer.id, peer.Head(), peer.Td(), mode); err != nil {
return
}
// If fast sync was enabled, and we synced up, disable it
if pm.fastSync {
if atomic.LoadUint32(&pm.fastSync) == 1 {
// Disable fast sync if we indeed have something in our chain
if pm.blockchain.CurrentBlock().NumberU64() > 0 {
glog.V(logger.Info).Infof("fast sync complete, auto disabling")
pm.fastSync = false
atomic.StoreUint32(&pm.fastSync, 0)
}
}
}

@ -17,6 +17,7 @@
package eth

import (
"sync/atomic"
"testing"
"time"

@ -29,12 +30,12 @@ import (
func TestFastSyncDisabling(t *testing.T) {
// Create a pristine protocol manager, check that fast sync is left enabled
pmEmpty := newTestProtocolManagerMust(t, true, 0, nil, nil)
if !pmEmpty.fastSync {
if atomic.LoadUint32(&pmEmpty.fastSync) == 0 {
t.Fatalf("fast sync disabled on pristine blockchain")
}
// Create a full protocol manager, check that fast sync gets disabled
pmFull := newTestProtocolManagerMust(t, true, 1024, nil, nil)
if pmFull.fastSync {
if atomic.LoadUint32(&pmFull.fastSync) == 1 {
t.Fatalf("fast sync not disabled on non-empty blockchain")
}
// Sync up the two peers
@ -47,7 +48,7 @@ func TestFastSyncDisabling(t *testing.T) {
pmEmpty.synchronise(pmEmpty.peers.BestPeer())

// Check that fast sync was disabled
if pmEmpty.fastSync {
if atomic.LoadUint32(&pmEmpty.fastSync) == 1 {
t.Fatalf("fast sync not disabled after successful synchronisation")
}
}

@ -66,6 +66,9 @@ func (mux *TypeMux) Subscribe(types ...interface{}) Subscription {
mux.mutex.Lock()
defer mux.mutex.Unlock()
if mux.stopped {
// set the status to closed so that calling Unsubscribe after this
// call will short circuit
sub.closed = true
close(sub.postC)
} else {
if mux.subm == nil {

@ -25,6 +25,14 @@ import (

type testEvent int

func TestSubCloseUnsub(t *testing.T) {
// the point of this test is **not** to panic
var mux TypeMux
mux.Stop()
sub := mux.Subscribe(int(0))
sub.Unsubscribe()
}

func TestSub(t *testing.T) {
mux := new(TypeMux)
defer mux.Stop()

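The Subscribe change above marks a subscription created on an already-stopped TypeMux as closed, so a later Unsubscribe short-circuits instead of panicking, which is exactly what the new TestSubCloseUnsub asserts. A hedged usage sketch of the TypeMux publish/subscribe cycle from outside the event package (the pingEvent type is made up for the example):

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/event"
)

// pingEvent is a hypothetical event type used only for this sketch.
type pingEvent struct{ seq int }

func main() {
	var mux event.TypeMux
	sub := mux.Subscribe(pingEvent{})

	// Post delivers to every subscriber of the event's concrete type; run it
	// in a goroutine since delivery blocks until the subscriber reads.
	go mux.Post(pingEvent{seq: 1})
	ev := <-sub.Chan()
	fmt.Println(ev.Data.(pingEvent).seq) // 1

	// Stopping the mux and unsubscribing afterwards must not panic, which is
	// the behaviour the Subscribe fix above guarantees.
	mux.Stop()
	sub.Unsubscribe()
}
```
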
@ -176,20 +176,6 @@ web3._extend({
});
`

const Personal_JS = `
web3._extend({
property: 'personal',
methods:
[
new web3._extend.Method({
name: 'importRawKey',
call: 'personal_importRawKey',
params: 2
})
]
});
`

const Eth_JS = `
web3._extend({
property: 'eth',
@ -244,20 +230,6 @@ web3._extend({
});
`

const Net_JS = `
web3._extend({
property: 'net',
methods: [],
properties:
[
new web3._extend.Property({
name: 'version',
getter: 'net_version'
})
]
});
`

const Debug_JS = `
web3._extend({
property: 'debug',
@ -463,6 +435,54 @@ web3._extend({
});
`

const Net_JS = `
web3._extend({
property: 'net',
methods: [],
properties:
[
new web3._extend.Property({
name: 'version',
getter: 'net_version'
})
]
});
`

const Personal_JS = `
web3._extend({
property: 'personal',
methods:
[
new web3._extend.Method({
name: 'importRawKey',
call: 'personal_importRawKey',
params: 2
}),
new web3._extend.Method({
name: 'signAndSendTransaction',
call: 'personal_signAndSendTransaction',
params: 2,
inputFormatter: [web3._extend.formatters.inputTransactionFormatter, null]
})
]
});
`

const RPC_JS = `
web3._extend({
property: 'rpc',
methods: [],
properties:
[
new web3._extend.Property({
name: 'modules',
getter: 'rpc_modules'
})
]
});
`

const Shh_JS = `
web3._extend({
property: 'shh',

@ -145,7 +145,6 @@ func newWorker(config *core.ChainConfig, coinbase common.Address, eth core.Backe
fullValidation: false,
}
worker.events = worker.mux.Subscribe(core.ChainHeadEvent{}, core.ChainSideEvent{}, core.TxPreEvent{})
worker.wg.Add(1)
go worker.update()

go worker.wait()
@ -188,8 +187,6 @@ func (self *worker) start() {
}

func (self *worker) stop() {
// Quit update.
self.events.Unsubscribe()
self.wg.Wait()

self.mu.Lock()
@ -224,7 +221,6 @@ func (self *worker) unregister(agent Agent) {
}

func (self *worker) update() {
defer self.wg.Done()
for event := range self.events.Chan() {
// A real event arrived, process interesting content
switch ev := event.Data.(type) {

@ -61,22 +61,22 @@ func wsHandshakeValidator(allowedOrigins []string) func(*websocket.Config, *http
allowAllOrigins = true
}
if origin != "" {
origins.Add(origin)
origins.Add(strings.ToLower(origin))
}
}

// allow localhost if no allowedOrigins are specified
// allow localhost if no allowedOrigins are specified.
if len(origins.List()) == 0 {
origins.Add("http://localhost")
if hostname, err := os.Hostname(); err == nil {
origins.Add("http://" + hostname)
origins.Add("http://" + strings.ToLower(hostname))
}
}

glog.V(logger.Debug).Infof("Allowed origin(s) for WS RPC interface %v\n", origins.List())

f := func(cfg *websocket.Config, req *http.Request) error {
origin := req.Header.Get("Origin")
origin := strings.ToLower(req.Header.Get("Origin"))
if allowAllOrigins || origins.Has(origin) {
return nil
}
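
Both the allow-list entries and the request's Origin header are now lower-cased, making the WebSocket origin check effectively case-insensitive. A standalone sketch of the same normalisation using a plain map in place of the set type used in the source:

```go
package main

import (
	"fmt"
	"strings"
)

// originAllowed reports whether origin is in the allow-list, comparing
// case-insensitively by normalising both sides to lower case.
func originAllowed(allowed []string, origin string) bool {
	set := make(map[string]struct{}, len(allowed))
	for _, o := range allowed {
		set[strings.ToLower(o)] = struct{}{}
	}
	_, ok := set[strings.ToLower(origin)]
	return ok
}

func main() {
	allowed := []string{"http://Localhost", "http://example.org"}
	fmt.Println(originAllowed(allowed, "http://localhost"))   // true
	fmt.Println(originAllowed(allowed, "HTTP://EXAMPLE.ORG")) // true
	fmt.Println(originAllowed(allowed, "http://evil.test"))   // false
}
```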