Compare commits
150 Commits
(The author and date columns of the commit table did not survive extraction; the 150 commit SHA1s, in order, are:)

017449971e
bc01593afb
c9dce0bfd7
e78f631dfc
6b6882f08b
c2d65d34d5
13ccf6016e
7ce7c3967c
fc7e0fe6c7
5cc6e7a71e
d556d39a2c
54d332e1db
e0bf5f0ccb
1ff3d7c2d4
08611cfd75
9a529d64d1
a91b704b01
c9f28ca8e5
58e33d9e5a
7800ba978d
717f8a4e8f
7b189d6f1f
c4844e9ee2
23c8c74131
0676320169
d62e9b2857
878e35bfde
2e98706a99
8c1e8de839
b26eedf9e9
44b41641f8
9ef90dbf30
d9d2a4eef9
9d67222f4e
f5a68a40bf
f06ae5ca6a
3a0480e07d
5d21667587
fdff182f11
0abcf03fde
58f2ce8671
dd21f079e8
36a684ca1e
bcc1234778
c1db636fb3
5b558ad936
b6d4f6b66e
0ec5ab4175
0754100464
475ae8bd93
89ab8a74c0
72e62efc76
f56f969dd3
216ff5a952
7be89a7a01
59177bc8c0
c5b46a79c1
b4bc3b3c35
75e029db8b
b8ced9e00b
f8790b9482
8bd5bb8918
a7dfaa0bda
e1dcea8bf0
b3d6304f1e
f4ec85486a
dfdb204b48
15fb780de6
3a4a3d080b
b7ba944e88
9b59c75405
f71e85b8e2
8008c5b1fa
9c6cf960b4
df206d2513
9e8cc00b73
ac5e28ea38
3b0f3483c4
7f70a70106
94e8250983
c013192ba7
0b6338321f
b9c90c5581
5fefe39ba5
dfe891270a
c5c5e0dbe8
3f4a875bf6
a3d263dd3a
190fb8180a
b02afb6b3d
422604b438
57d697629d
11d09fd3ba
689486449d
7c4a4eb58a
9e71f55bfa
51c3290bee
738b51ae31
f03b2db7db
49d1a032da
765fe446cf
afe0b65405
987648b0ad
9504c5c360
f8a95d996f
bf5c6b29fa
22e3bbbf0a
4ea9b62b5c
6f1a600f6c
de2259d27c
adf007dadc
4b8f56cf98
a718daa674
b9bac1f384
fc3661f89c
9948724deb
c702bd70ed
734e00af9e
b566cfdffd
7a6d5d0cce
0ff7380465
0ce5e113be
86fe283d19
44b74cfc40
9278951a62
12f2a25d5e
8927f7724a
93422e9d15
5d91acccd5
9641cacea8
64571f9379
e306304414
2c37142d2f
3eca7b5d27
b0b277525c
ecdbb402ee
9c81387bef
72617a0742
db79143a13
538f763fdc
d4bb3798d8
08953e42c1
b9299bbc46
9a77065948
a28093ced4
d5b79e752e
7300365956
c476460cb2
028af3457d
a73f3f4518
.github/CODEOWNERS (vendored, 6 changes)

```diff
@@ -16,8 +16,8 @@ light/ @zsfelfoldi @rjl493456442
 mobile/ @karalabe @ligi
 p2p/ @fjl @zsfelfoldi
 rpc/ @fjl @holiman
-p2p/simulations @zelig @nonsense @janos @justelad
-p2p/protocols @zelig @nonsense @janos @justelad
-p2p/testing @zelig @nonsense @janos @justelad
+p2p/simulations @zelig @janos @justelad
+p2p/protocols @zelig @janos @justelad
+p2p/testing @zelig @janos @justelad
 signer/ @holiman
 whisper/ @gballet @gluk256
```
.gitignore (vendored, 1 change)

```diff
@@ -24,6 +24,7 @@ build/_vendor/pkg

 # used by the Makefile
 /build/_workspace/
+/build/cache/
 /build/bin/
 /geth*.zip
```
.golangci.yml (new file, 45 lines)

```yaml
# This file configures github.com/golangci/golangci-lint.

run:
  timeout: 2m
  tests: true
  # default is true. Enables skipping of directories:
  #   vendor$, third_party$, testdata$, examples$, Godeps$, builtin$
  skip-dirs-use-default: true

linters:
  disable-all: true
  enable:
    - deadcode
    - goconst
    - goimports
    - gosimple
    - govet
    - ineffassign
    - misspell
    # - staticcheck
    - unconvert
    # - unused
    - varcheck

linters-settings:
  gofmt:
    simplify: true
  goconst:
    min-len: 3 # minimum length of string constant
    min-occurrences: 6 # minimum number of occurrences

issues:
  exclude-rules:
    - path: crypto/blake2b/
      linters:
        - deadcode
    - path: crypto/bn256/cloudflare
      linters:
        - deadcode
    - path: p2p/discv5/
      linters:
        - deadcode
    - path: core/vm/instructions_test.go
      linters:
        - goconst
```
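The enabled set leans on fast, low-noise checks. As an illustration (not taken from this changeset), the fragment below is valid, deliberately lint-dirty Go that two of the enabled linters would flag under this configuration:

```go
package main

import "fmt"

func main() {
	// ineffassign: the value assigned here is overwritten before it is ever read.
	n := 1
	n = 2

	// unconvert: n is already an int, so the conversion is redundant.
	m := int(n)
	fmt.Println(m)
}
```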
.travis.yml (19 changes)

```diff
@@ -7,7 +7,7 @@ jobs:
     - stage: lint
       os: linux
       dist: xenial
-      go: 1.12.x
+      go: 1.13.x
       env:
        - lint
      git:
@@ -19,6 +19,8 @@ jobs:
       os: linux
       dist: xenial
       go: 1.11.x
+      env:
+        - GO111MODULE=on
       script:
         - go run build/ci.go install
         - go run build/ci.go test -coverage $TEST_PACKAGES
@@ -27,6 +29,8 @@ jobs:
       os: linux
       dist: xenial
       go: 1.12.x
+      env:
+        - GO111MODULE=on
       script:
         - go run build/ci.go install
         - go run build/ci.go test -coverage $TEST_PACKAGES
@@ -34,6 +38,17 @@ jobs:
     # These are the latest Go versions.
     - stage: build
       os: linux
+      arch: amd64
       dist: xenial
       go: 1.13.x
       script:
         - go run build/ci.go install
         - go run build/ci.go test -coverage $TEST_PACKAGES

+    - stage: build
+      if: type = pull_request
+      os: linux
+      arch: arm64
+      dist: xenial
+      go: 1.13.x
+      script:
@@ -77,7 +92,7 @@ jobs:
         - python-paramiko
       script:
         - echo '|1|7SiYPr9xl3uctzovOTj4gMwAC1M=|t6ReES75Bo/PxlOPJ6/GsGbTrM0= ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA0aKz5UTUndYgIGG7dQBV+HaeuEZJ2xPHo2DS2iSKvUL4xNMSAY4UguNW+pX56nAQmZKIZZ8MaEvSj6zMEDiq6HFfn5JcTlM80UwlnyKe8B8p7Nk06PPQLrnmQt5fh0HmEcZx+JU9TZsfCHPnX7MNz4ELfZE6cFsclClrKim3BHUIGq//t93DllB+h4O9LHjEUsQ1Sr63irDLSutkLJD6RXchjROXkNirlcNVHH/jwLWR5RcYilNX7S5bIkK8NlWPjsn/8Ua5O7I9/YoE97PpO6i73DTGLh5H9JN/SITwCKBkgSDWUt61uPK3Y11Gty7o2lWsBjhBUm2Y38CBsoGmBw==' >> ~/.ssh/known_hosts
-        - go run build/ci.go debsrc -upload ethereum/ethereum -sftp-user geth-ci -signer "Go Ethereum Linux Builder <geth-ci@ethereum.org>"
+        - go run build/ci.go debsrc -goversion 1.13.4 -upload ethereum/ethereum -sftp-user geth-ci -signer "Go Ethereum Linux Builder <geth-ci@ethereum.org>"

     # This builder does the Linux Azure uploads
     - stage: build
```
Makefile (2 changes)

```diff
@@ -36,7 +36,7 @@ lint: ## Run linters.
	build/env.sh go run build/ci.go lint

 clean:
-	./build/clean_go_build_cache.sh
+	go clean -cache
	rm -fr build/_workspace/pkg/ $(GOBIN)/*

 # The devtools target installs tools required for 'go generate'.
```
README.md (2 changes)

````diff
@@ -303,7 +303,7 @@ ones either). To start a `geth` instance for mining, run it with all your usual
 by:

 ```shell
-$ geth <usual-flags> --mine --minerthreads=1 --etherbase=0x0000000000000000000000000000000000000000
+$ geth <usual-flags> --mine --miner.threads=1 --etherbase=0x0000000000000000000000000000000000000000
 ```

 Which will start mining blocks and transactions on a single CPU thread, crediting all
````
accounts/abi/abi.go

```diff
@@ -75,9 +75,6 @@ func (abi ABI) Pack(name string, args ...interface{}) ([]byte, error) {

 // Unpack output in v according to the abi specification
 func (abi ABI) Unpack(v interface{}, name string, data []byte) (err error) {
-	if len(data) == 0 {
-		return fmt.Errorf("abi: unmarshalling empty output")
-	}
 	// since there can't be naming collisions with contracts and events,
 	// we need to decide whether we're calling a method or an event
 	if method, ok := abi.Methods[name]; ok {
@@ -94,9 +91,6 @@ func (abi ABI) Unpack(v interface{}, name string, data []byte) (err error) {

 // UnpackIntoMap unpacks a log into the provided map[string]interface{}
 func (abi ABI) UnpackIntoMap(v map[string]interface{}, name string, data []byte) (err error) {
-	if len(data) == 0 {
-		return fmt.Errorf("abi: unmarshalling empty output")
-	}
 	// since there can't be naming collisions with contracts and events,
 	// we need to decide whether we're calling a method or an event
 	if method, ok := abi.Methods[name]; ok {
```
accounts/abi/abi_test.go

```diff
@@ -57,7 +57,7 @@ const jsondata2 = `
 ]`

 func TestReader(t *testing.T) {
-	Uint256, _ := NewType("uint256", nil)
+	Uint256, _ := NewType("uint256", "", nil)
 	exp := ABI{
 		Methods: map[string]Method{
 			"balance": {
@@ -172,7 +172,7 @@ func TestTestSlice(t *testing.T) {
 }

 func TestMethodSignature(t *testing.T) {
-	String, _ := NewType("string", nil)
+	String, _ := NewType("string", "", nil)
 	m := Method{"foo", "foo", false, []Argument{{"bar", String, false}, {"baz", String, false}}, nil}
 	exp := "foo(string,string)"
 	if m.Sig() != exp {
@@ -184,7 +184,7 @@ func TestMethodSignature(t *testing.T) {
 		t.Errorf("expected ids to match %x != %x", m.ID(), idexp)
 	}

-	uintt, _ := NewType("uint256", nil)
+	uintt, _ := NewType("uint256", "", nil)
 	m = Method{"foo", "foo", false, []Argument{{"bar", uintt, false}}, nil}
 	exp = "foo(uint256)"
 	if m.Sig() != exp {
@@ -192,7 +192,7 @@ func TestMethodSignature(t *testing.T) {
 	}

 	// Method with tuple arguments
-	s, _ := NewType("tuple", []ArgumentMarshaling{
+	s, _ := NewType("tuple", "", []ArgumentMarshaling{
 		{Name: "a", Type: "int256"},
 		{Name: "b", Type: "int256[]"},
 		{Name: "c", Type: "tuple[]", Components: []ArgumentMarshaling{
@@ -602,9 +602,9 @@ func TestBareEvents(t *testing.T) {
 		{ "type" : "event", "name" : "tuple", "inputs" : [{ "indexed":false, "name":"t", "type":"tuple", "components":[{"name":"a", "type":"uint256"}] }, { "indexed":true, "name":"arg1", "type":"address" }] }
 	]`

-	arg0, _ := NewType("uint256", nil)
-	arg1, _ := NewType("address", nil)
-	tuple, _ := NewType("tuple", []ArgumentMarshaling{{Name: "a", Type: "uint256"}})
+	arg0, _ := NewType("uint256", "", nil)
+	arg1, _ := NewType("address", "", nil)
+	tuple, _ := NewType("tuple", "", []ArgumentMarshaling{{Name: "a", Type: "uint256"}})

 	expectedEvents := map[string]struct {
 		Anonymous bool
@@ -927,7 +927,7 @@ func TestABI_MethodById(t *testing.T) {
 		}
 		b := fmt.Sprintf("%v", m2)
 		if a != b {
-			t.Errorf("Method %v (id %v) not 'findable' by id in ABI", name, common.ToHex(m.ID()))
+			t.Errorf("Method %v (id %x) not 'findable' by id in ABI", name, m.ID())
 		}
 	}
 	// Also test empty
```
accounts/abi/argument.go

```diff
@@ -34,10 +34,11 @@ type Argument struct {
 type Arguments []Argument

 type ArgumentMarshaling struct {
-	Name       string
-	Type       string
-	Components []ArgumentMarshaling
-	Indexed    bool
+	Name         string
+	Type         string
+	InternalType string
+	Components   []ArgumentMarshaling
+	Indexed      bool
 }

 // UnmarshalJSON implements json.Unmarshaler interface
@@ -48,7 +49,7 @@ func (argument *Argument) UnmarshalJSON(data []byte) error {
 		return fmt.Errorf("argument json err: %v", err)
 	}

-	argument.Type, err = NewType(arg.Type, arg.Components)
+	argument.Type, err = NewType(arg.Type, arg.InternalType, arg.Components)
 	if err != nil {
 		return err
 	}
@@ -88,6 +89,13 @@ func (arguments Arguments) isTuple() bool {

 // Unpack performs the operation hexdata -> Go format
 func (arguments Arguments) Unpack(v interface{}, data []byte) error {
+	if len(data) == 0 {
+		if len(arguments) != 0 {
+			return fmt.Errorf("abi: attempting to unmarshall an empty string while arguments are expected")
+		} else {
+			return nil // Nothing to unmarshal, return
+		}
+	}
 	// make sure the passed value is arguments pointer
 	if reflect.Ptr != reflect.ValueOf(v).Kind() {
 		return fmt.Errorf("abi: Unpack(non-pointer %T)", v)
@@ -104,11 +112,17 @@ func (arguments Arguments) Unpack(v interface{}, data []byte) error {

 // UnpackIntoMap performs the operation hexdata -> mapping of argument name to argument value
 func (arguments Arguments) UnpackIntoMap(v map[string]interface{}, data []byte) error {
+	if len(data) == 0 {
+		if len(arguments) != 0 {
+			return fmt.Errorf("abi: attempting to unmarshall an empty string while arguments are expected")
+		} else {
+			return nil // Nothing to unmarshal, return
+		}
+	}
 	marshalledValues, err := arguments.UnpackValues(data)
 	if err != nil {
 		return err
 	}

 	return arguments.unpackIntoMap(v, marshalledValues)
 }
```
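Moving the empty-output check down into `Arguments` means an empty return blob is only an error when outputs are actually expected. A minimal sketch of the new behaviour (the ABI definitions are made up; the calls assume the accounts/abi package as modified above):

```go
package main

import (
	"fmt"
	"math/big"
	"strings"

	"github.com/ethereum/go-ethereum/accounts/abi"
)

func main() {
	parsed, err := abi.JSON(strings.NewReader(`[
		{"type":"function","name":"noop","outputs":[]},
		{"type":"function","name":"value","outputs":[{"name":"","type":"uint256"}]}]`))
	if err != nil {
		panic(err)
	}
	// No declared outputs: unpacking empty data is now a no-op instead of an error.
	fmt.Println(parsed.Unpack(&struct{}{}, "noop", nil)) // <nil>

	// Outputs declared but no data: still rejected, as before.
	out := new(big.Int)
	fmt.Println(parsed.Unpack(&out, "value", nil)) // abi: attempting to unmarshall an empty string ...
}
```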
accounts/abi/bind/backends/simulated_test.go

```diff
@@ -72,7 +72,7 @@ func TestSimulatedBackend(t *testing.T) {
 	}

 	sim.Commit()
-	tx, isPending, err = sim.TransactionByHash(context.Background(), txHash)
+	_, isPending, err = sim.TransactionByHash(context.Background(), txHash)
 	if err != nil {
 		t.Fatalf("error getting transaction with hash: %v", txHash.String())
 	}
```
accounts/abi/bind/base.go

```diff
@@ -218,7 +218,7 @@ func (c *BoundContract) transact(opts *TransactOpts, contract *common.Address, i
 			}
 		}
 		// If the contract surely has code (or code is not needed), estimate the transaction
-		msg := ethereum.CallMsg{From: opts.From, To: contract, Value: value, Data: input}
+		msg := ethereum.CallMsg{From: opts.From, To: contract, GasPrice: gasPrice, Value: value, Data: input}
 		gasLimit, err = c.transactor.EstimateGas(ensureContext(opts.Context), msg)
 		if err != nil {
 			return nil, fmt.Errorf("failed to estimate gas needed: %v", err)
```
accounts/abi/bind/bind.go

```diff
@@ -47,13 +47,17 @@ const (
 // to be used as is in client code, but rather as an intermediate struct which
 // enforces compile time type safety and naming convention opposed to having to
 // manually maintain hard coded strings that break on runtime.
-func Bind(types []string, abis []string, bytecodes []string, fsigs []map[string]string, pkg string, lang Lang, libs map[string]string) (string, error) {
-	// Process each individual contract requested binding
-	contracts := make(map[string]*tmplContract)
+func Bind(types []string, abis []string, bytecodes []string, fsigs []map[string]string, pkg string, lang Lang, libs map[string]string, aliases map[string]string) (string, error) {
+	var (
+		// contracts is the map of each individual contract requested binding
+		contracts = make(map[string]*tmplContract)

-	// Map used to flag each encountered library as such
-	isLib := make(map[string]struct{})
+		// structs is the map of all redeclared structs shared by passed contracts.
+		structs = make(map[string]*tmplStruct)
+
+		// isLib is the map used to flag each encountered library as such
+		isLib = make(map[string]struct{})
+	)
 	for i := 0; i < len(types); i++ {
 		// Parse the actual ABI to generate the binding for
 		evmABI, err := abi.JSON(strings.NewReader(abis[i]))
@@ -73,20 +77,36 @@ func Bind(types []string, abis []string, bytecodes []string, fsigs []map[string]
 			calls     = make(map[string]*tmplMethod)
 			transacts = make(map[string]*tmplMethod)
 			events    = make(map[string]*tmplEvent)
-			structs   = make(map[string]*tmplStruct)
+
+			// identifiers are used to detect duplicated identifier of function
+			// and event. For all calls, transacts and events, abigen will generate
+			// corresponding bindings. However we have to ensure there is no
+			// identifier coliision in the bindings of these categories.
+			callIdentifiers     = make(map[string]bool)
+			transactIdentifiers = make(map[string]bool)
+			eventIdentifiers    = make(map[string]bool)
 		)
 		for _, original := range evmABI.Methods {
 			// Normalize the method for capital cases and non-anonymous inputs/outputs
 			normalized := original
-			normalized.Name = methodNormalizer[lang](original.Name)
-
+			normalizedName := methodNormalizer[lang](alias(aliases, original.Name))
+			// Ensure there is no duplicated identifier
+			var identifiers = callIdentifiers
+			if !original.Const {
+				identifiers = transactIdentifiers
+			}
+			if identifiers[normalizedName] {
+				return "", fmt.Errorf("duplicated identifier \"%s\"(normalized \"%s\"), use --alias for renaming", original.Name, normalizedName)
+			}
+			identifiers[normalizedName] = true
+			normalized.Name = normalizedName
 			normalized.Inputs = make([]abi.Argument, len(original.Inputs))
 			copy(normalized.Inputs, original.Inputs)
 			for j, input := range normalized.Inputs {
 				if input.Name == "" {
 					normalized.Inputs[j].Name = fmt.Sprintf("arg%d", j)
 				}
-				if _, exist := structs[input.Type.String()]; input.Type.T == abi.TupleTy && !exist {
+				if hasStruct(input.Type) {
 					bindStructType[lang](input.Type, structs)
 				}
 			}
@@ -96,7 +116,7 @@ func Bind(types []string, abis []string, bytecodes []string, fsigs []map[string]
 				if output.Name != "" {
 					normalized.Outputs[j].Name = capitalise(output.Name)
 				}
-				if _, exist := structs[output.Type.String()]; output.Type.T == abi.TupleTy && !exist {
+				if hasStruct(output.Type) {
 					bindStructType[lang](output.Type, structs)
 				}
 			}
@@ -114,19 +134,23 @@ func Bind(types []string, abis []string, bytecodes []string, fsigs []map[string]
 			}
 			// Normalize the event for capital cases and non-anonymous outputs
 			normalized := original
-			normalized.Name = methodNormalizer[lang](original.Name)
-
+
+			// Ensure there is no duplicated identifier
+			normalizedName := methodNormalizer[lang](alias(aliases, original.Name))
+			if eventIdentifiers[normalizedName] {
+				return "", fmt.Errorf("duplicated identifier \"%s\"(normalized \"%s\"), use --alias for renaming", original.Name, normalizedName)
+			}
+			eventIdentifiers[normalizedName] = true
+			normalized.Name = normalizedName
+
 			normalized.Inputs = make([]abi.Argument, len(original.Inputs))
 			copy(normalized.Inputs, original.Inputs)
 			for j, input := range normalized.Inputs {
 				// Indexed fields are input, non-indexed ones are outputs
 				if input.Indexed {
-					if input.Name == "" {
-						normalized.Inputs[j].Name = fmt.Sprintf("arg%d", j)
-					}
-					if _, exist := structs[input.Type.String()]; input.Type.T == abi.TupleTy && !exist {
-						bindStructType[lang](input.Type, structs)
-					}
+					if input.Name == "" {
+						normalized.Inputs[j].Name = fmt.Sprintf("arg%d", j)
+					}
+					if hasStruct(input.Type) {
+						bindStructType[lang](input.Type, structs)
+					}
 				}
 			}
 			// Append the event to the accumulator list
@@ -147,7 +171,6 @@ func Bind(types []string, abis []string, bytecodes []string, fsigs []map[string]
 			Transacts: transacts,
 			Events:    events,
 			Libraries: make(map[string]string),
-			Structs:   structs,
 		}
 		// Function 4-byte signatures are stored in the same sequence
 		// as types, if available.
@@ -179,6 +202,7 @@ func Bind(types []string, abis []string, bytecodes []string, fsigs []map[string]
 		Package:   pkg,
 		Contracts: contracts,
 		Libraries: libs,
+		Structs:   structs,
 	}
 	buffer := new(bytes.Buffer)
@@ -244,7 +268,7 @@ func bindBasicTypeGo(kind abi.Type) string {
 func bindTypeGo(kind abi.Type, structs map[string]*tmplStruct) string {
 	switch kind.T {
 	case abi.TupleTy:
-		return structs[kind.String()].Name
+		return structs[kind.TupleRawName+kind.String()].Name
 	case abi.ArrayTy:
 		return fmt.Sprintf("[%d]", kind.Size) + bindTypeGo(*kind.Elem, structs)
 	case abi.SliceTy:
@@ -321,7 +345,7 @@ func pluralizeJavaType(typ string) string {
 func bindTypeJava(kind abi.Type, structs map[string]*tmplStruct) string {
 	switch kind.T {
 	case abi.TupleTy:
-		return structs[kind.String()].Name
+		return structs[kind.TupleRawName+kind.String()].Name
 	case abi.ArrayTy, abi.SliceTy:
 		return pluralizeJavaType(bindTypeJava(*kind.Elem, structs))
 	default:
@@ -340,6 +364,13 @@ var bindTopicType = map[Lang]func(kind abi.Type, structs map[string]*tmplStruct)
 // funcionality as for simple types, but dynamic types get converted to hashes.
 func bindTopicTypeGo(kind abi.Type, structs map[string]*tmplStruct) string {
 	bound := bindTypeGo(kind, structs)
+
+	// todo(rjl493456442) according solidity documentation, indexed event
+	// parameters that are not value types i.e. arrays and structs are not
+	// stored directly but instead a keccak256-hash of an encoding is stored.
+	//
+	// We only convert stringS and bytes to hash, still need to deal with
+	// array(both fixed-size and dynamic-size) and struct.
 	if bound == "string" || bound == "[]byte" {
 		bound = "common.Hash"
 	}
@@ -350,6 +381,13 @@ func bindTopicTypeGo(kind abi.Type, structs map[string]*tmplStruct) string {
 // funcionality as for simple types, but dynamic types get converted to hashes.
 func bindTopicTypeJava(kind abi.Type, structs map[string]*tmplStruct) string {
 	bound := bindTypeJava(kind, structs)
+
+	// todo(rjl493456442) according solidity documentation, indexed event
+	// parameters that are not value types i.e. arrays and structs are not
+	// stored directly but instead a keccak256-hash of an encoding is stored.
+	//
+	// We only convert stringS and bytes to hash, still need to deal with
+	// array(both fixed-size and dynamic-size) and struct.
 	if bound == "String" || bound == "byte[]" {
 		bound = "Hash"
 	}
@@ -369,7 +407,14 @@ var bindStructType = map[Lang]func(kind abi.Type, structs map[string]*tmplStruct
 func bindStructTypeGo(kind abi.Type, structs map[string]*tmplStruct) string {
 	switch kind.T {
 	case abi.TupleTy:
-		if s, exist := structs[kind.String()]; exist {
+		// We compose raw struct name and canonical parameter expression
+		// together here. The reason is before solidity v0.5.11, kind.TupleRawName
+		// is empty, so we use canonical parameter expression to distinguish
+		// different struct definition. From the consideration of backward
+		// compatibility, we concat these two together so that if kind.TupleRawName
+		// is not empty, it can have unique id.
+		id := kind.TupleRawName + kind.String()
+		if s, exist := structs[id]; exist {
 			return s.Name
 		}
 		var fields []*tmplField
@@ -377,8 +422,11 @@ func bindStructTypeGo(kind abi.Type, structs map[string]*tmplStruct) string {
 			field := bindStructTypeGo(*elem, structs)
 			fields = append(fields, &tmplField{Type: field, Name: capitalise(kind.TupleRawNames[i]), SolKind: *elem})
 		}
-		name := fmt.Sprintf("Struct%d", len(structs))
-		structs[kind.String()] = &tmplStruct{
+		name := kind.TupleRawName
+		if name == "" {
+			name = fmt.Sprintf("Struct%d", len(structs))
+		}
+		structs[id] = &tmplStruct{
 			Name:   name,
 			Fields: fields,
 		}
@@ -398,7 +446,14 @@ func bindStructTypeGo(kind abi.Type, structs map[string]*tmplStruct) string {
 func bindStructTypeJava(kind abi.Type, structs map[string]*tmplStruct) string {
 	switch kind.T {
 	case abi.TupleTy:
-		if s, exist := structs[kind.String()]; exist {
+		// We compose raw struct name and canonical parameter expression
+		// together here. The reason is before solidity v0.5.11, kind.TupleRawName
+		// is empty, so we use canonical parameter expression to distinguish
+		// different struct definition. From the consideration of backward
+		// compatibility, we concat these two together so that if kind.TupleRawName
+		// is not empty, it can have unique id.
+		id := kind.TupleRawName + kind.String()
+		if s, exist := structs[id]; exist {
 			return s.Name
 		}
 		var fields []*tmplField
@@ -406,8 +461,11 @@ func bindStructTypeJava(kind abi.Type, structs map[string]*tmplStruct) string {
 			field := bindStructTypeJava(*elem, structs)
 			fields = append(fields, &tmplField{Type: field, Name: decapitalise(kind.TupleRawNames[i]), SolKind: *elem})
 		}
-		name := fmt.Sprintf("Class%d", len(structs))
-		structs[kind.String()] = &tmplStruct{
+		name := kind.TupleRawName
+		if name == "" {
+			name = fmt.Sprintf("Class%d", len(structs))
+		}
+		structs[id] = &tmplStruct{
 			Name:   name,
 			Fields: fields,
 		}
@@ -452,6 +510,15 @@ func namedTypeJava(javaKind string, solKind abi.Type) string {
 	}
 }

+// alias returns an alias of the given string based on the aliasing rules
+// or returns itself if no rule is matched.
+func alias(aliases map[string]string, n string) string {
+	if alias, exist := aliases[n]; exist {
+		return alias
+	}
+	return n
+}
+
 // methodNormalizer is a name transformer that modifies Solidity method names to
 // conform to target language naming concentions.
 var methodNormalizer = map[Lang]func(string) string{
@@ -497,6 +564,21 @@ func structured(args abi.Arguments) bool {
 	return true
 }

+// hasStruct returns an indicator whether the given type is struct, struct slice
+// or struct array.
+func hasStruct(t abi.Type) bool {
+	switch t.T {
+	case abi.SliceTy:
+		return hasStruct(*t.Elem)
+	case abi.ArrayTy:
+		return hasStruct(*t.Elem)
+	case abi.TupleTy:
+		return true
+	default:
+		return false
+	}
+}
+
 // resolveArgName converts a raw argument representation into a user friendly format.
 func resolveArgName(arg abi.Argument, structs map[string]*tmplStruct) string {
 	var (
@@ -512,7 +594,7 @@ loop:
 		case abi.ArrayTy:
 			prefix += fmt.Sprintf("[%d]", typ.Size)
 		default:
-			embedded = typ.String()
+			embedded = typ.TupleRawName + typ.String()
 			break loop
 		}
 		typ = typ.Elem
```
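The duplicate-identifier guard and the new `aliases` argument work together: Solidity names are case-sensitive, but the generated Go identifiers are capitalised, so distinct methods can collapse onto one Go name. A sketch of the failure mode and the fix, assuming the `Bind` signature introduced above (the contract name and ABI are made up):

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/accounts/abi/bind"
)

func main() {
	// "send" and "Send" both normalise to the Go identifier "Send".
	const abiJSON = `[{"type":"function","name":"send","inputs":[],"outputs":[]},
	                  {"type":"function","name":"Send","inputs":[],"outputs":[]}]`

	_, err := bind.Bind([]string{"Token"}, []string{abiJSON}, []string{""},
		nil, "token", bind.LangGo, nil, nil)
	fmt.Println(err) // duplicated identifier ..., use --alias for renaming

	// Renaming one of the two through the aliases map resolves the clash;
	// abigen exposes the same map via its new --alias flag.
	code, err := bind.Bind([]string{"Token"}, []string{abiJSON}, []string{""},
		nil, "token", bind.LangGo, nil, map[string]string{"send": "sendNative"})
	fmt.Println(err == nil, len(code) > 0) // true true
}
```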
(One file's diff is suppressed because one or more of its lines are too long.)
accounts/abi/bind/template.go

```diff
@@ -23,6 +23,7 @@ type tmplData struct {
 	Package   string                   // Name of the package to place the generated file in
 	Contracts map[string]*tmplContract // List of contracts to generate into this file
 	Libraries map[string]string        // Map the bytecode's link pattern to the library name
+	Structs   map[string]*tmplStruct   // Contract struct type definitions
 }

 // tmplContract contains the data needed to generate an individual contract binding.
@@ -36,8 +37,7 @@ type tmplContract struct {
 	Transacts map[string]*tmplMethod // Contract calls that write state data
 	Events    map[string]*tmplEvent  // Contract events accessors
 	Libraries map[string]string      // Same as tmplData, but filtered to only keep what the contract needs
-	Structs   map[string]*tmplStruct // Contract struct type definitions
-	Library   bool
+	Library   bool                   // Indicator whether the contract is a library
 }

 // tmplMethod is a wrapper around an abi.Method that contains a few preprocessed
@@ -65,7 +65,7 @@ type tmplField struct {
 // tmplStruct is a wrapper around an abi.tuple contains a auto-generated
 // struct name.
 type tmplStruct struct {
-	Name   string       // Auto-generated struct name(We can't obtain the raw struct name through abi)
+	Name   string       // Auto-generated struct name(before solidity v0.5.11) or raw name.
 	Fields []*tmplField // Struct fields definition depends on the binding language.
 }

@@ -108,8 +108,16 @@ var (
	_ = event.NewSubscription
 )

+{{$structs := .Structs}}
+{{range $structs}}
+	// {{.Name}} is an auto generated low-level Go binding around an user-defined struct.
+	type {{.Name}} struct {
+	{{range $field := .Fields}}
+	{{$field.Name}} {{$field.Type}}{{end}}
+	}
+{{end}}
+
 {{range $contract := .Contracts}}
-	{{$structs := $contract.Structs}}
	// {{.Type}}ABI is the input ABI used to generate the binding from.
	const {{.Type}}ABI = "{{.InputABI}}"

@@ -285,14 +293,6 @@ var (
		return _{{$contract.Type}}.Contract.contract.Transact(opts, method, params...)
	}

-	{{range .Structs}}
-	// {{.Name}} is an auto generated low-level Go binding around an user-defined struct.
-	type {{.Name}} struct {
-	{{range $field := .Fields}}
-	{{$field.Name}} {{$field.Type}}{{end}}
-	}
-	{{end}}
-
	{{range .Calls}}
	// {{.Normalized.Name}} is a free data retrieval call binding the contract method 0x{{printf "%x" .Original.ID}}.
	//
@@ -483,7 +483,7 @@ var (

	// Parse{{.Normalized.Name}} is a log parse operation binding the contract event 0x{{printf "%x" .Original.ID}}.
	//
-	// Solidity: {{.Original.String}}
+	// Solidity: {{formatevent .Original $structs}}
	func (_{{$contract.Type}} *{{$contract.Type}}Filterer) Parse{{.Normalized.Name}}(log types.Log) (*{{$contract.Type}}{{.Normalized.Name}}, error) {
		event := new({{$contract.Type}}{{.Normalized.Name}})
		if err := _{{$contract.Type}}.contract.UnpackLog(event, "{{.Original.Name}}", log); err != nil {
@@ -507,8 +507,8 @@ package {{.Package}};

 import org.ethereum.geth.*;
 import java.util.*;

+{{$structs := .Structs}}
 {{range $contract := .Contracts}}
-	{{$structs := $contract.Structs}}
	{{if not .Library}}public {{end}}class {{.Type}} {
	// ABI is the input ABI used to generate the binding from.
	public final static String ABI = "{{.InputABI}}";
```
accounts/abi/bind/topics.go

```diff
@@ -80,15 +80,19 @@ func makeTopics(query ...[]interface{}) ([][]common.Hash, error) {
 				copy(topic[:], hash[:])

 			default:
+				// todo(rjl493456442) according solidity documentation, indexed event
+				// parameters that are not value types i.e. arrays and structs are not
+				// stored directly but instead a keccak256-hash of an encoding is stored.
+				//
+				// We only convert stringS and bytes to hash, still need to deal with
+				// array(both fixed-size and dynamic-size) and struct.
+
 				// Attempt to generate the topic from funky types
 				val := reflect.ValueOf(rule)
-
 				switch {
-
 				// static byte array
 				case val.Kind() == reflect.Array && reflect.TypeOf(rule).Elem().Kind() == reflect.Uint8:
 					reflect.Copy(reflect.ValueOf(topic[:val.Len()]), val)
-
 				default:
 					return nil, fmt.Errorf("unsupported indexed type: %T", rule)
 				}
@@ -162,6 +166,7 @@ func parseTopics(out interface{}, fields abi.Arguments, topics []common.Hash) er

 		default:
 			// Ran out of plain primitive types, try custom types
+
 			switch field.Type() {
 			case reflectHash: // Also covers all dynamic types
 				field.Set(reflect.ValueOf(topics[0]))
@@ -178,11 +183,9 @@ func parseTopics(out interface{}, fields abi.Arguments, topics []common.Hash) er
 			default:
 				// Ran out of custom types, try the crazies
 				switch {
-
 				// static byte array
 				case arg.Type.T == abi.FixedBytesTy:
 					reflect.Copy(field, reflect.ValueOf(topics[0][:arg.Type.Size]))
-
 				default:
 					return fmt.Errorf("unsupported indexed type: %v", arg.Type)
 				}
```
accounts/abi/bind/topics_test.go

```diff
@@ -59,7 +59,7 @@ func TestParseTopics(t *testing.T) {
 	type bytesStruct struct {
 		StaticBytes [5]byte
 	}
-	bytesType, _ := abi.NewType("bytes5", nil)
+	bytesType, _ := abi.NewType("bytes5", "", nil)
 	type args struct {
 		createObj func() interface{}
 		resultObj func() interface{}
```
accounts/abi/event_test.go

```diff
@@ -173,7 +173,7 @@ func TestEventTupleUnpack(t *testing.T) {
 	type EventTransferWithTag struct {
 		// this is valid because `value` is not exportable,
 		// so value is only unmarshalled into `Value1`.
-		value  *big.Int
+		value  *big.Int //lint:ignore U1000 unused field is part of test
 		Value1 *big.Int `abi:"value"`
 	}

@@ -354,40 +354,6 @@ func unpackTestEventData(dest interface{}, hexData string, jsonEvent []byte, ass
 	return a.Unpack(dest, "e", data)
 }

-/*
-Taken from
-https://github.com/ethereum/go-ethereum/pull/15568
-*/
-
-type testResult struct {
-	Values [2]*big.Int
-	Value1 *big.Int
-	Value2 *big.Int
-}
-
-type testCase struct {
-	definition string
-	want       testResult
-}
-
-func (tc testCase) encoded(intType, arrayType Type) []byte {
-	var b bytes.Buffer
-	if tc.want.Value1 != nil {
-		val, _ := intType.pack(reflect.ValueOf(tc.want.Value1))
-		b.Write(val)
-	}
-
-	if !reflect.DeepEqual(tc.want.Values, [2]*big.Int{nil, nil}) {
-		val, _ := arrayType.pack(reflect.ValueOf(tc.want.Values))
-		b.Write(val)
-	}
-	if tc.want.Value2 != nil {
-		val, _ := intType.pack(reflect.ValueOf(tc.want.Value2))
-		b.Write(val)
-	}
-	return b.Bytes()
-}
-
 // TestEventUnpackIndexed verifies that indexed field will be skipped by event decoder.
 func TestEventUnpackIndexed(t *testing.T) {
 	definition := `[{"name": "test", "type": "event", "inputs": [{"indexed": true, "name":"value1", "type":"uint8"},{"indexed": false, "name":"value2", "type":"uint8"}]}]`
```
accounts/abi/numbers.go

```diff
@@ -73,7 +73,7 @@ func packNum(value reflect.Value) []byte {
 	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
 		return U256(big.NewInt(value.Int()))
 	case reflect.Ptr:
-		return U256(value.Interface().(*big.Int))
+		return U256(new(big.Int).Set(value.Interface().(*big.Int)))
 	default:
 		panic("abi: fatal error")
 	}
```
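The extra copy matters because `math.U256` performs its two's-complement reduction in place, so packing previously mutated the caller's `*big.Int`. A small demonstration of the aliasing hazard, using go-ethereum's common/math helper directly:

```go
package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/common/math"
)

func main() {
	v := big.NewInt(-1)

	// Safe: the modulo-2**256 reduction happens on a throwaway copy.
	u := math.U256(new(big.Int).Set(v))
	fmt.Println(v, u.BitLen()) // -1 256

	// Unsafe: U256 writes the reduced value back into v itself,
	// so the caller's value silently changes.
	math.U256(v)
	fmt.Println(v.BitLen()) // 256
}
```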
accounts/abi/pack_test.go

```diff
@@ -613,7 +613,7 @@ func TestPack(t *testing.T) {
 				"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"), // tuple[1].A[1]
 		},
 	} {
-		typ, err := NewType(test.typ, test.components)
+		typ, err := NewType(test.typ, "", test.components)
 		if err != nil {
 			t.Fatalf("%v failed. Unexpected parse error: %v", i, err)
 		}
```
accounts/abi/type.go

```diff
@@ -53,6 +53,7 @@ type Type struct {
 	stringKind string // holds the unparsed string for deriving signatures

 	// Tuple relative fields
+	TupleRawName  string   // Raw struct name defined in source code, may be empty.
 	TupleElems    []*Type  // Type information of all tuple fields
 	TupleRawNames []string // Raw field name of all tuple fields
 }
@@ -63,7 +64,7 @@ var (
 )

 // NewType creates a new reflection type of abi type given in t.
-func NewType(t string, components []ArgumentMarshaling) (typ Type, err error) {
+func NewType(t string, internalType string, components []ArgumentMarshaling) (typ Type, err error) {
 	// check that array brackets are equal if they exist
 	if strings.Count(t, "[") != strings.Count(t, "]") {
 		return Type{}, fmt.Errorf("invalid arg type in abi")
@@ -73,9 +74,14 @@ func NewType(t string, components []ArgumentMarshaling) (typ Type, err error) {
 	// if there are brackets, get ready to go into slice/array mode and
 	// recursively create the type
 	if strings.Count(t, "[") != 0 {
-		i := strings.LastIndex(t, "[")
+		// Note internalType can be empty here.
+		subInternal := internalType
+		if i := strings.LastIndex(internalType, "["); i != -1 {
+			subInternal = subInternal[:i]
+		}
 		// recursively embed the type
-		embeddedType, err := NewType(t[:i], components)
+		i := strings.LastIndex(t, "[")
+		embeddedType, err := NewType(t[:i], subInternal, components)
 		if err != nil {
 			return Type{}, err
 		}
@@ -173,7 +179,7 @@ func NewType(t string, components []ArgumentMarshaling) (typ Type, err error) {
 	)
 	expression += "("
 	for idx, c := range components {
-		cType, err := NewType(c.Type, c.Components)
+		cType, err := NewType(c.Type, c.InternalType, c.Components)
 		if err != nil {
 			return Type{}, err
 		}
@@ -199,6 +205,17 @@ func NewType(t string, components []ArgumentMarshaling) (typ Type, err error) {
 		typ.TupleRawNames = names
 		typ.T = TupleTy
 		typ.stringKind = expression
+
+		const structPrefix = "struct "
+		// After solidity 0.5.10, a new field of abi "internalType"
+		// is introduced. From that we can obtain the struct name
+		// user defined in the source code.
+		if internalType != "" && strings.HasPrefix(internalType, structPrefix) {
+			// Foo.Bar type definition is not allowed in golang,
+			// convert the format to FooBar
+			typ.TupleRawName = strings.Replace(internalType[len(structPrefix):], ".", "", -1)
+		}
+
 	case "function":
 		typ.Kind = reflect.Array
 		typ.T = FunctionTy
```
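With `internalType` threaded through, tuple types can carry the original Solidity struct name, including through array wrappers (the bracket suffix is stripped before recursing). A sketch using a made-up struct name, against the `NewType` signature introduced above:

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/accounts/abi"
)

func main() {
	// "struct Geo.Point" stands in for the internalType emitted by solc >= 0.5.10.
	point, _ := abi.NewType("tuple", "struct Geo.Point", []abi.ArgumentMarshaling{
		{Name: "x", Type: "uint256"},
		{Name: "y", Type: "uint256"},
	})
	fmt.Println(point.TupleRawName) // GeoPoint — "struct " stripped, "." removed

	// For arrays, "[2]" is cut from the internalType before the recursive call,
	// so the element type still resolves the raw struct name.
	arr, _ := abi.NewType("tuple[2]", "struct Geo.Point[2]", []abi.ArgumentMarshaling{
		{Name: "x", Type: "uint256"},
		{Name: "y", Type: "uint256"},
	})
	fmt.Println(arr.Elem.TupleRawName) // GeoPoint
}
```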
accounts/abi/type_test.go

```diff
@@ -106,7 +106,7 @@ func TestTypeRegexp(t *testing.T) {
 	}

 	for _, tt := range tests {
-		typ, err := NewType(tt.blob, tt.components)
+		typ, err := NewType(tt.blob, "", tt.components)
 		if err != nil {
 			t.Errorf("type %q: failed to parse type string: %v", tt.blob, err)
 		}
@@ -281,7 +281,7 @@ func TestTypeCheck(t *testing.T) {
 			B *big.Int
 		}{{big.NewInt(0), big.NewInt(0)}, {big.NewInt(0), big.NewInt(0)}}, ""},
 	} {
-		typ, err := NewType(test.typ, test.components)
+		typ, err := NewType(test.typ, "", test.components)
 		if err != nil && len(test.err) == 0 {
 			t.Fatal("unexpected parse error:", err)
 		} else if err != nil && len(test.err) != 0 {
```
accounts/abi/unpack_test.go

```diff
@@ -51,6 +51,7 @@ func (test unpackTest) checkError(err error) error {
 }

 var unpackTests = []unpackTest{
+	// Bools
 	{
 		def:  `[{ "type": "bool" }]`,
 		enc:  "0000000000000000000000000000000000000000000000000000000000000001",
@@ -73,6 +74,7 @@ var unpackTests = []unpackTest{
 		want: false,
 		err:  "abi: improperly encoded boolean value",
 	},
+	// Integers
 	{
 		def:  `[{"type": "uint32"}]`,
 		enc:  "0000000000000000000000000000000000000000000000000000000000000001",
@@ -122,11 +124,13 @@ var unpackTests = []unpackTest{
 		enc:  "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
 		want: big.NewInt(-1),
 	},
+	// Address
 	{
 		def:  `[{"type": "address"}]`,
 		enc:  "0000000000000000000000000100000000000000000000000000000000000000",
 		want: common.Address{1},
 	},
+	// Bytes
 	{
 		def:  `[{"type": "bytes32"}]`,
 		enc:  "0100000000000000000000000000000000000000000000000000000000000000",
@@ -154,23 +158,39 @@ var unpackTests = []unpackTest{
 		enc:  "0100000000000000000000000000000000000000000000000000000000000000",
 		want: [32]byte{1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
 	},
+	// Functions
 	{
 		def:  `[{"type": "function"}]`,
 		enc:  "0100000000000000000000000000000000000000000000000000000000000000",
 		want: [24]byte{1},
 	},
-	// slices
+	// Slice and Array
 	{
 		def:  `[{"type": "uint8[]"}]`,
 		enc:  "0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
 		want: []uint8{1, 2},
 	},
+	{
+		def:  `[{"type": "uint8[]"}]`,
+		enc:  "00000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000000",
+		want: []uint8{},
+	},
+	{
+		def:  `[{"type": "uint256[]"}]`,
+		enc:  "00000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000000",
+		want: []*big.Int{},
+	},
 	{
 		def:  `[{"type": "uint8[2]"}]`,
 		enc:  "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
 		want: [2]uint8{1, 2},
 	},
 	// multi dimensional, if these pass, all types that don't require length prefix should pass
+	{
+		def:  `[{"type": "uint8[][]"}]`,
+		enc:  "00000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000000",
+		want: [][]uint8{},
+	},
 	{
 		def:  `[{"type": "uint8[][]"}]`,
 		enc:  "00000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
@@ -186,11 +206,21 @@ var unpackTests = []unpackTest{
 		enc:  "0000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
 		want: [2][2]uint8{{1, 2}, {1, 2}},
 	},
+	{
+		def:  `[{"type": "uint8[][2]"}]`,
+		enc:  "00000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+		want: [2][]uint8{{}, {}},
+	},
 	{
 		def:  `[{"type": "uint8[][2]"}]`,
 		enc:  "0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000001",
 		want: [2][]uint8{{1}, {1}},
 	},
+	{
+		def:  `[{"type": "uint8[2][]"}]`,
+		enc:  "00000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000000",
+		want: [][2]uint8{},
+	},
 	{
 		def:  `[{"type": "uint8[2][]"}]`,
 		enc:  "0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
@@ -420,7 +450,7 @@ func TestUnpack(t *testing.T) {
 		}
 		encb, err := hex.DecodeString(test.enc)
 		if err != nil {
-			t.Fatalf("invalid hex: %s" + test.enc)
+			t.Fatalf("invalid hex %s: %v", test.enc, err)
 		}
 		outptr := reflect.New(reflect.TypeOf(test.want))
 		err = abi.Unpack(outptr.Interface(), "method", encb)
```
accounts/keystore/passphrase.go

```diff
@@ -123,6 +123,7 @@ func (ks keyStorePassphrase) StoreKey(filename string, key *Key, auth string) er
 			"Please file a ticket at:\n\n" +
 			"https://github.com/ethereum/go-ethereum/issues." +
 			"The error was : %s"
+		//lint:ignore ST1005 This is a message for the user
 		return fmt.Errorf(msg, tmpName, err)
 	}
 }
@@ -237,7 +238,7 @@ func DecryptKey(keyjson []byte, auth string) (*Key, error) {

 func DecryptDataV3(cryptoJson CryptoJSON, auth string) ([]byte, error) {
 	if cryptoJson.Cipher != "aes-128-ctr" {
-		return nil, fmt.Errorf("Cipher not supported: %v", cryptoJson.Cipher)
+		return nil, fmt.Errorf("cipher not supported: %v", cryptoJson.Cipher)
 	}
 	mac, err := hex.DecodeString(cryptoJson.MAC)
 	if err != nil {
@@ -273,7 +274,7 @@ func DecryptDataV3(cryptoJson CryptoJSON, auth string) ([]byte, error) {

 func decryptKeyV3(keyProtected *encryptedKeyJSONV3, auth string) (keyBytes []byte, keyId []byte, err error) {
 	if keyProtected.Version != version {
-		return nil, nil, fmt.Errorf("Version not supported: %v", keyProtected.Version)
+		return nil, nil, fmt.Errorf("version not supported: %v", keyProtected.Version)
 	}
 	keyId = uuid.Parse(keyProtected.Id)
 	plainText, err := DecryptDataV3(keyProtected.Crypto, auth)
@@ -335,13 +336,13 @@ func getKDFKey(cryptoJSON CryptoJSON, auth string) ([]byte, error) {
 		c := ensureInt(cryptoJSON.KDFParams["c"])
 		prf := cryptoJSON.KDFParams["prf"].(string)
 		if prf != "hmac-sha256" {
-			return nil, fmt.Errorf("Unsupported PBKDF2 PRF: %s", prf)
+			return nil, fmt.Errorf("unsupported PBKDF2 PRF: %s", prf)
 		}
 		key := pbkdf2.Key(authArray, salt, c, dkLen, sha256.New)
 		return key, nil
 	}

-	return nil, fmt.Errorf("Unsupported KDF: %s", cryptoJSON.KDF)
+	return nil, fmt.Errorf("unsupported KDF: %s", cryptoJSON.KDF)
 }

 // TODO: can we do without this when unmarshalling dynamic JSON?
```
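The wholesale lowercasing here (and in the smartcard wallet files below) follows staticcheck's ST1005 rule: error strings should not be capitalised, because callers routinely embed them mid-sentence. The `//lint:ignore ST1005` annotations mark the deliberate exceptions that are shown to users verbatim. A small, non-geth illustration of why the convention reads better:

```go
package main

import (
	"errors"
	"fmt"
)

func main() {
	before := errors.New("Cipher not supported: aes-256-gcm")
	after := errors.New("cipher not supported: aes-256-gcm")

	// Once wrapped, the capitalised variant interrupts the sentence;
	// the lowercase one flows naturally.
	fmt.Printf("unlocking key: %v\n", before) // unlocking key: Cipher not supported: ...
	fmt.Printf("unlocking key: %v\n", after)  // unlocking key: cipher not supported: ...
}
```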
accounts/scwallet/securechannel.go

```diff
@@ -71,7 +71,7 @@ func NewSecureChannelSession(card *pcsc.Card, keyData []byte) (*SecureChannelSes

 	cardPublic, ok := gen.Unmarshal(keyData)
 	if !ok {
-		return nil, fmt.Errorf("Could not unmarshal public key from card")
+		return nil, fmt.Errorf("could not unmarshal public key from card")
 	}

 	secret, err := gen.GenerateSharedSecret(private, cardPublic)
@@ -109,7 +109,7 @@ func (s *SecureChannelSession) Pair(pairingPassword []byte) error {
 	cardChallenge := response.Data[32:64]

 	if !bytes.Equal(expectedCryptogram, cardCryptogram) {
-		return fmt.Errorf("Invalid card cryptogram %v != %v", expectedCryptogram, cardCryptogram)
+		return fmt.Errorf("invalid card cryptogram %v != %v", expectedCryptogram, cardCryptogram)
 	}

 	md.Reset()
@@ -132,7 +132,7 @@ func (s *SecureChannelSession) Pair(pairingPassword []byte) error {
 // Unpair disestablishes an existing pairing.
 func (s *SecureChannelSession) Unpair() error {
 	if s.PairingKey == nil {
-		return fmt.Errorf("Cannot unpair: not paired")
+		return fmt.Errorf("cannot unpair: not paired")
 	}

 	_, err := s.transmitEncrypted(claSCWallet, insUnpair, s.PairingIndex, 0, []byte{})
@@ -148,7 +148,7 @@ func (s *SecureChannelSession) Unpair() error {
 // Open initializes the secure channel.
 func (s *SecureChannelSession) Open() error {
 	if s.iv != nil {
-		return fmt.Errorf("Session already opened")
+		return fmt.Errorf("session already opened")
 	}

 	response, err := s.open()
@@ -185,11 +185,11 @@ func (s *SecureChannelSession) mutuallyAuthenticate() error {
 		return err
 	}
 	if response.Sw1 != 0x90 || response.Sw2 != 0x00 {
-		return fmt.Errorf("Got unexpected response from MUTUALLY_AUTHENTICATE: 0x%x%x", response.Sw1, response.Sw2)
+		return fmt.Errorf("got unexpected response from MUTUALLY_AUTHENTICATE: 0x%x%x", response.Sw1, response.Sw2)
 	}

 	if len(response.Data) != scSecretLength {
-		return fmt.Errorf("Response from MUTUALLY_AUTHENTICATE was %d bytes, expected %d", len(response.Data), scSecretLength)
+		return fmt.Errorf("response from MUTUALLY_AUTHENTICATE was %d bytes, expected %d", len(response.Data), scSecretLength)
 	}

 	return nil
@@ -222,7 +222,7 @@ func (s *SecureChannelSession) pair(p1 uint8, data []byte) (*responseAPDU, error
 // transmitEncrypted sends an encrypted message, and decrypts and returns the response.
 func (s *SecureChannelSession) transmitEncrypted(cla, ins, p1, p2 byte, data []byte) (*responseAPDU, error) {
 	if s.iv == nil {
-		return nil, fmt.Errorf("Channel not open")
+		return nil, fmt.Errorf("channel not open")
 	}

 	data, err := s.encryptAPDU(data)
@@ -261,14 +261,14 @@ func (s *SecureChannelSession) transmitEncrypted(cla, ins, p1, p2 byte, data []b
 		return nil, err
 	}
 	if !bytes.Equal(s.iv, rmac) {
-		return nil, fmt.Errorf("Invalid MAC in response")
+		return nil, fmt.Errorf("invalid MAC in response")
 	}

 	rapdu := &responseAPDU{}
 	rapdu.deserialize(plainData)

 	if rapdu.Sw1 != sw1Ok {
-		return nil, fmt.Errorf("Unexpected response status Cla=0x%x, Ins=0x%x, Sw=0x%x%x", cla, ins, rapdu.Sw1, rapdu.Sw2)
+		return nil, fmt.Errorf("unexpected response status Cla=0x%x, Ins=0x%x, Sw=0x%x%x", cla, ins, rapdu.Sw1, rapdu.Sw2)
 	}

 	return rapdu, nil
@@ -277,7 +277,7 @@ func (s *SecureChannelSession) transmitEncrypted(cla, ins, p1, p2 byte, data []b
 // encryptAPDU is an internal method that serializes and encrypts an APDU.
 func (s *SecureChannelSession) encryptAPDU(data []byte) ([]byte, error) {
 	if len(data) > maxPayloadSize {
-		return nil, fmt.Errorf("Payload of %d bytes exceeds maximum of %d", len(data), maxPayloadSize)
+		return nil, fmt.Errorf("payload of %d bytes exceeds maximum of %d", len(data), maxPayloadSize)
 	}
 	data = pad(data, 0x80)

@@ -323,10 +323,10 @@ func unpad(data []byte, terminator byte) ([]byte, error) {
 		case terminator:
 			return data[:len(data)-i], nil
 		default:
-			return nil, fmt.Errorf("Expected end of padding, got %d", data[len(data)-i])
+			return nil, fmt.Errorf("expected end of padding, got %d", data[len(data)-i])
 		}
 	}
-	return nil, fmt.Errorf("Expected end of padding, got 0")
+	return nil, fmt.Errorf("expected end of padding, got 0")
 }

 // updateIV is an internal method that updates the initialization vector after
```
accounts/scwallet/wallet.go

```diff
@@ -167,7 +167,7 @@ func transmit(card *pcsc.Card, command *commandAPDU) (*responseAPDU, error) {
 	}

 	if response.Sw1 != sw1Ok {
-		return nil, fmt.Errorf("Unexpected insecure response status Cla=0x%x, Ins=0x%x, Sw=0x%x%x", command.Cla, command.Ins, response.Sw1, response.Sw2)
+		return nil, fmt.Errorf("unexpected insecure response status Cla=0x%x, Ins=0x%x, Sw=0x%x%x", command.Cla, command.Ins, response.Sw1, response.Sw2)
 	}

 	return response, nil
@@ -252,7 +252,7 @@ func (w *Wallet) release() error {
 // with the wallet.
 func (w *Wallet) pair(puk []byte) error {
 	if w.session.paired() {
-		return fmt.Errorf("Wallet already paired")
+		return fmt.Errorf("wallet already paired")
 	}
 	pairing, err := w.session.pair(puk)
 	if err != nil {
@@ -773,12 +773,12 @@ func (w *Wallet) findAccountPath(account accounts.Account) (accounts.DerivationP

 	// Look for the path in the URL
 	if account.URL.Scheme != w.Hub.scheme {
-		return nil, fmt.Errorf("Scheme %s does not match wallet scheme %s", account.URL.Scheme, w.Hub.scheme)
+		return nil, fmt.Errorf("scheme %s does not match wallet scheme %s", account.URL.Scheme, w.Hub.scheme)
 	}

 	parts := strings.SplitN(account.URL.Path, "/", 2)
 	if len(parts) != 2 {
-		return nil, fmt.Errorf("Invalid URL format: %s", account.URL)
+		return nil, fmt.Errorf("invalid URL format: %s", account.URL)
 	}

 	if parts[0] != fmt.Sprintf("%x", w.PublicKey[1:3]) {
@@ -813,7 +813,7 @@ func (s *Session) pair(secret []byte) (smartcardPairing, error) {
 // unpair deletes an existing pairing.
 func (s *Session) unpair() error {
 	if !s.verified {
-		return fmt.Errorf("Unpair requires that the PIN be verified")
+		return fmt.Errorf("unpair requires that the PIN be verified")
 	}
 	return s.Channel.Unpair()
 }
@@ -850,7 +850,7 @@ func (s *Session) paired() bool {
 // authenticate uses an existing pairing to establish a secure channel.
 func (s *Session) authenticate(pairing smartcardPairing) error {
 	if !bytes.Equal(s.Wallet.PublicKey, pairing.PublicKey) {
-		return fmt.Errorf("Cannot pair using another wallet's pairing; %x != %x", s.Wallet.PublicKey, pairing.PublicKey)
+		return fmt.Errorf("cannot pair using another wallet's pairing; %x != %x", s.Wallet.PublicKey, pairing.PublicKey)
 	}
 	s.Channel.PairingKey = pairing.PairingKey
 	s.Channel.PairingIndex = pairing.PairingIndex
@@ -879,6 +879,7 @@ func (s *Session) walletStatus() (*walletStatus, error) {
 }

 // derivationPath fetches the wallet's current derivation path from the card.
+//lint:ignore U1000 needs to be added to the console interface
 func (s *Session) derivationPath() (accounts.DerivationPath, error) {
 	response, err := s.Channel.transmitEncrypted(claSCWallet, insStatus, statusP1Path, 0, nil)
 	if err != nil {
@@ -993,12 +994,14 @@ func (s *Session) derive(path accounts.DerivationPath) (accounts.Account, error)
 }

 // keyExport contains information on an exported keypair.
+//lint:ignore U1000 needs to be added to the console interface
 type keyExport struct {
 	PublicKey  []byte `asn1:"tag:0"`
 	PrivateKey []byte `asn1:"tag:1,optional"`
 }

 // publicKey returns the public key for the current derivation path.
+//lint:ignore U1000 needs to be added to the console interface
 func (s *Session) publicKey() ([]byte, error) {
 	response, err := s.Channel.transmitEncrypted(claSCWallet, insExportKey, exportP1Any, exportP2Pubkey, nil)
 	if err != nil {
```
accounts/usbwallet/ledger.go

```diff
@@ -162,7 +162,8 @@ func (w *ledgerDriver) SignTx(path accounts.DerivationPath, tx *types.Transactio
 		return common.Address{}, nil, accounts.ErrWalletClosed
 	}
 	// Ensure the wallet is capable of signing the given transaction
-	if chainID != nil && w.version[0] <= 1 && w.version[1] <= 0 && w.version[2] <= 2 {
+	if chainID != nil && w.version[0] <= 1 && w.version[2] <= 2 {
+		//lint:ignore ST1005 brand name displayed on the console
 		return common.Address{}, nil, fmt.Errorf("Ledger v%d.%d.%d doesn't support signing this transaction, please update to v1.0.3 at least", w.version[0], w.version[1], w.version[2])
 	}
 	// All infos gathered and metadata checks out, request signing
```
appveyor.yml

```diff
@@ -23,8 +23,8 @@ environment:
 install:
   - git submodule update --init
   - rmdir C:\go /s /q
-  - appveyor DownloadFile https://dl.google.com/go/go1.13.windows-%GETH_ARCH%.zip
-  - 7z x go1.13.windows-%GETH_ARCH%.zip -y -oC:\ > NUL
+  - appveyor DownloadFile https://dl.google.com/go/go1.13.4.windows-%GETH_ARCH%.zip
+  - 7z x go1.13.4.windows-%GETH_ARCH%.zip -y -oC:\ > NUL
   - go version
   - gcc --version
```
19 build/checksums.txt (new file)
@@ -0,0 +1,19 @@
# This file contains sha256 checksums of optional build dependencies.

95dbeab442ee2746b9acf0934c8e2fc26414a0565c008631b04addb8c02e7624 go1.13.4.src.tar.gz

1fcbc9e36f4319eeed02beb8cfd1b3d425ffc2f90ddf09a80f18d5064c51e0cb golangci-lint-1.21.0-linux-386.tar.gz
267b4066e67139a38d29499331a002d6a29ad5be7aafc83db3b1e88f1b027f90 golangci-lint-1.21.0-linux-armv6.tar.gz
a602c1f25f90e46e621019cff0a8cb3f4e1837011f3537f15e730d6a9ebf507b golangci-lint-1.21.0-freebsd-armv7.tar.gz
2c861f8dc56b560474aa27cab0c075991628cc01af3451e27ac82f5d10d5106b golangci-lint-1.21.0-linux-amd64.tar.gz
a1c39e055280e755acaa906e7abfc20b99a5c28be8af541c57fbc44abbb20dde golangci-lint-1.21.0-linux-arm64.tar.gz
a8f8bda8c6a4136acf858091077830b1e83ad5612606cb69d5dced869ce00bd8 golangci-lint-1.21.0-linux-ppc64le.tar.gz
0a8a8c3bc660ccbca668897ab520f7ee9878f16cc8e4dd24fe46236ceec97ba3 golangci-lint-1.21.0-freebsd-armv6.tar.gz
699b07f45e216571f54002bcbd83b511c4801464a422162158e299587b095b18 golangci-lint-1.21.0-freebsd-amd64.tar.gz
980fb4993942154bb5c8129ea3b86de09574fe81b24384ebb58cd7a9d2f04483 golangci-lint-1.21.0-linux-armv7.tar.gz
f15b689088a47f20d5d3c1d945e9ee7c6238f2b84ea468b5f886cf8713dce62e golangci-lint-1.21.0-windows-386.zip
2e40ded7adcf11e59013cb15c24438b15a86526ca241edfcfdf1abd73a5280a8 golangci-lint-1.21.0-windows-amd64.zip
6052c7cfea4d6dc2fc722f6c12792a5ec087420198db495afffbc22052653bf7 golangci-lint-1.21.0-freebsd-386.tar.gz
ca00b8eacf9af14a71b908b4149606c762aa5c0eac781e74ca0abedfdfdf6c8c golangci-lint-1.21.0-linux-s390x.tar.gz
1365455940c342f95718159d89d66ad2eef19f0846c3e87023e915a3527b929f golangci-lint-1.21.0-darwin-386.tar.gz
2b2713ec5007e67883aa501eebb81f22abfab0cf0909134ba90f60a066db3760 golangci-lint-1.21.0-darwin-amd64.tar.gz
@@ -22,19 +22,18 @@ variables `PPA_SIGNING_KEY` and `PPA_SSH_KEY` on Travis.

We want to build go-ethereum with the most recent version of Go, irrespective of the Go
version that is available in the main Ubuntu repository. In order to make this possible,
-our PPA depends on the ~gophers/ubuntu/archive PPA. Our source package build-depends on
-golang-1.11, which is co-installable alongside the regular golang package. PPA dependencies
-can be edited at https://launchpad.net/%7Eethereum/+archive/ubuntu/ethereum/+edit-dependencies
+we bundle the entire Go sources into our own source archive and start the build job by
+compiling Go and then using that to build go-ethereum. On Trusty we have a special case
+requiring the `~gophers/ubuntu/archive` PPA since Trusty can't even build Go itself. PPA
+deps are set at https://launchpad.net/%7Eethereum/+archive/ubuntu/ethereum/+edit-dependencies

## Building Packages Locally (for testing)

You need to run Ubuntu to do test packaging.

-Add the gophers PPA and install Go 1.11 and Debian packaging tools:
+Install any version of Go and Debian packaging tools:

-    $ sudo apt-add-repository ppa:gophers/ubuntu/archive
-    $ sudo apt-get update
-    $ sudo apt-get install build-essential golang-1.11 devscripts debhelper python-bzrlib python-paramiko
+    $ sudo apt-get install build-essential golang-go devscripts debhelper python-bzrlib python-paramiko

Create the source packages:

@@ -42,10 +41,10 @@ Create the source packages:

Then go into the source package directory for your running distribution and build the package:

-    $ cd dist/ethereum-unstable-1.6.0+xenial
+    $ cd dist/ethereum-unstable-1.9.6+bionic
    $ dpkg-buildpackage

Built packages are placed in the dist/ directory.

    $ cd ..
-    $ dpkg-deb -c geth-unstable_1.6.0+xenial_amd64.deb
+    $ dpkg-deb -c geth-unstable_1.9.6+bionic_amd64.deb
174 build/ci.go
@@ -58,6 +58,7 @@ import (
	"strings"
	"time"

+	"github.com/cespare/cp"
	"github.com/ethereum/go-ethereum/internal/build"
	"github.com/ethereum/go-ethereum/params"
)
@@ -138,7 +139,18 @@ var (
	// Note: zesty is unsupported because it was officially deprecated on Launchpad.
	// Note: artful is unsupported because it was officially deprecated on Launchpad.
	// Note: cosmic is unsupported because it was officially deprecated on Launchpad.
-	debDistros = []string{"trusty", "xenial", "bionic", "disco", "eoan"}
+	debDistroGoBoots = map[string]string{
+		"trusty": "golang-1.11",
+		"xenial": "golang-go",
+		"bionic": "golang-go",
+		"disco":  "golang-go",
+		"eoan":   "golang-go",
+	}
+
+	debGoBootPaths = map[string]string{
+		"golang-1.11": "/usr/lib/go-1.11",
+		"golang-go":   "/usr/lib/go",
+	}
)

var GOBIN, _ = filepath.Abs(filepath.Join("build", "bin"))
@@ -217,6 +229,9 @@ func doInstall(cmdline []string) {

	if *arch == "" || *arch == runtime.GOARCH {
		goinstall := goTool("install", buildFlags(env)...)
+		if runtime.GOARCH == "arm64" {
+			goinstall.Args = append(goinstall.Args, "-p", "1")
+		}
		goinstall.Args = append(goinstall.Args, "-v")
		goinstall.Args = append(goinstall.Args, packages...)
		build.MustRun(goinstall)
@@ -229,6 +244,7 @@ func doInstall(cmdline []string) {
			os.RemoveAll(filepath.Join(path, "pkg", runtime.GOOS+"_arm"))
		}
	}

	// Seems we are cross compiling, work around forbidden GOBIN
	goinstall := goToolArch(*arch, *cc, "install", buildFlags(env)...)
	goinstall.Args = append(goinstall.Args, "-v")
@@ -303,6 +319,7 @@ func goToolArch(arch string, cc string, subcmd string, args ...string) *exec.Cmd

func doTest(cmdline []string) {
	coverage := flag.Bool("coverage", false, "Whether to record code coverage")
+	verbose := flag.Bool("v", false, "Whether to log verbosely")
	flag.CommandLine.Parse(cmdline)
	env := build.Env()

@@ -315,48 +332,50 @@ func doTest(cmdline []string) {
	// Test a single package at a time. CI builders are slow
	// and some tests run into timeouts under load.
	gotest := goTool("test", buildFlags(env)...)
-	gotest.Args = append(gotest.Args, "-p", "1", "-timeout", "5m", "--short")
+	gotest.Args = append(gotest.Args, "-p", "1", "-timeout", "5m")
	if *coverage {
		gotest.Args = append(gotest.Args, "-covermode=atomic", "-cover")
	}
+	if *verbose {
+		gotest.Args = append(gotest.Args, "-v")
+	}

	gotest.Args = append(gotest.Args, packages...)
	build.MustRun(gotest)
}

-// runs gometalinter on requested packages
+// doLint runs golangci-lint on requested packages.
func doLint(cmdline []string) {
+	var (
+		cachedir = flag.String("cachedir", "./build/cache", "directory for caching golangci-lint binary.")
+	)
	flag.CommandLine.Parse(cmdline)

	packages := []string{"./..."}
	if len(flag.CommandLine.Args()) > 0 {
		packages = flag.CommandLine.Args()
	}
-	// Get metalinter and install all supported linters
-	build.MustRun(goTool("get", "gopkg.in/alecthomas/gometalinter.v2"))
-	build.MustRunCommand(filepath.Join(GOBIN, "gometalinter.v2"), "--install")
-
-	// Run fast linters batched together
-	configs := []string{
-		"--vendor",
-		"--tests",
-		"--deadline=2m",
-		"--disable-all",
-		"--enable=goimports",
-		"--enable=varcheck",
-		"--enable=vet",
-		"--enable=gofmt",
-		"--enable=misspell",
-		"--enable=goconst",
-		"--min-occurrences=6", // for goconst
-	}
-	build.MustRunCommand(filepath.Join(GOBIN, "gometalinter.v2"), append(configs, packages...)...)
-
-	// Run slow linters one by one
-	for _, linter := range []string{"unconvert", "gosimple"} {
-		configs = []string{"--vendor", "--tests", "--deadline=10m", "--disable-all", "--enable=" + linter}
-		build.MustRunCommand(filepath.Join(GOBIN, "gometalinter.v2"), append(configs, packages...)...)
-	}
+	linter := downloadLinter(*cachedir)
+	lflags := []string{"run", "--config", ".golangci.yml"}
+	build.MustRunCommand(linter, append(lflags, packages...)...)
+	fmt.Println("You have achieved perfection.")
+}
+
+// downloadLinter downloads and unpacks golangci-lint.
+func downloadLinter(cachedir string) string {
+	const version = "1.21.0"
+
+	csdb := build.MustLoadChecksums("build/checksums.txt")
+	base := fmt.Sprintf("golangci-lint-%s-%s-%s", version, runtime.GOOS, runtime.GOARCH)
+	url := fmt.Sprintf("https://github.com/golangci/golangci-lint/releases/download/v%s/%s.tar.gz", version, base)
+	archivePath := filepath.Join(cachedir, base+".tar.gz")
+	if err := csdb.DownloadFile(url, archivePath); err != nil {
+		log.Fatal(err)
+	}
+	if err := build.ExtractTarballArchive(archivePath, cachedir); err != nil {
+		log.Fatal(err)
+	}
+	return filepath.Join(cachedir, base, "golangci-lint")
+}
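With the linter pinned this way, the lint step is invoked like any other ci.go subcommand; assuming a checked-out source tree, roughly:

    $ go run build/ci.go lint                      # lint everything
    $ go run build/ci.go lint ./core/... ./eth/... # or just selected packages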
// Release Packaging
@@ -459,11 +478,13 @@ func maybeSkipArchive(env build.Environment) {
// Debian Packaging
func doDebianSource(cmdline []string) {
	var (
-		signer  = flag.String("signer", "", `Signing key name, also used as package author`)
-		upload  = flag.String("upload", "", `Where to upload the source package (usually "ethereum/ethereum")`)
-		sshUser = flag.String("sftp-user", "", `Username for SFTP upload (usually "geth-ci")`)
-		workdir = flag.String("workdir", "", `Output directory for packages (uses temp dir if unset)`)
-		now     = time.Now()
+		goversion = flag.String("goversion", "", `Go version to build with (will be included in the source package)`)
+		cachedir  = flag.String("cachedir", "./build/cache", `Filesystem path to cache the downloaded Go bundles at`)
+		signer    = flag.String("signer", "", `Signing key name, also used as package author`)
+		upload    = flag.String("upload", "", `Where to upload the source package (usually "ethereum/ethereum")`)
+		sshUser   = flag.String("sftp-user", "", `Username for SFTP upload (usually "geth-ci")`)
+		workdir   = flag.String("workdir", "", `Output directory for packages (uses temp dir if unset)`)
+		now       = time.Now()
	)
	flag.CommandLine.Parse(cmdline)
	*workdir = makeWorkdir(*workdir)
@@ -477,12 +498,39 @@ func doDebianSource(cmdline []string) {
		build.MustRun(gpg)
	}

-	// Create Debian packages and upload them
+	// Download and verify the Go source package.
+	gobundle := downloadGoSources(*goversion, *cachedir)
+
+	// Download all the dependencies needed to build the sources and run the ci script
+	srcdepfetch := goTool("install", "-n", "./...")
+	srcdepfetch.Env = append(os.Environ(), "GOPATH="+filepath.Join(*workdir, "modgopath"))
+	build.MustRun(srcdepfetch)
+
+	cidepfetch := goTool("run", "./build/ci.go")
+	cidepfetch.Env = append(os.Environ(), "GOPATH="+filepath.Join(*workdir, "modgopath"))
+	cidepfetch.Run() // Command fails, don't care, we only need the deps to start it
+
+	// Create Debian packages and upload them.
	for _, pkg := range debPackages {
-		for _, distro := range debDistros {
-			meta := newDebMetadata(distro, *signer, env, now, pkg.Name, pkg.Version, pkg.Executables)
+		for distro, goboot := range debDistroGoBoots {
+			// Prepare the debian package with the go-ethereum sources.
+			meta := newDebMetadata(distro, goboot, *signer, env, now, pkg.Name, pkg.Version, pkg.Executables)
			pkgdir := stageDebianSource(*workdir, meta)
-			debuild := exec.Command("debuild", "-S", "-sa", "-us", "-uc", "-d", "-Zxz")
+
+			// Add Go source code
+			if err := build.ExtractTarballArchive(gobundle, pkgdir); err != nil {
+				log.Fatalf("Failed to extract Go sources: %v", err)
+			}
+			if err := os.Rename(filepath.Join(pkgdir, "go"), filepath.Join(pkgdir, ".go")); err != nil {
+				log.Fatalf("Failed to rename Go source folder: %v", err)
+			}
+			// Add all dependency modules in compressed form
+			os.MkdirAll(filepath.Join(pkgdir, ".mod", "cache"), 0755)
+			if err := cp.CopyAll(filepath.Join(pkgdir, ".mod", "cache", "download"), filepath.Join(*workdir, "modgopath", "pkg", "mod", "cache", "download")); err != nil {
+				log.Fatalf("Failed to copy Go module dependencies: %v", err)
+			}
+			// Run the packaging and upload to the PPA
+			debuild := exec.Command("debuild", "-S", "-sa", "-us", "-uc", "-d", "-Zxz", "-nc")
			debuild.Dir = pkgdir
			build.MustRun(debuild)

@@ -502,6 +550,17 @@ func doDebianSource(cmdline []string) {
	}
}

+func downloadGoSources(version string, cachedir string) string {
+	csdb := build.MustLoadChecksums("build/checksums.txt")
+	file := fmt.Sprintf("go%s.src.tar.gz", version)
+	url := "https://dl.google.com/go/" + file
+	dst := filepath.Join(cachedir, file)
+	if err := csdb.DownloadFile(url, dst); err != nil {
+		log.Fatal(err)
+	}
+	return dst
+}
+
func ppaUpload(workdir, ppa, sshUser string, files []string) {
	p := strings.Split(ppa, "/")
	if len(p) != 2 {
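For a local dry run of the new source-package flow, an invocation might look like this (illustrative values; the -goversion chosen must have a matching go<version>.src.tar.gz entry in build/checksums.txt):

    $ go run build/ci.go debsrc -goversion 1.13.4 -cachedir ./build/cache -workdir ./dist -signer "Your Name <you@example.org>"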
@@ -561,7 +620,9 @@ type debPackage struct {
}

type debMetadata struct {
-	Env build.Environment
+	Env           build.Environment
+	GoBootPackage string
+	GoBootPath    string

	PackageName string

@@ -590,19 +651,21 @@ func (d debExecutable) Package() string {
	return d.BinaryName
}

-func newDebMetadata(distro, author string, env build.Environment, t time.Time, name string, version string, exes []debExecutable) debMetadata {
+func newDebMetadata(distro, goboot, author string, env build.Environment, t time.Time, name string, version string, exes []debExecutable) debMetadata {
	if author == "" {
		// No signing key, use default author.
		author = "Ethereum Builds <fjl@ethereum.org>"
	}
	return debMetadata{
-		PackageName: name,
-		Env:         env,
-		Author:      author,
-		Distro:      distro,
-		Version:     version,
-		Time:        t.Format(time.RFC1123Z),
-		Executables: exes,
+		GoBootPackage: goboot,
+		GoBootPath:    debGoBootPaths[goboot],
+		PackageName:   name,
+		Env:           env,
+		Author:        author,
+		Distro:        distro,
+		Version:       version,
+		Time:          t.Format(time.RFC1123Z),
+		Executables:   exes,
	}
}
@@ -667,7 +730,6 @@ func stageDebianSource(tmpdir string, meta debMetadata) (pkgdir string) {
	if err := os.Mkdir(pkgdir, 0755); err != nil {
		log.Fatal(err)
	}

	// Copy the source code.
	build.MustRunCommand("git", "checkout-index", "-a", "--prefix", pkgdir+string(filepath.Separator))

@@ -685,7 +747,6 @@ func stageDebianSource(tmpdir string, meta debMetadata) (pkgdir string) {
		build.Render("build/deb/"+meta.PackageName+"/deb.install", install, 0644, exe)
		build.Render("build/deb/"+meta.PackageName+"/deb.docs", docs, 0644, exe)
	}

	return pkgdir
}

@@ -733,9 +794,12 @@ func doWindowsInstaller(cmdline []string) {
	build.Render("build/nsis.uninstall.nsh", filepath.Join(*workdir, "uninstall.nsh"), 0644, allTools)
	build.Render("build/nsis.pathupdate.nsh", filepath.Join(*workdir, "PathUpdate.nsh"), 0644, nil)
	build.Render("build/nsis.envvarupdate.nsh", filepath.Join(*workdir, "EnvVarUpdate.nsh"), 0644, nil)
-	build.CopyFile(filepath.Join(*workdir, "SimpleFC.dll"), "build/nsis.simplefc.dll", 0755)
-	build.CopyFile(filepath.Join(*workdir, "COPYING"), "COPYING", 0755)
+	if err := cp.CopyFile(filepath.Join(*workdir, "SimpleFC.dll"), "build/nsis.simplefc.dll"); err != nil {
+		log.Fatalf("Failed to copy SimpleFC.dll: %v", err)
+	}
+	if err := cp.CopyFile(filepath.Join(*workdir, "COPYING"), "COPYING"); err != nil {
+		log.Fatalf("Failed to copy copyright note: %v", err)
+	}
	// Build the installer. This assumes that all the needed files have been previously
	// built (don't mix building and packaging to keep cross compilation complexity to a
	// minimum).
@@ -752,7 +816,6 @@ func doWindowsInstaller(cmdline []string) {
		"/DARCH="+*arch,
		filepath.Join(*workdir, "geth.nsi"),
	)

	// Sign and publish installer.
	if err := archiveUpload(installer, *upload, *signer); err != nil {
		log.Fatal(err)
@@ -1013,16 +1076,11 @@ func doXgo(cmdline []string) {

func xgoTool(args []string) *exec.Cmd {
	cmd := exec.Command(filepath.Join(GOBIN, "xgo"), args...)
-	cmd.Env = []string{
+	cmd.Env = os.Environ()
+	cmd.Env = append(cmd.Env, []string{
		"GOPATH=" + build.GOPATH(),
		"GOBIN=" + GOBIN,
-	}
-	for _, e := range os.Environ() {
-		if strings.HasPrefix(e, "GOPATH=") || strings.HasPrefix(e, "GOBIN=") {
-			continue
-		}
-		cmd.Env = append(cmd.Env, e)
-	}
+	}...)
	return cmd
}
@@ -1,19 +0,0 @@
-#!/bin/sh
-
-# Cleaning the Go cache only makes sense if we actually have Go installed... or
-# if Go is actually callable. This does not hold true during deb packaging, so
-# we need an explicit check to avoid build failures.
-if ! command -v go > /dev/null; then
-	exit
-fi
-
-version_gt() {
-	test "$(printf '%s\n' "$@" | sort -V | head -n 1)" != "$1"
-}
-
-golang_version=$(go version |cut -d' ' -f3 |sed 's/go//')
-
-# Clean go build cache when go version is greater than or equal to 1.10
-if !(version_gt 1.10 $golang_version); then
-	go clean -cache
-fi
@@ -2,7 +2,7 @@ Source: {{.Name}}
Section: science
Priority: extra
Maintainer: {{.Author}}
-Build-Depends: debhelper (>= 8.0.0), golang-1.11
+Build-Depends: debhelper (>= 8.0.0), {{.GoBootPackage}}
Standards-Version: 3.9.5
Homepage: https://ethereum.org
Vcs-Git: git://github.com/ethereum/go-ethereum.git
@@ -4,11 +4,25 @@
# Uncomment this to turn on verbose mode.
#export DH_VERBOSE=1

-# Launchpad rejects Go's access to $HOME/.cache, use custom folder
+# Launchpad rejects Go's access to $HOME, use custom folders
export GOCACHE=/tmp/go-build
+export GOROOT_BOOTSTRAP={{.GoBootPath}}

override_dh_auto_clean:
	# Don't try to be smart Launchpad, we know our build rules better than you

override_dh_auto_build:
-	build/env.sh /usr/lib/go-1.11/bin/go run build/ci.go install -git-commit={{.Env.Commit}} -git-branch={{.Env.Branch}} -git-tag={{.Env.Tag}} -buildnum={{.Env.Buildnum}} -pull-request={{.Env.IsPullRequest}}
+	# We can't download a fresh Go within Launchpad, so we're shipping and building
+	# one on the fly. However, we can't build it inside the go-ethereum folder as
+	# bootstrapping clashes with go modules, so build in a sibling folder.
+	(mv .go ../ && cd ../.go/src && ./make.bash)
+
+	# We can't download external go modules within Launchpad, so we're shipping the
+	# entire dependency source cache with go-ethereum.
+	(mkdir -p build/_workspace/pkg/mod && mv .mod/* build/_workspace/pkg/mod)
+
+	# A fresh Go was built, all dependency downloads faked, hope build works now
+	build/env.sh ../.go/bin/go run build/ci.go install -git-commit={{.Env.Commit}} -git-branch={{.Env.Branch}} -git-tag={{.Env.Tag}} -buildnum={{.Env.Buildnum}} -pull-request={{.Env.IsPullRequest}}

override_dh_auto_test:
@@ -21,6 +21,7 @@ import (
	"fmt"
	"io/ioutil"
	"os"
+	"regexp"
	"strings"

	"github.com/ethereum/go-ethereum/accounts/abi/bind"
@@ -31,19 +32,6 @@ import (
	"gopkg.in/urfave/cli.v1"
)

-const (
-	commandHelperTemplate = `{{.Name}}{{if .Subcommands}} command{{end}}{{if .Flags}} [command options]{{end}} [arguments...]
-{{if .Description}}{{.Description}}
-{{end}}{{if .Subcommands}}
-SUBCOMMANDS:
-	{{range .Subcommands}}{{.Name}}{{with .ShortName}}, {{.}}{{end}}{{ "\t" }}{{.Usage}}
-	{{end}}{{end}}{{if .Flags}}
-OPTIONS:
-{{range $.Flags}}{{"\t"}}{{.}}
-{{end}}
-{{end}}`
-)
-
var (
	// Git SHA1 commit hash of the release (set via linker flags)
	gitCommit = ""
@@ -103,6 +91,10 @@ var (
		Usage: "Destination language for the bindings (go, java, objc)",
		Value: "go",
	}
+	aliasFlag = cli.StringFlag{
+		Name:  "alias",
+		Usage: "Comma separated aliases for function and event renaming, e.g. foo=bar",
+	}
)

func init() {
@@ -120,9 +112,10 @@ func init() {
		pkgFlag,
		outFlag,
		langFlag,
+		aliasFlag,
	}
	app.Action = utils.MigrateFlags(abigen)
-	cli.CommandHelpTemplate = commandHelperTemplate
+	cli.CommandHelpTemplate = utils.OriginCommandHelpTemplate
}

func abigen(c *cli.Context) error {
@@ -144,11 +137,12 @@ func abigen(c *cli.Context) error {
	}
	// If the entire solidity code was specified, build and bind based on that
	var (
-		abis  []string
-		bins  []string
-		types []string
-		sigs  []map[string]string
-		libs  = make(map[string]string)
+		abis    []string
+		bins    []string
+		types   []string
+		sigs    []map[string]string
+		libs    = make(map[string]string)
+		aliases = make(map[string]string)
	)
	if c.GlobalString(abiFlag.Name) != "" {
		// Load up the ABI, optional bytecode and type name from the parameters
@@ -232,8 +226,20 @@ func abigen(c *cli.Context) error {
			libs[libPattern] = nameParts[len(nameParts)-1]
		}
	}
+	// Extract all aliases from the flags
+	if c.GlobalIsSet(aliasFlag.Name) {
+		// We support multi-versions for aliasing
+		// e.g.
+		//   foo=bar,foo2=bar2
+		//   foo:bar,foo2:bar2
+		re := regexp.MustCompile(`(?:(\w+)[:=](\w+))`)
+		submatches := re.FindAllStringSubmatch(c.GlobalString(aliasFlag.Name), -1)
+		for _, match := range submatches {
+			aliases[match[1]] = match[2]
+		}
+	}
	// Generate the contract binding
-	code, err := bind.Bind(types, abis, bins, sigs, c.GlobalString(pkgFlag.Name), lang, libs)
+	code, err := bind.Bind(types, abis, bins, sigs, c.GlobalString(pkgFlag.Name), lang, libs, aliases)
	if err != nil {
		utils.Fatalf("Failed to generate ABI binding: %v", err)
	}
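Usage of the new flag, with hypothetical contract and file names (both the = and : separators are accepted by the regexp above):

    $ abigen --abi Token.abi --pkg token --alias transfer=sendTokens,approve:allowSpend --out token.go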
@@ -28,19 +28,6 @@ import (
	"gopkg.in/urfave/cli.v1"
)

-const (
-	commandHelperTemplate = `{{.Name}}{{if .Subcommands}} command{{end}}{{if .Flags}} [command options]{{end}} [arguments...]
-{{if .Description}}{{.Description}}
-{{end}}{{if .Subcommands}}
-SUBCOMMANDS:
-	{{range .Subcommands}}{{.Name}}{{with .ShortName}}, {{.}}{{end}}{{ "\t" }}{{.Usage}}
-	{{end}}{{end}}{{if .Flags}}
-OPTIONS:
-{{range $.Flags}}{{"\t"}}{{.}}
-{{end}}
-{{end}}`
-)
-
var (
	// Git SHA1 commit hash of the release (set via linker flags)
	gitCommit = ""
@@ -61,7 +48,7 @@ func init() {
		oracleFlag,
		nodeURLFlag,
	}
-	cli.CommandHelpTemplate = commandHelperTemplate
+	cli.CommandHelpTemplate = utils.OriginCommandHelpTemplate
}

// Commonly used command line flags.
@@ -223,6 +223,7 @@ func init() {
	}
	app.Action = signer
	app.Commands = []cli.Command{initCommand, attestCommand, setCredentialCommand, delCredentialCommand, gendocCommand}
+	cli.CommandHelpTemplate = utils.OriginCommandHelpTemplate
}

func main() {
@@ -387,7 +388,7 @@ func initialize(c *cli.Context) error {
	if c.GlobalBool(stdiouiFlag.Name) {
		logOutput = os.Stderr
		// If using the stdioui, we can't do the 'confirm'-flow
-		fmt.Fprintf(logOutput, legalWarning)
+		fmt.Fprint(logOutput, legalWarning)
	} else {
		if !confirm(legalWarning) {
			return fmt.Errorf("aborted by user")
@@ -404,6 +405,27 @@ func initialize(c *cli.Context) error {
	return nil
}

+// ipcEndpoint resolves an IPC endpoint based on a configured value, taking into
+// account the set data folders as well as the designated platform we're currently
+// running on.
+func ipcEndpoint(ipcPath, datadir string) string {
+	// On windows we can only use plain top-level pipes
+	if runtime.GOOS == "windows" {
+		if strings.HasPrefix(ipcPath, `\\.\pipe\`) {
+			return ipcPath
+		}
+		return `\\.\pipe\` + ipcPath
+	}
+	// Resolve names into the data directory full paths otherwise
+	if filepath.Base(ipcPath) == ipcPath {
+		if datadir == "" {
+			return filepath.Join(os.TempDir(), ipcPath)
+		}
+		return filepath.Join(datadir, ipcPath)
+	}
+	return ipcPath
+}
+
func signer(c *cli.Context) error {
	// If we have some unrecognized command, bail out
	if args := c.Args(); len(args) > 0 {
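To make the resolution rules above concrete, a few illustrative evaluations (paths invented for the example):

// Unix-like systems:
//   ipcEndpoint("clef.ipc", "/home/u/.clef")      -> "/home/u/.clef/clef.ipc"
//   ipcEndpoint("clef.ipc", "")                   -> filepath.Join(os.TempDir(), "clef.ipc")
//   ipcEndpoint("/run/clef.ipc", "/home/u/.clef") -> "/run/clef.ipc" (explicit paths pass through)
// Windows always maps onto a top-level named pipe:
//   ipcEndpoint("clef.ipc", `C:\data`)            -> `\\.\pipe\clef.ipc`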
@@ -532,12 +554,8 @@ func signer(c *cli.Context) error {
		}()
	}
	if !c.GlobalBool(utils.IPCDisabledFlag.Name) {
-		if c.IsSet(utils.IPCPathFlag.Name) {
-			ipcapiURL = c.GlobalString(utils.IPCPathFlag.Name)
-		} else {
-			ipcapiURL = filepath.Join(configDir, "clef.ipc")
-		}
-
+		givenPath := c.GlobalString(utils.IPCPathFlag.Name)
+		ipcapiURL = ipcEndpoint(filepath.Join(givenPath, "clef.ipc"), configDir)
		listener, _, err := rpc.StartIPCEndpoint(ipcapiURL, rpcAPI)
		if err != nil {
			utils.Fatalf("Could not start IPC api: %v", err)
@@ -547,7 +565,6 @@ func signer(c *cli.Context) error {
			listener.Close()
			log.Info("IPC endpoint closed", "url", ipcapiURL)
		}()
-
	}

	if c.GlobalBool(testFlag.Name) {
@@ -563,7 +580,7 @@ func signer(c *cli.Context) error {
		},
	})

-	abortChan := make(chan os.Signal)
+	abortChan := make(chan os.Signal, 1)
	signal.Notify(abortChan, os.Interrupt)

	sig := <-abortChan
@@ -678,7 +695,7 @@ func checkFile(filename string) error {

// confirm displays a text and asks for user confirmation
func confirm(text string) bool {
-	fmt.Printf(text)
+	fmt.Print(text)
	fmt.Printf("\nEnter 'ok' to proceed:\n> ")

	text, err := bufio.NewReader(os.Stdin).ReadString('\n')
@@ -744,21 +761,19 @@ func testExternalUI(api *core.SignerAPI) {
	api.UI.ShowInfo("Please approve the next request for signing a clique header")
	time.Sleep(delay)
	cliqueHeader := types.Header{
-		common.HexToHash("0000H45H"),
-		common.HexToHash("0000H45H"),
-		common.HexToAddress("0000H45H"),
-		common.HexToHash("0000H00H"),
-		common.HexToHash("0000H45H"),
-		common.HexToHash("0000H45H"),
-		types.Bloom{},
-		big.NewInt(1337),
-		big.NewInt(1337),
-		1338,
-		1338,
-		1338,
-		[]byte("Extra data Extra data Extra data Extra data Extra data Extra data Extra data Extra data"),
-		common.HexToHash("0x0000H45H"),
-		types.BlockNonce{},
+		ParentHash:  common.HexToHash("0000H45H"),
+		UncleHash:   common.HexToHash("0000H45H"),
+		Coinbase:    common.HexToAddress("0000H45H"),
+		Root:        common.HexToHash("0000H00H"),
+		TxHash:      common.HexToHash("0000H45H"),
+		ReceiptHash: common.HexToHash("0000H45H"),
+		Difficulty:  big.NewInt(1337),
+		Number:      big.NewInt(1337),
+		GasLimit:    1338,
+		GasUsed:     1338,
+		Time:        1338,
+		Extra:       []byte("Extra data Extra data Extra data Extra data Extra data Extra data Extra data Extra data"),
+		MixDigest:   common.HexToHash("0x0000H45H"),
	}
	cliqueRlp, err := rlp.EncodeToBytes(cliqueHeader)
	if err != nil {
@@ -922,7 +937,7 @@ func GenDoc(ctx *cli.Context) {
		"of the work in canonicalizing and making sense of the data, and it's up to the UI to present" +
		"the user with the contents of the `message`"
	sighash, msg := accounts.TextAndHash([]byte("hello world"))
-	messages := []*core.NameValueType{{"message", msg, accounts.MimetypeTextPlain}}
+	messages := []*core.NameValueType{{Name: "message", Value: msg, Typ: accounts.MimetypeTextPlain}}

	add("SignDataRequest", desc, &core.SignDataRequest{
		Address: common.NewMixedcaseAddress(a),
@@ -953,8 +968,8 @@ func GenDoc(ctx *cli.Context) {
	add("SignTxRequest", desc, &core.SignTxRequest{
		Meta: meta,
		Callinfo: []core.ValidationInfo{
-			{"Warning", "Something looks odd, show this message as a warning"},
-			{"Info", "User should see this aswell"},
+			{Typ: "Warning", Message: "Something looks odd, show this message as a warning"},
+			{Typ: "Info", Message: "User should see this as well"},
		},
		Transaction: core.SendTxArgs{
			Data: &data,
@@ -1020,16 +1035,21 @@ func GenDoc(ctx *cli.Context) {
		&core.ListRequest{
			Meta: meta,
			Accounts: []accounts.Account{
-				{a, accounts.URL{Scheme: "keystore", Path: "/path/to/keyfile/a"}},
-				{b, accounts.URL{Scheme: "keystore", Path: "/path/to/keyfile/b"}}},
+				{Address: a, URL: accounts.URL{Scheme: "keystore", Path: "/path/to/keyfile/a"}},
+				{Address: b, URL: accounts.URL{Scheme: "keystore", Path: "/path/to/keyfile/b"}}},
		})

	add("ListResponse", "Response to list request. The response contains a list of all addresses to show to the caller. "+
		"Note: the UI is free to respond with any address the caller, regardless of whether it exists or not",
		&core.ListResponse{
			Accounts: []accounts.Account{
-				{common.HexToAddress("0xcowbeef000000cowbeef00000000000000000c0w"), accounts.URL{Path: ".. ignored .."}},
-				{common.HexToAddress("0xffffffffffffffffffffffffffffffffffffffff"), accounts.URL{}},
+				{
+					Address: common.HexToAddress("0xcowbeef000000cowbeef00000000000000000c0w"),
+					URL:     accounts.URL{Path: ".. ignored .."},
+				},
+				{
+					Address: common.HexToAddress("0xffffffffffffffffffffffffffffffffffffffff"),
+				},
			}})
}
152 cmd/devp2p/crawl.go (new file)
@@ -0,0 +1,152 @@
// Copyright 2019 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.

package main

import (
	"time"

	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/p2p/discover"
	"github.com/ethereum/go-ethereum/p2p/enode"
)

type crawler struct {
	input     nodeSet
	output    nodeSet
	disc      *discover.UDPv4
	iters     []enode.Iterator
	inputIter enode.Iterator
	ch        chan *enode.Node
	closed    chan struct{}

	// settings
	revalidateInterval time.Duration
}

func newCrawler(input nodeSet, disc *discover.UDPv4, iters ...enode.Iterator) *crawler {
	c := &crawler{
		input:     input,
		output:    make(nodeSet, len(input)),
		disc:      disc,
		iters:     iters,
		inputIter: enode.IterNodes(input.nodes()),
		ch:        make(chan *enode.Node),
		closed:    make(chan struct{}),
	}
	c.iters = append(c.iters, c.inputIter)
	// Copy input to output initially. Any nodes that fail validation
	// will be dropped from output during the run.
	for id, n := range input {
		c.output[id] = n
	}
	return c
}

func (c *crawler) run(timeout time.Duration) nodeSet {
	var (
		timeoutTimer = time.NewTimer(timeout)
		timeoutCh    <-chan time.Time
		doneCh       = make(chan enode.Iterator, len(c.iters))
		liveIters    = len(c.iters)
	)
	for _, it := range c.iters {
		go c.runIterator(doneCh, it)
	}

loop:
	for {
		select {
		case n := <-c.ch:
			c.updateNode(n)
		case it := <-doneCh:
			if it == c.inputIter {
				// Enable timeout when we're done revalidating the input nodes.
				log.Info("Revalidation of input set is done", "len", len(c.input))
				if timeout > 0 {
					timeoutCh = timeoutTimer.C
				}
			}
			if liveIters--; liveIters == 0 {
				break loop
			}
		case <-timeoutCh:
			break loop
		}
	}

	close(c.closed)
	for _, it := range c.iters {
		it.Close()
	}
	for ; liveIters > 0; liveIters-- {
		<-doneCh
	}
	return c.output
}

func (c *crawler) runIterator(done chan<- enode.Iterator, it enode.Iterator) {
	defer func() { done <- it }()
	for it.Next() {
		select {
		case c.ch <- it.Node():
		case <-c.closed:
			return
		}
	}
}

func (c *crawler) updateNode(n *enode.Node) {
	node, ok := c.output[n.ID()]

	// Skip validation of recently-seen nodes.
	if ok && time.Since(node.LastCheck) < c.revalidateInterval {
		return
	}

	// Request the node record.
	nn, err := c.disc.RequestENR(n)
	node.LastCheck = truncNow()
	if err != nil {
		if node.Score == 0 {
			// Node doesn't implement EIP-868.
			log.Debug("Skipping node", "id", n.ID())
			return
		}
		node.Score /= 2
	} else {
		node.N = nn
		node.Seq = nn.Seq()
		node.Score++
		if node.FirstResponse.IsZero() {
			node.FirstResponse = node.LastCheck
		}
		node.LastResponse = node.LastCheck
	}

	// Store/update node in output set.
	if node.Score <= 0 {
		log.Info("Removing node", "id", n.ID())
		delete(c.output, n.ID())
	} else {
		log.Info("Updating node", "id", n.ID(), "seq", n.Seq(), "score", node.Score)
		c.output[n.ID()] = node
	}
}

func truncNow() time.Time {
	return time.Now().UTC().Truncate(1 * time.Second)
}
@@ -39,6 +39,7 @@ var (
		discv4RequestRecordCommand,
		discv4ResolveCommand,
		discv4ResolveJSONCommand,
+		discv4CrawlCommand,
	},
}
discv4PingCommand = cli.Command{
@@ -67,12 +68,25 @@ var (
		Flags:     []cli.Flag{bootnodesFlag},
		ArgsUsage: "<nodes.json file>",
	}
+	discv4CrawlCommand = cli.Command{
+		Name:   "crawl",
+		Usage:  "Updates a nodes.json file with random nodes found in the DHT",
+		Action: discv4Crawl,
+		Flags:  []cli.Flag{bootnodesFlag, crawlTimeoutFlag},
+	}
)

-var bootnodesFlag = cli.StringFlag{
-	Name:  "bootnodes",
-	Usage: "Comma separated nodes used for bootstrapping",
-}
+var (
+	bootnodesFlag = cli.StringFlag{
+		Name:  "bootnodes",
+		Usage: "Comma separated nodes used for bootstrapping",
+	}
+	crawlTimeoutFlag = cli.DurationFlag{
+		Name:  "timeout",
+		Usage: "Time limit for the crawl.",
+		Value: 30 * time.Minute,
+	}
+)

func discv4Ping(ctx *cli.Context) error {
	n := getNodeArg(ctx)
@@ -113,30 +127,48 @@ func discv4ResolveJSON(ctx *cli.Context) error {
	if ctx.NArg() < 1 {
		return fmt.Errorf("need nodes file as argument")
	}
-	disc := startV4(ctx)
-	defer disc.Close()
-	file := ctx.Args().Get(0)
-
-	// Load existing nodes in file.
-	var nodes []*enode.Node
-	if common.FileExist(file) {
-		nodes = loadNodesJSON(file).nodes()
+	nodesFile := ctx.Args().Get(0)
+	inputSet := make(nodeSet)
+	if common.FileExist(nodesFile) {
+		inputSet = loadNodesJSON(nodesFile)
	}
-	// Add nodes from command line arguments.
+
+	// Add extra nodes from command line arguments.
+	var nodeargs []*enode.Node
	for i := 1; i < ctx.NArg(); i++ {
		n, err := parseNode(ctx.Args().Get(i))
		if err != nil {
			exit(err)
		}
-		nodes = append(nodes, n)
+		nodeargs = append(nodeargs, n)
	}
-
-	result := make(nodeSet, len(nodes))
-	for _, n := range nodes {
-		n = disc.Resolve(n)
-		result[n.ID()] = nodeJSON{Seq: n.Seq(), N: n}
-	}
-	writeNodesJSON(file, result)
+	// Run the crawler.
+	disc := startV4(ctx)
+	defer disc.Close()
+	c := newCrawler(inputSet, disc, enode.IterNodes(nodeargs))
+	c.revalidateInterval = 0
+	output := c.run(0)
+	writeNodesJSON(nodesFile, output)
+	return nil
+}
+
+func discv4Crawl(ctx *cli.Context) error {
+	if ctx.NArg() < 1 {
+		return fmt.Errorf("need nodes file as argument")
+	}
+	nodesFile := ctx.Args().First()
+	var inputSet nodeSet
+	if common.FileExist(nodesFile) {
+		inputSet = loadNodesJSON(nodesFile)
+	}
+
+	disc := startV4(ctx)
+	defer disc.Close()
+	c := newCrawler(inputSet, disc, disc.RandomNodes())
+	c.revalidateInterval = 10 * time.Minute
+	output := c.run(ctx.Duration(crawlTimeoutFlag.Name))
+	writeNodesJSON(nodesFile, output)
	return nil
}
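A typical session with the new subcommand, using an invented file name:

    $ devp2p discv4 crawl -timeout 30m all-nodes.json   # revalidates known nodes, discovers new ones
    $ devp2p nodeset info all-nodes.json                # see the nodeset tool added below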
@@ -109,7 +109,8 @@ func dnsSync(ctx *cli.Context) error {
	}
	def := treeToDefinition(url, t)
	def.Meta.LastModified = time.Now()
-	writeTreeDefinition(outdir, def)
+	writeTreeMetadata(outdir, def)
+	writeTreeNodes(outdir, def)
	return nil
}
@@ -151,7 +152,7 @@ func dnsSign(ctx *cli.Context) error {

	def = treeToDefinition(url, t)
	def.Meta.LastModified = time.Now()
-	writeTreeDefinition(defdir, def)
+	writeTreeMetadata(defdir, def)
	return nil
}
@@ -315,26 +316,28 @@ func ensureValidTreeSignature(t *dnsdisc.Tree, pubkey *ecdsa.PublicKey, sig stri
	return nil
}

-// writeTreeDefinition writes a DNS node tree definition to the given directory.
-func writeTreeDefinition(directory string, def *dnsDefinition) {
+// writeTreeMetadata writes a DNS node tree metadata file to the given directory.
+func writeTreeMetadata(directory string, def *dnsDefinition) {
	metaJSON, err := json.MarshalIndent(&def.Meta, "", jsonIndent)
	if err != nil {
		exit(err)
	}
-	// Convert nodes.
-	nodes := make(nodeSet, len(def.Nodes))
-	nodes.add(def.Nodes...)
-	// Write.
	if err := os.Mkdir(directory, 0744); err != nil && !os.IsExist(err) {
		exit(err)
	}
-	metaFile, nodesFile := treeDefinitionFiles(directory)
-	writeNodesJSON(nodesFile, nodes)
+	metaFile, _ := treeDefinitionFiles(directory)
	if err := ioutil.WriteFile(metaFile, metaJSON, 0644); err != nil {
		exit(err)
	}
}

+func writeTreeNodes(directory string, def *dnsDefinition) {
+	ns := make(nodeSet, len(def.Nodes))
+	ns.add(def.Nodes...)
+	_, nodesFile := treeDefinitionFiles(directory)
+	writeNodesJSON(nodesFile, ns)
+}
+
func treeDefinitionFiles(directory string) (string, string) {
	meta := filepath.Join(directory, "enrtree-info.json")
	nodes := filepath.Join(directory, "nodes.json")
@@ -60,6 +60,7 @@ func init() {
		enrdumpCommand,
		discv4Command,
		dnsCommand,
+		nodesetCommand,
	}
}
@@ -21,7 +21,9 @@ import (
	"encoding/json"
	"fmt"
	"io/ioutil"
+	"os"
	"sort"
+	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/p2p/enode"
@@ -36,6 +38,15 @@ type nodeSet map[enode.ID]nodeJSON
type nodeJSON struct {
	Seq uint64      `json:"seq"`
	N   *enode.Node `json:"record"`

+	// The score tracks how many liveness checks were performed. It is incremented by one
+	// every time the node passes a check, and halved every time it doesn't.
+	Score int `json:"score,omitempty"`
+	// These two track the time of last successful contact.
+	FirstResponse time.Time `json:"firstResponse,omitempty"`
+	LastResponse  time.Time `json:"lastResponse,omitempty"`
+	// This one tracks the time of our last attempt to contact the node.
+	LastCheck time.Time `json:"lastCheck,omitempty"`
}

func loadNodesJSON(file string) nodeSet {
@@ -51,6 +62,10 @@ func writeNodesJSON(file string, nodes nodeSet) {
	if err != nil {
		exit(err)
	}
+	if file == "-" {
+		os.Stdout.Write(nodesJSON)
+		return
+	}
	if err := ioutil.WriteFile(file, nodesJSON, 0644); err != nil {
		exit(err)
	}
193 cmd/devp2p/nodesetcmd.go (new file)
@@ -0,0 +1,193 @@
// Copyright 2019 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.

package main

import (
	"fmt"
	"net"
	"time"

	"github.com/ethereum/go-ethereum/core/forkid"
	"github.com/ethereum/go-ethereum/p2p/enr"
	"github.com/ethereum/go-ethereum/params"
	"github.com/ethereum/go-ethereum/rlp"
	"gopkg.in/urfave/cli.v1"
)

var (
	nodesetCommand = cli.Command{
		Name:  "nodeset",
		Usage: "Node set tools",
		Subcommands: []cli.Command{
			nodesetInfoCommand,
			nodesetFilterCommand,
		},
	}
	nodesetInfoCommand = cli.Command{
		Name:      "info",
		Usage:     "Shows statistics about a node set",
		Action:    nodesetInfo,
		ArgsUsage: "<nodes.json>",
	}
	nodesetFilterCommand = cli.Command{
		Name:      "filter",
		Usage:     "Filters a node set",
		Action:    nodesetFilter,
		ArgsUsage: "<nodes.json> filters..",

		SkipFlagParsing: true,
	}
)

func nodesetInfo(ctx *cli.Context) error {
	if ctx.NArg() < 1 {
		return fmt.Errorf("need nodes file as argument")
	}

	ns := loadNodesJSON(ctx.Args().First())
	fmt.Printf("Set contains %d nodes.\n", len(ns))
	return nil
}

func nodesetFilter(ctx *cli.Context) error {
	if ctx.NArg() < 1 {
		return fmt.Errorf("need nodes file as argument")
	}
	ns := loadNodesJSON(ctx.Args().First())
	filter, err := andFilter(ctx.Args().Tail())
	if err != nil {
		return err
	}

	result := make(nodeSet)
	for id, n := range ns {
		if filter(n) {
			result[id] = n
		}
	}
	writeNodesJSON("-", result)
	return nil
}

type nodeFilter func(nodeJSON) bool

type nodeFilterC struct {
	narg int
	fn   func([]string) (nodeFilter, error)
}

var filterFlags = map[string]nodeFilterC{
	"-ip":          {1, ipFilter},
	"-min-age":     {1, minAgeFilter},
	"-eth-network": {1, ethFilter},
	"-les-server":  {0, lesFilter},
}

func parseFilters(args []string) ([]nodeFilter, error) {
	var filters []nodeFilter
	for len(args) > 0 {
		fc, ok := filterFlags[args[0]]
		if !ok {
			return nil, fmt.Errorf("invalid filter %q", args[0])
		}
		if len(args) < fc.narg {
			return nil, fmt.Errorf("filter %q wants %d arguments, have %d", args[0], fc.narg, len(args))
		}
		filter, err := fc.fn(args[1:])
		if err != nil {
			return nil, fmt.Errorf("%s: %v", args[0], err)
		}
		filters = append(filters, filter)
		args = args[fc.narg+1:]
	}
	return filters, nil
}

func andFilter(args []string) (nodeFilter, error) {
	checks, err := parseFilters(args)
	if err != nil {
		return nil, err
	}
	f := func(n nodeJSON) bool {
		for _, filter := range checks {
			if !filter(n) {
				return false
			}
		}
		return true
	}
	return f, nil
}

func ipFilter(args []string) (nodeFilter, error) {
	_, cidr, err := net.ParseCIDR(args[0])
	if err != nil {
		return nil, err
	}
	f := func(n nodeJSON) bool { return cidr.Contains(n.N.IP()) }
	return f, nil
}

func minAgeFilter(args []string) (nodeFilter, error) {
	minage, err := time.ParseDuration(args[0])
	if err != nil {
		return nil, err
	}
	f := func(n nodeJSON) bool {
		age := n.LastResponse.Sub(n.FirstResponse)
		return age >= minage
	}
	return f, nil
}

func ethFilter(args []string) (nodeFilter, error) {
	var filter forkid.Filter
	switch args[0] {
	case "mainnet":
		filter = forkid.NewStaticFilter(params.MainnetChainConfig, params.MainnetGenesisHash)
	case "rinkeby":
		filter = forkid.NewStaticFilter(params.RinkebyChainConfig, params.RinkebyGenesisHash)
	case "goerli":
		filter = forkid.NewStaticFilter(params.GoerliChainConfig, params.GoerliGenesisHash)
	case "ropsten":
		filter = forkid.NewStaticFilter(params.TestnetChainConfig, params.TestnetGenesisHash)
	default:
		return nil, fmt.Errorf("unknown network %q", args[0])
	}

	f := func(n nodeJSON) bool {
		var eth struct {
			ForkID forkid.ID
			_      []rlp.RawValue `rlp:"tail"`
		}
		if n.N.Load(enr.WithEntry("eth", &eth)) != nil {
			return false
		}
		return filter(eth.ForkID) == nil
	}
	return f, nil
}

func lesFilter(args []string) (nodeFilter, error) {
	f := func(n nodeJSON) bool {
		var les struct {
			_ []rlp.RawValue `rlp:"tail"`
		}
		return n.N.Load(enr.WithEntry("les", &les)) == nil
	}
	return f, nil
}
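Since writeNodesJSON treats "-" as stdout, filters compose with shell redirection; an illustrative run combining three filter kinds:

    $ devp2p nodeset filter all-nodes.json -eth-network mainnet -min-age 24h -ip 10.0.0.0/8 > mainnet-nodes.json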
@@ -77,7 +77,7 @@ Change the password of a keyfile.`,
	}

	// Then write the new keyfile in place of the old one.
-	if err := ioutil.WriteFile(keyfilepath, newJson, 600); err != nil {
+	if err := ioutil.WriteFile(keyfilepath, newJson, 0600); err != nil {
		utils.Fatalf("Error writing new keyfile to disk: %v", err)
	}
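The one-character fix above matters because Go integer literals are decimal unless 0-prefixed, so 600 is not the permission 0600. A quick standalone check:

package main

import "fmt"

func main() {
	// 600 decimal is 1130 octal: group-writable and the owner loses read access.
	fmt.Printf("600 decimal = %04o octal\n", 600)  // 1130
	fmt.Printf("0600 octal  = %04o octal\n", 0600) // 0600: rw-------
}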
@@ -43,6 +43,7 @@ func init() {
		commandSignMessage,
		commandVerifyMessage,
	}
+	cli.CommandHelpTemplate = utils.OriginCommandHelpTemplate
}

// Commonly used command line flags.
@@ -79,6 +79,10 @@ var (
		Name:  "input",
		Usage: "input for the EVM",
	}
+	InputFileFlag = cli.StringFlag{
+		Name:  "inputfile",
+		Usage: "file containing input for the EVM",
+	}
	VerbosityFlag = cli.IntFlag{
		Name:  "verbosity",
		Usage: "sets the verbosity level",
@@ -130,6 +134,7 @@ func init() {
		ValueFlag,
		DumpFlag,
		InputFlag,
+		InputFileFlag,
		MemProfileFlag,
		CPUProfileFlag,
		StatDumpFlag,
@@ -147,6 +152,7 @@ func init() {
		runCommand,
		stateTestCommand,
	}
+	cli.CommandHelpTemplate = utils.OriginCommandHelpTemplate
}

func main() {

@@ -17,6 +17,7 @@
package main

import (
+	"bytes"
	"encoding/json"
	"fmt"
	"io/ioutil"
@@ -145,6 +146,7 @@ func runCmd(ctx *cli.Context) error {
	} else {
		hexcode = []byte(codeFlag)
	}
+	hexcode = bytes.TrimSpace(hexcode)
	if len(hexcode)%2 != 0 {
		fmt.Printf("Invalid input length for hex data (%d)\n", len(hexcode))
		os.Exit(1)
@@ -203,14 +205,24 @@ func runCmd(ctx *cli.Context) error {
	}
	tstart := time.Now()
	var leftOverGas uint64
+	var hexInput []byte
+	if inputFileFlag := ctx.GlobalString(InputFileFlag.Name); inputFileFlag != "" {
+		if hexInput, err = ioutil.ReadFile(inputFileFlag); err != nil {
+			fmt.Printf("could not load input from file: %v\n", err)
+			os.Exit(1)
+		}
+	} else {
+		hexInput = []byte(ctx.GlobalString(InputFlag.Name))
+	}
+	input := common.FromHex(string(bytes.TrimSpace(hexInput)))
	if ctx.GlobalBool(CreateFlag.Name) {
-		input := append(code, common.Hex2Bytes(ctx.GlobalString(InputFlag.Name))...)
+		input = append(code, input...)
		ret, _, leftOverGas, err = runtime.Create(input, &runtimeConfig)
	} else {
		if len(code) > 0 {
			statedb.SetCode(receiver, code)
		}
-		ret, leftOverGas, err = runtime.Call(receiver, common.Hex2Bytes(ctx.GlobalString(InputFlag.Name)), &runtimeConfig)
+		ret, leftOverGas, err = runtime.Call(receiver, input, &runtimeConfig)
	}
	execTime := time.Since(tstart)
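Exercising the new flag, roughly (invented file contents; exact CLI shape may differ by version):

    $ echo 0xdeadbeef > input.hex
    $ evm --code 6040600052 --inputfile input.hex run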
@@ -58,7 +58,7 @@ import (
	"github.com/ethereum/go-ethereum/p2p/enode"
	"github.com/ethereum/go-ethereum/p2p/nat"
	"github.com/ethereum/go-ethereum/params"
-	"golang.org/x/net/websocket"
+	"github.com/gorilla/websocket"
)

var (
@@ -296,8 +296,7 @@ func (f *faucet) listenAndServe(port int) error {
	go f.loop()

	http.HandleFunc("/", f.webHandler)
-	http.Handle("/api", websocket.Handler(f.apiHandler))
-
+	http.HandleFunc("/api", f.apiHandler)
	return http.ListenAndServe(fmt.Sprintf(":%d", port), nil)
}

@@ -308,7 +307,13 @@ func (f *faucet) webHandler(w http.ResponseWriter, r *http.Request) {
}

// apiHandler handles requests for Ether grants and transaction statuses.
-func (f *faucet) apiHandler(conn *websocket.Conn) {
+func (f *faucet) apiHandler(w http.ResponseWriter, r *http.Request) {
+	upgrader := websocket.Upgrader{}
+	conn, err := upgrader.Upgrade(w, r, nil)
+	if err != nil {
+		return
+	}
+
	// Start tracking the connection and drop at the end
	defer conn.Close()

@@ -331,7 +336,6 @@ func (f *faucet) apiHandler(w http.ResponseWriter, r *http.Request) {
		head    *types.Header
		balance *big.Int
		nonce   uint64
-		err     error
	)
	for head == nil || balance == nil {
		// Retrieve the current stats cached by the faucet
@@ -347,6 +351,7 @@ func (f *faucet) apiHandler(w http.ResponseWriter, r *http.Request) {

	if head == nil || balance == nil {
		// Report the faucet offline until initial stats are ready
+		//lint:ignore ST1005 This error is to be displayed in the browser
		if err = sendError(conn, errors.New("Faucet offline")); err != nil {
			log.Warn("Failed to send faucet error to client", "err", err)
			return
@@ -376,7 +381,7 @@ func (f *faucet) apiHandler(w http.ResponseWriter, r *http.Request) {
			Tier    uint   `json:"tier"`
			Captcha string `json:"captcha"`
		}
-		if err = websocket.JSON.Receive(conn, &msg); err != nil {
+		if err = conn.ReadJSON(&msg); err != nil {
			return
		}
		if !*noauthFlag && !strings.HasPrefix(msg.URL, "https://gist.github.com/") && !strings.HasPrefix(msg.URL, "https://twitter.com/") &&
@@ -388,6 +393,7 @@ func (f *faucet) apiHandler(w http.ResponseWriter, r *http.Request) {
			continue
		}
		if msg.Tier >= uint(*tiersFlag) {
+			//lint:ignore ST1005 This error is to be displayed in the browser
			if err = sendError(conn, errors.New("Invalid funding tier requested")); err != nil {
				log.Warn("Failed to send tier error to client", "err", err)
				return
@@ -425,6 +431,7 @@ func (f *faucet) apiHandler(w http.ResponseWriter, r *http.Request) {
		}
		if !result.Success {
			log.Warn("Captcha verification failed", "err", string(result.Errors))
+			//lint:ignore ST1005 it's funny and the robot won't mind
			if err = sendError(conn, errors.New("Beep-bop, you're a robot!")); err != nil {
				log.Warn("Failed to send captcha failure to client", "err", err)
				return
@@ -446,6 +453,7 @@ func (f *faucet) apiHandler(w http.ResponseWriter, r *http.Request) {
			}
			continue
		case strings.HasPrefix(msg.URL, "https://plus.google.com/"):
+			//lint:ignore ST1005 Google is a company name and should be capitalized.
			if err = sendError(conn, errors.New("Google+ authentication discontinued as the service was sunset")); err != nil {
				log.Warn("Failed to send Google+ deprecation to client", "err", err)
				return
@@ -458,6 +466,7 @@ func (f *faucet) apiHandler(w http.ResponseWriter, r *http.Request) {
		case *noauthFlag:
			username, avatar, address, err = authNoAuth(msg.URL)
		default:
+			//lint:ignore ST1005 This error is to be displayed in the browser
			err = errors.New("Something funky happened, please open an issue at https://github.com/ethereum/go-ethereum/issues")
		}
		if err != nil {
@@ -516,7 +525,7 @@ func (f *faucet) apiHandler(w http.ResponseWriter, r *http.Request) {

		// Send an error if too frequent funding, otherwise a success
		if !fund {
-			if err = sendError(conn, fmt.Errorf("%s left until next allowance", common.PrettyDuration(timeout.Sub(time.Now())))); err != nil { // nolint: gosimple
+			if err = sendError(conn, fmt.Errorf("%s left until next allowance", common.PrettyDuration(time.Until(timeout)))); err != nil { // nolint: gosimple
				log.Warn("Failed to send funding error to client", "err", err)
				return
			}
@@ -657,7 +666,7 @@ func send(conn *websocket.Conn, value interface{}, timeout time.Duration) error
		timeout = 60 * time.Second
	}
	conn.SetWriteDeadline(time.Now().Add(timeout))
-	return websocket.JSON.Send(conn, value)
+	return conn.WriteJSON(value)
}

// sendError transmits an error to the remote end of the websocket, also setting
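The swap from x/net/websocket to gorilla/websocket replaces the implicit handler wrapper with an explicit HTTP upgrade. The minimal shape of a gorilla handler, for reference (standalone echo sketch, not faucet code):

package main

import (
	"net/http"

	"github.com/gorilla/websocket"
)

var upgrader = websocket.Upgrader{} // default buffer sizes and origin policy

func echo(w http.ResponseWriter, r *http.Request) {
	conn, err := upgrader.Upgrade(w, r, nil) // hijacks the HTTP connection
	if err != nil {
		return // Upgrade has already written an HTTP error reply
	}
	defer conn.Close()
	for {
		var msg map[string]interface{}
		if err := conn.ReadJSON(&msg); err != nil {
			return
		}
		if err := conn.WriteJSON(msg); err != nil {
			return
		}
	}
}

func main() {
	http.HandleFunc("/echo", echo)
	http.ListenAndServe(":8080", nil)
}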
@@ -678,6 +687,7 @@ func authTwitter(url string) (string, string, common.Address, error) {
|
||||
// Ensure the user specified a meaningful URL, no fancy nonsense
|
||||
parts := strings.Split(url, "/")
|
||||
if len(parts) < 4 || parts[len(parts)-2] != "status" {
|
||||
//lint:ignore ST1005 This error is to be displayed in the browser
|
||||
return "", "", common.Address{}, errors.New("Invalid Twitter status URL")
|
||||
}
|
||||
// Twitter's API isn't really friendly with direct links. Still, we don't
|
||||
@@ -692,6 +702,7 @@ func authTwitter(url string) (string, string, common.Address, error) {
|
||||
// Resolve the username from the final redirect, no intermediate junk
|
||||
parts = strings.Split(res.Request.URL.String(), "/")
|
||||
if len(parts) < 4 || parts[len(parts)-2] != "status" {
|
||||
//lint:ignore ST1005 This error is to be displayed in the browser
|
||||
return "", "", common.Address{}, errors.New("Invalid Twitter status URL")
|
||||
}
|
||||
username := parts[len(parts)-3]
|
||||
@@ -702,6 +713,7 @@ func authTwitter(url string) (string, string, common.Address, error) {
|
||||
}
|
||||
address := common.HexToAddress(string(regexp.MustCompile("0x[0-9a-fA-F]{40}").Find(body)))
|
||||
if address == (common.Address{}) {
|
||||
//lint:ignore ST1005 This error is to be displayed in the browser
|
||||
return "", "", common.Address{}, errors.New("No Ethereum address found to fund")
|
||||
}
|
||||
var avatar string
|
||||
@@ -717,6 +729,7 @@ func authFacebook(url string) (string, string, common.Address, error) {
|
||||
// Ensure the user specified a meaningful URL, no fancy nonsense
|
||||
parts := strings.Split(url, "/")
|
||||
if len(parts) < 4 || parts[len(parts)-2] != "posts" {
|
||||
//lint:ignore ST1005 This error is to be displayed in the browser
|
||||
return "", "", common.Address{}, errors.New("Invalid Facebook post URL")
|
||||
}
|
||||
username := parts[len(parts)-3]
|
||||
@@ -736,6 +749,7 @@ func authFacebook(url string) (string, string, common.Address, error) {
|
||||
}
|
||||
address := common.HexToAddress(string(regexp.MustCompile("0x[0-9a-fA-F]{40}").Find(body)))
|
||||
if address == (common.Address{}) {
|
||||
//lint:ignore ST1005 This error is to be displayed in the browser
|
||||
return "", "", common.Address{}, errors.New("No Ethereum address found to fund")
|
||||
}
|
||||
var avatar string
|
||||
@@ -751,6 +765,7 @@ func authFacebook(url string) (string, string, common.Address, error) {
|
||||
func authNoAuth(url string) (string, string, common.Address, error) {
|
||||
address := common.HexToAddress(regexp.MustCompile("0x[0-9a-fA-F]{40}").FindString(url))
|
||||
if address == (common.Address{}) {
|
||||
//lint:ignore ST1005 This error is to be displayed in the browser
|
||||
return "", "", common.Address{}, errors.New("No Ethereum address found to fund")
|
||||
}
|
||||
return address.Hex() + "@noauth", "", address, nil
|
||||
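All three auth helpers share one trick: scan the fetched content for the first 0x-prefixed, 40-hex-digit token and treat the zero address as "not found". The pattern in isolation (standalone sketch):

package main

import (
	"fmt"
	"regexp"

	"github.com/ethereum/go-ethereum/common"
)

func main() {
	re := regexp.MustCompile("0x[0-9a-fA-F]{40}")
	body := "please fund 0xdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef thanks"
	address := common.HexToAddress(re.FindString(body))
	// The zero address is the "nothing matched" sentinel used above.
	if address == (common.Address{}) {
		fmt.Println("no Ethereum address found to fund")
		return
	}
	fmt.Println(address.Hex())
}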
@@ -28,7 +28,6 @@ import (
cli "gopkg.in/urfave/cli.v1"

"github.com/ethereum/go-ethereum/cmd/utils"
"github.com/ethereum/go-ethereum/dashboard"
"github.com/ethereum/go-ethereum/eth"
"github.com/ethereum/go-ethereum/node"
"github.com/ethereum/go-ethereum/params"
@@ -75,11 +74,10 @@ type ethstatsConfig struct {
}

type gethConfig struct {
Eth eth.Config
Shh whisper.Config
Node node.Config
Ethstats ethstatsConfig
Dashboard dashboard.Config
Eth eth.Config
Shh whisper.Config
Node node.Config
Ethstats ethstatsConfig
}

func loadConfig(file string, cfg *gethConfig) error {
@@ -110,10 +108,9 @@ func defaultNodeConfig() node.Config {
func makeConfigNode(ctx *cli.Context) (*node.Node, gethConfig) {
// Load defaults.
cfg := gethConfig{
Eth: eth.DefaultConfig,
Shh: whisper.DefaultConfig,
Node: defaultNodeConfig(),
Dashboard: dashboard.DefaultConfig,
Eth: eth.DefaultConfig,
Shh: whisper.DefaultConfig,
Node: defaultNodeConfig(),
}

// Load config file.
@@ -134,7 +131,6 @@ func makeConfigNode(ctx *cli.Context) (*node.Node, gethConfig) {
cfg.Ethstats.URL = ctx.GlobalString(utils.EthStatsURLFlag.Name)
}
utils.SetShhConfig(ctx, stack, &cfg.Shh)
utils.SetDashboardConfig(ctx, &cfg.Dashboard)

return stack, cfg
}
@@ -154,11 +150,11 @@ func makeFullNode(ctx *cli.Context) *node.Node {
if ctx.GlobalIsSet(utils.OverrideIstanbulFlag.Name) {
cfg.Eth.OverrideIstanbul = new(big.Int).SetUint64(ctx.GlobalUint64(utils.OverrideIstanbulFlag.Name))
}
if ctx.GlobalIsSet(utils.OverrideMuirGlacierFlag.Name) {
cfg.Eth.OverrideMuirGlacier = new(big.Int).SetUint64(ctx.GlobalUint64(utils.OverrideMuirGlacierFlag.Name))
}
utils.RegisterEthService(stack, &cfg.Eth)

if ctx.GlobalBool(utils.DashboardEnabledFlag.Name) {
utils.RegisterDashboardService(stack, &cfg.Dashboard, gitCommit)
}
// Whisper must be explicitly enabled by specifying at least 1 whisper flag or in dev mode
shhEnabled := enableWhisper(ctx)
shhAutoEnabled := !ctx.GlobalIsSet(utils.WhisperEnabledFlag.Name) && ctx.GlobalIsSet(utils.DeveloperFlag.Name)
@@ -70,10 +70,7 @@ var (
utils.NoUSBFlag,
utils.SmartCardDaemonPathFlag,
utils.OverrideIstanbulFlag,
utils.DashboardEnabledFlag,
utils.DashboardAddrFlag,
utils.DashboardPortFlag,
utils.DashboardRefreshFlag,
utils.OverrideMuirGlacierFlag,
utils.EthashCacheDirFlag,
utils.EthashCachesInMemoryFlag,
utils.EthashCachesOnDiskFlag,
@@ -236,16 +233,8 @@ func init() {
app.Flags = append(app.Flags, metricsFlags...)

app.Before = func(ctx *cli.Context) error {
logdir := ""
if ctx.GlobalBool(utils.DashboardEnabledFlag.Name) {
logdir = (&node.Config{DataDir: utils.MakeDataDir(ctx)}).ResolvePath("logs")
}
if err := debug.Setup(ctx, logdir); err != nil {
return err
}
return nil
return debug.Setup(ctx, "")
}

app.After = func(ctx *cli.Context) error {
debug.Exit()
console.Stdin.Close() // Resets terminal mode.

@@ -117,7 +117,6 @@ func version(ctx *cli.Context) error {
}
fmt.Println("Architecture:", runtime.GOARCH)
fmt.Println("Protocol Versions:", eth.ProtocolVersions)
fmt.Println("Network Id:", eth.DefaultConfig.NetworkId)
fmt.Println("Go Version:", runtime.Version())
fmt.Println("Operating System:", runtime.GOOS)
fmt.Printf("GOPATH=%s\n", os.Getenv("GOPATH"))
@@ -495,7 +495,6 @@ func (api *RetestethAPI) mineBlock() error {
txCount := 0
var txs []*types.Transaction
var receipts []*types.Receipt
var coalescedLogs []*types.Log
var blockFull = gasPool.Gas() < params.TxGas
for address := range api.txSenders {
if blockFull {
@@ -522,7 +521,6 @@ func (api *RetestethAPI) mineBlock() error {
}
txs = append(txs, tx)
receipts = append(receipts, receipt)
coalescedLogs = append(coalescedLogs, receipt.Logs...)
delete(m, nonce)
if len(m) == 0 {
// Last tx for the sender
@@ -682,9 +680,6 @@ func (api *RetestethAPI) AccountRange(ctx context.Context,
for i := 0; i < int(maxResults) && it.Next(); i++ {
if preimage := accountTrie.GetKey(it.Key); preimage != nil {
result.AddressMap[common.BytesToHash(it.Key)] = common.BytesToAddress(preimage)
//fmt.Printf("%x: %x\n", it.Key, preimage)
} else {
//fmt.Printf("could not find preimage for %x\n", it.Key)
}
}
//fmt.Printf("Number of entries returned: %d\n", len(result.AddressMap))
@@ -808,9 +803,6 @@ func (api *RetestethAPI) StorageRangeAt(ctx context.Context,
Key: string(ks),
Value: string(vs),
}
//fmt.Printf("Key: %s, Value: %s\n", ks, vs)
} else {
//fmt.Printf("Did not find preimage for %x\n", it.Key)
}
}
if it.Next() {
@@ -889,7 +881,7 @@ func retesteth(ctx *cli.Context) error {
log.Info("HTTP endpoint closed", "url", httpEndpoint)
}()

abortChan := make(chan os.Signal)
abortChan := make(chan os.Signal, 11)
signal.Notify(abortChan, os.Interrupt)

sig := <-abortChan
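The retesteth hunk above replaces an unbuffered make(chan os.Signal) with a buffered one. signal.Notify delivers without blocking, so an unbuffered channel can miss a signal that arrives before the receive is ready; go vet now warns about exactly this. A minimal sketch of the recommended shape (capacity 1 suffices for a single os.Interrupt; the 11 above is simply generous):

package main

import (
	"fmt"
	"os"
	"os/signal"
)

func main() {
	// Buffered so a signal arriving before the receive below isn't lost;
	// signal.Notify sends without blocking.
	abort := make(chan os.Signal, 1)
	signal.Notify(abort, os.Interrupt)

	fmt.Println("press Ctrl-C to exit")
	<-abort
	fmt.Println("interrupted, shutting down")
}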
@@ -22,8 +22,6 @@ import (
"io"
"sort"

"strings"

"github.com/ethereum/go-ethereum/cmd/utils"
"github.com/ethereum/go-ethereum/internal/debug"
cli "gopkg.in/urfave/cli.v1"
@@ -116,16 +114,6 @@ var AppHelpFlagGroups = []flagGroup{
utils.EthashDatasetsOnDiskFlag,
},
},
//{
// Name: "DASHBOARD",
// Flags: []cli.Flag{
// utils.DashboardEnabledFlag,
// utils.DashboardAddrFlag,
// utils.DashboardPortFlag,
// utils.DashboardRefreshFlag,
// utils.DashboardAssetsFlag,
// },
//},
{
Name: "TRANSACTION POOL",
Flags: []cli.Flag{
@@ -324,9 +312,6 @@ func init() {
var uncategorized []cli.Flag
for _, flag := range data.(*cli.App).Flags {
if _, ok := categorized[flag.String()]; !ok {
if strings.HasPrefix(flag.GetName(), "dashboard") {
continue
}
uncategorized = append(uncategorized, flag)
}
}
@@ -17,7 +17,6 @@
package main

import (
"encoding/binary"
"errors"
"math"
"math/big"
@@ -28,6 +27,7 @@ import (
math2 "github.com/ethereum/go-ethereum/common/math"
"github.com/ethereum/go-ethereum/consensus/ethash"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/params"
)

@@ -36,36 +36,38 @@ import (
type alethGenesisSpec struct {
SealEngine string `json:"sealEngine"`
Params struct {
AccountStartNonce math2.HexOrDecimal64 `json:"accountStartNonce"`
MaximumExtraDataSize hexutil.Uint64 `json:"maximumExtraDataSize"`
HomesteadForkBlock hexutil.Uint64 `json:"homesteadForkBlock"`
DaoHardforkBlock math2.HexOrDecimal64 `json:"daoHardforkBlock"`
EIP150ForkBlock hexutil.Uint64 `json:"EIP150ForkBlock"`
EIP158ForkBlock hexutil.Uint64 `json:"EIP158ForkBlock"`
ByzantiumForkBlock hexutil.Uint64 `json:"byzantiumForkBlock"`
ConstantinopleForkBlock hexutil.Uint64 `json:"constantinopleForkBlock"`
MinGasLimit hexutil.Uint64 `json:"minGasLimit"`
MaxGasLimit hexutil.Uint64 `json:"maxGasLimit"`
TieBreakingGas bool `json:"tieBreakingGas"`
GasLimitBoundDivisor math2.HexOrDecimal64 `json:"gasLimitBoundDivisor"`
MinimumDifficulty *hexutil.Big `json:"minimumDifficulty"`
DifficultyBoundDivisor *math2.HexOrDecimal256 `json:"difficultyBoundDivisor"`
DurationLimit *math2.HexOrDecimal256 `json:"durationLimit"`
BlockReward *hexutil.Big `json:"blockReward"`
NetworkID hexutil.Uint64 `json:"networkID"`
ChainID hexutil.Uint64 `json:"chainID"`
AllowFutureBlocks bool `json:"allowFutureBlocks"`
AccountStartNonce math2.HexOrDecimal64 `json:"accountStartNonce"`
MaximumExtraDataSize hexutil.Uint64 `json:"maximumExtraDataSize"`
HomesteadForkBlock *hexutil.Big `json:"homesteadForkBlock,omitempty"`
DaoHardforkBlock math2.HexOrDecimal64 `json:"daoHardforkBlock"`
EIP150ForkBlock *hexutil.Big `json:"EIP150ForkBlock,omitempty"`
EIP158ForkBlock *hexutil.Big `json:"EIP158ForkBlock,omitempty"`
ByzantiumForkBlock *hexutil.Big `json:"byzantiumForkBlock,omitempty"`
ConstantinopleForkBlock *hexutil.Big `json:"constantinopleForkBlock,omitempty"`
ConstantinopleFixForkBlock *hexutil.Big `json:"constantinopleFixForkBlock,omitempty"`
IstanbulForkBlock *hexutil.Big `json:"istanbulForkBlock,omitempty"`
MinGasLimit hexutil.Uint64 `json:"minGasLimit"`
MaxGasLimit hexutil.Uint64 `json:"maxGasLimit"`
TieBreakingGas bool `json:"tieBreakingGas"`
GasLimitBoundDivisor math2.HexOrDecimal64 `json:"gasLimitBoundDivisor"`
MinimumDifficulty *hexutil.Big `json:"minimumDifficulty"`
DifficultyBoundDivisor *math2.HexOrDecimal256 `json:"difficultyBoundDivisor"`
DurationLimit *math2.HexOrDecimal256 `json:"durationLimit"`
BlockReward *hexutil.Big `json:"blockReward"`
NetworkID hexutil.Uint64 `json:"networkID"`
ChainID hexutil.Uint64 `json:"chainID"`
AllowFutureBlocks bool `json:"allowFutureBlocks"`
} `json:"params"`

Genesis struct {
Nonce hexutil.Bytes `json:"nonce"`
Difficulty *hexutil.Big `json:"difficulty"`
MixHash common.Hash `json:"mixHash"`
Author common.Address `json:"author"`
Timestamp hexutil.Uint64 `json:"timestamp"`
ParentHash common.Hash `json:"parentHash"`
ExtraData hexutil.Bytes `json:"extraData"`
GasLimit hexutil.Uint64 `json:"gasLimit"`
Nonce types.BlockNonce `json:"nonce"`
Difficulty *hexutil.Big `json:"difficulty"`
MixHash common.Hash `json:"mixHash"`
Author common.Address `json:"author"`
Timestamp hexutil.Uint64 `json:"timestamp"`
ParentHash common.Hash `json:"parentHash"`
ExtraData hexutil.Bytes `json:"extraData"`
GasLimit hexutil.Uint64 `json:"gasLimit"`
} `json:"genesis"`

Accounts map[common.UnprefixedAddress]*alethGenesisSpecAccount `json:"accounts"`
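Switching the optional fork fields from hexutil.Uint64 to *hexutil.Big with omitempty means an unscheduled fork vanishes from the emitted JSON instead of serializing as "0x0". A stripped-down sketch of the difference (hypothetical two-field struct, not the aleth spec itself):

package main

import (
	"encoding/json"
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/common/hexutil"
)

type forks struct {
	Homestead hexutil.Uint64 `json:"homesteadForkBlock"`          // value type: always emitted
	Istanbul  *hexutil.Big   `json:"istanbulForkBlock,omitempty"` // pointer: dropped when nil
}

func main() {
	out, _ := json.Marshal(forks{})
	fmt.Println(string(out)) // {"homesteadForkBlock":"0x0"} — Istanbul is omitted

	set := forks{Istanbul: (*hexutil.Big)(big.NewInt(50000))}
	out, _ = json.Marshal(set)
	fmt.Println(string(out)) // {"homesteadForkBlock":"0x0","istanbulForkBlock":"0xc350"}
}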
@@ -74,7 +76,7 @@ type alethGenesisSpec struct {
// alethGenesisSpecAccount is the prefunded genesis account and/or precompiled
// contract definition.
type alethGenesisSpecAccount struct {
Balance *math2.HexOrDecimal256 `json:"balance"`
Balance *math2.HexOrDecimal256 `json:"balance,omitempty"`
Nonce uint64 `json:"nonce,omitempty"`
Precompiled *alethGenesisSpecBuiltin `json:"precompiled,omitempty"`
}
@@ -82,7 +84,7 @@ type alethGenesisSpecAccount struct {
// alethGenesisSpecBuiltin is the precompiled contract definition.
type alethGenesisSpecBuiltin struct {
Name string `json:"name,omitempty"`
StartingBlock hexutil.Uint64 `json:"startingBlock,omitempty"`
StartingBlock *hexutil.Big `json:"startingBlock,omitempty"`
Linear *alethGenesisSpecLinearPricing `json:"linear,omitempty"`
}

@@ -106,21 +108,33 @@ func newAlethGenesisSpec(network string, genesis *core.Genesis) (*alethGenesisSp
spec.Params.AccountStartNonce = 0
spec.Params.TieBreakingGas = false
spec.Params.AllowFutureBlocks = false

// Dao hardfork block is a special one. The fork block is listed as 0 in the
// config but aleth will sync with ETC clients up until the actual dao hard
// fork block.
spec.Params.DaoHardforkBlock = 0

spec.Params.HomesteadForkBlock = (hexutil.Uint64)(genesis.Config.HomesteadBlock.Uint64())
spec.Params.EIP150ForkBlock = (hexutil.Uint64)(genesis.Config.EIP150Block.Uint64())
spec.Params.EIP158ForkBlock = (hexutil.Uint64)(genesis.Config.EIP158Block.Uint64())

// Byzantium
if num := genesis.Config.HomesteadBlock; num != nil {
spec.Params.HomesteadForkBlock = (*hexutil.Big)(num)
}
if num := genesis.Config.EIP150Block; num != nil {
spec.Params.EIP150ForkBlock = (*hexutil.Big)(num)
}
if num := genesis.Config.EIP158Block; num != nil {
spec.Params.EIP158ForkBlock = (*hexutil.Big)(num)
}
if num := genesis.Config.ByzantiumBlock; num != nil {
spec.setByzantium(num)
spec.Params.ByzantiumForkBlock = (*hexutil.Big)(num)
}
// Constantinople
if num := genesis.Config.ConstantinopleBlock; num != nil {
spec.setConstantinople(num)
spec.Params.ConstantinopleForkBlock = (*hexutil.Big)(num)
}
if num := genesis.Config.PetersburgBlock; num != nil {
spec.Params.ConstantinopleFixForkBlock = (*hexutil.Big)(num)
}
if num := genesis.Config.IstanbulBlock; num != nil {
spec.Params.IstanbulForkBlock = (*hexutil.Big)(num)
}

spec.Params.NetworkID = (hexutil.Uint64)(genesis.Config.ChainID.Uint64())
spec.Params.ChainID = (hexutil.Uint64)(genesis.Config.ChainID.Uint64())
spec.Params.MaximumExtraDataSize = (hexutil.Uint64)(params.MaximumExtraDataSize)
@@ -132,9 +146,7 @@ func newAlethGenesisSpec(network string, genesis *core.Genesis) (*alethGenesisSp
spec.Params.DurationLimit = (*math2.HexOrDecimal256)(params.DurationLimit)
spec.Params.BlockReward = (*hexutil.Big)(ethash.FrontierBlockReward)

spec.Genesis.Nonce = (hexutil.Bytes)(make([]byte, 8))
binary.LittleEndian.PutUint64(spec.Genesis.Nonce[:], genesis.Nonce)

spec.Genesis.Nonce = types.EncodeNonce(genesis.Nonce)
spec.Genesis.MixHash = genesis.Mixhash
spec.Genesis.Difficulty = (*hexutil.Big)(genesis.Difficulty)
spec.Genesis.Author = genesis.Coinbase
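Besides being shorter, types.EncodeNonce changes byte order: it stores the nonce big-endian (the canonical block-nonce encoding), where the removed code packed it little-endian. A quick sketch of the difference:

package main

import (
	"encoding/binary"
	"fmt"

	"github.com/ethereum/go-ethereum/core/types"
)

func main() {
	const nonce = uint64(0x0102030405060708)

	// Old approach: little-endian bytes, 08 07 06 05 04 03 02 01.
	le := make([]byte, 8)
	binary.LittleEndian.PutUint64(le, nonce)

	// New approach: types.EncodeNonce is big-endian, 01 02 03 04 05 06 07 08.
	be := types.EncodeNonce(nonce)

	fmt.Printf("little-endian: %x\nbig-endian:    %x\n", le, be[:])
}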
@@ -157,15 +169,32 @@ func newAlethGenesisSpec(network string, genesis *core.Genesis) (*alethGenesisSp
Linear: &alethGenesisSpecLinearPricing{Base: 15, Word: 3}})
if genesis.Config.ByzantiumBlock != nil {
spec.setPrecompile(5, &alethGenesisSpecBuiltin{Name: "modexp",
StartingBlock: (hexutil.Uint64)(genesis.Config.ByzantiumBlock.Uint64())})
StartingBlock: (*hexutil.Big)(genesis.Config.ByzantiumBlock)})
spec.setPrecompile(6, &alethGenesisSpecBuiltin{Name: "alt_bn128_G1_add",
StartingBlock: (hexutil.Uint64)(genesis.Config.ByzantiumBlock.Uint64()),
StartingBlock: (*hexutil.Big)(genesis.Config.ByzantiumBlock),
Linear: &alethGenesisSpecLinearPricing{Base: 500}})
spec.setPrecompile(7, &alethGenesisSpecBuiltin{Name: "alt_bn128_G1_mul",
StartingBlock: (hexutil.Uint64)(genesis.Config.ByzantiumBlock.Uint64()),
StartingBlock: (*hexutil.Big)(genesis.Config.ByzantiumBlock),
Linear: &alethGenesisSpecLinearPricing{Base: 40000}})
spec.setPrecompile(8, &alethGenesisSpecBuiltin{Name: "alt_bn128_pairing_product",
StartingBlock: (hexutil.Uint64)(genesis.Config.ByzantiumBlock.Uint64())})
StartingBlock: (*hexutil.Big)(genesis.Config.ByzantiumBlock)})
}
if genesis.Config.IstanbulBlock != nil {
if genesis.Config.ByzantiumBlock == nil {
return nil, errors.New("invalid genesis, istanbul fork is enabled while byzantium is not")
}
spec.setPrecompile(6, &alethGenesisSpecBuiltin{
Name: "alt_bn128_G1_add",
StartingBlock: (*hexutil.Big)(genesis.Config.ByzantiumBlock),
}) // Aleth hardcoded the gas policy
spec.setPrecompile(7, &alethGenesisSpecBuiltin{
Name: "alt_bn128_G1_mul",
StartingBlock: (*hexutil.Big)(genesis.Config.ByzantiumBlock),
}) // Aleth hardcoded the gas policy
spec.setPrecompile(9, &alethGenesisSpecBuiltin{
Name: "blake2_compression",
StartingBlock: (*hexutil.Big)(genesis.Config.IstanbulBlock),
})
}
return spec, nil
}
@@ -196,14 +225,6 @@ func (spec *alethGenesisSpec) setAccount(address common.Address, account core.Ge

}

func (spec *alethGenesisSpec) setByzantium(num *big.Int) {
spec.Params.ByzantiumForkBlock = hexutil.Uint64(num.Uint64())
}

func (spec *alethGenesisSpec) setConstantinople(num *big.Int) {
spec.Params.ConstantinopleForkBlock = hexutil.Uint64(num.Uint64())
}

// parityChainSpec is the chain specification format used by Parity.
type parityChainSpec struct {
Name string `json:"name"`
@@ -223,36 +244,40 @@ type parityChainSpec struct {
} `json:"engine"`

Params struct {
AccountStartNonce hexutil.Uint64 `json:"accountStartNonce"`
MaximumExtraDataSize hexutil.Uint64 `json:"maximumExtraDataSize"`
MinGasLimit hexutil.Uint64 `json:"minGasLimit"`
GasLimitBoundDivisor math2.HexOrDecimal64 `json:"gasLimitBoundDivisor"`
NetworkID hexutil.Uint64 `json:"networkID"`
ChainID hexutil.Uint64 `json:"chainID"`
MaxCodeSize hexutil.Uint64 `json:"maxCodeSize"`
MaxCodeSizeTransition hexutil.Uint64 `json:"maxCodeSizeTransition"`
EIP98Transition hexutil.Uint64 `json:"eip98Transition"`
EIP150Transition hexutil.Uint64 `json:"eip150Transition"`
EIP160Transition hexutil.Uint64 `json:"eip160Transition"`
EIP161abcTransition hexutil.Uint64 `json:"eip161abcTransition"`
EIP161dTransition hexutil.Uint64 `json:"eip161dTransition"`
EIP155Transition hexutil.Uint64 `json:"eip155Transition"`
EIP140Transition hexutil.Uint64 `json:"eip140Transition"`
EIP211Transition hexutil.Uint64 `json:"eip211Transition"`
EIP214Transition hexutil.Uint64 `json:"eip214Transition"`
EIP658Transition hexutil.Uint64 `json:"eip658Transition"`
EIP145Transition hexutil.Uint64 `json:"eip145Transition"`
EIP1014Transition hexutil.Uint64 `json:"eip1014Transition"`
EIP1052Transition hexutil.Uint64 `json:"eip1052Transition"`
EIP1283Transition hexutil.Uint64 `json:"eip1283Transition"`
EIP1283DisableTransition hexutil.Uint64 `json:"eip1283DisableTransition"`
AccountStartNonce hexutil.Uint64 `json:"accountStartNonce"`
MaximumExtraDataSize hexutil.Uint64 `json:"maximumExtraDataSize"`
MinGasLimit hexutil.Uint64 `json:"minGasLimit"`
GasLimitBoundDivisor math2.HexOrDecimal64 `json:"gasLimitBoundDivisor"`
NetworkID hexutil.Uint64 `json:"networkID"`
ChainID hexutil.Uint64 `json:"chainID"`
MaxCodeSize hexutil.Uint64 `json:"maxCodeSize"`
MaxCodeSizeTransition hexutil.Uint64 `json:"maxCodeSizeTransition"`
EIP98Transition hexutil.Uint64 `json:"eip98Transition"`
EIP150Transition hexutil.Uint64 `json:"eip150Transition"`
EIP160Transition hexutil.Uint64 `json:"eip160Transition"`
EIP161abcTransition hexutil.Uint64 `json:"eip161abcTransition"`
EIP161dTransition hexutil.Uint64 `json:"eip161dTransition"`
EIP155Transition hexutil.Uint64 `json:"eip155Transition"`
EIP140Transition hexutil.Uint64 `json:"eip140Transition"`
EIP211Transition hexutil.Uint64 `json:"eip211Transition"`
EIP214Transition hexutil.Uint64 `json:"eip214Transition"`
EIP658Transition hexutil.Uint64 `json:"eip658Transition"`
EIP145Transition hexutil.Uint64 `json:"eip145Transition"`
EIP1014Transition hexutil.Uint64 `json:"eip1014Transition"`
EIP1052Transition hexutil.Uint64 `json:"eip1052Transition"`
EIP1283Transition hexutil.Uint64 `json:"eip1283Transition"`
EIP1283DisableTransition hexutil.Uint64 `json:"eip1283DisableTransition"`
EIP1283ReenableTransition hexutil.Uint64 `json:"eip1283ReenableTransition"`
EIP1344Transition hexutil.Uint64 `json:"eip1344Transition"`
EIP1884Transition hexutil.Uint64 `json:"eip1884Transition"`
EIP2028Transition hexutil.Uint64 `json:"eip2028Transition"`
} `json:"params"`

Genesis struct {
Seal struct {
Ethereum struct {
Nonce hexutil.Bytes `json:"nonce"`
MixHash hexutil.Bytes `json:"mixHash"`
Nonce types.BlockNonce `json:"nonce"`
MixHash hexutil.Bytes `json:"mixHash"`
} `json:"ethereum"`
} `json:"seal"`

@@ -278,17 +303,23 @@ type parityChainSpecAccount struct {

// parityChainSpecBuiltin is the precompiled contract definition.
type parityChainSpecBuiltin struct {
Name string `json:"name,omitempty"`
ActivateAt math2.HexOrDecimal64 `json:"activate_at,omitempty"`
Pricing *parityChainSpecPricing `json:"pricing,omitempty"`
Name string `json:"name"` // Each builtin should have its own name
Pricing interface{} `json:"pricing"` // Each builtin should have its own price strategy
ActivateAt *hexutil.Big `json:"activate_at,omitempty"` // ActivateAt can't be omitted if empty, default means no fork
}

// parityChainSpecPricing represents the different pricing models that builtin
// contracts might advertise using.
type parityChainSpecPricing struct {
Linear *parityChainSpecLinearPricing `json:"linear,omitempty"`
ModExp *parityChainSpecModExpPricing `json:"modexp,omitempty"`
AltBnPairing *parityChainSpecAltBnPairingPricing `json:"alt_bn128_pairing,omitempty"`
Linear *parityChainSpecLinearPricing `json:"linear,omitempty"`
ModExp *parityChainSpecModExpPricing `json:"modexp,omitempty"`

// Before the https://github.com/paritytech/parity-ethereum/pull/11039,
// Parity uses this format to config bn pairing price policy.
AltBnPairing *parityChainSepcAltBnPairingPricing `json:"alt_bn128_pairing,omitempty"`

// Blake2F is the price per round of Blake2 compression
Blake2F *parityChainSpecBlakePricing `json:"blake2_f,omitempty"`
}

type parityChainSpecLinearPricing struct {
@@ -300,11 +331,36 @@ type parityChainSpecModExpPricing struct {
Divisor uint64 `json:"divisor"`
}

type parityChainSpecAltBnPairingPricing struct {
// parityChainSpecAltBnConstOperationPricing defines the price
// policy for bn const operation(used after istanbul)
type parityChainSpecAltBnConstOperationPricing struct {
Price uint64 `json:"price"`
}

// parityChainSepcAltBnPairingPricing defines the price policy
// for bn pairing.
type parityChainSepcAltBnPairingPricing struct {
Base uint64 `json:"base"`
Pair uint64 `json:"pair"`
}

// parityChainSpecBlakePricing defines the price policy for blake2 f
// compression.
type parityChainSpecBlakePricing struct {
GasPerRound uint64 `json:"gas_per_round"`
}

type parityChainSpecAlternativePrice struct {
AltBnConstOperationPrice *parityChainSpecAltBnConstOperationPricing `json:"alt_bn128_const_operations,omitempty"`
AltBnPairingPrice *parityChainSepcAltBnPairingPricing `json:"alt_bn128_pairing,omitempty"`
}

// parityChainSpecVersionedPricing represents a single version price policy.
type parityChainSpecVersionedPricing struct {
Price *parityChainSpecAlternativePrice `json:"price,omitempty"`
Info string `json:"info,omitempty"`
}

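The Pricing interface{} field lets a builtin carry either the flat parityChainSpecPricing object or, for the Istanbul-repriced contracts, a map keyed by activation block — Parity's versioned pricing format from paritytech/parity-ethereum#11039. A toy sketch of how such a map serializes (local stand-in types, not the spec types above):

package main

import (
	"encoding/json"
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/common/hexutil"
)

type constPrice struct {
	Price uint64 `json:"price"`
}

type versioned struct {
	Price map[string]constPrice `json:"price"`
}

func main() {
	// Keyed by the block at which each price takes effect; *hexutil.Big
	// keys render as 0x-prefixed hex, as in stureby_parity.json below.
	pricing := map[*hexutil.Big]versioned{
		(*hexutil.Big)(big.NewInt(0)):     {Price: map[string]constPrice{"alt_bn128_const_operations": {Price: 500}}},
		(*hexutil.Big)(big.NewInt(50000)): {Price: map[string]constPrice{"alt_bn128_const_operations": {Price: 150}}},
	}
	out, _ := json.MarshalIndent(pricing, "", "  ")
	fmt.Println(string(out)) // "0x0" -> price 500, "0xc350" -> price 150
}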
// newParityChainSpec converts a go-ethereum genesis block into a Parity specific
// chain specification format.
func newParityChainSpec(network string, genesis *core.Genesis, bootnodes []string) (*parityChainSpec, error) {
@@ -352,7 +408,10 @@ func newParityChainSpec(network string, genesis *core.Genesis, bootnodes []strin
if num := genesis.Config.PetersburgBlock; num != nil {
spec.setConstantinopleFix(num)
}

// Istanbul
if num := genesis.Config.IstanbulBlock; num != nil {
spec.setIstanbul(num)
}
spec.Params.MaximumExtraDataSize = (hexutil.Uint64)(params.MaximumExtraDataSize)
spec.Params.MinGasLimit = (hexutil.Uint64)(params.MinGasLimit)
spec.Params.GasLimitBoundDivisor = (math2.HexOrDecimal64)(params.GasLimitBoundDivisor)
@@ -365,10 +424,8 @@ func newParityChainSpec(network string, genesis *core.Genesis, bootnodes []strin
// Disable this one
spec.Params.EIP98Transition = math.MaxInt64

spec.Genesis.Seal.Ethereum.Nonce = (hexutil.Bytes)(make([]byte, 8))
binary.LittleEndian.PutUint64(spec.Genesis.Seal.Ethereum.Nonce[:], genesis.Nonce)

spec.Genesis.Seal.Ethereum.MixHash = (hexutil.Bytes)(genesis.Mixhash[:])
spec.Genesis.Seal.Ethereum.Nonce = types.EncodeNonce(genesis.Nonce)
spec.Genesis.Seal.Ethereum.MixHash = (genesis.Mixhash[:])
spec.Genesis.Difficulty = (*hexutil.Big)(genesis.Difficulty)
spec.Genesis.Author = genesis.Coinbase
spec.Genesis.Timestamp = (hexutil.Uint64)(genesis.Timestamp)
@@ -398,18 +455,93 @@ func newParityChainSpec(network string, genesis *core.Genesis, bootnodes []strin
Name: "identity", Pricing: &parityChainSpecPricing{Linear: &parityChainSpecLinearPricing{Base: 15, Word: 3}},
})
if genesis.Config.ByzantiumBlock != nil {
blnum := math2.HexOrDecimal64(genesis.Config.ByzantiumBlock.Uint64())
spec.setPrecompile(5, &parityChainSpecBuiltin{
Name: "modexp", ActivateAt: blnum, Pricing: &parityChainSpecPricing{ModExp: &parityChainSpecModExpPricing{Divisor: 20}},
Name: "modexp",
ActivateAt: (*hexutil.Big)(genesis.Config.ByzantiumBlock),
Pricing: &parityChainSpecPricing{
ModExp: &parityChainSpecModExpPricing{Divisor: 20},
},
})
spec.setPrecompile(6, &parityChainSpecBuiltin{
Name: "alt_bn128_add", ActivateAt: blnum, Pricing: &parityChainSpecPricing{Linear: &parityChainSpecLinearPricing{Base: 500}},
Name: "alt_bn128_add",
ActivateAt: (*hexutil.Big)(genesis.Config.ByzantiumBlock),
Pricing: &parityChainSpecPricing{
Linear: &parityChainSpecLinearPricing{Base: 500, Word: 0},
},
})
spec.setPrecompile(7, &parityChainSpecBuiltin{
Name: "alt_bn128_mul", ActivateAt: blnum, Pricing: &parityChainSpecPricing{Linear: &parityChainSpecLinearPricing{Base: 40000}},
Name: "alt_bn128_mul",
ActivateAt: (*hexutil.Big)(genesis.Config.ByzantiumBlock),
Pricing: &parityChainSpecPricing{
Linear: &parityChainSpecLinearPricing{Base: 40000, Word: 0},
},
})
spec.setPrecompile(8, &parityChainSpecBuiltin{
Name: "alt_bn128_pairing", ActivateAt: blnum, Pricing: &parityChainSpecPricing{AltBnPairing: &parityChainSpecAltBnPairingPricing{Base: 100000, Pair: 80000}},
Name: "alt_bn128_pairing",
ActivateAt: (*hexutil.Big)(genesis.Config.ByzantiumBlock),
Pricing: &parityChainSpecPricing{
AltBnPairing: &parityChainSepcAltBnPairingPricing{Base: 100000, Pair: 80000},
},
})
}
if genesis.Config.IstanbulBlock != nil {
if genesis.Config.ByzantiumBlock == nil {
return nil, errors.New("invalid genesis, istanbul fork is enabled while byzantium is not")
}
spec.setPrecompile(6, &parityChainSpecBuiltin{
Name: "alt_bn128_add",
ActivateAt: (*hexutil.Big)(genesis.Config.ByzantiumBlock),
Pricing: map[*hexutil.Big]*parityChainSpecVersionedPricing{
(*hexutil.Big)(big.NewInt(0)): {
Price: &parityChainSpecAlternativePrice{
AltBnConstOperationPrice: &parityChainSpecAltBnConstOperationPricing{Price: 500},
},
},
(*hexutil.Big)(genesis.Config.IstanbulBlock): {
Price: &parityChainSpecAlternativePrice{
AltBnConstOperationPrice: &parityChainSpecAltBnConstOperationPricing{Price: 150},
},
},
},
})
spec.setPrecompile(7, &parityChainSpecBuiltin{
Name: "alt_bn128_mul",
ActivateAt: (*hexutil.Big)(genesis.Config.ByzantiumBlock),
Pricing: map[*hexutil.Big]*parityChainSpecVersionedPricing{
(*hexutil.Big)(big.NewInt(0)): {
Price: &parityChainSpecAlternativePrice{
AltBnConstOperationPrice: &parityChainSpecAltBnConstOperationPricing{Price: 40000},
},
},
(*hexutil.Big)(genesis.Config.IstanbulBlock): {
Price: &parityChainSpecAlternativePrice{
AltBnConstOperationPrice: &parityChainSpecAltBnConstOperationPricing{Price: 6000},
},
},
},
})
spec.setPrecompile(8, &parityChainSpecBuiltin{
Name: "alt_bn128_pairing",
ActivateAt: (*hexutil.Big)(genesis.Config.ByzantiumBlock),
Pricing: map[*hexutil.Big]*parityChainSpecVersionedPricing{
(*hexutil.Big)(big.NewInt(0)): {
Price: &parityChainSpecAlternativePrice{
AltBnPairingPrice: &parityChainSepcAltBnPairingPricing{Base: 100000, Pair: 80000},
},
},
(*hexutil.Big)(genesis.Config.IstanbulBlock): {
Price: &parityChainSpecAlternativePrice{
AltBnPairingPrice: &parityChainSepcAltBnPairingPricing{Base: 45000, Pair: 34000},
},
},
},
})
spec.setPrecompile(9, &parityChainSpecBuiltin{
Name: "blake2_f",
ActivateAt: (*hexutil.Big)(genesis.Config.IstanbulBlock),
Pricing: &parityChainSpecPricing{
Blake2F: &parityChainSpecBlakePricing{GasPerRound: 1},
},
})
}
return spec, nil
@@ -451,10 +583,17 @@ func (spec *parityChainSpec) setConstantinopleFix(num *big.Int) {
spec.Params.EIP1283DisableTransition = hexutil.Uint64(num.Uint64())
}

func (spec *parityChainSpec) setIstanbul(num *big.Int) {
spec.Params.EIP1344Transition = hexutil.Uint64(num.Uint64())
spec.Params.EIP1884Transition = hexutil.Uint64(num.Uint64())
spec.Params.EIP2028Transition = hexutil.Uint64(num.Uint64())
spec.Params.EIP1283ReenableTransition = hexutil.Uint64(num.Uint64())
}

// pyEthereumGenesisSpec represents the genesis specification format used by the
// Python Ethereum implementation.
type pyEthereumGenesisSpec struct {
Nonce hexutil.Bytes `json:"nonce"`
Nonce types.BlockNonce `json:"nonce"`
Timestamp hexutil.Uint64 `json:"timestamp"`
ExtraData hexutil.Bytes `json:"extraData"`
GasLimit hexutil.Uint64 `json:"gasLimit"`
@@ -473,6 +612,7 @@ func newPyEthereumGenesisSpec(network string, genesis *core.Genesis) (*pyEthereu
return nil, errors.New("unsupported consensus engine")
}
spec := &pyEthereumGenesisSpec{
Nonce: types.EncodeNonce(genesis.Nonce),
Timestamp: (hexutil.Uint64)(genesis.Timestamp),
ExtraData: genesis.ExtraData,
GasLimit: (hexutil.Uint64)(genesis.GasLimit),
@@ -482,8 +622,5 @@ func newPyEthereumGenesisSpec(network string, genesis *core.Genesis) (*pyEthereu
Alloc: genesis.Alloc,
ParentHash: genesis.ParentHash,
}
spec.Nonce = (hexutil.Bytes)(make([]byte, 8))
binary.LittleEndian.PutUint64(spec.Nonce[:], genesis.Nonce)

return spec, nil
}
@@ -17,6 +17,7 @@
package main

import (
"bytes"
"encoding/json"
"io/ioutil"
"reflect"
@@ -76,33 +77,19 @@ func TestParitySturebyConverter(t *testing.T) {
if err := json.Unmarshal(blob, &genesis); err != nil {
t.Fatalf("failed parsing genesis: %v", err)
}
spec, err := newParityChainSpec("Stureby", &genesis, []string{})
spec, err := newParityChainSpec("stureby", &genesis, []string{})
if err != nil {
t.Fatalf("failed creating chainspec: %v", err)
}

enc, err := json.MarshalIndent(spec, "", " ")
if err != nil {
t.Fatalf("failed encoding chainspec: %v", err)
}
expBlob, err := ioutil.ReadFile("testdata/stureby_parity.json")
if err != nil {
t.Fatalf("could not read file: %v", err)
}
expspec := &parityChainSpec{}
if err := json.Unmarshal(expBlob, expspec); err != nil {
t.Fatalf("failed parsing genesis: %v", err)
}
expspec.Nodes = []string{}

if !reflect.DeepEqual(expspec, spec) {
t.Errorf("chainspec mismatch")
c := spew.ConfigState{
DisablePointerAddresses: true,
SortKeys: true,
}
exp := strings.Split(c.Sdump(expspec), "\n")
got := strings.Split(c.Sdump(spec), "\n")
for i := 0; i < len(exp) && i < len(got); i++ {
if exp[i] != got[i] {
t.Logf("got: %v\nexp: %v\n", exp[i], got[i])
}
}
if !bytes.Equal(expBlob, enc) {
t.Fatalf("chainspec mismatch")
}
}
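The rewritten test compares the marshaled spec byte-for-byte against the golden file, using spew only to narrate the first diverging lines on failure. The core pattern in miniature (hedged sketch; testdata/golden.json is a hypothetical fixture):

package main

import (
	"bytes"
	"encoding/json"
	"io/ioutil"
	"testing"
)

func TestGolden(t *testing.T) {
	got, err := json.MarshalIndent(map[string]int{"a": 1}, "", "  ")
	if err != nil {
		t.Fatalf("failed encoding: %v", err)
	}
	want, err := ioutil.ReadFile("testdata/golden.json") // hypothetical fixture
	if err != nil {
		t.Fatalf("could not read file: %v", err)
	}
	// Byte equality catches formatting drift that reflect.DeepEqual on the
	// decoded structs would miss (key order, indentation, hex casing).
	if !bytes.Equal(want, got) {
		t.Fatalf("golden file mismatch:\ngot:  %s\nwant: %s", got, want)
	}
}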
@@ -129,15 +129,20 @@ func dial(server string, pubkey []byte) (*sshClient, error) {
fmt.Printf("SSH key fingerprint is %s [MD5]\n", ssh.FingerprintLegacyMD5(key))
fmt.Printf("Are you sure you want to continue connecting (yes/no)? ")

text, err := bufio.NewReader(os.Stdin).ReadString('\n')
switch {
case err != nil:
return err
case strings.TrimSpace(text) == "yes":
pubkey = key.Marshal()
return nil
default:
return fmt.Errorf("unknown auth choice: %v", text)
for {
text, err := bufio.NewReader(os.Stdin).ReadString('\n')
switch {
case err != nil:
return err
case strings.TrimSpace(text) == "yes":
pubkey = key.Marshal()
return nil
case strings.TrimSpace(text) == "no":
return errors.New("user says no")
default:
fmt.Println("Please answer 'yes' or 'no'")
continue
}
}
}
// If a public key exists for this SSH server, check that it matches
177 cmd/puppeth/testdata/stureby_aleth.json (vendored)
@@ -1,112 +1,113 @@
{
"sealEngine":"Ethash",
"params":{
"accountStartNonce":"0x00",
"maximumExtraDataSize":"0x20",
"homesteadForkBlock":"0x2710",
"daoHardforkBlock":"0x00",
"EIP150ForkBlock":"0x3a98",
"EIP158ForkBlock":"0x59d8",
"byzantiumForkBlock":"0x7530",
"constantinopleForkBlock":"0x9c40",
"minGasLimit":"0x1388",
"maxGasLimit":"0x7fffffffffffffff",
"tieBreakingGas":false,
"gasLimitBoundDivisor":"0x0400",
"minimumDifficulty":"0x20000",
"difficultyBoundDivisor":"0x0800",
"durationLimit":"0x0d",
"blockReward":"0x4563918244F40000",
"networkID":"0x4cb2e",
"chainID":"0x4cb2e",
"allowFutureBlocks":false
"sealEngine": "Ethash",
"params": {
"accountStartNonce": "0x0",
"maximumExtraDataSize": "0x20",
"homesteadForkBlock": "0x2710",
"daoHardforkBlock": "0x0",
"EIP150ForkBlock": "0x3a98",
"EIP158ForkBlock": "0x59d8",
"byzantiumForkBlock": "0x7530",
"constantinopleForkBlock": "0x9c40",
"constantinopleFixForkBlock": "0x9c40",
"istanbulForkBlock": "0xc350",
"minGasLimit": "0x1388",
"maxGasLimit": "0x7fffffffffffffff",
"tieBreakingGas": false,
"gasLimitBoundDivisor": "0x400",
"minimumDifficulty": "0x20000",
"difficultyBoundDivisor": "0x800",
"durationLimit": "0xd",
"blockReward": "0x4563918244f40000",
"networkID": "0x4cb2e",
"chainID": "0x4cb2e",
"allowFutureBlocks": false
},
"genesis":{
"nonce":"0x0000000000000000",
"difficulty":"0x20000",
"mixHash":"0x0000000000000000000000000000000000000000000000000000000000000000",
"author":"0x0000000000000000000000000000000000000000",
"timestamp":"0x59a4e76d",
"parentHash":"0x0000000000000000000000000000000000000000000000000000000000000000",
"extraData":"0x0000000000000000000000000000000000000000000000000000000b4dc0ffee",
"gasLimit":"0x47b760"
"genesis": {
"nonce": "0x0000000000000000",
"difficulty": "0x20000",
"mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
"author": "0x0000000000000000000000000000000000000000",
"timestamp": "0x59a4e76d",
"parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
"extraData": "0x0000000000000000000000000000000000000000000000000000000b4dc0ffee",
"gasLimit": "0x47b760"
},
"accounts":{
"0000000000000000000000000000000000000001":{
"balance":"1",
"precompiled":{
"name":"ecrecover",
"linear":{
"base":3000,
"word":0
"accounts": {
"0000000000000000000000000000000000000001": {
"balance": "0x1",
"precompiled": {
"name": "ecrecover",
"linear": {
"base": 3000,
"word": 0
}
}
},
"0000000000000000000000000000000000000002":{
"balance":"1",
"precompiled":{
"name":"sha256",
"linear":{
"base":60,
"word":12
"0000000000000000000000000000000000000002": {
"balance": "0x1",
"precompiled": {
"name": "sha256",
"linear": {
"base": 60,
"word": 12
}
}
},
"0000000000000000000000000000000000000003":{
"balance":"1",
"precompiled":{
"name":"ripemd160",
"linear":{
"base":600,
"word":120
"0000000000000000000000000000000000000003": {
"balance": "0x1",
"precompiled": {
"name": "ripemd160",
"linear": {
"base": 600,
"word": 120
}
}
},
"0000000000000000000000000000000000000004":{
"balance":"1",
"precompiled":{
"name":"identity",
"linear":{
"base":15,
"word":3
"0000000000000000000000000000000000000004": {
"balance": "0x1",
"precompiled": {
"name": "identity",
"linear": {
"base": 15,
"word": 3
}
}
},
"0000000000000000000000000000000000000005":{
"balance":"1",
"precompiled":{
"name":"modexp",
"startingBlock":"0x7530"
"0000000000000000000000000000000000000005": {
"balance": "0x1",
"precompiled": {
"name": "modexp",
"startingBlock": "0x7530"
}
},
"0000000000000000000000000000000000000006":{
"balance":"1",
"precompiled":{
"name":"alt_bn128_G1_add",
"startingBlock":"0x7530",
"linear":{
"base":500,
"word":0
}
"0000000000000000000000000000000000000006": {
"balance": "0x1",
"precompiled": {
"name": "alt_bn128_G1_add",
"startingBlock": "0x7530"
}
},
"0000000000000000000000000000000000000007":{
"balance":"1",
"precompiled":{
"name":"alt_bn128_G1_mul",
"startingBlock":"0x7530",
"linear":{
"base":40000,
"word":0
}
"0000000000000000000000000000000000000007": {
"balance": "0x1",
"precompiled": {
"name": "alt_bn128_G1_mul",
"startingBlock": "0x7530"
}
},
"0000000000000000000000000000000000000008":{
"balance":"1",
"precompiled":{
"name":"alt_bn128_pairing_product",
"startingBlock":"0x7530"
"0000000000000000000000000000000000000008": {
"balance": "0x1",
"precompiled": {
"name": "alt_bn128_pairing_product",
"startingBlock": "0x7530"
}
},
"0000000000000000000000000000000000000009": {
"balance": "0x1",
"precompiled": {
"name": "blake2_compression",
"startingBlock": "0xc350"
}
}
}
}
}
33 cmd/puppeth/testdata/stureby_geth.json (vendored)
@@ -1,6 +1,5 @@
{
"config": {
"ethash":{},
"chainId": 314158,
"homesteadBlock": 10000,
"eip150Block": 15000,
@@ -8,11 +7,13 @@
"eip155Block": 23000,
"eip158Block": 23000,
"byzantiumBlock": 30000,
"constantinopleBlock": 40000
"constantinopleBlock": 40000,
"petersburgBlock": 40000,
"istanbulBlock": 50000,
"ethash": {}
},
"nonce": "0x0",
"timestamp": "0x59a4e76d",
"parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
"extraData": "0x0000000000000000000000000000000000000000000000000000000b4dc0ffee",
"gasLimit": "0x47b760",
"difficulty": "0x20000",
@@ -20,28 +21,34 @@
"coinbase": "0x0000000000000000000000000000000000000000",
"alloc": {
"0000000000000000000000000000000000000001": {
"balance": "0x01"
"balance": "0x1"
},
"0000000000000000000000000000000000000002": {
"balance": "0x01"
"balance": "0x1"
},
"0000000000000000000000000000000000000003": {
"balance": "0x01"
"balance": "0x1"
},
"0000000000000000000000000000000000000004": {
"balance": "0x01"
"balance": "0x1"
},
"0000000000000000000000000000000000000005": {
"balance": "0x01"
"balance": "0x1"
},
"0000000000000000000000000000000000000006": {
"balance": "0x01"
"balance": "0x1"
},
"0000000000000000000000000000000000000007": {
"balance": "0x01"
"balance": "0x1"
},
"0000000000000000000000000000000000000008": {
"balance": "0x01"
"balance": "0x1"
},
"0000000000000000000000000000000000000009": {
"balance": "0x1"
}
}
}
},
"number": "0x0",
"gasUsed": "0x0",
"parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000"
}
316 cmd/puppeth/testdata/stureby_parity.json (vendored)
@@ -1,181 +1,213 @@
{
"name":"Stureby",
"dataDir":"stureby",
"engine":{
"Ethash":{
"params":{
"minimumDifficulty":"0x20000",
"difficultyBoundDivisor":"0x800",
"durationLimit":"0xd",
"blockReward":{
"0x0":"0x4563918244f40000",
"0x7530":"0x29a2241af62c0000",
"0x9c40":"0x1bc16d674ec80000"
"name": "stureby",
"dataDir": "stureby",
"engine": {
"Ethash": {
"params": {
"minimumDifficulty": "0x20000",
"difficultyBoundDivisor": "0x800",
"durationLimit": "0xd",
"blockReward": {
"0x0": "0x4563918244f40000",
"0x7530": "0x29a2241af62c0000",
"0x9c40": "0x1bc16d674ec80000"
},
"homesteadTransition":"0x2710",
"eip100bTransition":"0x7530",
"difficultyBombDelays":{
"0x7530":"0x2dc6c0",
"0x9c40":"0x1e8480"
}
"difficultyBombDelays": {
"0x7530": "0x2dc6c0",
"0x9c40": "0x1e8480"
},
"homesteadTransition": "0x2710",
"eip100bTransition": "0x7530"
}
}
},
"params":{
"accountStartNonce":"0x0",
"maximumExtraDataSize":"0x20",
"gasLimitBoundDivisor":"0x400",
"minGasLimit":"0x1388",
"networkID":"0x4cb2e",
"chainID":"0x4cb2e",
"maxCodeSize":"0x6000",
"maxCodeSizeTransition":"0x0",
"params": {
"accountStartNonce": "0x0",
"maximumExtraDataSize": "0x20",
"minGasLimit": "0x1388",
"gasLimitBoundDivisor": "0x400",
"networkID": "0x4cb2e",
"chainID": "0x4cb2e",
"maxCodeSize": "0x6000",
"maxCodeSizeTransition": "0x0",
"eip98Transition": "0x7fffffffffffffff",
"eip150Transition":"0x3a98",
"eip160Transition":"0x59d8",
"eip161abcTransition":"0x59d8",
"eip161dTransition":"0x59d8",
"eip155Transition":"0x59d8",
"eip140Transition":"0x7530",
"eip211Transition":"0x7530",
"eip214Transition":"0x7530",
"eip658Transition":"0x7530",
"eip145Transition":"0x9c40",
"eip1014Transition":"0x9c40",
"eip1052Transition":"0x9c40",
"eip1283Transition":"0x9c40"
"eip150Transition": "0x3a98",
"eip160Transition": "0x59d8",
"eip161abcTransition": "0x59d8",
"eip161dTransition": "0x59d8",
"eip155Transition": "0x59d8",
"eip140Transition": "0x7530",
"eip211Transition": "0x7530",
"eip214Transition": "0x7530",
"eip658Transition": "0x7530",
"eip145Transition": "0x9c40",
"eip1014Transition": "0x9c40",
"eip1052Transition": "0x9c40",
"eip1283Transition": "0x9c40",
"eip1283DisableTransition": "0x9c40",
"eip1283ReenableTransition": "0xc350",
"eip1344Transition": "0xc350",
"eip1884Transition": "0xc350",
"eip2028Transition": "0xc350"
},
"genesis":{
"seal":{
"ethereum":{
"nonce":"0x0000000000000000",
"mixHash":"0x0000000000000000000000000000000000000000000000000000000000000000"
"genesis": {
"seal": {
"ethereum": {
"nonce": "0x0000000000000000",
"mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000"
}
},
"difficulty":"0x20000",
"author":"0x0000000000000000000000000000000000000000",
"timestamp":"0x59a4e76d",
"parentHash":"0x0000000000000000000000000000000000000000000000000000000000000000",
"extraData":"0x0000000000000000000000000000000000000000000000000000000b4dc0ffee",
"gasLimit":"0x47b760"
"difficulty": "0x20000",
"author": "0x0000000000000000000000000000000000000000",
"timestamp": "0x59a4e76d",
"parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
"extraData": "0x0000000000000000000000000000000000000000000000000000000b4dc0ffee",
"gasLimit": "0x47b760"
},
"nodes":[
"enode://dfa7aca3f5b635fbfe7d0b20575f25e40d9e27b4bfbb3cf74364a42023ad9f25c1a4383bcc8cced86ee511a7d03415345a4df05be37f1dff040e4c780699f1c0@168.61.153.255:31303",
"enode://ef441b20dd70aeabf0eac35c3b8a2854e5ce04db0e30be9152ea9fd129359dcbb3f803993303ff5781c755dfd7223f3fe43505f583cccb740949407677412ba9@40.74.91.252:31303",
"enode://953b5ea1c8987cf46008232a0160324fd00d41320ecf00e23af86ec8f5396b19eb57ddab37c78141be56f62e9077de4f4dfa0747fa768ed8c8531bbfb1046237@40.70.214.166:31303",
"enode://276e613dd4b277a66591e565711e6c8bb107f0905248a9f8f8228c1a87992e156e5114bb9937c02824a9d9d25f76340442cf86e2028bf5293cae19904fb2b98e@35.178.251.52:30303",
"enode://064c820d41e52ed7d426ac64b60506c2998235bedc7e67cb497c6faf7bb4fc54fe56fc82d0add3180b747c0c4f40a1108a6f84d7d0629ed606d504528e61cc57@3.8.5.3:30303",
"enode://90069fdabcc5e684fa5d59430bebbb12755d9362dfe5006a1485b13d71a78a3812d36e74dd7d88e50b51add01e097ea80f16263aeaa4f0230db6c79e2a97e7ca@217.29.191.142:30303",
"enode://0aac74b7fd28726275e466acb5e03bc88a95927e9951eb66b5efb239b2f798ada0690853b2f2823fe4efa408f0f3d4dd258430bc952a5ff70677b8625b3e3b14@40.115.33.57:40404",
"enode://0b96415a10f835106d83e090a0528eed5e7887e5c802a6d084e9f1993a9d0fc713781e6e4101f6365e9b91259712f291acc0a9e6e667e22023050d602c36fbe2@40.115.33.57:40414"
],
"accounts":{
"0000000000000000000000000000000000000001":{
"balance":"1",
"nonce":"0",
"builtin":{
"name":"ecrecover",
"pricing":{
"linear":{
"base":3000,
"word":0
"nodes": [],
"accounts": {
"0000000000000000000000000000000000000001": {
"balance": "0x1",
"builtin": {
"name": "ecrecover",
"pricing": {
"linear": {
"base": 3000,
"word": 0
}
}
}
},
"0000000000000000000000000000000000000002":{
"balance":"1",
"nonce":"0",
"builtin":{
"name":"sha256",
"pricing":{
"linear":{
"base":60,
"word":12
"0000000000000000000000000000000000000002": {
"balance": "0x1",
"builtin": {
"name": "sha256",
"pricing": {
"linear": {
"base": 60,
"word": 12
}
}
}
},
"0000000000000000000000000000000000000003":{
"balance":"1",
"nonce":"0",
"builtin":{
"name":"ripemd160",
"pricing":{
"linear":{
"base":600,
"word":120
"0000000000000000000000000000000000000003": {
"balance": "0x1",
"builtin": {
"name": "ripemd160",
"pricing": {
"linear": {
"base": 600,
"word": 120
}
}
}
},
"0000000000000000000000000000000000000004":{
"balance":"1",
"nonce":"0",
"builtin":{
"name":"identity",
"pricing":{
"linear":{
"base":15,
"word":3
"0000000000000000000000000000000000000004": {
"balance": "0x1",
"builtin": {
"name": "identity",
"pricing": {
"linear": {
"base": 15,
"word": 3
}
}
}
},
"0000000000000000000000000000000000000005":{
"balance":"1",
"nonce":"0",
"builtin":{
"name":"modexp",
"activate_at":"0x7530",
"pricing":{
"modexp":{
"divisor":20
"0000000000000000000000000000000000000005": {
"balance": "0x1",
"builtin": {
"name": "modexp",
"pricing": {
"modexp": {
"divisor": 20
}
}
},
"activate_at": "0x7530"
}
},
"0000000000000000000000000000000000000006":{
"balance":"1",
"nonce":"0",
"builtin":{
"name":"alt_bn128_add",
"activate_at":"0x7530",
"pricing":{
"linear":{
"base":500,
"word":0
"0000000000000000000000000000000000000006": {
"balance": "0x1",
"builtin": {
"name": "alt_bn128_add",
"pricing": {
"0x0": {
"price": {
"alt_bn128_const_operations": {
"price": 500
}
}
},
"0xc350": {
"price": {
"alt_bn128_const_operations": {
"price": 150
}
}
}
}
},
"activate_at": "0x7530"
}
},
"0000000000000000000000000000000000000007":{
"balance":"1",
"nonce":"0",
"builtin":{
"name":"alt_bn128_mul",
"activate_at":"0x7530",
"pricing":{
"linear":{
"base":40000,
"word":0
"0000000000000000000000000000000000000007": {
"balance": "0x1",
"builtin": {
"name": "alt_bn128_mul",
"pricing": {
"0x0": {
"price": {
"alt_bn128_const_operations": {
"price": 40000
}
}
},
"0xc350": {
"price": {
"alt_bn128_const_operations": {
"price": 6000
}
}
}
}
},
"activate_at": "0x7530"
}
},
"0000000000000000000000000000000000000008":{
"balance":"1",
"nonce":"0",
"builtin":{
"name":"alt_bn128_pairing",
"activate_at":"0x7530",
"pricing":{
"alt_bn128_pairing":{
"base":100000,
"pair":80000
"0000000000000000000000000000000000000008": {
"balance": "0x1",
"builtin": {
"name": "alt_bn128_pairing",
"pricing": {
"0x0": {
"price": {
"alt_bn128_pairing": {
"base": 100000,
"pair": 80000
}
}
},
"0xc350": {
"price": {
"alt_bn128_pairing": {
"base": 45000,
"pair": 34000
}
}
}
}
},
"activate_at": "0x7530"
}
},
"0000000000000000000000000000000000000009": {
"balance": "0x1",
"builtin": {
"name": "blake2_f",
"pricing": {
"blake2_f": {
"gas_per_round": 1
}
},
"activate_at": "0xc350"
}
}
}
}
}
@@ -51,6 +51,7 @@ func (w *wizard) makeGenesis() {
ByzantiumBlock: big.NewInt(0),
ConstantinopleBlock: big.NewInt(0),
PetersburgBlock: big.NewInt(0),
IstanbulBlock: big.NewInt(0),
},
}
// Figure out which consensus engine to choose
@@ -230,6 +231,10 @@ func (w *wizard) manageGenesis() {
fmt.Printf("Which block should Petersburg come into effect? (default = %v)\n", w.conf.Genesis.Config.PetersburgBlock)
w.conf.Genesis.Config.PetersburgBlock = w.readDefaultBigInt(w.conf.Genesis.Config.PetersburgBlock)

fmt.Println()
fmt.Printf("Which block should Istanbul come into effect? (default = %v)\n", w.conf.Genesis.Config.IstanbulBlock)
w.conf.Genesis.Config.IstanbulBlock = w.readDefaultBigInt(w.conf.Genesis.Config.IstanbulBlock)

out, _ := json.MarshalIndent(w.conf.Genesis.Config, "", " ")
fmt.Printf("Chain configuration updated:\n\n%s\n", out)

@@ -268,7 +273,7 @@ func (w *wizard) manageGenesis() {
} else {
saveGenesis(folder, w.network, "parity", spec)
}
// Export the genesis spec used by Harmony (formerly EthereumJ
// Export the genesis spec used by Harmony (formerly EthereumJ)
saveGenesis(folder, w.network, "harmony", w.conf.Genesis)

case "3":
@@ -291,7 +296,7 @@ func (w *wizard) manageGenesis() {
func saveGenesis(folder, network, client string, spec interface{}) {
path := filepath.Join(folder, fmt.Sprintf("%s-%s.json", network, client))

out, _ := json.Marshal(spec)
out, _ := json.MarshalIndent(spec, "", "  ")
if err := ioutil.WriteFile(path, out, 0644); err != nil {
log.Error("Failed to save genesis file", "client", client, "err", err)
return
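saveGenesis now emits indented JSON, which keeps the exported chain specs human-diffable and is what makes the byte-for-byte golden comparison in the converter test workable. The two marshaling calls side by side:

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	spec := map[string]interface{}{"chainID": "0x4cb2e"}

	compact, _ := json.Marshal(spec)
	fmt.Println(string(compact)) // {"chainID":"0x4cb2e"}

	indented, _ := json.MarshalIndent(spec, "", "  ")
	fmt.Println(string(indented))
	// {
	//   "chainID": "0x4cb2e"
	// }
}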
@@ -185,28 +185,6 @@ func GlobalBig(ctx *cli.Context, name string) *big.Int {
return (*big.Int)(val.(*bigValue))
}

func prefixFor(name string) (prefix string) {
if len(name) == 1 {
prefix = "-"
} else {
prefix = "--"
}

return
}

func prefixedNames(fullName string) (prefixed string) {
parts := strings.Split(fullName, ",")
for i, name := range parts {
name = strings.Trim(name, " ")
prefixed += prefixFor(name) + name
if i < len(parts)-1 {
prefixed += ", "
}
}
return
}

// Expands a file path
// 1. replace tilde with users home dir
// 2. expands embedded environment variables
@@ -42,7 +42,6 @@ import (
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/dashboard"
"github.com/ethereum/go-ethereum/eth"
"github.com/ethereum/go-ethereum/eth/downloader"
"github.com/ethereum/go-ethereum/eth/gasprice"
@@ -78,6 +77,17 @@ SUBCOMMANDS:
{{range $categorized.Flags}}{{"\t"}}{{.}}
{{end}}
{{end}}{{end}}`

OriginCommandHelpTemplate = `{{.Name}}{{if .Subcommands}} command{{end}}{{if .Flags}} [command options]{{end}} [arguments...]
{{if .Description}}{{.Description}}
{{end}}{{if .Subcommands}}
SUBCOMMANDS:
{{range .Subcommands}}{{.Name}}{{with .ShortName}}, {{.}}{{end}}{{ "\t" }}{{.Usage}}
{{end}}{{end}}{{if .Flags}}
OPTIONS:
{{range $.Flags}}{{"\t"}}{{.}}
{{end}}
{{end}}`
)

func init() {
@@ -227,6 +237,10 @@ var (
Name: "override.istanbul",
Usage: "Manually specify Istanbul fork-block, overriding the bundled setting",
}
OverrideMuirGlacierFlag = cli.Uint64Flag{
Name: "override.muirglacier",
Usage: "Manually specify Muir Glacier fork-block, overriding the bundled setting",
}
// Light server and client settings
LightLegacyServFlag = cli.IntFlag{ // Deprecated in favor of light.serve, remove in 2021
Name: "lightserv",
@@ -272,26 +286,6 @@ var (
Name: "ulc.onlyannounce",
Usage: "Ultra light server sends announcements only",
}
// Dashboard settings
DashboardEnabledFlag = cli.BoolFlag{
Name: "dashboard",
Usage: "Enable the dashboard",
}
DashboardAddrFlag = cli.StringFlag{
Name: "dashboard.addr",
Usage: "Dashboard listening interface",
Value: dashboard.DefaultConfig.Host,
}
DashboardPortFlag = cli.IntFlag{
Name: "dashboard.host",
Usage: "Dashboard listening port",
Value: dashboard.DefaultConfig.Port,
}
DashboardRefreshFlag = cli.DurationFlag{
Name: "dashboard.refresh",
Usage: "Dashboard metrics collection refresh rate",
Value: dashboard.DefaultConfig.Refresh,
}
// Ethash settings
EthashCacheDirFlag = DirectoryFlag{
Name: "ethash.cachedir",
@@ -1453,9 +1447,12 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *eth.Config) {
if gcmode := ctx.GlobalString(GCModeFlag.Name); gcmode != "full" && gcmode != "archive" {
Fatalf("--%s must be either 'full' or 'archive'", GCModeFlag.Name)
}
cfg.NoPruning = ctx.GlobalString(GCModeFlag.Name) == "archive"
cfg.NoPrefetch = ctx.GlobalBool(CacheNoPrefetchFlag.Name)

if ctx.GlobalIsSet(GCModeFlag.Name) {
cfg.NoPruning = ctx.GlobalString(GCModeFlag.Name) == "archive"
}
if ctx.GlobalIsSet(CacheNoPrefetchFlag.Name) {
cfg.NoPrefetch = ctx.GlobalBool(CacheNoPrefetchFlag.Name)
}
if ctx.GlobalIsSet(CacheFlag.Name) || ctx.GlobalIsSet(CacheTrieFlag.Name) {
cfg.TrieCleanCache = ctx.GlobalInt(CacheFlag.Name) * ctx.GlobalInt(CacheTrieFlag.Name) / 100
}
@@ -1527,13 +1524,6 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *eth.Config) {
}
}

// SetDashboardConfig applies dashboard related command line flags to the config.
func SetDashboardConfig(ctx *cli.Context, cfg *dashboard.Config) {
cfg.Host = ctx.GlobalString(DashboardAddrFlag.Name)
cfg.Port = ctx.GlobalInt(DashboardPortFlag.Name)
cfg.Refresh = ctx.GlobalDuration(DashboardRefreshFlag.Name)
}

// RegisterEthService adds an Ethereum client to the stack.
func RegisterEthService(stack *node.Node, cfg *eth.Config) {
var err error
@@ -1556,13 +1546,6 @@ func RegisterEthService(stack *node.Node, cfg *eth.Config) {
}
}

// RegisterDashboardService adds a dashboard to the stack.
func RegisterDashboardService(stack *node.Node, cfg *dashboard.Config, commit string) {
stack.Register(func(ctx *node.ServiceContext) (node.Service, error) {
return dashboard.New(cfg, commit, ctx.ResolvePath("logs")), nil
})
}

// RegisterShhService configures Whisper and adds it to the given node.
func RegisterShhService(stack *node.Node, cfg *whisper.Config) {
if err := stack.Register(func(n *node.ServiceContext) (node.Service, error) {
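The SetEthConfig hunk changes flag handling so values loaded from a config file survive unless a flag is explicitly passed: GlobalBool/GlobalString return the flag's default even when unset, while GlobalIsSet distinguishes "set on the command line" from "defaulted". A hedged sketch of the pattern with a made-up flag name, using gopkg.in/urfave/cli.v1 (the library these flags are built on):

package main

import (
	"fmt"
	"os"

	cli "gopkg.in/urfave/cli.v1"
)

type config struct{ NoPruning bool }

func main() {
	app := cli.NewApp()
	app.Flags = []cli.Flag{cli.BoolFlag{Name: "nopruning"}} // hypothetical flag for illustration
	app.Action = func(ctx *cli.Context) error {
		cfg := config{NoPruning: true} // pretend this came from a config file
		if ctx.GlobalIsSet("nopruning") {
			// Only an explicit --nopruning on the command line overrides the file.
			cfg.NoPruning = ctx.GlobalBool("nopruning")
		}
		fmt.Println("NoPruning:", cfg.NoPruning)
		return nil
	}
	app.Run(os.Args)
}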
@@ -37,6 +37,7 @@ import (

"github.com/ethereum/go-ethereum/cmd/utils"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/console"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/log"
@@ -165,7 +166,7 @@ func echo() {
fmt.Printf("pow = %f \n", *argPoW)
fmt.Printf("mspow = %f \n", *argServerPoW)
fmt.Printf("ip = %s \n", *argIP)
fmt.Printf("pub = %s \n", common.ToHex(crypto.FromECDSAPub(pub)))
fmt.Printf("pub = %s \n", hexutil.Encode(crypto.FromECDSAPub(pub)))
fmt.Printf("idfile = %s \n", *argIDFile)
fmt.Printf("dbpath = %s \n", *argDBPath)
fmt.Printf("boot = %s \n", *argEnode)
@@ -298,7 +299,7 @@ func startServer() error {
return err
}

fmt.Printf("my public key: %s \n", common.ToHex(crypto.FromECDSAPub(&asymKey.PublicKey)))
fmt.Printf("my public key: %s \n", hexutil.Encode(crypto.FromECDSAPub(&asymKey.PublicKey)))
fmt.Println(server.NodeInfo().Enode)

if *bootstrapMode {
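The wnode change replaces the deprecated common.ToHex with hexutil.Encode; both produce 0x-prefixed hex, so the printed output is unchanged. For reference:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common/hexutil"
)

func main() {
	// hexutil.Encode always emits a 0x prefix and lower-case hex digits.
	fmt.Println(hexutil.Encode([]byte{0x04, 0xab})) // 0x04ab
}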
@@ -19,41 +19,43 @@ package common
import (
"bytes"
"testing"

checker "gopkg.in/check.v1"
)

type BytesSuite struct{}
func TestCopyBytes(t *testing.T) {
input := []byte{1, 2, 3, 4}

var _ = checker.Suite(&BytesSuite{})

func (s *BytesSuite) TestCopyBytes(c *checker.C) {
data1 := []byte{1, 2, 3, 4}
exp1 := []byte{1, 2, 3, 4}
res1 := CopyBytes(data1)
c.Assert(res1, checker.DeepEquals, exp1)
v := CopyBytes(input)
if !bytes.Equal(v, []byte{1, 2, 3, 4}) {
t.Fatal("not equal after copy")
}
v[0] = 99
if bytes.Equal(v, input) {
t.Fatal("result is not a copy")
}
}

func (s *BytesSuite) TestLeftPadBytes(c *checker.C) {
val1 := []byte{1, 2, 3, 4}
exp1 := []byte{0, 0, 0, 0, 1, 2, 3, 4}

res1 := LeftPadBytes(val1, 8)
res2 := LeftPadBytes(val1, 2)

c.Assert(res1, checker.DeepEquals, exp1)
c.Assert(res2, checker.DeepEquals, val1)
}

func (s *BytesSuite) TestRightPadBytes(c *checker.C) {
func TestLeftPadBytes(t *testing.T) {
val := []byte{1, 2, 3, 4}
exp := []byte{1, 2, 3, 4, 0, 0, 0, 0}
padded := []byte{0, 0, 0, 0, 1, 2, 3, 4}

resstd := RightPadBytes(val, 8)
resshrt := RightPadBytes(val, 2)
if r := LeftPadBytes(val, 8); !bytes.Equal(r, padded) {
t.Fatalf("LeftPadBytes(%v, 8) == %v", val, r)
}
if r := LeftPadBytes(val, 2); !bytes.Equal(r, val) {
t.Fatalf("LeftPadBytes(%v, 2) == %v", val, r)
}
}

c.Assert(resstd, checker.DeepEquals, exp)
c.Assert(resshrt, checker.DeepEquals, val)
func TestRightPadBytes(t *testing.T) {
val := []byte{1, 2, 3, 4}
padded := []byte{1, 2, 3, 4, 0, 0, 0, 0}

if r := RightPadBytes(val, 8); !bytes.Equal(r, padded) {
t.Fatalf("RightPadBytes(%v, 8) == %v", val, r)
}
if r := RightPadBytes(val, 2); !bytes.Equal(r, val) {
t.Fatalf("RightPadBytes(%v, 2) == %v", val, r)
}
}

func TestFromHex(t *testing.T) {
@@ -86,7 +86,7 @@ func (b *Bytes) UnmarshalGraphQL(input interface{}) error {
}
*b = data
default:
err = fmt.Errorf("Unexpected type for Bytes: %v", input)
err = fmt.Errorf("unexpected type %T for Bytes", input)
}
return err
}
@@ -220,7 +220,7 @@ func (b *Big) UnmarshalGraphQL(input interface{}) error {
num.SetInt64(int64(input))
*b = Big(num)
default:
err = fmt.Errorf("Unexpected type for BigInt: %v", input)
err = fmt.Errorf("unexpected type %T for BigInt", input)
}
return err
}
@@ -284,7 +284,7 @@ func (b *Uint64) UnmarshalGraphQL(input interface{}) error {
case int32:
*b = Uint64(input)
default:
err = fmt.Errorf("Unexpected type for Long: %v", input)
err = fmt.Errorf("unexpected type %T for Long", input)
}
return err
}
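The rewritten messages use the %T verb, which prints the dynamic Go type of the value rather than its contents, and drop the leading capital to follow Go's error-string convention. A self-contained illustration:

package main

import "fmt"

func describe(input interface{}) error {
	switch input.(type) {
	case string:
		return nil
	default:
		// %T reports the concrete type, e.g. "unexpected type float64 for Bytes".
		return fmt.Errorf("unexpected type %T for Bytes", input)
	}
}

func main() {
	fmt.Println(describe(3.14)) // unexpected type float64 for Bytes
}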
@@ -1,25 +0,0 @@
// Copyright 2014 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package common

import (
"testing"

checker "gopkg.in/check.v1"
)

func Test(t *testing.T) { checker.TestingT(t) }
@@ -20,6 +20,7 @@ import (
"database/sql/driver"
"encoding/hex"
"encoding/json"
"errors"
"fmt"
"math/big"
"math/rand"
@@ -142,7 +143,7 @@ func (h Hash) Value() (driver.Value, error) {
}

// ImplementsGraphQLType returns true if Hash implements the specified GraphQL type.
func (_ Hash) ImplementsGraphQLType(name string) bool { return name == "Bytes32" }
func (Hash) ImplementsGraphQLType(name string) bool { return name == "Bytes32" }

// UnmarshalGraphQL unmarshals the provided GraphQL query data.
func (h *Hash) UnmarshalGraphQL(input interface{}) error {
@@ -151,7 +152,7 @@ func (h *Hash) UnmarshalGraphQL(input interface{}) error {
case string:
err = h.UnmarshalText([]byte(input))
default:
err = fmt.Errorf("Unexpected type for Bytes32: %v", input)
err = fmt.Errorf("unexpected type %T for Hash", input)
}
return err
}
@@ -290,7 +291,7 @@ func (a *Address) UnmarshalGraphQL(input interface{}) error {
case string:
err = a.UnmarshalText([]byte(input))
default:
err = fmt.Errorf("Unexpected type for Address: %v", input)
err = fmt.Errorf("unexpected type %T for Address", input)
}
return err
}
@@ -323,7 +324,7 @@ func NewMixedcaseAddress(addr Address) MixedcaseAddress {
// NewMixedcaseAddressFromString is mainly meant for unit-testing
func NewMixedcaseAddressFromString(hexaddr string) (*MixedcaseAddress, error) {
if !IsHexAddress(hexaddr) {
return nil, fmt.Errorf("Invalid address")
return nil, errors.New("invalid address")
}
a := FromHex(hexaddr)
return &MixedcaseAddress{addr: BytesToAddress(a), original: hexaddr}, nil
@@ -17,6 +17,8 @@
package clique

import (
"fmt"

"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/consensus"
"github.com/ethereum/go-ethereum/core/types"
@@ -117,3 +119,59 @@ func (api *API) Discard(address common.Address) {

delete(api.clique.proposals, address)
}

type status struct {
InturnPercent float64 `json:"inturnPercent"`
SigningStatus map[common.Address]int `json:"sealerActivity"`
NumBlocks uint64 `json:"numBlocks"`
}

// Status returns the status of the last N blocks,
// - the number of active signers,
// - the number of signers,
// - the percentage of in-turn blocks
func (api *API) Status() (*status, error) {
var (
numBlocks = uint64(64)
header = api.chain.CurrentHeader()
diff = uint64(0)
optimals = 0
)
snap, err := api.clique.snapshot(api.chain, header.Number.Uint64(), header.Hash(), nil)
if err != nil {
return nil, err
}
var (
signers = snap.signers()
end = header.Number.Uint64()
start = end - numBlocks
)
if numBlocks > end {
start = 1
numBlocks = end - start
}
signStatus := make(map[common.Address]int)
for _, s := range signers {
signStatus[s] = 0
}
for n := start; n < end; n++ {
h := api.chain.GetHeaderByNumber(n)
if h == nil {
return nil, fmt.Errorf("missing block %d", n)
}
if h.Difficulty.Cmp(diffInTurn) == 0 {
optimals++
}
diff += h.Difficulty.Uint64()
sealer, err := api.clique.Author(h)
if err != nil {
return nil, err
}
signStatus[sealer]++
}
return &status{
InturnPercent: float64((100 * optimals)) / float64(numBlocks),
SigningStatus: signStatus,
NumBlocks: numBlocks,
}, nil
}
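Once this lands, the new method should be reachable over RPC in the clique namespace (clique_status). A sketch of calling it with go-ethereum's rpc client; the local endpoint URL and the exposed method name are assumptions for illustration:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/rpc"
)

func main() {
	client, err := rpc.Dial("http://localhost:8545") // assumed local node with the clique API exposed
	if err != nil {
		panic(err)
	}
	defer client.Close()

	var status struct {
		InturnPercent  float64        `json:"inturnPercent"`
		SealerActivity map[string]int `json:"sealerActivity"`
		NumBlocks      uint64         `json:"numBlocks"`
	}
	if err := client.Call(&status, "clique_status"); err != nil {
		panic(err)
	}
	fmt.Printf("%.1f%% in-turn over %d blocks\n", status.InturnPercent, status.NumBlocks)
}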
@@ -311,7 +311,7 @@ func (c *Clique) verifyCascadingFields(chain consensus.ChainReader, header *type
if number == 0 {
return nil
}
// Ensure that the block's timestamp isn't too close to it's parent
// Ensure that the block's timestamp isn't too close to its parent
var parent *types.Header
if len(parents) > 0 {
parent = parents[len(parents)-1]
@@ -522,7 +522,7 @@ func (c *Clique) Prepare(chain consensus.ChainReader, header *types.Header) erro
// Set the correct difficulty
header.Difficulty = CalcDifficulty(snap, c.signer)

// Ensure the extra data has all it's components
// Ensure the extra data has all its components
if len(header.Extra) < extraVanity {
header.Extra = append(header.Extra, bytes.Repeat([]byte{0x00}, extraVanity-len(header.Extra))...)
}
@@ -31,7 +31,7 @@ var (
// to the current node.
ErrFutureBlock = errors.New("block in the future")

// ErrInvalidNumber is returned if a block's number doesn't equal it's parent's
// ErrInvalidNumber is returned if a block's number doesn't equal its parent's
// plus one.
ErrInvalidNumber = errors.New("invalid block number")
)
@@ -729,7 +729,7 @@ func TestConcurrentDiskCacheGeneration(t *testing.T) {

go func(idx int) {
defer pend.Done()
ethash := New(Config{cachedir, 0, 1, "", 0, 0, ModeNormal}, nil, false)
ethash := New(Config{cachedir, 0, 1, "", 0, 0, ModeNormal, nil}, nil, false)
defer ethash.Close()
if err := ethash.VerifySeal(nil, block.Header()); err != nil {
t.Errorf("proc %d: block verification failed: %v", idx, err)
@@ -28,7 +28,7 @@ var errEthashStopped = errors.New("ethash stopped")

// API exposes ethash related methods for the RPC interface.
type API struct {
ethash *Ethash // Make sure the mode of ethash is normal.
ethash *Ethash
}

// GetWork returns a work package for external miner.
@@ -39,7 +39,7 @@ type API struct {
// result[2] - 32 bytes hex encoded boundary condition ("target"), 2^256/difficulty
// result[3] - hex encoded block number
func (api *API) GetWork() ([4]string, error) {
if api.ethash.config.PowMode != ModeNormal && api.ethash.config.PowMode != ModeTest {
if api.ethash.remote == nil {
return [4]string{}, errors.New("not supported")
}

@@ -47,13 +47,11 @@ func (api *API) GetWork() ([4]string, error) {
workCh = make(chan [4]string, 1)
errc = make(chan error, 1)
)

select {
case api.ethash.fetchWorkCh <- &sealWork{errc: errc, res: workCh}:
case <-api.ethash.exitCh:
case api.ethash.remote.fetchWorkCh <- &sealWork{errc: errc, res: workCh}:
case <-api.ethash.remote.exitCh:
return [4]string{}, errEthashStopped
}

select {
case work := <-workCh:
return work, nil
@@ -66,23 +64,21 @@ func (api *API) GetWork() ([4]string, error) {
// It returns an indication if the work was accepted.
// Note either an invalid solution, a stale work a non-existent work will return false.
func (api *API) SubmitWork(nonce types.BlockNonce, hash, digest common.Hash) bool {
if api.ethash.config.PowMode != ModeNormal && api.ethash.config.PowMode != ModeTest {
if api.ethash.remote == nil {
return false
}

var errc = make(chan error, 1)

select {
case api.ethash.submitWorkCh <- &mineResult{
case api.ethash.remote.submitWorkCh <- &mineResult{
nonce: nonce,
mixDigest: digest,
hash: hash,
errc: errc,
}:
case <-api.ethash.exitCh:
case <-api.ethash.remote.exitCh:
return false
}

err := <-errc
return err == nil
}
@@ -94,21 +90,19 @@ func (api *API) SubmitWork(nonce types.BlockNonce, hash, digest common.Hash) boo
// It accepts the miner hash rate and an identifier which must be unique
// between nodes.
func (api *API) SubmitHashRate(rate hexutil.Uint64, id common.Hash) bool {
if api.ethash.config.PowMode != ModeNormal && api.ethash.config.PowMode != ModeTest {
if api.ethash.remote == nil {
return false
}

var done = make(chan struct{}, 1)

select {
case api.ethash.submitRateCh <- &hashrate{done: done, rate: uint64(rate), id: id}:
case <-api.ethash.exitCh:
case api.ethash.remote.submitRateCh <- &hashrate{done: done, rate: uint64(rate), id: id}:
case <-api.ethash.remote.exitCh:
return false
}

// Block until hash rate submitted successfully.
<-done

return true
}
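GetWork, SubmitWork and SubmitHashRate all follow the same shape: send a request struct carrying a reply channel into the sealer loop, racing against the exit channel so callers never block on a stopped engine. A stripped-down sketch of that pattern:

package main

import (
	"errors"
	"fmt"
)

type request struct {
	res  chan string
	errc chan error
}

// ask sends a request to a worker loop, but gives up cleanly if the worker
// has already exited; this mirrors the select over remote.fetchWorkCh and
// remote.exitCh in the diff above.
func ask(workCh chan *request, exitCh chan struct{}) (string, error) {
	req := &request{res: make(chan string, 1), errc: make(chan error, 1)}
	select {
	case workCh <- req:
	case <-exitCh:
		return "", errors.New("worker stopped")
	}
	select {
	case v := <-req.res:
		return v, nil
	case err := <-req.errc:
		return "", err
	}
}

func main() {
	workCh := make(chan *request)
	exitCh := make(chan struct{})
	go func() {
		req := <-workCh
		req.res <- "work package"
	}()
	fmt.Println(ask(workCh, exitCh))
}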
@@ -44,6 +44,11 @@ var (
maxUncles = 2 // Maximum number of uncles allowed in a single block
allowedFutureBlockTime = 15 * time.Second // Max time from current time allowed for blocks, before they're considered future blocks

// calcDifficultyEip2384 is the difficulty adjustment algorithm as specified by EIP 2384.
// It offsets the bomb 4M blocks from Constantinople, so in total 9M blocks.
// Specification EIP-2384: https://eips.ethereum.org/EIPS/eip-2384
calcDifficultyEip2384 = makeDifficultyCalculator(big.NewInt(9000000))

// calcDifficultyConstantinople is the difficulty adjustment algorithm for Constantinople.
// It returns the difficulty that a new block should have when created at time given the
// parent block's time and difficulty. The calculation uses the Byzantium rules, but with
@@ -86,7 +91,7 @@ func (ethash *Ethash) VerifyHeader(chain consensus.ChainReader, header *types.He
if ethash.config.PowMode == ModeFullFake {
return nil
}
// Short circuit if the header is known, or it's parent not
// Short circuit if the header is known, or its parent not
number := header.Number.Uint64()
if chain.GetHeader(header.Hash(), number) != nil {
return nil
@@ -252,7 +257,7 @@ func (ethash *Ethash) verifyHeader(chain consensus.ChainReader, header, parent *
if header.Time <= parent.Time {
return errZeroBlockTime
}
// Verify the block's difficulty based in it's timestamp and parent's difficulty
// Verify the block's difficulty based in its timestamp and parent's difficulty
expected := ethash.CalcDifficulty(chain, header.Time, parent)

if expected.Cmp(header.Difficulty) != 0 {
@@ -311,6 +316,8 @@ func (ethash *Ethash) CalcDifficulty(chain consensus.ChainReader, time uint64, p
func CalcDifficulty(config *params.ChainConfig, time uint64, parent *types.Header) *big.Int {
next := new(big.Int).Add(parent.Number, big1)
switch {
case config.IsMuirGlacier(next):
return calcDifficultyEip2384(time, parent)
case config.IsConstantinople(next):
return calcDifficultyConstantinople(time, parent)
case config.IsByzantium(next):
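makeDifficultyCalculator(big.NewInt(9000000)) means the difficulty-bomb exponent is computed against a "fake" block number nine million lower than the real one. A sketch of the delay arithmetic only, under the assumption that the bomb period is 100k blocks; the full calculator also applies the parent-time adjustment, which is omitted here:

package main

import (
	"fmt"
	"math/big"
)

// bombFactor returns 2^(periodCount-2) for the delayed bomb, the part of the
// difficulty rule affected by EIP-2384; adjustment by parent time is omitted.
func bombFactor(number, delay *big.Int) *big.Int {
	fake := new(big.Int).Sub(number, delay)
	if fake.Sign() < 0 {
		fake.SetUint64(0)
	}
	period := new(big.Int).Div(fake, big.NewInt(100000)) // bomb period is 100k blocks
	if period.Cmp(big.NewInt(2)) < 0 {
		return big.NewInt(0) // bomb not yet ticking
	}
	exp := new(big.Int).Sub(period, big.NewInt(2))
	return new(big.Int).Exp(big.NewInt(2), exp, nil)
}

func main() {
	delay := big.NewInt(9000000)
	// At block 9.2M the bomb behaves as if the chain were at block 200k.
	fmt.Println(bombFactor(big.NewInt(9200000), delay)) // 1
}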
@@ -34,9 +34,7 @@ import (
"unsafe"

mmap "github.com/edsrzf/mmap-go"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/consensus"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/rpc"
@@ -50,7 +48,7 @@ var (
two256 = new(big.Int).Exp(big.NewInt(2), big.NewInt(256), big.NewInt(0))

// sharedEthash is a full instance that can be shared between multiple users.
sharedEthash = New(Config{"", 3, 0, "", 1, 0, ModeNormal}, nil, false)
sharedEthash = New(Config{"", 3, 0, "", 1, 0, ModeNormal, nil}, nil, false)

// algorithmRevision is the data structure version used for file naming.
algorithmRevision = 23
@@ -403,36 +401,8 @@ type Config struct {
DatasetsInMem int
DatasetsOnDisk int
PowMode Mode
}

// sealTask wraps a seal block with relative result channel for remote sealer thread.
type sealTask struct {
block *types.Block
results chan<- *types.Block
}

// mineResult wraps the pow solution parameters for the specified block.
type mineResult struct {
nonce types.BlockNonce
mixDigest common.Hash
hash common.Hash

errc chan error
}

// hashrate wraps the hash rate submitted by the remote sealer.
type hashrate struct {
id common.Hash
ping time.Time
rate uint64

done chan struct{}
}

// sealWork wraps a seal work package for remote sealer.
type sealWork struct {
errc chan error
res chan [4]string
Log log.Logger `toml:"-"`
}

// Ethash is a consensus engine based on proof-of-work implementing the ethash
@@ -448,52 +418,42 @@ type Ethash struct {
threads int // Number of threads to mine on if mining
update chan struct{} // Notification channel to update mining parameters
hashrate metrics.Meter // Meter tracking the average hashrate

// Remote sealer related fields
workCh chan *sealTask // Notification channel to push new work and relative result channel to remote sealer
fetchWorkCh chan *sealWork // Channel used for remote sealer to fetch mining work
submitWorkCh chan *mineResult // Channel used for remote sealer to submit their mining result
fetchRateCh chan chan uint64 // Channel used to gather submitted hash rate for local or remote sealer.
submitRateCh chan *hashrate // Channel used for remote sealer to submit their mining hashrate
remote *remoteSealer

// The fields below are hooks for testing
shared *Ethash // Shared PoW verifier to avoid cache regeneration
fakeFail uint64 // Block number which fails PoW check even in fake mode
fakeDelay time.Duration // Time delay to sleep for before returning from verify

lock sync.Mutex // Ensures thread safety for the in-memory caches and mining fields
closeOnce sync.Once // Ensures exit channel will not be closed twice.
exitCh chan chan error // Notification channel to exiting backend threads
lock sync.Mutex // Ensures thread safety for the in-memory caches and mining fields
closeOnce sync.Once // Ensures exit channel will not be closed twice.
}

// New creates a full sized ethash PoW scheme and starts a background thread for
// remote mining, also optionally notifying a batch of remote services of new work
// packages.
func New(config Config, notify []string, noverify bool) *Ethash {
if config.Log == nil {
config.Log = log.Root()
}
if config.CachesInMem <= 0 {
log.Warn("One ethash cache must always be in memory", "requested", config.CachesInMem)
config.Log.Warn("One ethash cache must always be in memory", "requested", config.CachesInMem)
config.CachesInMem = 1
}
if config.CacheDir != "" && config.CachesOnDisk > 0 {
log.Info("Disk storage enabled for ethash caches", "dir", config.CacheDir, "count", config.CachesOnDisk)
config.Log.Info("Disk storage enabled for ethash caches", "dir", config.CacheDir, "count", config.CachesOnDisk)
}
if config.DatasetDir != "" && config.DatasetsOnDisk > 0 {
log.Info("Disk storage enabled for ethash DAGs", "dir", config.DatasetDir, "count", config.DatasetsOnDisk)
config.Log.Info("Disk storage enabled for ethash DAGs", "dir", config.DatasetDir, "count", config.DatasetsOnDisk)
}
ethash := &Ethash{
config: config,
caches: newlru("cache", config.CachesInMem, newCache),
datasets: newlru("dataset", config.DatasetsInMem, newDataset),
update: make(chan struct{}),
hashrate: metrics.NewMeterForced(),
workCh: make(chan *sealTask),
fetchWorkCh: make(chan *sealWork),
submitWorkCh: make(chan *mineResult),
fetchRateCh: make(chan chan uint64),
submitRateCh: make(chan *hashrate),
exitCh: make(chan chan error),
config: config,
caches: newlru("cache", config.CachesInMem, newCache),
datasets: newlru("dataset", config.DatasetsInMem, newDataset),
update: make(chan struct{}),
hashrate: metrics.NewMeterForced(),
}
go ethash.remote(notify, noverify)
ethash.remote = startRemoteSealer(ethash, notify, noverify)
return ethash
}

@@ -501,19 +461,13 @@ func New(config Config, notify []string, noverify bool) *Ethash {
// purposes.
func NewTester(notify []string, noverify bool) *Ethash {
ethash := &Ethash{
config: Config{PowMode: ModeTest},
caches: newlru("cache", 1, newCache),
datasets: newlru("dataset", 1, newDataset),
update: make(chan struct{}),
hashrate: metrics.NewMeterForced(),
workCh: make(chan *sealTask),
fetchWorkCh: make(chan *sealWork),
submitWorkCh: make(chan *mineResult),
fetchRateCh: make(chan chan uint64),
submitRateCh: make(chan *hashrate),
exitCh: make(chan chan error),
config: Config{PowMode: ModeTest, Log: log.Root()},
caches: newlru("cache", 1, newCache),
datasets: newlru("dataset", 1, newDataset),
update: make(chan struct{}),
hashrate: metrics.NewMeterForced(),
}
go ethash.remote(notify, noverify)
ethash.remote = startRemoteSealer(ethash, notify, noverify)
return ethash
}

@@ -524,6 +478,7 @@ func NewFaker() *Ethash {
return &Ethash{
config: Config{
PowMode: ModeFake,
Log: log.Root(),
},
}
}
@@ -535,6 +490,7 @@ func NewFakeFailer(fail uint64) *Ethash {
return &Ethash{
config: Config{
PowMode: ModeFake,
Log: log.Root(),
},
fakeFail: fail,
}
@@ -547,6 +503,7 @@ func NewFakeDelayer(delay time.Duration) *Ethash {
return &Ethash{
config: Config{
PowMode: ModeFake,
Log: log.Root(),
},
fakeDelay: delay,
}
@@ -558,6 +515,7 @@ func NewFullFaker() *Ethash {
return &Ethash{
config: Config{
PowMode: ModeFullFake,
Log: log.Root(),
},
}
}
@@ -573,13 +531,11 @@ func (ethash *Ethash) Close() error {
var err error
ethash.closeOnce.Do(func() {
// Short circuit if the exit channel is not allocated.
if ethash.exitCh == nil {
if ethash.remote == nil {
return
}
errc := make(chan error)
ethash.exitCh <- errc
err = <-errc
close(ethash.exitCh)
close(ethash.remote.requestExit)
<-ethash.remote.exitCh
})
return err
}
@@ -680,8 +636,8 @@ func (ethash *Ethash) Hashrate() float64 {
var res = make(chan uint64, 1)

select {
case ethash.fetchRateCh <- res:
case <-ethash.exitCh:
case ethash.remote.fetchRateCh <- res:
case <-ethash.remote.exitCh:
// Return local hashrate only if ethash is stopped.
return ethash.hashrate.Rate1()
}
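The refactor replaces a bag of channels on Ethash with a *remoteSealer that owns its own lifecycle: Close signals requestExit and then waits on exitCh, which the loop closes from a defer once cleanup has run. A minimal sketch of that request/acknowledge shutdown idiom, independent of the diff:

package main

import "fmt"

type worker struct {
	requestExit chan struct{}
	exitCh      chan struct{}
}

func startWorker() *worker {
	w := &worker{
		requestExit: make(chan struct{}),
		exitCh:      make(chan struct{}),
	}
	go w.loop()
	return w
}

func (w *worker) loop() {
	defer close(w.exitCh) // acknowledge shutdown after cleanup
	<-w.requestExit       // in the real sealer this is one case of a larger select
	fmt.Println("cleaning up")
}

// close mirrors Ethash.Close: ask the loop to stop, then wait for the ack.
func (w *worker) close() {
	close(w.requestExit)
	<-w.exitCh
}

func main() {
	w := startWorker()
	w.close()
	fmt.Println("stopped")
}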
@@ -49,7 +49,7 @@ func TestTestMode(t *testing.T) {
if err := ethash.VerifySeal(nil, header); err != nil {
t.Fatalf("unexpected verification error: %v", err)
}
case <-time.NewTimer(time.Second).C:
case <-time.NewTimer(2 * time.Second).C:
t.Error("sealing result timeout")
}
}
@@ -18,6 +18,7 @@ package ethash

import (
"bytes"
"context"
crand "crypto/rand"
"encoding/json"
"errors"
@@ -33,7 +34,6 @@ import (
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/consensus"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log"
)

const (
@@ -56,7 +56,7 @@ func (ethash *Ethash) Seal(chain consensus.ChainReader, block *types.Block, resu
select {
case results <- block.WithSeal(header):
default:
log.Warn("Sealing result is not read by miner", "mode", "fake", "sealhash", ethash.SealHash(block.Header()))
ethash.config.Log.Warn("Sealing result is not read by miner", "mode", "fake", "sealhash", ethash.SealHash(block.Header()))
}
return nil
}
@@ -85,8 +85,8 @@ func (ethash *Ethash) Seal(chain consensus.ChainReader, block *types.Block, resu
threads = 0 // Allows disabling local mining without extra logic around local/remote
}
// Push new work to remote sealer
if ethash.workCh != nil {
ethash.workCh <- &sealTask{block: block, results: results}
if ethash.remote != nil {
ethash.remote.workCh <- &sealTask{block: block, results: results}
}
var (
pend sync.WaitGroup
@@ -111,14 +111,14 @@ func (ethash *Ethash) Seal(chain consensus.ChainReader, block *types.Block, resu
select {
case results <- result:
default:
log.Warn("Sealing result is not read by miner", "mode", "local", "sealhash", ethash.SealHash(block.Header()))
ethash.config.Log.Warn("Sealing result is not read by miner", "mode", "local", "sealhash", ethash.SealHash(block.Header()))
}
close(abort)
case <-ethash.update:
// Thread count was changed on user request, restart
close(abort)
if err := ethash.Seal(chain, block, results, stop); err != nil {
log.Error("Failed to restart sealing after update", "err", err)
ethash.config.Log.Error("Failed to restart sealing after update", "err", err)
}
}
// Wait for all miners to terminate and return the block
@@ -143,7 +143,7 @@ func (ethash *Ethash) mine(block *types.Block, id int, seed uint64, abort chan s
attempts = int64(0)
nonce = seed
)
logger := log.New("miner", id)
logger := ethash.config.Log.New("miner", id)
logger.Trace("Started ethash search for new nonces", "seed", seed)
search:
for {
@@ -186,160 +186,128 @@ search:
runtime.KeepAlive(dataset)
}

// remote is a standalone goroutine to handle remote mining related stuff.
func (ethash *Ethash) remote(notify []string, noverify bool) {
var (
works = make(map[common.Hash]*types.Block)
rates = make(map[common.Hash]hashrate)
// This is the timeout for HTTP requests to notify external miners.
const remoteSealerTimeout = 1 * time.Second

results chan<- *types.Block
currentBlock *types.Block
currentWork [4]string
type remoteSealer struct {
works map[common.Hash]*types.Block
rates map[common.Hash]hashrate
currentBlock *types.Block
currentWork [4]string
notifyCtx context.Context
cancelNotify context.CancelFunc // cancels all notification requests
reqWG sync.WaitGroup // tracks notification request goroutines

notifyTransport = &http.Transport{}
notifyClient = &http.Client{
Transport: notifyTransport,
Timeout: time.Second,
}
notifyReqs = make([]*http.Request, len(notify))
)
// notifyWork notifies all the specified mining endpoints of the availability of
// new work to be processed.
notifyWork := func() {
work := currentWork
blob, _ := json.Marshal(work)
ethash *Ethash
noverify bool
notifyURLs []string
results chan<- *types.Block
workCh chan *sealTask // Notification channel to push new work and relative result channel to remote sealer
fetchWorkCh chan *sealWork // Channel used for remote sealer to fetch mining work
submitWorkCh chan *mineResult // Channel used for remote sealer to submit their mining result
fetchRateCh chan chan uint64 // Channel used to gather submitted hash rate for local or remote sealer.
submitRateCh chan *hashrate // Channel used for remote sealer to submit their mining hashrate
requestExit chan struct{}
exitCh chan struct{}
}

for i, url := range notify {
// Terminate any previously pending request and create the new work
if notifyReqs[i] != nil {
notifyTransport.CancelRequest(notifyReqs[i])
}
notifyReqs[i], _ = http.NewRequest("POST", url, bytes.NewReader(blob))
notifyReqs[i].Header.Set("Content-Type", "application/json")
// sealTask wraps a seal block with relative result channel for remote sealer thread.
type sealTask struct {
block *types.Block
results chan<- *types.Block
}

// Push the new work concurrently to all the remote nodes
go func(req *http.Request, url string) {
res, err := notifyClient.Do(req)
if err != nil {
log.Warn("Failed to notify remote miner", "err", err)
} else {
log.Trace("Notified remote miner", "miner", url, "hash", log.Lazy{Fn: func() common.Hash { return common.HexToHash(work[0]) }}, "target", work[2])
res.Body.Close()
}
}(notifyReqs[i], url)
}
// mineResult wraps the pow solution parameters for the specified block.
type mineResult struct {
nonce types.BlockNonce
mixDigest common.Hash
hash common.Hash

errc chan error
}

// hashrate wraps the hash rate submitted by the remote sealer.
type hashrate struct {
id common.Hash
ping time.Time
rate uint64

done chan struct{}
}

// sealWork wraps a seal work package for remote sealer.
type sealWork struct {
errc chan error
res chan [4]string
}

func startRemoteSealer(ethash *Ethash, urls []string, noverify bool) *remoteSealer {
ctx, cancel := context.WithCancel(context.Background())
s := &remoteSealer{
ethash: ethash,
noverify: noverify,
notifyURLs: urls,
notifyCtx: ctx,
cancelNotify: cancel,
works: make(map[common.Hash]*types.Block),
rates: make(map[common.Hash]hashrate),
workCh: make(chan *sealTask),
fetchWorkCh: make(chan *sealWork),
submitWorkCh: make(chan *mineResult),
fetchRateCh: make(chan chan uint64),
submitRateCh: make(chan *hashrate),
requestExit: make(chan struct{}),
exitCh: make(chan struct{}),
}
// makeWork creates a work package for external miner.
//
// The work package consists of 3 strings:
// result[0], 32 bytes hex encoded current block header pow-hash
// result[1], 32 bytes hex encoded seed hash used for DAG
// result[2], 32 bytes hex encoded boundary condition ("target"), 2^256/difficulty
// result[3], hex encoded block number
makeWork := func(block *types.Block) {
hash := ethash.SealHash(block.Header())
go s.loop()
return s
}

currentWork[0] = hash.Hex()
currentWork[1] = common.BytesToHash(SeedHash(block.NumberU64())).Hex()
currentWork[2] = common.BytesToHash(new(big.Int).Div(two256, block.Difficulty()).Bytes()).Hex()
currentWork[3] = hexutil.EncodeBig(block.Number())

// Trace the seal work fetched by remote sealer.
currentBlock = block
works[hash] = block
}
// submitWork verifies the submitted pow solution, returning
// whether the solution was accepted or not (not can be both a bad pow as well as
// any other error, like no pending work or stale mining result).
submitWork := func(nonce types.BlockNonce, mixDigest common.Hash, sealhash common.Hash) bool {
if currentBlock == nil {
log.Error("Pending work without block", "sealhash", sealhash)
return false
}
// Make sure the work submitted is present
block := works[sealhash]
if block == nil {
log.Warn("Work submitted but none pending", "sealhash", sealhash, "curnumber", currentBlock.NumberU64())
return false
}
// Verify the correctness of submitted result.
header := block.Header()
header.Nonce = nonce
header.MixDigest = mixDigest

start := time.Now()
if !noverify {
if err := ethash.verifySeal(nil, header, true); err != nil {
log.Warn("Invalid proof-of-work submitted", "sealhash", sealhash, "elapsed", common.PrettyDuration(time.Since(start)), "err", err)
return false
}
}
// Make sure the result channel is assigned.
if results == nil {
log.Warn("Ethash result channel is empty, submitted mining result is rejected")
return false
}
log.Trace("Verified correct proof-of-work", "sealhash", sealhash, "elapsed", common.PrettyDuration(time.Since(start)))

// Solutions seems to be valid, return to the miner and notify acceptance.
solution := block.WithSeal(header)

// The submitted solution is within the scope of acceptance.
if solution.NumberU64()+staleThreshold > currentBlock.NumberU64() {
select {
case results <- solution:
log.Debug("Work submitted is acceptable", "number", solution.NumberU64(), "sealhash", sealhash, "hash", solution.Hash())
return true
default:
log.Warn("Sealing result is not read by miner", "mode", "remote", "sealhash", sealhash)
return false
}
}
// The submitted block is too old to accept, drop it.
log.Warn("Work submitted is too old", "number", solution.NumberU64(), "sealhash", sealhash, "hash", solution.Hash())
return false
}
func (s *remoteSealer) loop() {
defer func() {
s.ethash.config.Log.Trace("Ethash remote sealer is exiting")
s.cancelNotify()
s.reqWG.Wait()
close(s.exitCh)
}()

ticker := time.NewTicker(5 * time.Second)
defer ticker.Stop()

for {
select {
case work := <-ethash.workCh:
case work := <-s.workCh:
// Update current work with new received block.
// Note same work can be past twice, happens when changing CPU threads.
results = work.results
s.results = work.results
s.makeWork(work.block)
s.notifyWork()

makeWork(work.block)

// Notify and requested URLs of the new work availability
notifyWork()

case work := <-ethash.fetchWorkCh:
case work := <-s.fetchWorkCh:
// Return current mining work to remote miner.
if currentBlock == nil {
if s.currentBlock == nil {
work.errc <- errNoMiningWork
} else {
work.res <- currentWork
work.res <- s.currentWork
}

case result := <-ethash.submitWorkCh:
case result := <-s.submitWorkCh:
// Verify submitted PoW solution based on maintained mining blocks.
if submitWork(result.nonce, result.mixDigest, result.hash) {
if s.submitWork(result.nonce, result.mixDigest, result.hash) {
result.errc <- nil
} else {
result.errc <- errInvalidSealResult
}

case result := <-ethash.submitRateCh:
case result := <-s.submitRateCh:
// Trace remote sealer's hash rate by submitted value.
rates[result.id] = hashrate{rate: result.rate, ping: time.Now()}
s.rates[result.id] = hashrate{rate: result.rate, ping: time.Now()}
close(result.done)

case req := <-ethash.fetchRateCh:
case req := <-s.fetchRateCh:
// Gather all hash rate submitted by remote sealer.
var total uint64
for _, rate := range rates {
for _, rate := range s.rates {
// this could overflow
total += rate.rate
}
@@ -347,25 +315,126 @@ func (ethash *Ethash) remote(notify []string, noverify bool) {

case <-ticker.C:
// Clear stale submitted hash rate.
for id, rate := range rates {
for id, rate := range s.rates {
if time.Since(rate.ping) > 10*time.Second {
delete(rates, id)
delete(s.rates, id)
}
}
// Clear stale pending blocks
if currentBlock != nil {
for hash, block := range works {
if block.NumberU64()+staleThreshold <= currentBlock.NumberU64() {
delete(works, hash)
if s.currentBlock != nil {
for hash, block := range s.works {
if block.NumberU64()+staleThreshold <= s.currentBlock.NumberU64() {
delete(s.works, hash)
}
}
}

case errc := <-ethash.exitCh:
// Exit remote loop if ethash is closed and return relevant error.
errc <- nil
log.Trace("Ethash remote sealer is exiting")
case <-s.requestExit:
return
}
}
}

// makeWork creates a work package for external miner.
//
// The work package consists of 3 strings:
// result[0], 32 bytes hex encoded current block header pow-hash
// result[1], 32 bytes hex encoded seed hash used for DAG
// result[2], 32 bytes hex encoded boundary condition ("target"), 2^256/difficulty
// result[3], hex encoded block number
func (s *remoteSealer) makeWork(block *types.Block) {
hash := s.ethash.SealHash(block.Header())
s.currentWork[0] = hash.Hex()
s.currentWork[1] = common.BytesToHash(SeedHash(block.NumberU64())).Hex()
s.currentWork[2] = common.BytesToHash(new(big.Int).Div(two256, block.Difficulty()).Bytes()).Hex()
s.currentWork[3] = hexutil.EncodeBig(block.Number())

// Trace the seal work fetched by remote sealer.
s.currentBlock = block
s.works[hash] = block
}

// notifyWork notifies all the specified mining endpoints of the availability of
// new work to be processed.
func (s *remoteSealer) notifyWork() {
work := s.currentWork
blob, _ := json.Marshal(work)
s.reqWG.Add(len(s.notifyURLs))
for _, url := range s.notifyURLs {
go s.sendNotification(s.notifyCtx, url, blob, work)
}
}

func (s *remoteSealer) sendNotification(ctx context.Context, url string, json []byte, work [4]string) {
defer s.reqWG.Done()

req, err := http.NewRequest("POST", url, bytes.NewReader(json))
if err != nil {
s.ethash.config.Log.Warn("Can't create remote miner notification", "err", err)
return
}
ctx, cancel := context.WithTimeout(ctx, remoteSealerTimeout)
defer cancel()
req = req.WithContext(ctx)
req.Header.Set("Content-Type", "application/json")

resp, err := http.DefaultClient.Do(req)
if err != nil {
s.ethash.config.Log.Warn("Failed to notify remote miner", "err", err)
} else {
s.ethash.config.Log.Trace("Notified remote miner", "miner", url, "hash", work[0], "target", work[2])
resp.Body.Close()
}
}

// submitWork verifies the submitted pow solution, returning
// whether the solution was accepted or not (not can be both a bad pow as well as
// any other error, like no pending work or stale mining result).
func (s *remoteSealer) submitWork(nonce types.BlockNonce, mixDigest common.Hash, sealhash common.Hash) bool {
if s.currentBlock == nil {
s.ethash.config.Log.Error("Pending work without block", "sealhash", sealhash)
return false
}
// Make sure the work submitted is present
block := s.works[sealhash]
if block == nil {
s.ethash.config.Log.Warn("Work submitted but none pending", "sealhash", sealhash, "curnumber", s.currentBlock.NumberU64())
return false
}
// Verify the correctness of submitted result.
header := block.Header()
header.Nonce = nonce
header.MixDigest = mixDigest

start := time.Now()
if !s.noverify {
if err := s.ethash.verifySeal(nil, header, true); err != nil {
s.ethash.config.Log.Warn("Invalid proof-of-work submitted", "sealhash", sealhash, "elapsed", common.PrettyDuration(time.Since(start)), "err", err)
return false
}
}
// Make sure the result channel is assigned.
if s.results == nil {
s.ethash.config.Log.Warn("Ethash result channel is empty, submitted mining result is rejected")
return false
}
s.ethash.config.Log.Trace("Verified correct proof-of-work", "sealhash", sealhash, "elapsed", common.PrettyDuration(time.Since(start)))

// Solutions seems to be valid, return to the miner and notify acceptance.
solution := block.WithSeal(header)

// The submitted solution is within the scope of acceptance.
if solution.NumberU64()+staleThreshold > s.currentBlock.NumberU64() {
select {
case s.results <- solution:
s.ethash.config.Log.Debug("Work submitted is acceptable", "number", solution.NumberU64(), "sealhash", sealhash, "hash", solution.Hash())
return true
default:
s.ethash.config.Log.Warn("Sealing result is not read by miner", "mode", "remote", "sealhash", sealhash)
return false
}
}
// The submitted block is too old to accept, drop it.
s.ethash.config.Log.Warn("Work submitted is too old", "number", solution.NumberU64(), "sealhash", sealhash, "hash", solution.Hash())
return false
}
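sendNotification replaces the old Transport.CancelRequest machinery with contexts: each POST gets a timeout derived from a parent context that loop's defer cancels, and reqWG lets shutdown wait for in-flight requests. The same pattern in isolation, a sketch rather than the sealer's exact code:

package main

import (
	"bytes"
	"context"
	"fmt"
	"net/http"
	"sync"
	"time"
)

func notifyAll(parent context.Context, urls []string, payload []byte) {
	var wg sync.WaitGroup
	wg.Add(len(urls))
	for _, url := range urls {
		go func(url string) {
			defer wg.Done()
			ctx, cancel := context.WithTimeout(parent, time.Second)
			defer cancel()
			req, err := http.NewRequest("POST", url, bytes.NewReader(payload))
			if err != nil {
				return
			}
			req = req.WithContext(ctx) // cancelling parent aborts every request
			if resp, err := http.DefaultClient.Do(req); err == nil {
				resp.Body.Close()
			}
		}(url)
	}
	wg.Wait()
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	notifyAll(ctx, []string{"http://127.0.0.1:9/unreachable"}, []byte(`{}`))
	fmt.Println("done")
}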
@@ -20,59 +20,39 @@ import (
"encoding/json"
"io/ioutil"
"math/big"
"net"
"net/http"
"net/http/httptest"
"testing"
"time"

"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/internal/testlog"
"github.com/ethereum/go-ethereum/log"
)

// Tests whether remote HTTP servers are correctly notified of new work.
func TestRemoteNotify(t *testing.T) {
// Start a simple webserver to capture notifications
// Start a simple web server to capture notifications.
sink := make(chan [3]string)

server := &http.Server{
Handler: http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
blob, err := ioutil.ReadAll(req.Body)
if err != nil {
t.Fatalf("failed to read miner notification: %v", err)
}
var work [3]string
if err := json.Unmarshal(blob, &work); err != nil {
t.Fatalf("failed to unmarshal miner notification: %v", err)
}
sink <- work
}),
}
// Open a custom listener to extract its local address
listener, err := net.Listen("tcp", "localhost:0")
if err != nil {
t.Fatalf("failed to open notification server: %v", err)
}
defer listener.Close()

go server.Serve(listener)

// Wait for server to start listening
var tries int
for tries = 0; tries < 10; tries++ {
conn, _ := net.DialTimeout("tcp", listener.Addr().String(), 1*time.Second)
if conn != nil {
break
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
blob, err := ioutil.ReadAll(req.Body)
if err != nil {
t.Errorf("failed to read miner notification: %v", err)
}
}
if tries == 10 {
t.Fatal("tcp listener not ready for more than 10 seconds")
}
var work [3]string
if err := json.Unmarshal(blob, &work); err != nil {
t.Errorf("failed to unmarshal miner notification: %v", err)
}
sink <- work
}))
defer server.Close()

// Create the custom ethash engine
ethash := NewTester([]string{"http://" + listener.Addr().String()}, false)
// Create the custom ethash engine.
ethash := NewTester([]string{server.URL}, false)
defer ethash.Close()

// Stream a work task and ensure the notification bubbles out
// Stream a work task and ensure the notification bubbles out.
header := &types.Header{Number: big.NewInt(1), Difficulty: big.NewInt(100)}
block := types.NewBlockWithHeader(header)

@@ -97,46 +77,37 @@ func TestRemoteNotify(t *testing.T) {
// Tests that pushing work packages fast to the miner doesn't cause any data race
// issues in the notifications.
func TestRemoteMultiNotify(t *testing.T) {
// Start a simple webserver to capture notifications
// Start a simple web server to capture notifications.
sink := make(chan [3]string, 64)
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
blob, err := ioutil.ReadAll(req.Body)
if err != nil {
t.Errorf("failed to read miner notification: %v", err)
}
var work [3]string
if err := json.Unmarshal(blob, &work); err != nil {
t.Errorf("failed to unmarshal miner notification: %v", err)
}
sink <- work
}))
defer server.Close()

server := &http.Server{
Handler: http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
blob, err := ioutil.ReadAll(req.Body)
if err != nil {
t.Fatalf("failed to read miner notification: %v", err)
}
var work [3]string
if err := json.Unmarshal(blob, &work); err != nil {
t.Fatalf("failed to unmarshal miner notification: %v", err)
}
sink <- work
}),
}
// Open a custom listener to extract its local address
listener, err := net.Listen("tcp", "localhost:0")
if err != nil {
t.Fatalf("failed to open notification server: %v", err)
}
defer listener.Close()

go server.Serve(listener)

// Create the custom ethash engine
ethash := NewTester([]string{"http://" + listener.Addr().String()}, false)
// Create the custom ethash engine.
ethash := NewTester([]string{server.URL}, false)
ethash.config.Log = testlog.Logger(t, log.LvlWarn)
defer ethash.Close()

// Stream a lot of work task and ensure all the notifications bubble out
// Stream a lot of work task and ensure all the notifications bubble out.
for i := 0; i < cap(sink); i++ {
header := &types.Header{Number: big.NewInt(int64(i)), Difficulty: big.NewInt(100)}
block := types.NewBlockWithHeader(header)

ethash.Seal(nil, block, nil, nil)
}

for i := 0; i < cap(sink); i++ {
select {
case <-sink:
case <-time.After(3 * time.Second):
case <-time.After(10 * time.Second):
t.Fatalf("notification %d timed out", i)
}
}
@@ -206,10 +177,10 @@ func TestStaleSubmission(t *testing.T) {
select {
case res := <-results:
if res.Header().Nonce != fakeNonce {
t.Errorf("case %d block nonce mismatch, want %s, get %s", id+1, fakeNonce, res.Header().Nonce)
t.Errorf("case %d block nonce mismatch, want %x, get %x", id+1, fakeNonce, res.Header().Nonce)
}
if res.Header().MixDigest != fakeDigest {
t.Errorf("case %d block digest mismatch, want %s, get %s", id+1, fakeDigest, res.Header().MixDigest)
t.Errorf("case %d block digest mismatch, want %x, get %x", id+1, fakeDigest, res.Header().MixDigest)
}
if res.Header().Difficulty.Uint64() != c.headers[c.submitIndex].Difficulty.Uint64() {
t.Errorf("case %d block difficulty mismatch, want %d, get %d", id+1, c.headers[c.submitIndex].Difficulty, res.Header().Difficulty)
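httptest.NewServer removes the hand-rolled listener plus dial-until-ready loop: it returns only once the listener is accepting connections, and exposes the base URL directly. A standalone illustration:

package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
	"net/http/httptest"
)

func main() {
	// The server is ready as soon as NewServer returns; no polling needed.
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprint(w, "ok")
	}))
	defer server.Close()

	resp, err := http.Get(server.URL)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	body, _ := ioutil.ReadAll(resp.Body)
	fmt.Println(string(body)) // ok
}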
@@ -57,6 +57,7 @@ func NewCompiler(debug bool) *Compiler {
// second stage to push labels and determine the right
// position.
func (c *Compiler) Feed(ch <-chan token) {
var prev token
for i := range ch {
switch i.typ {
case number:
@@ -73,10 +74,14 @@ func (c *Compiler) Feed(ch <-chan token) {
c.labels[i.text] = c.pc
c.pc++
case label:
c.pc += 5
c.pc += 4
if prev.typ == element && isJump(prev.text) {
c.pc++
}
}

c.tokens = append(c.tokens, i)
prev = i
}
if c.debug {
fmt.Fprintln(os.Stderr, "found", len(c.labels), "labels")
@@ -181,6 +186,8 @@ func (c *Compiler) compileElement(element token) error {
pos := big.NewInt(int64(c.labels[rvalue.text])).Bytes()
pos = append(make([]byte, 4-len(pos)), pos...)
c.pushBin(pos)
case lineEnd:
c.pos--
default:
return compileErr(rvalue, rvalue.text, "number, string or label")
}
@@ -201,8 +208,8 @@ func (c *Compiler) compileElement(element token) error {
case stringValue:
value = []byte(rvalue.text[1 : len(rvalue.text)-1])
case label:
value = make([]byte, 4)
copy(value, big.NewInt(int64(c.labels[rvalue.text])).Bytes())
value = big.NewInt(int64(c.labels[rvalue.text])).Bytes()
value = append(make([]byte, 4-len(value)), value...)
default:
return compileErr(rvalue, rvalue.text, "number, string or label")
}
@@ -14,22 +14,58 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

const webpack = require('webpack');
const merge = require('webpack-merge');
const WebpackDashboard = require('webpack-dashboard/plugin');
const common = require('./webpack.config.common.js');
package asm

module.exports = merge(common, {
mode: 'development',
plugins: [
new WebpackDashboard(),
new webpack.HotModuleReplacementPlugin(),
],
// devtool: 'eval',
devtool: 'source-map',
devServer: {
port: 8081,
hot: true,
compress: true,
},
});
import (
"testing"
)

func TestCompiler(t *testing.T) {
tests := []struct {
input, output string
}{
{
input: `
GAS
label:
PUSH @label
`,
output: "5a5b6300000001",
},
{
input: `
PUSH @label
label:
`,
output: "63000000055b",
},
{
input: `
PUSH @label
JUMP
label:
`,
output: "6300000006565b",
},
{
input: `
JUMP @label
label:
`,
output: "6300000006565b",
},
}
for _, test := range tests {
ch := Lex([]byte(test.input), false)
c := NewCompiler(false)
c.Feed(ch)
output, err := c.Compile()
if len(err) != 0 {
t.Errorf("compile error: %v\ninput: %s", err, test.input)
continue
}
if output != test.output {
t.Errorf("incorrect output\ninput: %sgot: %s\nwant: %s\n", test.input, output, test.output)
}
}
}
@@ -60,6 +60,14 @@ func TestLexer(t *testing.T) {
input: "0123abc",
tokens: []token{{typ: lineStart}, {typ: number, text: "0123"}, {typ: element, text: "abc"}, {typ: eof}},
},
{
input: "@foo",
tokens: []token{{typ: lineStart}, {typ: label, text: "foo"}, {typ: eof}},
},
{
input: "@label123",
tokens: []token{{typ: lineStart}, {typ: label, text: "label123"}, {typ: eof}},
},
}

for _, test := range tests {
@@ -234,7 +234,7 @@ func lexComment(l *lexer) stateFn {
// the lex text state function to advance the parsing
// process.
func lexLabel(l *lexer) stateFn {
l.acceptRun(Alpha + "_")
l.acceptRun(Alpha + "_" + Numbers)

l.emit(label)
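The lexer change above pairs with the new @label123 test case: once the leading '@' has been consumed, the label name may now also contain digits. A standalone sketch of the accept-run rule with names of our own choosing:

package main

import (
	"fmt"
	"strings"
)

const alpha = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
const numbers = "0123456789"

// scanLabel mimics lexLabel's acceptRun(Alpha + "_" + Numbers): it consumes
// letters, underscores and digits after the caller has taken the leading '@'.
func scanLabel(s string) string {
	i := 0
	for i < len(s) && strings.ContainsRune(alpha+"_"+numbers, rune(s[i])) {
		i++
	}
	return s[:i]
}

func main() {
	fmt.Println(scanLabel("label123:")) // "label123" — digits now accepted
	fmt.Println(scanLabel("foo bar"))   // "foo" — stops at the space
}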
@@ -23,6 +23,7 @@ import (
"io"
"math/big"
mrand "math/rand"
"sort"
"sync"
"sync/atomic"
"time"
@@ -42,7 +43,7 @@ import (
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/trie"
"github.com/hashicorp/golang-lru"
lru "github.com/hashicorp/golang-lru"
)

var (
@@ -855,8 +856,9 @@ func (bc *BlockChain) procFutureBlocks() {
}
}
if len(blocks) > 0 {
types.BlockBy(types.Number).Sort(blocks)
sort.Slice(blocks, func(i, j int) bool {
return blocks[i].NumberU64() < blocks[j].NumberU64()
})
// Insert one by one as chain insertion needs contiguous ancestry between blocks
for i := range blocks {
bc.InsertChain(blocks[i : i+1])
@@ -1258,16 +1260,16 @@ func (bc *BlockChain) writeKnownBlock(block *types.Block) error {
}

// WriteBlockWithState writes the block and all associated state to the database.
func (bc *BlockChain) WriteBlockWithState(block *types.Block, receipts []*types.Receipt, state *state.StateDB) (status WriteStatus, err error) {
func (bc *BlockChain) WriteBlockWithState(block *types.Block, receipts []*types.Receipt, logs []*types.Log, state *state.StateDB, emitHeadEvent bool) (status WriteStatus, err error) {
bc.chainmu.Lock()
defer bc.chainmu.Unlock()

return bc.writeBlockWithState(block, receipts, state)
return bc.writeBlockWithState(block, receipts, logs, state, emitHeadEvent)
}

// writeBlockWithState writes the block and all associated state to the database,
// but it expects the chain mutex to be held.
func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.Receipt, state *state.StateDB) (status WriteStatus, err error) {
func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.Receipt, logs []*types.Log, state *state.StateDB, emitHeadEvent bool) (status WriteStatus, err error) {
bc.wg.Add(1)
defer bc.wg.Done()

@@ -1392,6 +1394,23 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.
bc.insert(block)
}
bc.futureBlocks.Remove(block.Hash())

if status == CanonStatTy {
bc.chainFeed.Send(ChainEvent{Block: block, Hash: block.Hash(), Logs: logs})
if len(logs) > 0 {
bc.logsFeed.Send(logs)
}
// In theory we should fire a ChainHeadEvent when we inject a canonical
// block, but sometimes we insert a whole batch of canonical blocks. To
// avoid firing too many ChainHeadEvents, we fire one accumulated event
// at the end of the batch and suppress the per-block event here.
if emitHeadEvent {
bc.chainHeadFeed.Send(ChainHeadEvent{Block: block})
}
} else {
bc.chainSideFeed.Send(ChainSideEvent{Block: block})
}
return status, nil
}
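A hedged sketch of a caller on the sealing side under the new signature (helper name and wiring are illustrative, not from the diff): the logs now travel with the block, and emitHeadEvent is true because a freshly sealed block is a single canonical insertion, so the per-block ChainHeadEvent is wanted immediately.

package miner

import (
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/core/state"
	"github.com/ethereum/go-ethereum/core/types"
)

// commitSealed writes a freshly sealed block together with its receipts and
// logs; emitHeadEvent=true asks WriteBlockWithState to fire the head event
// itself rather than deferring to a batch-level accumulated event.
func commitSealed(chain *core.BlockChain, block *types.Block, receipts []*types.Receipt, logs []*types.Log, statedb *state.StateDB) error {
	_, err := chain.WriteBlockWithState(block, receipts, logs, statedb, true)
	return err
}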
@@ -1442,11 +1461,10 @@ func (bc *BlockChain) InsertChain(chain types.Blocks) (int, error) {
// Pre-checks passed, start the full block imports
bc.wg.Add(1)
bc.chainmu.Lock()
n, events, logs, err := bc.insertChain(chain, true)
n, err := bc.insertChain(chain, true)
bc.chainmu.Unlock()
bc.wg.Done()

bc.PostChainEvents(events, logs)
return n, err
}

@@ -1458,23 +1476,24 @@ func (bc *BlockChain) InsertChain(chain types.Blocks) (int, error) {
// racey behaviour. If a sidechain import is in progress, and the historic state
// is imported, but then new canon-head is added before the actual sidechain
// completes, then the historic state could be pruned again
func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, []interface{}, []*types.Log, error) {
func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, error) {
// If the chain is terminating, don't even bother starting up
if atomic.LoadInt32(&bc.procInterrupt) == 1 {
return 0, nil, nil, nil
return 0, nil
}
// Start a parallel signature recovery (signer will fluke on fork transition, minimal perf loss)
senderCacher.recoverFromBlocks(types.MakeSigner(bc.chainConfig, chain[0].Number()), chain)

// A queued approach to delivering events. This is generally
// faster than direct delivery and requires much less mutex
// acquiring.
var (
stats = insertStats{startTime: mclock.Now()}
events = make([]interface{}, 0, len(chain))
lastCanon *types.Block
coalescedLogs []*types.Log
stats = insertStats{startTime: mclock.Now()}
lastCanon *types.Block
)
// Fire a single chain head event if we've progressed the chain
defer func() {
if lastCanon != nil && bc.CurrentBlock().Hash() == lastCanon.Hash() {
bc.chainHeadFeed.Send(ChainHeadEvent{lastCanon})
}
}()
// Start the parallel header verifier
headers := make([]*types.Header, len(chain))
seals := make([]bool, len(chain))
@@ -1524,7 +1543,7 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, []
for block != nil && err == ErrKnownBlock {
log.Debug("Writing previously known block", "number", block.Number(), "hash", block.Hash())
if err := bc.writeKnownBlock(block); err != nil {
return it.index, nil, nil, err
return it.index, err
}
lastCanon = block

@@ -1543,7 +1562,7 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, []
for block != nil && (it.index == 0 || err == consensus.ErrUnknownAncestor) {
log.Debug("Future block, postponing import", "number", block.Number(), "hash", block.Hash())
if err := bc.addFutureBlock(block); err != nil {
return it.index, events, coalescedLogs, err
return it.index, err
}
block, err = it.next()
}
@@ -1551,14 +1570,14 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, []
stats.ignored += it.remaining()

// If there are any still remaining, mark as ignored
return it.index, events, coalescedLogs, err
return it.index, err

// Some other error occurred, abort
case err != nil:
bc.futureBlocks.Remove(block.Hash())
stats.ignored += len(it.chain)
bc.reportBlock(block, nil, err)
return it.index, events, coalescedLogs, err
return it.index, err
}
// No validation errors for the first block (or chain prefix skipped)
for ; block != nil && err == nil || err == ErrKnownBlock; block, err = it.next() {
@@ -1570,7 +1589,7 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, []
// If the header is a banned one, straight out abort
if BadHashes[block.Hash()] {
bc.reportBlock(block, nil, ErrBlacklistedHash)
return it.index, events, coalescedLogs, ErrBlacklistedHash
return it.index, ErrBlacklistedHash
}
// If the block is known (in the middle of the chain), it's a special case for
// Clique blocks where they can share state among each other, so importing an
@@ -1587,15 +1606,13 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, []
"root", block.Root())

if err := bc.writeKnownBlock(block); err != nil {
return it.index, nil, nil, err
return it.index, err
}
stats.processed++

// We can assume that logs are empty here, since the only way for consecutive
// Clique blocks to have the same state is if there are no transactions.
events = append(events, ChainEvent{block, block.Hash(), nil})
lastCanon = block

continue
}
// Retrieve the parent block and it's state to execute on top
@@ -1607,7 +1624,7 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, []
}
statedb, err := state.New(parent.Root, bc.stateCache)
if err != nil {
return it.index, events, coalescedLogs, err
return it.index, err
}
// If we have a followup block, run that against the current state to pre-cache
// transactions and probabilistically some of the account/storage trie nodes.
@@ -1632,7 +1649,7 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, []
if err != nil {
bc.reportBlock(block, receipts, err)
atomic.StoreUint32(&followupInterrupt, 1)
return it.index, events, coalescedLogs, err
return it.index, err
}
// Update the metrics touched during block processing
accountReadTimer.Update(statedb.AccountReads) // Account reads are complete, we can mark them
@@ -1651,7 +1668,7 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, []
if err := bc.validator.ValidateState(block, statedb, receipts, usedGas); err != nil {
bc.reportBlock(block, receipts, err)
atomic.StoreUint32(&followupInterrupt, 1)
return it.index, events, coalescedLogs, err
return it.index, err
}
proctime := time.Since(start)

@@ -1663,10 +1680,10 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, []

// Write the block to the chain and get the status.
substart = time.Now()
status, err := bc.writeBlockWithState(block, receipts, statedb)
status, err := bc.writeBlockWithState(block, receipts, logs, statedb, false)
if err != nil {
atomic.StoreUint32(&followupInterrupt, 1)
return it.index, events, coalescedLogs, err
return it.index, err
}
atomic.StoreUint32(&followupInterrupt, 1)

@@ -1684,8 +1701,6 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, []
"elapsed", common.PrettyDuration(time.Since(start)),
"root", block.Root())

coalescedLogs = append(coalescedLogs, logs...)
events = append(events, ChainEvent{block, block.Hash(), logs})
lastCanon = block

// Only count canonical blocks for GC processing time
@@ -1696,7 +1711,6 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, []
"diff", block.Difficulty(), "elapsed", common.PrettyDuration(time.Since(start)),
"txs", len(block.Transactions()), "gas", block.GasUsed(), "uncles", len(block.Uncles()),
"root", block.Root())
events = append(events, ChainSideEvent{block})

default:
// This in theory is impossible, but lets be nice to our future selves and leave
@@ -1715,24 +1729,20 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, []
// Any blocks remaining here? The only ones we care about are the future ones
if block != nil && err == consensus.ErrFutureBlock {
if err := bc.addFutureBlock(block); err != nil {
return it.index, events, coalescedLogs, err
return it.index, err
}
block, err = it.next()

for ; block != nil && err == consensus.ErrUnknownAncestor; block, err = it.next() {
if err := bc.addFutureBlock(block); err != nil {
return it.index, events, coalescedLogs, err
return it.index, err
}
stats.queued++
}
}
stats.ignored += it.remaining()

// Append a single chain head event if we've progressed the chain
if lastCanon != nil && bc.CurrentBlock().Hash() == lastCanon.Hash() {
events = append(events, ChainHeadEvent{lastCanon})
}
return it.index, events, coalescedLogs, err
return it.index, err
}
// insertSideChain is called when an import batch hits upon a pruned ancestor
@@ -1741,7 +1751,7 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, []
//
// The method writes all (header-and-body-valid) blocks to disk, then tries to
// switch over to the new chain if the TD exceeded the current chain.
func (bc *BlockChain) insertSideChain(block *types.Block, it *insertIterator) (int, []interface{}, []*types.Log, error) {
func (bc *BlockChain) insertSideChain(block *types.Block, it *insertIterator) (int, error) {
var (
externTd *big.Int
current = bc.CurrentBlock()
@@ -1777,7 +1787,7 @@ func (bc *BlockChain) insertSideChain(block *types.Block, it *insertIterator) (i
// If someone legitimately side-mines blocks, they would still be imported as usual. However,
// we cannot risk writing unverified blocks to disk when they obviously target the pruning
// mechanism.
return it.index, nil, nil, errors.New("sidechain ghost-state attack")
return it.index, errors.New("sidechain ghost-state attack")
}
}
if externTd == nil {
@@ -1788,7 +1798,7 @@ func (bc *BlockChain) insertSideChain(block *types.Block, it *insertIterator) (i
if !bc.HasBlock(block.Hash(), block.NumberU64()) {
start := time.Now()
if err := bc.writeBlockWithoutState(block, externTd); err != nil {
return it.index, nil, nil, err
return it.index, err
}
log.Debug("Injected sidechain block", "number", block.Number(), "hash", block.Hash(),
"diff", block.Difficulty(), "elapsed", common.PrettyDuration(time.Since(start)),
@@ -1805,7 +1815,7 @@ func (bc *BlockChain) insertSideChain(block *types.Block, it *insertIterator) (i
localTd := bc.GetTd(current.Hash(), current.NumberU64())
if localTd.Cmp(externTd) > 0 {
log.Info("Sidechain written to disk", "start", it.first().NumberU64(), "end", it.previous().Number, "sidetd", externTd, "localtd", localTd)
return it.index, nil, nil, err
return it.index, err
}
// Gather all the sidechain hashes (full blocks may be memory heavy)
var (
@@ -1820,7 +1830,7 @@ func (bc *BlockChain) insertSideChain(block *types.Block, it *insertIterator) (i
parent = bc.GetHeader(parent.ParentHash, parent.Number.Uint64()-1)
}
if parent == nil {
return it.index, nil, nil, errors.New("missing parent")
return it.index, errors.New("missing parent")
}
// Import all the pruned blocks to make the state available
var (
@@ -1839,15 +1849,15 @@ func (bc *BlockChain) insertSideChain(block *types.Block, it *insertIterator) (i
// memory here.
if len(blocks) >= 2048 || memory > 64*1024*1024 {
log.Info("Importing heavy sidechain segment", "blocks", len(blocks), "start", blocks[0].NumberU64(), "end", block.NumberU64())
if _, _, _, err := bc.insertChain(blocks, false); err != nil {
return 0, nil, nil, err
if _, err := bc.insertChain(blocks, false); err != nil {
return 0, err
}
blocks, memory = blocks[:0], 0

// If the chain is terminating, stop processing blocks
if atomic.LoadInt32(&bc.procInterrupt) == 1 {
log.Debug("Premature abort during blocks processing")
return 0, nil, nil, nil
return 0, nil
}
}
}
@@ -1855,7 +1865,7 @@ func (bc *BlockChain) insertSideChain(block *types.Block, it *insertIterator) (i
log.Info("Importing sidechain segment", "start", blocks[0].NumberU64(), "end", blocks[len(blocks)-1].NumberU64())
return bc.insertChain(blocks, false)
}
return 0, nil, nil, nil
return 0, nil
}
// reorg takes two blocks, an old chain and a new chain and will reconstruct the
@@ -1870,11 +1880,11 @@ func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
deletedTxs types.Transactions
addedTxs types.Transactions

deletedLogs []*types.Log
rebirthLogs []*types.Log
deletedLogs [][]*types.Log
rebirthLogs [][]*types.Log

// collectLogs collects the logs that were generated during the
// processing of the block that corresponds with the given hash.
// collectLogs collects the logs that were generated or removed during
// the processing of the block that corresponds with the given hash.
// These logs are later announced as deleted or reborn
collectLogs = func(hash common.Hash, removed bool) {
number := bc.hc.GetBlockNumber(hash)
@@ -1882,17 +1892,39 @@ func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
return
}
receipts := rawdb.ReadReceipts(bc.db, hash, *number, bc.chainConfig)

var logs []*types.Log
for _, receipt := range receipts {
for _, log := range receipt.Logs {
l := *log
if removed {
l.Removed = true
deletedLogs = append(deletedLogs, &l)
} else {
rebirthLogs = append(rebirthLogs, &l)
}
logs = append(logs, &l)
}
}
if len(logs) > 0 {
if removed {
deletedLogs = append(deletedLogs, logs)
} else {
rebirthLogs = append(rebirthLogs, logs)
}
}
}
// mergeLogs returns a merged log slice with specified sort order.
mergeLogs = func(logs [][]*types.Log, reverse bool) []*types.Log {
var ret []*types.Log
if reverse {
for i := len(logs) - 1; i >= 0; i-- {
ret = append(ret, logs[i]...)
}
} else {
for i := 0; i < len(logs); i++ {
ret = append(ret, logs[i]...)
}
}
return ret
}
)
// Reduce the longer chain to the same number as the shorter one
@@ -1988,47 +2020,20 @@ func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
// this goroutine if there are no events to fire, but realistcally that only
// ever happens if we're reorging empty blocks, which will only happen on idle
// networks where performance is not an issue either way.
//
// TODO(karalabe): Can we get rid of the goroutine somehow to guarantee correct
// event ordering?
go func() {
if len(deletedLogs) > 0 {
bc.rmLogsFeed.Send(RemovedLogsEvent{deletedLogs})
if len(deletedLogs) > 0 {
bc.rmLogsFeed.Send(RemovedLogsEvent{mergeLogs(deletedLogs, true)})
}
if len(rebirthLogs) > 0 {
bc.logsFeed.Send(mergeLogs(rebirthLogs, false))
}
if len(oldChain) > 0 {
for i := len(oldChain) - 1; i >= 0; i-- {
bc.chainSideFeed.Send(ChainSideEvent{Block: oldChain[i]})
}
if len(rebirthLogs) > 0 {
bc.logsFeed.Send(rebirthLogs)
}
if len(oldChain) > 0 {
for _, block := range oldChain {
bc.chainSideFeed.Send(ChainSideEvent{Block: block})
}
}
}()
}
return nil
}
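The new mergeLogs helper above flattens per-block log groups in a chosen direction: deleted logs are announced newest-block-first (unwind order) and reborn logs oldest-block-first (apply order). A standalone sketch of the same logic over ints for brevity (not the package's own types):

package main

import "fmt"

func mergeLogs(logs [][]int, reverse bool) []int {
	var ret []int
	if reverse {
		for i := len(logs) - 1; i >= 0; i-- {
			ret = append(ret, logs[i]...)
		}
	} else {
		for i := 0; i < len(logs); i++ {
			ret = append(ret, logs[i]...)
		}
	}
	return ret
}

func main() {
	perBlock := [][]int{{1, 2}, {3}, {4, 5}} // per-block groups, oldest block first
	fmt.Println(mergeLogs(perBlock, true))   // [4 5 3 1 2]: newest block's logs first
	fmt.Println(mergeLogs(perBlock, false))  // [1 2 3 4 5]: original apply order
}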
// PostChainEvents iterates over the events generated by a chain insertion and
// posts them into the event feed.
// TODO: Should not expose PostChainEvents. The chain events should be posted in WriteBlock.
func (bc *BlockChain) PostChainEvents(events []interface{}, logs []*types.Log) {
// post event logs for further processing
if logs != nil {
bc.logsFeed.Send(logs)
}
for _, event := range events {
switch ev := event.(type) {
case ChainEvent:
bc.chainFeed.Send(ev)

case ChainHeadEvent:
bc.chainHeadFeed.Send(ev)

case ChainSideEvent:
bc.chainSideFeed.Send(ev)
}
}
}

func (bc *BlockChain) update() {
futureTimer := time.NewTicker(5 * time.Second)
defer futureTimer.Stop()
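With PostChainEvents removed, the chain pushes events into its typed feeds directly from writeBlockWithState, insertChain and reorg; consumers keep using the existing subscription API unchanged. A minimal, illustrative listener (names of our own choosing):

package monitor

import "github.com/ethereum/go-ethereum/core"

// watchHead drains ChainHeadEvents until the subscription dies. Nothing
// changes for subscribers; only the producer side moved inside BlockChain.
func watchHead(chain *core.BlockChain) {
	heads := make(chan core.ChainHeadEvent, 16)
	sub := chain.SubscribeChainHeadEvent(heads)
	defer sub.Unsubscribe()

	for {
		select {
		case ev := <-heads:
			_ = ev.Block // react to the new canonical head here
		case <-sub.Err():
			return
		}
	}
}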
@@ -22,6 +22,7 @@ import (
"math/big"
"math/rand"
"os"
"reflect"
"sync"
"testing"
"time"
@@ -960,16 +961,20 @@ func TestLogReorgs(t *testing.T) {
}

chain, _ = GenerateChain(params.TestChainConfig, genesis, ethash.NewFaker(), db, 3, func(i int, gen *BlockGen) {})
if _, err := blockchain.InsertChain(chain); err != nil {
t.Fatalf("failed to insert forked chain: %v", err)
}

timeout := time.NewTimer(1 * time.Second)
select {
case ev := <-rmLogsCh:
done := make(chan struct{})
go func() {
ev := <-rmLogsCh
if len(ev.Logs) == 0 {
t.Error("expected logs")
}
close(done)
}()
if _, err := blockchain.InsertChain(chain); err != nil {
t.Fatalf("failed to insert forked chain: %v", err)
}
timeout := time.NewTimer(1 * time.Second)
select {
case <-done:
case <-timeout.C:
t.Fatal("Timeout. There is no RemovedLogsEvent has been sent.")
}
@@ -982,39 +987,47 @@ func TestLogRebirth(t *testing.T) {
db = rawdb.NewMemoryDatabase()

// this code generates a log
code = common.Hex2Bytes("60606040525b7f24ec1d3ff24c2f6ff210738839dbc339cd45a5294d85c79361016243157aae7b60405180905060405180910390a15b600a8060416000396000f360606040526008565b00")
gspec = &Genesis{Config: params.TestChainConfig, Alloc: GenesisAlloc{addr1: {Balance: big.NewInt(10000000000000)}}}
genesis = gspec.MustCommit(db)
signer = types.NewEIP155Signer(gspec.Config.ChainID)
newLogCh = make(chan bool)
code = common.Hex2Bytes("60606040525b7f24ec1d3ff24c2f6ff210738839dbc339cd45a5294d85c79361016243157aae7b60405180905060405180910390a15b600a8060416000396000f360606040526008565b00")
gspec = &Genesis{Config: params.TestChainConfig, Alloc: GenesisAlloc{addr1: {Balance: big.NewInt(10000000000000)}}}
genesis = gspec.MustCommit(db)
signer = types.NewEIP155Signer(gspec.Config.ChainID)
newLogCh = make(chan bool)
removeLogCh = make(chan bool)
)

// listenNewLog checks whether the received logs number is equal with expected.
listenNewLog := func(sink chan []*types.Log, expect int) {
// validateLogEvent checks whether the received logs number is equal with expected.
validateLogEvent := func(sink interface{}, result chan bool, expect int) {
chanval := reflect.ValueOf(sink)
chantyp := chanval.Type()
if chantyp.Kind() != reflect.Chan || chantyp.ChanDir()&reflect.RecvDir == 0 {
t.Fatalf("invalid channel, given type %v", chantyp)
}
cnt := 0
var recv []reflect.Value
timeout := time.After(1 * time.Second)
cases := []reflect.SelectCase{{Chan: chanval, Dir: reflect.SelectRecv}, {Chan: reflect.ValueOf(timeout), Dir: reflect.SelectRecv}}
for {
select {
case logs := <-sink:
cnt += len(logs)
case <-time.NewTimer(5 * time.Second).C:
// new logs timeout
newLogCh <- false
chose, v, _ := reflect.Select(cases)
if chose == 1 {
// Not enough event received
result <- false
return
}
cnt += 1
recv = append(recv, v)
if cnt == expect {
break
} else if cnt > expect {
// redundant logs received
newLogCh <- false
return
}
}
select {
case <-sink:
// redundant logs received
newLogCh <- false
case <-time.NewTimer(100 * time.Millisecond).C:
newLogCh <- true
done := time.After(50 * time.Millisecond)
cases = cases[:1]
cases = append(cases, reflect.SelectCase{Chan: reflect.ValueOf(done), Dir: reflect.SelectRecv})
chose, _, _ := reflect.Select(cases)
// If chose equal 0, it means receiving redundant events.
if chose == 1 {
result <- true
} else {
result <- false
}
}

@@ -1038,12 +1051,12 @@ func TestLogRebirth(t *testing.T) {
})

// Spawn a goroutine to receive log events
go listenNewLog(logsCh, 1)
go validateLogEvent(logsCh, newLogCh, 1)
if _, err := blockchain.InsertChain(chain); err != nil {
t.Fatalf("failed to insert chain: %v", err)
}
if !<-newLogCh {
t.Fatalf("failed to receive new log event")
t.Fatal("failed to receive new log event")
}

// Generate long reorg chain
@@ -1060,40 +1073,31 @@ func TestLogRebirth(t *testing.T) {
})

// Spawn a goroutine to receive log events
go listenNewLog(logsCh, 1)
go validateLogEvent(logsCh, newLogCh, 1)
go validateLogEvent(rmLogsCh, removeLogCh, 1)
if _, err := blockchain.InsertChain(forkChain); err != nil {
t.Fatalf("failed to insert forked chain: %v", err)
}
if !<-newLogCh {
t.Fatalf("failed to receive new log event")
t.Fatal("failed to receive new log event")
}
// Ensure removedLog events received
select {
case ev := <-rmLogsCh:
if len(ev.Logs) == 0 {
t.Error("expected logs")
}
case <-time.NewTimer(1 * time.Second).C:
t.Fatal("Timeout. There is no RemovedLogsEvent has been sent.")
if !<-removeLogCh {
t.Fatal("failed to receive removed log event")
}

newBlocks, _ := GenerateChain(params.TestChainConfig, chain[len(chain)-1], ethash.NewFaker(), db, 1, func(i int, gen *BlockGen) {})
go listenNewLog(logsCh, 1)
go validateLogEvent(logsCh, newLogCh, 1)
go validateLogEvent(rmLogsCh, removeLogCh, 1)
if _, err := blockchain.InsertChain(newBlocks); err != nil {
t.Fatalf("failed to insert forked chain: %v", err)
}
// Ensure removedLog events received
select {
case ev := <-rmLogsCh:
if len(ev.Logs) == 0 {
t.Error("expected logs")
}
case <-time.NewTimer(1 * time.Second).C:
t.Fatal("Timeout. There is no RemovedLogsEvent has been sent.")
}
// Rebirth logs should omit a newLogEvent
if !<-newLogCh {
t.Fatalf("failed to receive new log event")
t.Fatal("failed to receive new log event")
}
// Ensure removedLog events received
if !<-removeLogCh {
t.Fatal("failed to receive removed log event")
}
}

@@ -1145,7 +1149,6 @@ func TestSideLogRebirth(t *testing.T) {

logsCh := make(chan []*types.Log)
blockchain.SubscribeLogsEvent(logsCh)

chain, _ := GenerateChain(params.TestChainConfig, genesis, ethash.NewFaker(), db, 2, func(i int, gen *BlockGen) {
if i == 1 {
// Higher block difficulty
@@ -1289,14 +1292,17 @@ func TestCanonicalBlockRetrieval(t *testing.T) {
continue // busy wait for canonical hash to be written
}
if ch != block.Hash() {
t.Fatalf("unknown canonical hash, want %s, got %s", block.Hash().Hex(), ch.Hex())
t.Errorf("unknown canonical hash, want %s, got %s", block.Hash().Hex(), ch.Hex())
return
}
fb := rawdb.ReadBlock(blockchain.db, ch, block.NumberU64())
if fb == nil {
t.Fatalf("unable to retrieve block %d for canonical hash: %s", block.NumberU64(), ch.Hex())
t.Errorf("unable to retrieve block %d for canonical hash: %s", block.NumberU64(), ch.Hex())
return
}
if fb.Hash() != block.Hash() {
t.Fatalf("invalid block hash for block %d, want %s, got %s", block.NumberU64(), block.Hash().Hex(), fb.Hash().Hex())
t.Errorf("invalid block hash for block %d, want %s, got %s", block.NumberU64(), block.Hash().Hex(), fb.Hash().Hex())
return
}
return
}
@@ -1318,7 +1324,7 @@ func TestEIP155Transition(t *testing.T) {
funds = big.NewInt(1000000000)
deleteAddr = common.Address{1}
gspec = &Genesis{
Config: &params.ChainConfig{ChainID: big.NewInt(1), EIP155Block: big.NewInt(2), HomesteadBlock: new(big.Int)},
Config: &params.ChainConfig{ChainID: big.NewInt(1), EIP150Block: big.NewInt(0), EIP155Block: big.NewInt(2), HomesteadBlock: new(big.Int)},
Alloc: GenesisAlloc{address: {Balance: funds}, deleteAddr: {Balance: new(big.Int)}},
}
genesis = gspec.MustCommit(db)
@@ -1389,7 +1395,7 @@ func TestEIP155Transition(t *testing.T) {
}

// generate an invalid chain id transaction
config := &params.ChainConfig{ChainID: big.NewInt(2), EIP155Block: big.NewInt(2), HomesteadBlock: new(big.Int)}
config := &params.ChainConfig{ChainID: big.NewInt(2), EIP150Block: big.NewInt(0), EIP155Block: big.NewInt(2), HomesteadBlock: new(big.Int)}
blocks, _ = GenerateChain(config, blocks[len(blocks)-1], ethash.NewFaker(), db, 4, func(i int, block *BlockGen) {
var (
tx *types.Transaction
@@ -1425,6 +1431,7 @@ func TestEIP161AccountRemoval(t *testing.T) {
ChainID: big.NewInt(1),
HomesteadBlock: new(big.Int),
EIP155Block: new(big.Int),
EIP150Block: new(big.Int),
EIP158Block: big.NewInt(2),
},
Alloc: GenesisAlloc{address: {Balance: funds}},
@@ -1915,13 +1922,6 @@ func testInsertKnownChainData(t *testing.T, typ string) {
inserter func(blocks []*types.Block, receipts []types.Receipts) error
asserter func(t *testing.T, block *types.Block)
)
headers, headers2 := make([]*types.Header, 0, len(blocks)), make([]*types.Header, 0, len(blocks2))
for _, block := range blocks {
headers = append(headers, block.Header())
}
for _, block := range blocks2 {
headers2 = append(headers2, block.Header())
}
if typ == "headers" {
inserter = func(blocks []*types.Block, receipts []types.Receipts) error {
headers := make([]*types.Header, 0, len(blocks))
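The rewritten validateLogEvent above relies on reflect.Select so one helper can drain channels of different element types (log batches, removed-log events) while racing a timeout. A standalone sketch of that pattern with our own names:

package main

import (
	"fmt"
	"reflect"
	"time"
)

// expectEvents receives from any readable channel until 'want' events have
// arrived or the timeout fires, mirroring the test helper's structure.
func expectEvents(sink interface{}, want int, wait time.Duration) bool {
	cases := []reflect.SelectCase{
		{Chan: reflect.ValueOf(sink), Dir: reflect.SelectRecv},
		{Chan: reflect.ValueOf(time.After(wait)), Dir: reflect.SelectRecv},
	}
	for got := 0; got < want; got++ {
		if chose, _, _ := reflect.Select(cases); chose == 1 {
			return false // timed out before enough events arrived
		}
	}
	return true
}

func main() {
	ch := make(chan string, 1)
	ch <- "event"
	fmt.Println(expectEvents(ch, 1, 100*time.Millisecond)) // true
	fmt.Println(expectEvents(ch, 1, 100*time.Millisecond)) // false: channel stays empty
}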
@@ -283,8 +283,7 @@ func makeBlockChain(parent *types.Block, n int, engine consensus.Engine, db ethd
}

type fakeChainReader struct {
config *params.ChainConfig
genesis *types.Block
config *params.ChainConfig
}

// Config returns the chain configuration.
@@ -50,6 +50,9 @@ type ID struct {
Next uint64 // Block number of the next upcoming fork, or 0 if no forks are known
}

// Filter is a fork id filter to validate a remotely advertised ID.
type Filter func(id ID) error

// NewID calculates the Ethereum fork ID from the chain config and head.
func NewID(chain *core.BlockChain) ID {
return newID(
@@ -80,9 +83,9 @@ func newID(config *params.ChainConfig, genesis common.Hash, head uint64) ID {
return ID{Hash: checksumToBytes(hash), Next: next}
}

// NewFilter creates an filter that returns if a fork ID should be rejected or not
// NewFilter creates a filter that returns if a fork ID should be rejected or not
// based on the local chain's status.
func NewFilter(chain *core.BlockChain) func(id ID) error {
func NewFilter(chain *core.BlockChain) Filter {
return newFilter(
chain.Config(),
chain.Genesis().Hash(),
@@ -92,10 +95,16 @@ func NewFilter(chain *core.BlockChain) func(id ID) error {
)
}

// NewStaticFilter creates a filter at block zero.
func NewStaticFilter(config *params.ChainConfig, genesis common.Hash) Filter {
head := func() uint64 { return 0 }
return newFilter(config, genesis, head)
}

// newFilter is the internal version of NewFilter, taking closures as its arguments
// instead of a chain. The reason is to allow testing it without having to simulate
// an entire blockchain.
func newFilter(config *params.ChainConfig, genesis common.Hash, headfn func() uint64) func(id ID) error {
func newFilter(config *params.ChainConfig, genesis common.Hash, headfn func() uint64) Filter {
// Calculate the all the valid fork hash and fork next combos
var (
forks = gatherForks(config)
@@ -114,10 +123,13 @@ func newFilter(config *params.ChainConfig, genesis common.Hash, headfn func() ui
// Create a validator that will filter out incompatible chains
return func(id ID) error {
// Run the fork checksum validation ruleset:
// 1. If local and remote FORK_CSUM matches, connect.
// 1. If local and remote FORK_CSUM matches, compare local head to FORK_NEXT.
// The two nodes are in the same fork state currently. They might know
// of differing future forks, but that's not relevant until the fork
// triggers (might be postponed, nodes might be updated to match).
// 1a. A remotely announced but remotely not passed block is already passed
// locally, disconnect, since the chains are incompatible.
// 1b. No remotely announced fork; or not yet passed locally, connect.
// 2. If the remote FORK_CSUM is a subset of the local past forks and the
// remote FORK_NEXT matches with the locally following fork block number,
// connect.
@@ -139,7 +151,12 @@ func newFilter(config *params.ChainConfig, genesis common.Hash, headfn func() ui
// Found the first unpassed fork block, check if our current state matches
// the remote checksum (rule #1).
if sums[i] == id.Hash {
// Yay, fork checksum matched, ignore any upcoming fork
// Fork checksum matched, check if a remote future fork block already passed
// locally without the local node being aware of it (rule #1a).
if id.Next > 0 && head >= id.Next {
return ErrLocalIncompatibleOrStale
}
// Haven't passed locally a remote-only fork, accept the connection (rule #1b).
return nil
}
// The local and remote nodes are in different forks currently, check if the
@@ -169,13 +186,6 @@ func newFilter(config *params.ChainConfig, genesis common.Hash, headfn func() ui
}
}

// checksum calculates the IEEE CRC32 checksum of a block number.
func checksum(fork uint64) uint32 {
var blob [8]byte
binary.BigEndian.PutUint64(blob[:], fork)
return crc32.ChecksumIEEE(blob[:])
}

// checksumUpdate calculates the next IEEE CRC32 checksum based on the previous
// one and a fork block number (equivalent to CRC32(original-blob || fork)).
func checksumUpdate(hash uint32, fork uint64) uint32 {
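The checksums these rules compare are rolling CRC32 values: start from the checksum of the genesis hash, then fold in each passed fork block number. A standalone sketch under those assumptions (the helper body below is one way to satisfy the "CRC32(original-blob || fork)" contract stated in the doc comment; the fork numbers are just examples):

package main

import (
	"encoding/binary"
	"fmt"
	"hash/crc32"
)

func checksumUpdate(hash uint32, fork uint64) uint32 {
	var blob [8]byte
	binary.BigEndian.PutUint64(blob[:], fork)
	return crc32.Update(hash, crc32.IEEETable, blob[:])
}

func main() {
	genesis := []byte("genesis-hash-bytes") // stand-in for the real 32-byte genesis hash
	sum := crc32.ChecksumIEEE(genesis)
	for _, fork := range []uint64{1150000, 1920000, 2463000} {
		sum = checksumUpdate(sum, fork)
		fmt.Printf("after fork %d: %08x\n", fork, sum)
	}
}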
@@ -55,8 +55,12 @@ func TestCreation(t *testing.T) {
{4369999, ID{Hash: checksumToBytes(0x3edd5b10), Next: 4370000}}, // Last Spurious block
{4370000, ID{Hash: checksumToBytes(0xa00bc324), Next: 7280000}}, // First Byzantium block
{7279999, ID{Hash: checksumToBytes(0xa00bc324), Next: 7280000}}, // Last Byzantium block
{7280000, ID{Hash: checksumToBytes(0x668db0af), Next: 0}}, // First and last Constantinople, first Petersburg block
{7987396, ID{Hash: checksumToBytes(0x668db0af), Next: 0}}, // Today Petersburg block
{7280000, ID{Hash: checksumToBytes(0x668db0af), Next: 9069000}}, // First and last Constantinople, first Petersburg block
{9068999, ID{Hash: checksumToBytes(0x668db0af), Next: 9069000}}, // Last Petersburg block
{9069000, ID{Hash: checksumToBytes(0x879d6e30), Next: 9200000}}, // First Istanbul and first Muir Glacier block
{9199999, ID{Hash: checksumToBytes(0x879d6e30), Next: 9200000}}, // Last Istanbul and first Muir Glacier block
{9200000, ID{Hash: checksumToBytes(0xe029e991), Next: 0}}, // First Muir Glacier block
{10000000, ID{Hash: checksumToBytes(0xe029e991), Next: 0}}, // Future Muir Glacier block
},
},
// Ropsten test cases
@@ -74,8 +78,10 @@ func TestCreation(t *testing.T) {
{4939393, ID{Hash: checksumToBytes(0x97b544f3), Next: 4939394}}, // Last Constantinople block
{4939394, ID{Hash: checksumToBytes(0xd6e2149b), Next: 6485846}}, // First Petersburg block
{6485845, ID{Hash: checksumToBytes(0xd6e2149b), Next: 6485846}}, // Last Petersburg block
{6485846, ID{Hash: checksumToBytes(0x4bc66396), Next: 0}}, // First Istanbul block
{7500000, ID{Hash: checksumToBytes(0x4bc66396), Next: 0}}, // Future Istanbul block
{6485846, ID{Hash: checksumToBytes(0x4bc66396), Next: 7117117}}, // First Istanbul block
{7117116, ID{Hash: checksumToBytes(0x4bc66396), Next: 7117117}}, // Last Istanbul block
{7117117, ID{Hash: checksumToBytes(0x6727ef90), Next: 0}}, // First Muir Glacier block
{7500000, ID{Hash: checksumToBytes(0x6727ef90), Next: 0}}, // Future
},
},
// Rinkeby test cases
@@ -151,7 +157,7 @@ func TestValidation(t *testing.T) {

// Local is mainnet Petersburg, remote announces Byzantium + knowledge about Petersburg. Remote
// is simply out of sync, accept.
{7987396, ID{Hash: checksumToBytes(0x668db0af), Next: 7280000}, nil},
{7987396, ID{Hash: checksumToBytes(0xa00bc324), Next: 7280000}, nil},

// Local is mainnet Petersburg, remote announces Spurious + knowledge about Byzantium. Remote
// is definitely out of sync. It may or may not need the Petersburg update, we don't know yet.
@@ -178,6 +184,16 @@ func TestValidation(t *testing.T) {

// Local is mainnet Petersburg, remote is Rinkeby Petersburg.
{7987396, ID{Hash: checksumToBytes(0xafec6b27), Next: 0}, ErrLocalIncompatibleOrStale},

// Local is mainnet Muir Glacier, far in the future. Remote announces Gopherium (non existing fork)
// at some future block 88888888, for itself, but past block for local. Local is incompatible.
//
// This case detects non-upgraded nodes with majority hash power (typical Ropsten mess).
{88888888, ID{Hash: checksumToBytes(0xe029e991), Next: 88888888}, ErrLocalIncompatibleOrStale},

// Local is mainnet Byzantium. Remote is also in Byzantium, but announces Gopherium (non existing
// fork) at block 7279999, before Petersburg. Local is incompatible.
{7279999, ID{Hash: checksumToBytes(0xa00bc324), Next: 7279999}, ErrLocalIncompatibleOrStale},
}
for i, tt := range tests {
filter := newFilter(params.MainnetChainConfig, params.MainnetGenesisHash, func() uint64 { return tt.head })
@@ -152,10 +152,10 @@ func (e *GenesisMismatchError) Error() string {
//
// The returned chain configuration is never nil.
func SetupGenesisBlock(db ethdb.Database, genesis *Genesis) (*params.ChainConfig, common.Hash, error) {
return SetupGenesisBlockWithOverride(db, genesis, nil)
return SetupGenesisBlockWithOverride(db, genesis, nil, nil)
}

func SetupGenesisBlockWithOverride(db ethdb.Database, genesis *Genesis, overrideIstanbul *big.Int) (*params.ChainConfig, common.Hash, error) {
func SetupGenesisBlockWithOverride(db ethdb.Database, genesis *Genesis, overrideIstanbul, overrideMuirGlacier *big.Int) (*params.ChainConfig, common.Hash, error) {
if genesis != nil && genesis.Config == nil {
return params.AllEthashProtocolChanges, common.Hash{}, errGenesisNoConfig
}
@@ -207,6 +207,12 @@ func SetupGenesisBlockWithOverride(db ethdb.Database, genesis *Genesis, override
if overrideIstanbul != nil {
newcfg.IstanbulBlock = overrideIstanbul
}
if overrideMuirGlacier != nil {
newcfg.MuirGlacierBlock = overrideMuirGlacier
}
if err := newcfg.CheckConfigForkOrder(); err != nil {
return newcfg, common.Hash{}, err
}
storedcfg := rawdb.ReadChainConfig(db, stored)
if storedcfg == nil {
log.Warn("Found genesis block without chain config")
@@ -295,6 +301,13 @@ func (g *Genesis) Commit(db ethdb.Database) (*types.Block, error) {
if block.Number().Sign() != 0 {
return nil, fmt.Errorf("can't commit genesis block with number > 0")
}
config := g.Config
if config == nil {
config = params.AllEthashProtocolChanges
}
if err := config.CheckConfigForkOrder(); err != nil {
return nil, err
}
rawdb.WriteTd(db, block.Hash(), block.NumberU64(), g.Difficulty)
rawdb.WriteBlock(db, block)
rawdb.WriteReceipts(db, block.Hash(), block.NumberU64(), nil)
@@ -302,11 +315,6 @@ func (g *Genesis) Commit(db ethdb.Database) (*types.Block, error) {
rawdb.WriteHeadBlockHash(db, block.Hash())
rawdb.WriteHeadFastBlockHash(db, block.Hash())
rawdb.WriteHeadHeaderHash(db, block.Hash())

config := g.Config
if config == nil {
config = params.AllEthashProtocolChanges
}
rawdb.WriteChainConfig(db, block.Hash(), config)
return block, nil
}
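A hedged usage sketch of the extended override parameter (the override value is only an example; passing a nil genesis falls back to the stored or default chain configuration):

package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/core/rawdb"
)

func main() {
	db := rawdb.NewMemoryDatabase()
	// Force the Muir Glacier transition onto a chosen block, leaving the
	// Istanbul override unset; CheckConfigForkOrder rejects orderings that
	// would place the override before earlier forks.
	cfg, hash, err := core.SetupGenesisBlockWithOverride(db, nil, nil, big.NewInt(9200000))
	if err != nil {
		panic(err)
	}
	fmt.Println("genesis:", hash.Hex(), "muir glacier block:", cfg.MuirGlacierBlock)
}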
@@ -1,87 +0,0 @@
// Copyright 2014 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package core

import (
"container/list"

"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/event"
)

// Implement our EthTest Manager
type TestManager struct {
// stateManager *StateManager
eventMux *event.TypeMux

db ethdb.Database
txPool *TxPool
blockChain *BlockChain
Blocks []*types.Block
}

func (tm *TestManager) IsListening() bool {
return false
}

func (tm *TestManager) IsMining() bool {
return false
}

func (tm *TestManager) PeerCount() int {
return 0
}

func (tm *TestManager) Peers() *list.List {
return list.New()
}

func (tm *TestManager) BlockChain() *BlockChain {
return tm.blockChain
}

func (tm *TestManager) TxPool() *TxPool {
return tm.txPool
}

// func (tm *TestManager) StateManager() *StateManager {
// return tm.stateManager
// }

func (tm *TestManager) EventMux() *event.TypeMux {
return tm.eventMux
}

// func (tm *TestManager) KeyManager() *crypto.KeyManager {
// return nil
// }

func (tm *TestManager) Db() ethdb.Database {
return tm.db
}

func NewTestManager() *TestManager {
testManager := &TestManager{}
testManager.eventMux = new(event.TypeMux)
testManager.db = rawdb.NewMemoryDatabase()
// testManager.txPool = NewTxPool(testManager)
// testManager.blockChain = NewBlockChain(testManager)
// testManager.stateManager = NewStateManager(testManager)
return testManager
}
@@ -23,6 +23,7 @@ import (

"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params"
@@ -173,18 +174,27 @@ func WriteFastTrieProgress(db ethdb.KeyValueWriter, count uint64) {

// ReadHeaderRLP retrieves a block header in its raw RLP database encoding.
func ReadHeaderRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValue {
// First try to look up the data in ancient database. Extra hash
// comparison is necessary since ancient database only maintains
// the canonical data.
data, _ := db.Ancient(freezerHeaderTable, number)
if len(data) == 0 {
data, _ = db.Get(headerKey(number, hash))
// In the background freezer is moving data from leveldb to flatten files.
// So during the first check for ancient db, the data is not yet in there,
// but when we reach into leveldb, the data was already moved. That would
// result in a not found error.
if len(data) == 0 {
data, _ = db.Ancient(freezerHeaderTable, number)
}
if len(data) > 0 && crypto.Keccak256Hash(data) == hash {
return data
}
return data
// Then try to look up the data in leveldb.
data, _ = db.Get(headerKey(number, hash))
if len(data) > 0 {
return data
}
// In the background freezer is moving data from leveldb to flatten files.
// So during the first check for ancient db, the data is not yet in there,
// but when we reach into leveldb, the data was already moved. That would
// result in a not found error.
data, _ = db.Ancient(freezerHeaderTable, number)
if len(data) > 0 && crypto.Keccak256Hash(data) == hash {
return data
}
return nil // Can't find the data anywhere.
}

// HasHeader verifies the existence of a block header corresponding to the hash.
@@ -251,18 +261,33 @@ func deleteHeaderWithoutNumber(db ethdb.KeyValueWriter, hash common.Hash, number

// ReadBodyRLP retrieves the block body (transactions and uncles) in RLP encoding.
func ReadBodyRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValue {
// First try to look up the data in ancient database. Extra hash
// comparison is necessary since ancient database only maintains
// the canonical data.
data, _ := db.Ancient(freezerBodiesTable, number)
if len(data) == 0 {
data, _ = db.Get(blockBodyKey(number, hash))
// In the background freezer is moving data from leveldb to flatten files.
// So during the first check for ancient db, the data is not yet in there,
// but when we reach into leveldb, the data was already moved. That would
// result in a not found error.
if len(data) == 0 {
data, _ = db.Ancient(freezerBodiesTable, number)
if len(data) > 0 {
h, _ := db.Ancient(freezerHashTable, number)
if common.BytesToHash(h) == hash {
return data
}
}
return data
// Then try to look up the data in leveldb.
data, _ = db.Get(blockBodyKey(number, hash))
if len(data) > 0 {
return data
}
// In the background freezer is moving data from leveldb to flatten files.
// So during the first check for ancient db, the data is not yet in there,
// but when we reach into leveldb, the data was already moved. That would
// result in a not found error.
data, _ = db.Ancient(freezerBodiesTable, number)
if len(data) > 0 {
h, _ := db.Ancient(freezerHashTable, number)
if common.BytesToHash(h) == hash {
return data
}
}
return nil // Can't find the data anywhere.
}

// WriteBodyRLP stores an RLP encoded block body into the database.
@@ -315,18 +340,33 @@ func DeleteBody(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {

// ReadTdRLP retrieves a block's total difficulty corresponding to the hash in RLP encoding.
func ReadTdRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValue {
// First try to look up the data in ancient database. Extra hash
// comparison is necessary since ancient database only maintains
// the canonical data.
data, _ := db.Ancient(freezerDifficultyTable, number)
if len(data) == 0 {
data, _ = db.Get(headerTDKey(number, hash))
// In the background freezer is moving data from leveldb to flatten files.
// So during the first check for ancient db, the data is not yet in there,
// but when we reach into leveldb, the data was already moved. That would
// result in a not found error.
if len(data) == 0 {
data, _ = db.Ancient(freezerDifficultyTable, number)
if len(data) > 0 {
h, _ := db.Ancient(freezerHashTable, number)
if common.BytesToHash(h) == hash {
return data
}
}
return data
// Then try to look up the data in leveldb.
data, _ = db.Get(headerTDKey(number, hash))
if len(data) > 0 {
return data
}
// In the background freezer is moving data from leveldb to flatten files.
// So during the first check for ancient db, the data is not yet in there,
// but when we reach into leveldb, the data was already moved. That would
// result in a not found error.
data, _ = db.Ancient(freezerDifficultyTable, number)
if len(data) > 0 {
h, _ := db.Ancient(freezerHashTable, number)
if common.BytesToHash(h) == hash {
return data
}
}
return nil // Can't find the data anywhere.
}

// ReadTd retrieves a block's total difficulty corresponding to the hash.
@@ -375,18 +415,33 @@ func HasReceipts(db ethdb.Reader, hash common.Hash, number uint64) bool {

// ReadReceiptsRLP retrieves all the transaction receipts belonging to a block in RLP encoding.
func ReadReceiptsRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValue {
// First try to look up the data in ancient database. Extra hash
// comparison is necessary since ancient database only maintains
// the canonical data.
data, _ := db.Ancient(freezerReceiptTable, number)
if len(data) == 0 {
data, _ = db.Get(blockReceiptsKey(number, hash))
// In the background freezer is moving data from leveldb to flatten files.
// So during the first check for ancient db, the data is not yet in there,
// but when we reach into leveldb, the data was already moved. That would
// result in a not found error.
if len(data) == 0 {
data, _ = db.Ancient(freezerReceiptTable, number)
if len(data) > 0 {
h, _ := db.Ancient(freezerHashTable, number)
if common.BytesToHash(h) == hash {
return data
}
}
return data
// Then try to look up the data in leveldb.
data, _ = db.Get(blockReceiptsKey(number, hash))
if len(data) > 0 {
return data
}
// In the background freezer is moving data from leveldb to flatten files.
// So during the first check for ancient db, the data is not yet in there,
// but when we reach into leveldb, the data was already moved. That would
// result in a not found error.
data, _ = db.Ancient(freezerReceiptTable, number)
if len(data) > 0 {
h, _ := db.Ancient(freezerHashTable, number)
if common.BytesToHash(h) == hash {
return data
}
}
return nil // Can't find the data anywhere.
}

// ReadRawReceipts retrieves all the transaction receipts belonging to a block.
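All four accessors above now share the same race-tolerant shape: probe the freezer, fall back to the key-value store, then probe the freezer once more in case background freezing migrated the data between the two reads, verifying every freezer hit against the requested hash. A condensed, standalone sketch of that shape with our own names (the verify callback stands in for the keccak or hash-table comparison):

package main

// raceTolerantRead returns the first verified copy of the data, tolerating
// a concurrent migration from the key-value store into the freezer.
func raceTolerantRead(ancient, keyvalue func() []byte, verify func([]byte) bool) []byte {
	if data := ancient(); len(data) > 0 && verify(data) {
		return data
	}
	if data := keyvalue(); len(data) > 0 {
		return data
	}
	if data := ancient(); len(data) > 0 && verify(data) {
		return data
	}
	return nil // can't find the data anywhere
}

func main() {
	stored := []byte("rlp-blob")
	data := raceTolerantRead(
		func() []byte { return nil },    // freezer: not yet migrated
		func() []byte { return stored }, // key-value store still has it
		func(b []byte) bool { return true },
	)
	_ = data
}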
@@ -20,7 +20,9 @@ import (
"bytes"
"encoding/hex"
"fmt"
"io/ioutil"
"math/big"
"os"
"testing"

"github.com/ethereum/go-ethereum/common"
@@ -358,3 +360,67 @@ func checkReceiptsRLP(have, want types.Receipts) error {
}
return nil
}

func TestAncientStorage(t *testing.T) {
// Freezer style fast import the chain.
frdir, err := ioutil.TempDir("", "")
if err != nil {
t.Fatalf("failed to create temp freezer dir: %v", err)
}
defer os.Remove(frdir)

db, err := NewDatabaseWithFreezer(NewMemoryDatabase(), frdir, "")
if err != nil {
t.Fatalf("failed to create database with ancient backend")
}
// Create a test block
block := types.NewBlockWithHeader(&types.Header{
Number: big.NewInt(0),
Extra: []byte("test block"),
UncleHash: types.EmptyUncleHash,
TxHash: types.EmptyRootHash,
ReceiptHash: types.EmptyRootHash,
})
// Ensure nothing non-existent will be read
hash, number := block.Hash(), block.NumberU64()
if blob := ReadHeaderRLP(db, hash, number); len(blob) > 0 {
t.Fatalf("non existent header returned")
}
if blob := ReadBodyRLP(db, hash, number); len(blob) > 0 {
t.Fatalf("non existent body returned")
}
if blob := ReadReceiptsRLP(db, hash, number); len(blob) > 0 {
t.Fatalf("non existent receipts returned")
}
if blob := ReadTdRLP(db, hash, number); len(blob) > 0 {
t.Fatalf("non existent td returned")
}
// Write and verify the header in the database
WriteAncientBlock(db, block, nil, big.NewInt(100))
if blob := ReadHeaderRLP(db, hash, number); len(blob) == 0 {
t.Fatalf("no header returned")
}
if blob := ReadBodyRLP(db, hash, number); len(blob) == 0 {
t.Fatalf("no body returned")
}
if blob := ReadReceiptsRLP(db, hash, number); len(blob) == 0 {
t.Fatalf("no receipts returned")
}
if blob := ReadTdRLP(db, hash, number); len(blob) == 0 {
t.Fatalf("no td returned")
}
// Use a fake hash for data retrieval, nothing should be returned.
fakeHash := common.BytesToHash([]byte{0x01, 0x02, 0x03})
if blob := ReadHeaderRLP(db, fakeHash, number); len(blob) != 0 {
t.Fatalf("invalid header returned")
}
if blob := ReadBodyRLP(db, fakeHash, number); len(blob) != 0 {
t.Fatalf("invalid body returned")
}
if blob := ReadReceiptsRLP(db, fakeHash, number); len(blob) != 0 {
t.Fatalf("invalid receipts returned")
}
if blob := ReadTdRLP(db, fakeHash, number); len(blob) != 0 {
t.Fatalf("invalid td returned")
}
}
@@ -150,11 +150,10 @@ func NewDatabaseWithFreezer(db ethdb.KeyValueStore, freezer string, namespace string) (ethdb.Database, error) {
            }
            // Database contains only older data than the freezer, this happens if the
            // state was wiped and reinited from an existing freezer.
        } else {
            // Key-value store continues where the freezer left off, all is fine. We might
            // have duplicate blocks (crash after freezer write but before key-value store
            // deletion, but that's fine).
        }
        // Otherwise, key-value store continues where the freezer left off, all is fine.
        // We might have duplicate blocks (crash after freezer write but before key-value
        // store deletion, but that's fine).
    } else {
        // If the freezer is empty, ensure nothing was moved yet from the key-value
        // store, otherwise we'll end up missing data. We check block #1 to decide
@@ -167,9 +166,9 @@ func NewDatabaseWithFreezer(db ethdb.KeyValueStore, freezer string, namespace string) (ethdb.Database, error) {
                return nil, errors.New("ancient chain segments already extracted, please set --datadir.ancient to the correct path")
            }
            // Block #1 is still in the database, we're allowed to init a new freezer
        } else {
            // The head header is still the genesis, we're allowed to init a new freezer
        }
        // Otherwise, the head header is still the genesis, we're allowed to init a new
        // freezer.
    }
}
// Freezer is consistent with the key-value database, permit combining the two

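The comments above encode a small decision procedure for reconciling an existing freezer with the key-value store at startup. A condensed sketch of that logic under assumed names (checkFreezerConsistency and its parameters are hypothetical; the real checks live inline in NewDatabaseWithFreezer):

    package rawdb

    import "errors"

    // checkFreezerConsistency mirrors the startup rules: a non-empty freezer
    // tolerates overlap with the key-value store, while an empty freezer must
    // not follow a partial migration, or the blocks between the genesis and
    // the current head would be lost.
    func checkFreezerConsistency(frozen uint64, headIsGenesis, block1InKV bool) error {
        if frozen > 0 {
            // Key-value store continues where the freezer left off; duplicate
            // blocks from a crash between freezer write and key-value deletion
            // are harmless.
            return nil
        }
        if !headIsGenesis && !block1InKV {
            // Blocks were already extracted from the key-value store, but the
            // freezer at this path does not contain them.
            return errors.New("ancient chain segments already extracted")
        }
        return nil // an empty freezer next to a fresh database is fine
    }
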
@@ -55,10 +55,10 @@ func InitDatabaseFromFreezer(db ethdb.Database) error {
            if n >= frozen {
                return
            }
            // Retrieve the block from the freezer (no need for the hash, we pull by
            // number from the freezer). If successful, pre-cache the block hash and
            // the individual transaction hashes for storing into the database.
            block := ReadBlock(db, common.Hash{}, n)
            // Retrieve the block from the freezer. If successful, pre-cache
            // the block hash and the individual transaction hashes for storing
            // into the database.
            block := ReadBlock(db, ReadCanonicalHash(db, n), n)
            if block != nil {
                block.Hash()
                for _, tx := range block.Transactions() {

@@ -41,14 +41,6 @@ func getChunk(size int, b int) []byte {
    return data
}

func print(t *testing.T, f *freezerTable, item uint64) {
    a, err := f.Retrieve(item)
    if err != nil {
        t.Fatal(err)
    }
    t.Logf("db[%d] = %x\n", item, a)
}

// TestFreezerBasics tests initializing a freezerTable from scratch, writing to the table,
// and reading it back.
func TestFreezerBasics(t *testing.T) {

@@ -47,7 +47,9 @@ type Dump struct {
}

// iterativeDump is a 'collector'-implementation which dumps output line-by-line iteratively
type iterativeDump json.Encoder
type iterativeDump struct {
    *json.Encoder
}

// Collector interface which the state trie calls during iteration
type collector interface {
@@ -55,15 +57,15 @@ type collector interface {
    onAccount(common.Address, DumpAccount)
}

func (self *Dump) onRoot(root common.Hash) {
    self.Root = fmt.Sprintf("%x", root)
func (d *Dump) onRoot(root common.Hash) {
    d.Root = fmt.Sprintf("%x", root)
}

func (self *Dump) onAccount(addr common.Address, account DumpAccount) {
    self.Accounts[addr] = account
func (d *Dump) onAccount(addr common.Address, account DumpAccount) {
    d.Accounts[addr] = account
}

func (self iterativeDump) onAccount(addr common.Address, account DumpAccount) {
func (d iterativeDump) onAccount(addr common.Address, account DumpAccount) {
    dumpAccount := &DumpAccount{
        Balance: account.Balance,
        Nonce:   account.Nonce,
@@ -77,25 +79,26 @@ func (self iterativeDump) onAccount(addr common.Address, account DumpAccount) {
    if addr != (common.Address{}) {
        dumpAccount.Address = &addr
    }
    (*json.Encoder)(&self).Encode(dumpAccount)
    d.Encode(dumpAccount)
}
func (self iterativeDump) onRoot(root common.Hash) {
    (*json.Encoder)(&self).Encode(struct {

func (d iterativeDump) onRoot(root common.Hash) {
    d.Encode(struct {
        Root common.Hash `json:"root"`
    }{root})
}

func (self *StateDB) dump(c collector, excludeCode, excludeStorage, excludeMissingPreimages bool) {
func (s *StateDB) dump(c collector, excludeCode, excludeStorage, excludeMissingPreimages bool) {
    emptyAddress := (common.Address{})
    missingPreimages := 0
    c.onRoot(self.trie.Hash())
    it := trie.NewIterator(self.trie.NodeIterator(nil))
    c.onRoot(s.trie.Hash())
    it := trie.NewIterator(s.trie.NodeIterator(nil))
    for it.Next() {
        var data Account
        if err := rlp.DecodeBytes(it.Value, &data); err != nil {
            panic(err)
        }
        addr := common.BytesToAddress(self.trie.GetKey(it.Key))
        addr := common.BytesToAddress(s.trie.GetKey(it.Key))
        obj := newObject(nil, addr, data)
        account := DumpAccount{
            Balance: data.Balance.String(),
@@ -112,18 +115,18 @@ func (self *StateDB) dump(c collector, excludeCode, excludeStorage, excludeMissingPreimages bool) {
            account.SecureKey = it.Key
        }
        if !excludeCode {
            account.Code = common.Bytes2Hex(obj.Code(self.db))
            account.Code = common.Bytes2Hex(obj.Code(s.db))
        }
        if !excludeStorage {
            account.Storage = make(map[common.Hash]string)
            storageIt := trie.NewIterator(obj.getTrie(self.db).NodeIterator(nil))
            storageIt := trie.NewIterator(obj.getTrie(s.db).NodeIterator(nil))
            for storageIt.Next() {
                _, content, _, err := rlp.Split(storageIt.Value)
                if err != nil {
                    log.Error("Failed to decode the value returned by iterator", "error", err)
                    continue
                }
                account.Storage[common.BytesToHash(self.trie.GetKey(storageIt.Key))] = common.Bytes2Hex(content)
                account.Storage[common.BytesToHash(s.trie.GetKey(storageIt.Key))] = common.Bytes2Hex(content)
            }
        }
        c.onAccount(addr, account)
@@ -134,17 +137,17 @@ func (self *StateDB) dump(c collector, excludeCode, excludeStorage, excludeMissingPreimages bool) {
}

// RawDump returns the entire state as a single large object
func (self *StateDB) RawDump(excludeCode, excludeStorage, excludeMissingPreimages bool) Dump {
func (s *StateDB) RawDump(excludeCode, excludeStorage, excludeMissingPreimages bool) Dump {
    dump := &Dump{
        Accounts: make(map[common.Address]DumpAccount),
    }
    self.dump(dump, excludeCode, excludeStorage, excludeMissingPreimages)
    s.dump(dump, excludeCode, excludeStorage, excludeMissingPreimages)
    return *dump
}

// Dump returns a JSON string representing the entire state as a single json-object
func (self *StateDB) Dump(excludeCode, excludeStorage, excludeMissingPreimages bool) []byte {
    dump := self.RawDump(excludeCode, excludeStorage, excludeMissingPreimages)
func (s *StateDB) Dump(excludeCode, excludeStorage, excludeMissingPreimages bool) []byte {
    dump := s.RawDump(excludeCode, excludeStorage, excludeMissingPreimages)
    json, err := json.MarshalIndent(dump, "", " ")
    if err != nil {
        fmt.Println("dump err", err)
@@ -153,6 +156,6 @@ func (self *StateDB) Dump(excludeCode, excludeStorage, excludeMissingPreimages bool) []byte {
}

// IterativeDump dumps out accounts as json-objects, delimited by linebreaks on stdout
func (self *StateDB) IterativeDump(excludeCode, excludeStorage, excludeMissingPreimages bool, output *json.Encoder) {
    self.dump(iterativeDump(*output), excludeCode, excludeStorage, excludeMissingPreimages)
func (s *StateDB) IterativeDump(excludeCode, excludeStorage, excludeMissingPreimages bool, output *json.Encoder) {
    s.dump(iterativeDump{output}, excludeCode, excludeStorage, excludeMissingPreimages)
}

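The iterativeDump change is worth a closer look: a defined type such as "type iterativeDump json.Encoder" does not inherit json.Encoder's methods, so every call had to route through an explicit pointer conversion, whereas embedding *json.Encoder in a struct promotes Encode into the method set. A minimal standalone illustration (the type names here are hypothetical, chosen for contrast):

    package main

    import (
        "encoding/json"
        "os"
    )

    // byConversion copies json.Encoder's underlying type but none of its methods.
    type byConversion json.Encoder

    // byEmbedding promotes all of (*json.Encoder)'s methods, including Encode.
    type byEmbedding struct {
        *json.Encoder
    }

    func main() {
        enc := json.NewEncoder(os.Stdout)

        c := byConversion(*enc)
        (*json.Encoder)(&c).Encode("awkward") // a conversion on every call

        e := byEmbedding{enc}
        e.Encode("direct") // reads naturally
    }
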
@@ -127,9 +127,7 @@ type (
        hash common.Hash
    }
    touchChange struct {
        account   *common.Address
        prev      bool
        prevDirty bool
        account *common.Address
    }
)

@@ -1,25 +0,0 @@
// Copyright 2014 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package state

import (
    "testing"

    checker "gopkg.in/check.v1"
)

func Test(t *testing.T) { checker.TestingT(t) }
@@ -18,10 +18,7 @@ package state

import (
    "bytes"
    "fmt"
    "math/rand"
    "testing"
    "time"

    "github.com/ethereum/go-ethereum/common"
)
@@ -35,9 +32,7 @@ func BenchmarkCutOriginal(b *testing.B) {

func BenchmarkCutsetterFn(b *testing.B) {
    value := common.HexToHash("0x01")
    cutSetFn := func(r rune) bool {
        return int32(r) == int32(0)
    }
    cutSetFn := func(r rune) bool { return r == 0 }
    for i := 0; i < b.N; i++ {
        bytes.TrimLeftFunc(value[:], cutSetFn)
    }
@@ -49,22 +44,3 @@ func BenchmarkCutCustomTrim(b *testing.B) {
        common.TrimLeftZeroes(value[:])
    }
}

func xTestFuzzCutter(t *testing.T) {
    rand.Seed(time.Now().Unix())
    for {
        v := make([]byte, 20)
        zeroes := rand.Intn(21)
        rand.Read(v[zeroes:])
        exp := bytes.TrimLeft(v[:], "\x00")
        got := common.TrimLeftZeroes(v)
        if !bytes.Equal(exp, got) {
            fmt.Printf("Input %x\n", v)
            fmt.Printf("Exp %x\n", exp)
            fmt.Printf("Got %x\n", got)
            t.Fatalf("Error")
        }
        //break
    }
}

@@ -25,19 +25,24 @@ import (
    "github.com/ethereum/go-ethereum/core/rawdb"
    "github.com/ethereum/go-ethereum/crypto"
    "github.com/ethereum/go-ethereum/ethdb"
    checker "gopkg.in/check.v1"
)

type StateSuite struct {
var toAddr = common.BytesToAddress

type stateTest struct {
    db    ethdb.Database
    state *StateDB
}

var _ = checker.Suite(&StateSuite{})
func newStateTest() *stateTest {
    db := rawdb.NewMemoryDatabase()
    sdb, _ := New(common.Hash{}, NewDatabase(db))
    return &stateTest{db: db, state: sdb}
}

var toAddr = common.BytesToAddress
func TestDump(t *testing.T) {
    s := newStateTest()

func (s *StateSuite) TestDump(c *checker.C) {
    // generate a few entries
    obj1 := s.state.GetOrNewStateObject(toAddr([]byte{0x01}))
    obj1.AddBalance(big.NewInt(22))
@@ -78,16 +83,12 @@ func (s *StateSuite) TestDump(c *checker.C) {
    }
}`
    if got != want {
        c.Errorf("dump mismatch:\ngot: %s\nwant: %s\n", got, want)
        t.Errorf("dump mismatch:\ngot: %s\nwant: %s\n", got, want)
    }
}

func (s *StateSuite) SetUpTest(c *checker.C) {
    s.db = rawdb.NewMemoryDatabase()
    s.state, _ = New(common.Hash{}, NewDatabase(s.db))
}

func (s *StateSuite) TestNull(c *checker.C) {
func TestNull(t *testing.T) {
    s := newStateTest()
    address := common.HexToAddress("0x823140710bf13990e4500136726d8b55")
    s.state.CreateAccount(address)
    //value := common.FromHex("0x823140710bf13990e4500136726d8b55")
@@ -97,18 +98,19 @@ func (s *StateSuite) TestNull(c *checker.C) {
    s.state.Commit(false)

    if value := s.state.GetState(address, common.Hash{}); value != (common.Hash{}) {
        c.Errorf("expected empty current value, got %x", value)
        t.Errorf("expected empty current value, got %x", value)
    }
    if value := s.state.GetCommittedState(address, common.Hash{}); value != (common.Hash{}) {
        c.Errorf("expected empty committed value, got %x", value)
        t.Errorf("expected empty committed value, got %x", value)
    }
}

func (s *StateSuite) TestSnapshot(c *checker.C) {
func TestSnapshot(t *testing.T) {
    stateobjaddr := toAddr([]byte("aa"))
    var storageaddr common.Hash
    data1 := common.BytesToHash([]byte{42})
    data2 := common.BytesToHash([]byte{43})
    s := newStateTest()

    // snapshot the genesis state
    genesis := s.state.Snapshot()
@@ -121,21 +123,28 @@ func (s *StateSuite) TestSnapshot(c *checker.C) {
    s.state.SetState(stateobjaddr, storageaddr, data2)
    s.state.RevertToSnapshot(snapshot)

    c.Assert(s.state.GetState(stateobjaddr, storageaddr), checker.DeepEquals, data1)
    c.Assert(s.state.GetCommittedState(stateobjaddr, storageaddr), checker.DeepEquals, common.Hash{})
    if v := s.state.GetState(stateobjaddr, storageaddr); v != data1 {
        t.Errorf("wrong storage value %v, want %v", v, data1)
    }
    if v := s.state.GetCommittedState(stateobjaddr, storageaddr); v != (common.Hash{}) {
        t.Errorf("wrong committed storage value %v, want %v", v, common.Hash{})
    }

    // revert up to the genesis state and ensure correct content
    s.state.RevertToSnapshot(genesis)
    c.Assert(s.state.GetState(stateobjaddr, storageaddr), checker.DeepEquals, common.Hash{})
    c.Assert(s.state.GetCommittedState(stateobjaddr, storageaddr), checker.DeepEquals, common.Hash{})
    if v := s.state.GetState(stateobjaddr, storageaddr); v != (common.Hash{}) {
        t.Errorf("wrong storage value %v, want %v", v, common.Hash{})
    }
    if v := s.state.GetCommittedState(stateobjaddr, storageaddr); v != (common.Hash{}) {
        t.Errorf("wrong committed storage value %v, want %v", v, common.Hash{})
    }
}

func (s *StateSuite) TestSnapshotEmpty(c *checker.C) {
func TestSnapshotEmpty(t *testing.T) {
    s := newStateTest()
    s.state.RevertToSnapshot(s.state.Snapshot())
}

// use testing instead of checker because checker does not support
// printing/logging in tests (-check.vv does not work)
func TestSnapshot2(t *testing.T) {
    state, _ := New(common.Hash{}, NewDatabase(rawdb.NewMemoryDatabase()))

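These hunks retire the gocheck suite in favour of the standard library's testing package; the newStateTest helper replaces the suite's SetUpTest fixture. The converted tests can compare common.Hash values with != because fixed-size arrays are comparable; for non-comparable values (maps, slices), the usual stand-in for checker.DeepEquals is reflect.DeepEqual. A self-contained sketch (a hypothetical test, not part of the diff):

    package state

    import (
        "reflect"
        "testing"
    )

    // TestDeepEqualExample shows the plain-testing equivalent of
    // c.Assert(got, checker.DeepEquals, want) for non-comparable types.
    func TestDeepEqualExample(t *testing.T) {
        got := map[string]int{"a": 1}
        want := map[string]int{"a": 1}
        if !reflect.DeepEqual(got, want) {
            t.Errorf("mismatch: got %v, want %v", got, want)
        }
    }
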
@@ -124,115 +124,115 @@ func New(root common.Hash, db Database) (*StateDB, error) {
}

// setError remembers the first non-nil error it is called with.
func (self *StateDB) setError(err error) {
    if self.dbErr == nil {
        self.dbErr = err
func (s *StateDB) setError(err error) {
    if s.dbErr == nil {
        s.dbErr = err
    }
}

func (self *StateDB) Error() error {
    return self.dbErr
func (s *StateDB) Error() error {
    return s.dbErr
}

// Reset clears out all ephemeral state objects from the state db, but keeps
// the underlying state trie to avoid reloading data for the next operations.
func (self *StateDB) Reset(root common.Hash) error {
    tr, err := self.db.OpenTrie(root)
func (s *StateDB) Reset(root common.Hash) error {
    tr, err := s.db.OpenTrie(root)
    if err != nil {
        return err
    }
    self.trie = tr
    self.stateObjects = make(map[common.Address]*stateObject)
    self.stateObjectsPending = make(map[common.Address]struct{})
    self.stateObjectsDirty = make(map[common.Address]struct{})
    self.thash = common.Hash{}
    self.bhash = common.Hash{}
    self.txIndex = 0
    self.logs = make(map[common.Hash][]*types.Log)
    self.logSize = 0
    self.preimages = make(map[common.Hash][]byte)
    self.clearJournalAndRefund()
    s.trie = tr
    s.stateObjects = make(map[common.Address]*stateObject)
    s.stateObjectsPending = make(map[common.Address]struct{})
    s.stateObjectsDirty = make(map[common.Address]struct{})
    s.thash = common.Hash{}
    s.bhash = common.Hash{}
    s.txIndex = 0
    s.logs = make(map[common.Hash][]*types.Log)
    s.logSize = 0
    s.preimages = make(map[common.Hash][]byte)
    s.clearJournalAndRefund()
    return nil
}

func (self *StateDB) AddLog(log *types.Log) {
    self.journal.append(addLogChange{txhash: self.thash})
func (s *StateDB) AddLog(log *types.Log) {
    s.journal.append(addLogChange{txhash: s.thash})

    log.TxHash = self.thash
    log.BlockHash = self.bhash
    log.TxIndex = uint(self.txIndex)
    log.Index = self.logSize
    self.logs[self.thash] = append(self.logs[self.thash], log)
    self.logSize++
    log.TxHash = s.thash
    log.BlockHash = s.bhash
    log.TxIndex = uint(s.txIndex)
    log.Index = s.logSize
    s.logs[s.thash] = append(s.logs[s.thash], log)
    s.logSize++
}

func (self *StateDB) GetLogs(hash common.Hash) []*types.Log {
    return self.logs[hash]
func (s *StateDB) GetLogs(hash common.Hash) []*types.Log {
    return s.logs[hash]
}

func (self *StateDB) Logs() []*types.Log {
func (s *StateDB) Logs() []*types.Log {
    var logs []*types.Log
    for _, lgs := range self.logs {
    for _, lgs := range s.logs {
        logs = append(logs, lgs...)
    }
    return logs
}

// AddPreimage records a SHA3 preimage seen by the VM.
func (self *StateDB) AddPreimage(hash common.Hash, preimage []byte) {
    if _, ok := self.preimages[hash]; !ok {
        self.journal.append(addPreimageChange{hash: hash})
func (s *StateDB) AddPreimage(hash common.Hash, preimage []byte) {
    if _, ok := s.preimages[hash]; !ok {
        s.journal.append(addPreimageChange{hash: hash})
        pi := make([]byte, len(preimage))
        copy(pi, preimage)
        self.preimages[hash] = pi
        s.preimages[hash] = pi
    }
}

// Preimages returns a list of SHA3 preimages that have been submitted.
func (self *StateDB) Preimages() map[common.Hash][]byte {
    return self.preimages
func (s *StateDB) Preimages() map[common.Hash][]byte {
    return s.preimages
}

// AddRefund adds gas to the refund counter
func (self *StateDB) AddRefund(gas uint64) {
    self.journal.append(refundChange{prev: self.refund})
    self.refund += gas
func (s *StateDB) AddRefund(gas uint64) {
    s.journal.append(refundChange{prev: s.refund})
    s.refund += gas
}

// SubRefund removes gas from the refund counter.
// This method will panic if the refund counter goes below zero
func (self *StateDB) SubRefund(gas uint64) {
    self.journal.append(refundChange{prev: self.refund})
    if gas > self.refund {
func (s *StateDB) SubRefund(gas uint64) {
    s.journal.append(refundChange{prev: s.refund})
    if gas > s.refund {
        panic("Refund counter below zero")
    }
    self.refund -= gas
    s.refund -= gas
}

// Exist reports whether the given account address exists in the state.
// Notably this also returns true for suicided accounts.
func (self *StateDB) Exist(addr common.Address) bool {
    return self.getStateObject(addr) != nil
func (s *StateDB) Exist(addr common.Address) bool {
    return s.getStateObject(addr) != nil
}

// Empty returns whether the state object is either non-existent
// or empty according to the EIP161 specification (balance = nonce = code = 0)
func (self *StateDB) Empty(addr common.Address) bool {
    so := self.getStateObject(addr)
func (s *StateDB) Empty(addr common.Address) bool {
    so := s.getStateObject(addr)
    return so == nil || so.empty()
}

// Retrieve the balance from the given address or 0 if object not found
func (self *StateDB) GetBalance(addr common.Address) *big.Int {
    stateObject := self.getStateObject(addr)
func (s *StateDB) GetBalance(addr common.Address) *big.Int {
    stateObject := s.getStateObject(addr)
    if stateObject != nil {
        return stateObject.Balance()
    }
    return common.Big0
}

func (self *StateDB) GetNonce(addr common.Address) uint64 {
    stateObject := self.getStateObject(addr)
func (s *StateDB) GetNonce(addr common.Address) uint64 {
    stateObject := s.getStateObject(addr)
    if stateObject != nil {
        return stateObject.Nonce()
    }
@@ -241,40 +241,40 @@ func (self *StateDB) GetNonce(addr common.Address) uint64 {
}

// TxIndex returns the current transaction index set by Prepare.
func (self *StateDB) TxIndex() int {
    return self.txIndex
func (s *StateDB) TxIndex() int {
    return s.txIndex
}

// BlockHash returns the current block hash set by Prepare.
func (self *StateDB) BlockHash() common.Hash {
    return self.bhash
func (s *StateDB) BlockHash() common.Hash {
    return s.bhash
}

func (self *StateDB) GetCode(addr common.Address) []byte {
    stateObject := self.getStateObject(addr)
func (s *StateDB) GetCode(addr common.Address) []byte {
    stateObject := s.getStateObject(addr)
    if stateObject != nil {
        return stateObject.Code(self.db)
        return stateObject.Code(s.db)
    }
    return nil
}

func (self *StateDB) GetCodeSize(addr common.Address) int {
    stateObject := self.getStateObject(addr)
func (s *StateDB) GetCodeSize(addr common.Address) int {
    stateObject := s.getStateObject(addr)
    if stateObject == nil {
        return 0
    }
    if stateObject.code != nil {
        return len(stateObject.code)
    }
    size, err := self.db.ContractCodeSize(stateObject.addrHash, common.BytesToHash(stateObject.CodeHash()))
    size, err := s.db.ContractCodeSize(stateObject.addrHash, common.BytesToHash(stateObject.CodeHash()))
    if err != nil {
        self.setError(err)
        s.setError(err)
    }
    return size
}

func (self *StateDB) GetCodeHash(addr common.Address) common.Hash {
    stateObject := self.getStateObject(addr)
func (s *StateDB) GetCodeHash(addr common.Address) common.Hash {
    stateObject := s.getStateObject(addr)
    if stateObject == nil {
        return common.Hash{}
    }
@@ -282,25 +282,25 @@ func (self *StateDB) GetCodeHash(addr common.Address) common.Hash {
}

// GetState retrieves a value from the given account's storage trie.
func (self *StateDB) GetState(addr common.Address, hash common.Hash) common.Hash {
    stateObject := self.getStateObject(addr)
func (s *StateDB) GetState(addr common.Address, hash common.Hash) common.Hash {
    stateObject := s.getStateObject(addr)
    if stateObject != nil {
        return stateObject.GetState(self.db, hash)
        return stateObject.GetState(s.db, hash)
    }
    return common.Hash{}
}

// GetProof returns the MerkleProof for a given Account
func (self *StateDB) GetProof(a common.Address) ([][]byte, error) {
func (s *StateDB) GetProof(a common.Address) ([][]byte, error) {
    var proof proofList
    err := self.trie.Prove(crypto.Keccak256(a.Bytes()), 0, &proof)
    err := s.trie.Prove(crypto.Keccak256(a.Bytes()), 0, &proof)
    return [][]byte(proof), err
}

// GetStorageProof returns the StorageProof for given key
func (self *StateDB) GetStorageProof(a common.Address, key common.Hash) ([][]byte, error) {
func (s *StateDB) GetStorageProof(a common.Address, key common.Hash) ([][]byte, error) {
    var proof proofList
    trie := self.StorageTrie(a)
    trie := s.StorageTrie(a)
    if trie == nil {
        return proof, errors.New("storage trie for requested address does not exist")
    }
@@ -309,32 +309,32 @@ func (self *StateDB) GetStorageProof(a common.Address, key common.Hash) ([][]byte, error) {
}

// GetCommittedState retrieves a value from the given account's committed storage trie.
func (self *StateDB) GetCommittedState(addr common.Address, hash common.Hash) common.Hash {
    stateObject := self.getStateObject(addr)
func (s *StateDB) GetCommittedState(addr common.Address, hash common.Hash) common.Hash {
    stateObject := s.getStateObject(addr)
    if stateObject != nil {
        return stateObject.GetCommittedState(self.db, hash)
        return stateObject.GetCommittedState(s.db, hash)
    }
    return common.Hash{}
}

// Database retrieves the low level database supporting the lower level trie ops.
func (self *StateDB) Database() Database {
    return self.db
func (s *StateDB) Database() Database {
    return s.db
}

// StorageTrie returns the storage trie of an account.
// The return value is a copy and is nil for non-existent accounts.
func (self *StateDB) StorageTrie(addr common.Address) Trie {
    stateObject := self.getStateObject(addr)
func (s *StateDB) StorageTrie(addr common.Address) Trie {
    stateObject := s.getStateObject(addr)
    if stateObject == nil {
        return nil
    }
    cpy := stateObject.deepCopy(self)
    return cpy.updateTrie(self.db)
    cpy := stateObject.deepCopy(s)
    return cpy.updateTrie(s.db)
}

func (self *StateDB) HasSuicided(addr common.Address) bool {
    stateObject := self.getStateObject(addr)
func (s *StateDB) HasSuicided(addr common.Address) bool {
    stateObject := s.getStateObject(addr)
    if stateObject != nil {
        return stateObject.suicided
    }
@@ -346,53 +346,53 @@ func (self *StateDB) HasSuicided(addr common.Address) bool {
*/

// AddBalance adds amount to the account associated with addr.
func (self *StateDB) AddBalance(addr common.Address, amount *big.Int) {
    stateObject := self.GetOrNewStateObject(addr)
func (s *StateDB) AddBalance(addr common.Address, amount *big.Int) {
    stateObject := s.GetOrNewStateObject(addr)
    if stateObject != nil {
        stateObject.AddBalance(amount)
    }
}

// SubBalance subtracts amount from the account associated with addr.
func (self *StateDB) SubBalance(addr common.Address, amount *big.Int) {
    stateObject := self.GetOrNewStateObject(addr)
func (s *StateDB) SubBalance(addr common.Address, amount *big.Int) {
    stateObject := s.GetOrNewStateObject(addr)
    if stateObject != nil {
        stateObject.SubBalance(amount)
    }
}

func (self *StateDB) SetBalance(addr common.Address, amount *big.Int) {
    stateObject := self.GetOrNewStateObject(addr)
func (s *StateDB) SetBalance(addr common.Address, amount *big.Int) {
    stateObject := s.GetOrNewStateObject(addr)
    if stateObject != nil {
        stateObject.SetBalance(amount)
    }
}

func (self *StateDB) SetNonce(addr common.Address, nonce uint64) {
    stateObject := self.GetOrNewStateObject(addr)
func (s *StateDB) SetNonce(addr common.Address, nonce uint64) {
    stateObject := s.GetOrNewStateObject(addr)
    if stateObject != nil {
        stateObject.SetNonce(nonce)
    }
}

func (self *StateDB) SetCode(addr common.Address, code []byte) {
    stateObject := self.GetOrNewStateObject(addr)
func (s *StateDB) SetCode(addr common.Address, code []byte) {
    stateObject := s.GetOrNewStateObject(addr)
    if stateObject != nil {
        stateObject.SetCode(crypto.Keccak256Hash(code), code)
    }
}

func (self *StateDB) SetState(addr common.Address, key, value common.Hash) {
    stateObject := self.GetOrNewStateObject(addr)
func (s *StateDB) SetState(addr common.Address, key, value common.Hash) {
    stateObject := s.GetOrNewStateObject(addr)
    if stateObject != nil {
        stateObject.SetState(self.db, key, value)
        stateObject.SetState(s.db, key, value)
    }
}

// SetStorage replaces the entire storage for the specified account with given
// storage. This function should only be used for debugging.
func (self *StateDB) SetStorage(addr common.Address, storage map[common.Hash]common.Hash) {
    stateObject := self.GetOrNewStateObject(addr)
func (s *StateDB) SetStorage(addr common.Address, storage map[common.Hash]common.Hash) {
    stateObject := s.GetOrNewStateObject(addr)
    if stateObject != nil {
        stateObject.SetStorage(storage)
    }
@@ -403,12 +403,12 @@ func (self *StateDB) SetStorage(addr common.Address, storage map[common.Hash]common.Hash) {
//
// The account's state object is still available until the state is committed,
// getStateObject will return a non-nil account after Suicide.
func (self *StateDB) Suicide(addr common.Address) bool {
    stateObject := self.getStateObject(addr)
func (s *StateDB) Suicide(addr common.Address) bool {
    stateObject := s.getStateObject(addr)
    if stateObject == nil {
        return false
    }
    self.journal.append(suicideChange{
    s.journal.append(suicideChange{
        account:     &addr,
        prev:        stateObject.suicided,
        prevbalance: new(big.Int).Set(stateObject.Balance()),
@@ -462,7 +462,7 @@ func (s *StateDB) getStateObject(addr common.Address) *stateObject {

// getDeletedStateObject is similar to getStateObject, but instead of returning
// nil for a deleted state object, it returns the actual object with the deleted
// flag set. This is needed by the state journal to revert to the correct self-
// destructed object instead of wiping all knowledge about the state object.
func (s *StateDB) getDeletedStateObject(addr common.Address) *stateObject {
    // Prefer live objects if any is available
@@ -490,32 +490,32 @@ func (s *StateDB) getDeletedStateObject(addr common.Address) *stateObject {
    return obj
}

func (self *StateDB) setStateObject(object *stateObject) {
    self.stateObjects[object.Address()] = object
func (s *StateDB) setStateObject(object *stateObject) {
    s.stateObjects[object.Address()] = object
}

// Retrieve a state object or create a new state object if nil.
func (self *StateDB) GetOrNewStateObject(addr common.Address) *stateObject {
    stateObject := self.getStateObject(addr)
func (s *StateDB) GetOrNewStateObject(addr common.Address) *stateObject {
    stateObject := s.getStateObject(addr)
    if stateObject == nil {
        stateObject, _ = self.createObject(addr)
        stateObject, _ = s.createObject(addr)
    }
    return stateObject
}

// createObject creates a new state object. If there is an existing account with
// the given address, it is overwritten and returned as the second return value.
func (self *StateDB) createObject(addr common.Address) (newobj, prev *stateObject) {
    prev = self.getDeletedStateObject(addr) // Note, prev might have been deleted, we need that!
func (s *StateDB) createObject(addr common.Address) (newobj, prev *stateObject) {
    prev = s.getDeletedStateObject(addr) // Note, prev might have been deleted, we need that!

    newobj = newObject(self, addr, Account{})
    newobj = newObject(s, addr, Account{})
    newobj.setNonce(0) // sets the object to dirty
    if prev == nil {
        self.journal.append(createObjectChange{account: &addr})
        s.journal.append(createObjectChange{account: &addr})
    } else {
        self.journal.append(resetObjectChange{prev: prev})
        s.journal.append(resetObjectChange{prev: prev})
    }
    self.setStateObject(newobj)
    s.setStateObject(newobj)
    return newobj, prev
}

@@ -529,8 +529,8 @@ func (self *StateDB) createObject(addr common.Address) (newobj, prev *stateObject) {
// 2. tx_create(sha(account ++ nonce)) (note that this gets the address of 1)
//
// Carrying over the balance ensures that Ether doesn't disappear.
func (self *StateDB) CreateAccount(addr common.Address) {
    newObj, prev := self.createObject(addr)
func (s *StateDB) CreateAccount(addr common.Address) {
    newObj, prev := s.createObject(addr)
    if prev != nil {
        newObj.setBalance(prev.data.Balance)
    }
@@ -567,27 +567,27 @@ func (db *StateDB) ForEachStorage(addr common.Address, cb func(key, value common.Hash) bool) error {

// Copy creates a deep, independent copy of the state.
// Snapshots of the copied state cannot be applied to the copy.
func (self *StateDB) Copy() *StateDB {
func (s *StateDB) Copy() *StateDB {
    // Copy all the basic fields, initialize the memory ones
    state := &StateDB{
        db:                  self.db,
        trie:                self.db.CopyTrie(self.trie),
        stateObjects:        make(map[common.Address]*stateObject, len(self.journal.dirties)),
        stateObjectsPending: make(map[common.Address]struct{}, len(self.stateObjectsPending)),
        stateObjectsDirty:   make(map[common.Address]struct{}, len(self.journal.dirties)),
        refund:              self.refund,
        logs:                make(map[common.Hash][]*types.Log, len(self.logs)),
        logSize:             self.logSize,
        preimages:           make(map[common.Hash][]byte, len(self.preimages)),
        db:                  s.db,
        trie:                s.db.CopyTrie(s.trie),
        stateObjects:        make(map[common.Address]*stateObject, len(s.journal.dirties)),
        stateObjectsPending: make(map[common.Address]struct{}, len(s.stateObjectsPending)),
        stateObjectsDirty:   make(map[common.Address]struct{}, len(s.journal.dirties)),
        refund:              s.refund,
        logs:                make(map[common.Hash][]*types.Log, len(s.logs)),
        logSize:             s.logSize,
        preimages:           make(map[common.Hash][]byte, len(s.preimages)),
        journal:             newJournal(),
    }
    // Copy the dirty states, logs, and preimages
    for addr := range self.journal.dirties {
    for addr := range s.journal.dirties {
        // As documented [here](https://github.com/ethereum/go-ethereum/pull/16485#issuecomment-380438527),
        // and in the Finalise-method, there is a case where an object is in the journal but not
        // in the stateObjects: OOG after touch on ripeMD prior to Byzantium. Thus, we need to check for
        // nil
        if object, exist := self.stateObjects[addr]; exist {
        if object, exist := s.stateObjects[addr]; exist {
            // Even though the original object is dirty, we are not copying the journal,
            // so we need to make sure that any side effect the journal would have caused
            // during a commit (or similar op) is already applied to the copy.
@@ -600,19 +600,19 @@ func (self *StateDB) Copy() *StateDB {
    // Above, we don't copy the actual journal. This means that if the copy is copied, the
    // loop above will be a no-op, since the copy's journal is empty.
    // Thus, here we iterate over stateObjects, to enable copies of copies
    for addr := range self.stateObjectsPending {
    for addr := range s.stateObjectsPending {
        if _, exist := state.stateObjects[addr]; !exist {
            state.stateObjects[addr] = self.stateObjects[addr].deepCopy(state)
            state.stateObjects[addr] = s.stateObjects[addr].deepCopy(state)
        }
        state.stateObjectsPending[addr] = struct{}{}
    }
    for addr := range self.stateObjectsDirty {
    for addr := range s.stateObjectsDirty {
        if _, exist := state.stateObjects[addr]; !exist {
            state.stateObjects[addr] = self.stateObjects[addr].deepCopy(state)
            state.stateObjects[addr] = s.stateObjects[addr].deepCopy(state)
        }
        state.stateObjectsDirty[addr] = struct{}{}
    }
    for hash, logs := range self.logs {
    for hash, logs := range s.logs {
        cpy := make([]*types.Log, len(logs))
        for i, l := range logs {
            cpy[i] = new(types.Log)
@@ -620,42 +620,42 @@ func (self *StateDB) Copy() *StateDB {
        }
        state.logs[hash] = cpy
    }
    for hash, preimage := range self.preimages {
    for hash, preimage := range s.preimages {
        state.preimages[hash] = preimage
    }
    return state
}

// Snapshot returns an identifier for the current revision of the state.
func (self *StateDB) Snapshot() int {
    id := self.nextRevisionId
    self.nextRevisionId++
    self.validRevisions = append(self.validRevisions, revision{id, self.journal.length()})
func (s *StateDB) Snapshot() int {
    id := s.nextRevisionId
    s.nextRevisionId++
    s.validRevisions = append(s.validRevisions, revision{id, s.journal.length()})
    return id
}

// RevertToSnapshot reverts all state changes made since the given revision.
func (self *StateDB) RevertToSnapshot(revid int) {
func (s *StateDB) RevertToSnapshot(revid int) {
    // Find the snapshot in the stack of valid snapshots.
    idx := sort.Search(len(self.validRevisions), func(i int) bool {
        return self.validRevisions[i].id >= revid
    idx := sort.Search(len(s.validRevisions), func(i int) bool {
        return s.validRevisions[i].id >= revid
    })
    if idx == len(self.validRevisions) || self.validRevisions[idx].id != revid {
    if idx == len(s.validRevisions) || s.validRevisions[idx].id != revid {
        panic(fmt.Errorf("revision id %v cannot be reverted", revid))
    }
    snapshot := self.validRevisions[idx].journalIndex
    snapshot := s.validRevisions[idx].journalIndex

    // Replay the journal to undo changes and remove invalidated snapshots
    self.journal.revert(self, snapshot)
    self.validRevisions = self.validRevisions[:idx]
    s.journal.revert(s, snapshot)
    s.validRevisions = s.validRevisions[:idx]
}

// GetRefund returns the current value of the refund counter.
func (self *StateDB) GetRefund() uint64 {
    return self.refund
func (s *StateDB) GetRefund() uint64 {
    return s.refund
}

// Finalise finalises the state by removing the self destructed objects and clears
// the journal as well as the refunds. Finalise, however, will not push any updates
// into the tries just yet. Only IntermediateRoot or Commit will do that.
func (s *StateDB) Finalise(deleteEmptyObjects bool) {
@@ -710,10 +710,10 @@ func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash {

// Prepare sets the current transaction hash and index and block hash which is
// used when the EVM emits new state logs.
func (self *StateDB) Prepare(thash, bhash common.Hash, ti int) {
    self.thash = thash
    self.bhash = bhash
    self.txIndex = ti
func (s *StateDB) Prepare(thash, bhash common.Hash, ti int) {
    s.thash = thash
    s.bhash = bhash
    s.txIndex = ti
}

func (s *StateDB) clearJournalAndRefund() {

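The whole file is a mechanical receiver rename: Go's code-review conventions favour short, consistent receiver names (often one or two letters) and explicitly discourage "self" or "this", since the receiver is just another parameter. A minimal illustration with a hypothetical type:

    package demo

    // Idiomatic Go: a short receiver name, used consistently across methods.
    type counter struct{ n int }

    func (c *counter) Inc()       { c.n++ }
    func (c *counter) Value() int { return c.n }
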
@@ -29,8 +29,6 @@ import (
    "testing"
    "testing/quick"

    "gopkg.in/check.v1"

    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/core/rawdb"
    "github.com/ethereum/go-ethereum/core/types"
@@ -458,7 +456,8 @@ func (test *snapshotTest) checkEqual(state, checkstate *StateDB) error {
    return nil
}

func (s *StateSuite) TestTouchDelete(c *check.C) {
func TestTouchDelete(t *testing.T) {
    s := newStateTest()
    s.state.GetOrNewStateObject(common.Address{})
    root, _ := s.state.Commit(false)
    s.state.Reset(root)
@@ -467,11 +466,11 @@ func (s *StateSuite) TestTouchDelete(c *check.C) {
    s.state.AddBalance(common.Address{}, new(big.Int))

    if len(s.state.journal.dirties) != 1 {
        c.Fatal("expected one dirty state object")
        t.Fatal("expected one dirty state object")
    }
    s.state.RevertToSnapshot(snapshot)
    if len(s.state.journal.dirties) != 0 {
        c.Fatal("expected no dirty state object")
        t.Fatal("expected no dirty state object")
    }
}

@@ -136,7 +136,7 @@ func TestEmptyStateSync(t *testing.T) {
func TestIterativeStateSyncIndividual(t *testing.T) { testIterativeStateSync(t, 1) }
func TestIterativeStateSyncBatched(t *testing.T)    { testIterativeStateSync(t, 100) }

func testIterativeStateSync(t *testing.T, batch int) {
func testIterativeStateSync(t *testing.T, count int) {
    // Create a random state to copy
    srcDb, srcRoot, srcAccounts := makeTestState()

@@ -144,7 +144,7 @@ func testIterativeStateSync(t *testing.T, batch int) {
    dstDb := rawdb.NewMemoryDatabase()
    sched := NewStateSync(srcRoot, dstDb, trie.NewSyncBloom(1, dstDb))

    queue := append([]common.Hash{}, sched.Missing(batch)...)
    queue := append([]common.Hash{}, sched.Missing(count)...)
    for len(queue) > 0 {
        results := make([]trie.SyncResult, len(queue))
        for i, hash := range queue {
@@ -157,10 +157,12 @@ func testIterativeStateSync(t *testing.T, batch int) {
        if _, index, err := sched.Process(results); err != nil {
            t.Fatalf("failed to process result #%d: %v", index, err)
        }
        if index, err := sched.Commit(dstDb); err != nil {
            t.Fatalf("failed to commit data #%d: %v", index, err)
        batch := dstDb.NewBatch()
        if err := sched.Commit(batch); err != nil {
            t.Fatalf("failed to commit data: %v", err)
        }
        queue = append(queue[:0], sched.Missing(batch)...)
        batch.Write()
        queue = append(queue[:0], sched.Missing(count)...)
    }
    // Cross check that the two states are in sync
    checkStateAccounts(t, dstDb, srcRoot, srcAccounts)
@@ -190,9 +192,11 @@ func TestIterativeDelayedStateSync(t *testing.T) {
        if _, index, err := sched.Process(results); err != nil {
            t.Fatalf("failed to process result #%d: %v", index, err)
        }
        if index, err := sched.Commit(dstDb); err != nil {
            t.Fatalf("failed to commit data #%d: %v", index, err)
        batch := dstDb.NewBatch()
        if err := sched.Commit(batch); err != nil {
            t.Fatalf("failed to commit data: %v", err)
        }
        batch.Write()
        queue = append(queue[len(results):], sched.Missing(0)...)
    }
    // Cross check that the two states are in sync
@@ -205,7 +209,7 @@ func TestIterativeDelayedStateSync(t *testing.T) {
func TestIterativeRandomStateSyncIndividual(t *testing.T) { testIterativeRandomStateSync(t, 1) }
func TestIterativeRandomStateSyncBatched(t *testing.T)    { testIterativeRandomStateSync(t, 100) }

func testIterativeRandomStateSync(t *testing.T, batch int) {
func testIterativeRandomStateSync(t *testing.T, count int) {
    // Create a random state to copy
    srcDb, srcRoot, srcAccounts := makeTestState()

@@ -214,7 +218,7 @@ func testIterativeRandomStateSync(t *testing.T, batch int) {
    sched := NewStateSync(srcRoot, dstDb, trie.NewSyncBloom(1, dstDb))

    queue := make(map[common.Hash]struct{})
    for _, hash := range sched.Missing(batch) {
    for _, hash := range sched.Missing(count) {
        queue[hash] = struct{}{}
    }
    for len(queue) > 0 {
@@ -231,11 +235,13 @@ func testIterativeRandomStateSync(t *testing.T, batch int) {
        if _, index, err := sched.Process(results); err != nil {
            t.Fatalf("failed to process result #%d: %v", index, err)
        }
        if index, err := sched.Commit(dstDb); err != nil {
            t.Fatalf("failed to commit data #%d: %v", index, err)
        batch := dstDb.NewBatch()
        if err := sched.Commit(batch); err != nil {
            t.Fatalf("failed to commit data: %v", err)
        }
        batch.Write()
        queue = make(map[common.Hash]struct{})
        for _, hash := range sched.Missing(batch) {
        for _, hash := range sched.Missing(count) {
            queue[hash] = struct{}{}
        }
    }
@@ -277,9 +283,11 @@ func TestIterativeRandomDelayedStateSync(t *testing.T) {
        if _, index, err := sched.Process(results); err != nil {
            t.Fatalf("failed to process result #%d: %v", index, err)
        }
        if index, err := sched.Commit(dstDb); err != nil {
            t.Fatalf("failed to commit data #%d: %v", index, err)
        batch := dstDb.NewBatch()
        if err := sched.Commit(batch); err != nil {
            t.Fatalf("failed to commit data: %v", err)
        }
        batch.Write()
        for _, hash := range sched.Missing(0) {
            queue[hash] = struct{}{}
        }
@@ -316,9 +324,11 @@ func TestIncompleteStateSync(t *testing.T) {
        if _, index, err := sched.Process(results); err != nil {
            t.Fatalf("failed to process result #%d: %v", index, err)
        }
        if index, err := sched.Commit(dstDb); err != nil {
            t.Fatalf("failed to commit data #%d: %v", index, err)
        batch := dstDb.NewBatch()
        if err := sched.Commit(batch); err != nil {
            t.Fatalf("failed to commit data: %v", err)
        }
        batch.Write()
        for _, result := range results {
            added = append(added, result.Hash)
        }

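The recurring change in these hunks is an API shift: sched.Commit no longer writes straight into the database and instead stages trie nodes into a batch that the caller flushes explicitly, making each sync round an atomic write. A condensed sketch of the resulting loop shape (fetchResults is a hypothetical helper standing in for the per-test retrieval code):

    for len(queue) > 0 {
        results := fetchResults(queue) // hypothetical: resolve queued hashes
        if _, index, err := sched.Process(results); err != nil {
            t.Fatalf("failed to process result #%d: %v", index, err)
        }
        batch := dstDb.NewBatch()      // stage all writes in memory
        if err := sched.Commit(batch); err != nil {
            t.Fatalf("failed to commit data: %v", err)
        }
        batch.Write()                  // flush the staged writes atomically
        queue = sched.Missing(count)   // schedule the next round
    }
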
Some files were not shown because too many files have changed in this diff