Compare commits
202 Commits
SHA1
8c2f271528
524aaf5ec6
4eb01b21c8
bdc7554918
1fed223483
1e10489196
2a9ea6be87
7a5a822905
5c6155f9f4
348c3bc47d
94d1f5888a
c37e68e7c1
32341f88e3
66c3eb2f1a
86dd005544
706f5e3b98
19a1c95046
905ed109ed
43cd31ea9f
5e86e4ed29
6d29e192e9
015e78928a
716864deba
e43d827a19
eb87121300
2b2fd74158
d9890a6a8f
a15d71a255
9d1e2027a0
053ed9cc84
dad26582b6
6c8310ebb4
4ee11b072e
901471f733
666092936c
b007df89dd
a04294d160
eebfb13053
0ddd4612b7
a90e645ccd
420b78659b
c9959145a9
c71a7e26a8
7ddb44b80e
b5d362b2bf
fdd42d425b
39f8268147
a25899f3dc
c1544423d6
e5defccd58
0921f8a74f
25b16085da
e1365b2464
fdb742419e
129cf075e9
2c097bb7a2
9a39c6bcb1
f354c622ca
2482ba016e
fb835c024c
07751c3d26
faba018b29
89884dc353
93f047023f
8696dd39cb
cf2a77af28
0185ee0993
4764b2f0be
b65c384181
4996fce25a
f7112cc182
71c37d82ad
4eb9296910
a99ac5335c
4e2641319b
df219e23df
7cf56d6f06
d7f02b448a
1167639524
4ea9737de6
a3cd8a040a
328901c24c
3a98c6f6e6
d81c9d9b76
367f12f734
8d35b1eb2b
0287d54847
24562d9b0c
dc681fc1f6
86bcbb0d79
066c75531d
8327d1fdfc
d54f2f2e5e
c5d28f0b27
de971cc845
f86324edb7
eeaf191633
3010f9fc75
d90bbce954
5cdb476dd1
ff23e265cd
12d8570322
5883afb3ef
05280a7ae3
d97e0063d5
856307d8bb
16d7eae1c8
d8da0b3d81
92b12ee6c6
fc20680b95
979fc96899
63a9d4b2ae
ce5f94920d
341f451083
d13b8e5570
5655dce3b8
7b5107b73f
bdde616f23
3ee91b9f2e
0f4e7c9b0d
1b5a867eec
87c0ba9213
b68929caee
9f7b79af00
4e54b1a45e
a70a79b285
15fdaf2005
8cbdc8638f
0bdd295cc0
7ebc6c43ff
560d44479c
32b078d418
2ff464b29d
f3bafecef7
54add42550
04926db204
3e0641923d
74925e547f
7afdf792ab
c28fd9c079
4baa574410
9f45d6efae
cbbc54c495
7cee2509c0
48b484c5ac
06125bff89
9fea1a5cf5
e401f5ff10
6a53ce29a4
8f24097836
4b9c0ea76d
3bb8a4ed3f
983cb25a07
68754f3931
5d4512b113
d21303f9dd
4fde0cabc1
4a04127ce3
2de37f28e0
5a88a7cf5b
1d25039ff5
8ead45c20b
82a9e11058
b35e4fce99
e24e05dd01
90dedea40f
c0c01612e9
b2b14e6ce3
290d6bd903
9c2ac6fbd5
a00dc5095b
ff90894636
9e04c5ec83
abf2d7d74f
1976bb3df0
350a0490ab
37564ceda6
28c5a8a54b
298a19bbc6
c47052a580
93da0cf8a1
79ce5537ab
8e7bee9b56
f538259187
b1be979443
e997f92caf
56434bfa89
6793ffa12b
5413df1dfa
c374447401
105922180f
3a57eecc69
997b55236e
4c268e65a0
0b53e485d8
9e22e912e3
123864fc05
7163a6664e
4366c45e4e
3a52c4dcf2
722b742780
508891e64b
.github/CODEOWNERS (vendored) | 1 change

@@ -20,4 +20,3 @@ p2p/simulations @fjl
p2p/protocols @fjl
p2p/testing @fjl
signer/ @holiman
whisper/ @gballet
.travis.yml | 32 changes

@@ -16,7 +16,7 @@ jobs:
- stage: lint
os: linux
dist: xenial
go: 1.14.x
go: 1.15.x
env:
- lint
git:
@@ -34,12 +34,22 @@ jobs:
- go run build/ci.go install
- go run build/ci.go test -coverage $TEST_PACKAGES

- stage: build
os: linux
dist: xenial
go: 1.14.x
env:
- GO111MODULE=on
script:
- go run build/ci.go install
- go run build/ci.go test -coverage $TEST_PACKAGES

# These are the latest Go versions.
- stage: build
os: linux
arch: amd64
dist: xenial
go: 1.14.x
go: 1.15.x
env:
- GO111MODULE=on
script:
@@ -51,7 +61,7 @@ jobs:
os: linux
arch: arm64
dist: xenial
go: 1.14.x
go: 1.15.x
env:
- GO111MODULE=on
script:
@@ -61,7 +71,7 @@ jobs:
- stage: build
os: osx
osx_image: xcode11.3
go: 1.14.x
go: 1.15.x
env:
- GO111MODULE=on
script:
@@ -82,7 +92,7 @@ jobs:
if: type = push
os: linux
dist: xenial
go: 1.14.x
go: 1.15.x
env:
- ubuntu-ppa
- GO111MODULE=on
@@ -99,7 +109,7 @@ jobs:
- python-paramiko
script:
- echo '|1|7SiYPr9xl3uctzovOTj4gMwAC1M=|t6ReES75Bo/PxlOPJ6/GsGbTrM0= ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA0aKz5UTUndYgIGG7dQBV+HaeuEZJ2xPHo2DS2iSKvUL4xNMSAY4UguNW+pX56nAQmZKIZZ8MaEvSj6zMEDiq6HFfn5JcTlM80UwlnyKe8B8p7Nk06PPQLrnmQt5fh0HmEcZx+JU9TZsfCHPnX7MNz4ELfZE6cFsclClrKim3BHUIGq//t93DllB+h4O9LHjEUsQ1Sr63irDLSutkLJD6RXchjROXkNirlcNVHH/jwLWR5RcYilNX7S5bIkK8NlWPjsn/8Ua5O7I9/YoE97PpO6i73DTGLh5H9JN/SITwCKBkgSDWUt61uPK3Y11Gty7o2lWsBjhBUm2Y38CBsoGmBw==' >> ~/.ssh/known_hosts
- go run build/ci.go debsrc -goversion 1.14.2 -upload ethereum/ethereum -sftp-user geth-ci -signer "Go Ethereum Linux Builder <geth-ci@ethereum.org>"
- go run build/ci.go debsrc -goversion 1.15 -upload ethereum/ethereum -sftp-user geth-ci -signer "Go Ethereum Linux Builder <geth-ci@ethereum.org>"

# This builder does the Linux Azure uploads
- stage: build
@@ -107,7 +117,7 @@ jobs:
os: linux
dist: xenial
sudo: required
go: 1.14.x
go: 1.15.x
env:
- azure-linux
- GO111MODULE=on
@@ -144,7 +154,7 @@ jobs:
dist: xenial
services:
- docker
go: 1.14.x
go: 1.15.x
env:
- azure-linux-mips
- GO111MODULE=on
@@ -192,7 +202,7 @@ jobs:
git:
submodules: false # avoid cloning ethereum/tests
before_install:
- curl https://dl.google.com/go/go1.14.2.linux-amd64.tar.gz | tar -xz
- curl https://dl.google.com/go/go1.15.linux-amd64.tar.gz | tar -xz
- export PATH=`pwd`/go/bin:$PATH
- export GOROOT=`pwd`/go
- export GOPATH=$HOME/go
@@ -210,7 +220,7 @@ jobs:
- stage: build
if: type = push
os: osx
go: 1.14.x
go: 1.15.x
env:
- azure-osx
- azure-ios
@@ -242,7 +252,7 @@ jobs:
if: type = cron
os: linux
dist: xenial
go: 1.14.x
go: 1.15.x
env:
- azure-purge
- GO111MODULE=on
COPYING | 59 changes

@@ -1,7 +1,7 @@
GNU GENERAL PUBLIC LICENSE
Version 3, 29 June 2007

Copyright (C) 2014 The go-ethereum Authors.
Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.

@@ -616,4 +616,59 @@ above cannot be given local legal effect according to their terms,
reviewing courts shall apply local law that most closely approximates
an absolute waiver of all civil liability in connection with the
Program, unless a warranty or assumption of liability accompanies a
copy of the Program in return for a fee.
copy of the Program in return for a fee.

END OF TERMS AND CONDITIONS

How to Apply These Terms to Your New Programs

If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it
free software which everyone can redistribute and change under these terms.

To do so, attach the following notices to the program. It is safest
to attach them to the start of each source file to most effectively
state the exclusion of warranty; and each file should have at least
the "copyright" line and a pointer to where the full notice is found.

<one line to give the program's name and a brief idea of what it does.>
Copyright (C) <year> <name of author>

This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.

Also add information on how to contact you by electronic and paper mail.

If the program does terminal interaction, make it output a short
notice like this when it starts in an interactive mode:

<program> Copyright (C) <year> <name of author>
This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
This is free software, and you are welcome to redistribute it
under certain conditions; type `show c' for details.

The hypothetical commands `show w' and `show c' should show the appropriate
parts of the General Public License. Of course, your program's commands
might be different; for a GUI interface, you would use an "about box".

You should also get your employer (if you work as a programmer) or school,
if any, to sign a "copyright disclaimer" for the program, if necessary.
For more information on this, and how to apply and follow the GNU GPL, see
<https://www.gnu.org/licenses/>.

The GNU General Public License does not permit incorporating your program
into proprietary programs. If your program is a subroutine library, you
may consider it more useful to permit linking proprietary applications with
the library. If this is what you want to do, use the GNU Lesser General
Public License instead of this License. But first, please read
<https://www.gnu.org/licenses/why-not-lgpl.html>.
@@ -1,5 +1,5 @@
# Build Geth in a stock Go builder container
FROM golang:1.14-alpine as builder
FROM golang:1.15-alpine as builder

RUN apk add --no-cache make gcc musl-dev linux-headers git

@@ -12,5 +12,5 @@ FROM alpine:latest
RUN apk add --no-cache ca-certificates
COPY --from=builder /go-ethereum/build/bin/geth /usr/local/bin/

EXPOSE 8545 8546 8547 30303 30303/udp
EXPOSE 8545 8546 30303 30303/udp
ENTRYPOINT ["geth"]

@@ -1,5 +1,5 @@
# Build Geth in a stock Go builder container
FROM golang:1.14-alpine as builder
FROM golang:1.15-alpine as builder

RUN apk add --no-cache make gcc musl-dev linux-headers git

@@ -12,4 +12,4 @@ FROM alpine:latest
RUN apk add --no-cache ca-certificates
COPY --from=builder /go-ethereum/build/bin/* /usr/local/bin/

EXPOSE 8545 8546 8547 30303 30303/udp
EXPOSE 8545 8546 30303 30303/udp
Makefile | 4 changes

@@ -24,7 +24,9 @@ android:
$(GORUN) build/ci.go aar --local
@echo "Done building."
@echo "Import \"$(GOBIN)/geth.aar\" to use the library."

@echo "Import \"$(GOBIN)/geth-sources.jar\" to add javadocs"
@echo "For more info see https://stackoverflow.com/questions/20994336/android-studio-how-to-attach-javadoc"

ios:
$(GORUN) build/ci.go xcode --local
@echo "Done building."
@@ -80,39 +80,59 @@ func (abi ABI) Pack(name string, args ...interface{}) ([]byte, error) {
return append(method.ID, arguments...), nil
}

// Unpack output in v according to the abi specification
func (abi ABI) Unpack(v interface{}, name string, data []byte) (err error) {
func (abi ABI) getArguments(name string, data []byte) (Arguments, error) {
// since there can't be naming collisions with contracts and events,
// we need to decide whether we're calling a method or an event
var args Arguments
if method, ok := abi.Methods[name]; ok {
if len(data)%32 != 0 {
return fmt.Errorf("abi: improperly formatted output: %s - Bytes: [%+v]", string(data), data)
return nil, fmt.Errorf("abi: improperly formatted output: %s - Bytes: [%+v]", string(data), data)
}
return method.Outputs.Unpack(v, data)
args = method.Outputs
}
if event, ok := abi.Events[name]; ok {
return event.Inputs.Unpack(v, data)
args = event.Inputs
}
return fmt.Errorf("abi: could not locate named method or event")
if args == nil {
return nil, errors.New("abi: could not locate named method or event")
}
return args, nil
}

// UnpackIntoMap unpacks a log into the provided map[string]interface{}
// Unpack unpacks the output according to the abi specification.
func (abi ABI) Unpack(name string, data []byte) ([]interface{}, error) {
args, err := abi.getArguments(name, data)
if err != nil {
return nil, err
}
return args.Unpack(data)
}

// UnpackIntoInterface unpacks the output in v according to the abi specification.
// It performs an additional copy. Please only use, if you want to unpack into a
// structure that does not strictly conform to the abi structure (e.g. has additional arguments)
func (abi ABI) UnpackIntoInterface(v interface{}, name string, data []byte) error {
args, err := abi.getArguments(name, data)
if err != nil {
return err
}
unpacked, err := args.Unpack(data)
if err != nil {
return err
}
return args.Copy(v, unpacked)
}

// UnpackIntoMap unpacks a log into the provided map[string]interface{}.
func (abi ABI) UnpackIntoMap(v map[string]interface{}, name string, data []byte) (err error) {
// since there can't be naming collisions with contracts and events,
// we need to decide whether we're calling a method or an event
if method, ok := abi.Methods[name]; ok {
if len(data)%32 != 0 {
return fmt.Errorf("abi: improperly formatted output")
}
return method.Outputs.UnpackIntoMap(v, data)
args, err := abi.getArguments(name, data)
if err != nil {
return err
}
if event, ok := abi.Events[name]; ok {
return event.Inputs.UnpackIntoMap(v, data)
}
return fmt.Errorf("abi: could not locate named method or event")
return args.UnpackIntoMap(v, data)
}

// UnmarshalJSON implements json.Unmarshaler interface
// UnmarshalJSON implements json.Unmarshaler interface.
func (abi *ABI) UnmarshalJSON(data []byte) error {
var fields []struct {
Type string
@@ -201,8 +221,8 @@ func (abi *ABI) overloadedEventName(rawName string) string {
return name
}

// MethodById looks up a method by the 4-byte id
// returns nil if none found
// MethodById looks up a method by the 4-byte id,
// returns nil if none found.
func (abi *ABI) MethodById(sigdata []byte) (*Method, error) {
if len(sigdata) < 4 {
return nil, fmt.Errorf("data too short (%d bytes) for abi method lookup", len(sigdata))
@@ -250,10 +270,10 @@ func UnpackRevert(data []byte) (string, error) {
if !bytes.Equal(data[:4], revertSelector) {
return "", errors.New("invalid data for unpacking")
}
var reason string
typ, _ := NewType("string", "", nil)
if err := (Arguments{{Type: typ}}).Unpack(&reason, data[4:]); err != nil {
unpacked, err := (Arguments{{Type: typ}}).Unpack(data[4:])
if err != nil {
return "", err
}
return reason, nil
return unpacked[0].(string), nil
}
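The hunks above rework the unpacking API in the abi package: ABI.Unpack no longer writes into a caller-supplied pointer but returns the decoded values as a []interface{}, while the new ABI.UnpackIntoInterface keeps the old copy-into-value behaviour via Arguments.Copy. A minimal migration sketch; the one-method ABI JSON, the fake 32-byte return data and the variable names are illustrative assumptions, not part of this diff:

```go
package main

import (
	"fmt"
	"math/big"
	"strings"

	"github.com/ethereum/go-ethereum/accounts/abi"
)

func main() {
	// Hypothetical one-method ABI, used only to exercise the new calls.
	const def = `[{"type":"function","name":"balance","outputs":[{"name":"amount","type":"uint256"}]}]`
	parsed, err := abi.JSON(strings.NewReader(def))
	if err != nil {
		panic(err)
	}
	// Stand-in for the raw return data of an eth_call: one uint256 word.
	output := make([]byte, 32)
	output[31] = 42

	// New style: Unpack returns the decoded values directly.
	values, err := parsed.Unpack("balance", output)
	if err != nil {
		panic(err)
	}
	fmt.Println(values[0].(*big.Int)) // 42

	// Old Unpack(&v, name, data) call sites become UnpackIntoInterface.
	var res struct{ Amount *big.Int }
	if err := parsed.UnpackIntoInterface(&res, "balance", output); err != nil {
		panic(err)
	}
	fmt.Println(res.Amount) // 42
}
```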
@@ -33,7 +33,7 @@ import (

const jsondata = `
[
{ "type" : "function", "name" : "", "stateMutability" : "view" },
{ "type" : "function", "name" : ""},
{ "type" : "function", "name" : "balance", "stateMutability" : "view" },
{ "type" : "function", "name" : "send", "inputs" : [ { "name" : "amount", "type" : "uint256" } ] },
{ "type" : "function", "name" : "test", "inputs" : [ { "name" : "number", "type" : "uint32" } ] },
@@ -88,7 +88,7 @@ var (
)

var methods = map[string]Method{
"": NewMethod("", "", Function, "view", false, false, nil, nil),
"": NewMethod("", "", Function, "", false, false, nil, nil),
"balance": NewMethod("balance", "balance", Function, "view", false, false, nil, nil),
"send": NewMethod("send", "send", Function, "", false, false, []Argument{{"amount", Uint256, false}}, nil),
"test": NewMethod("test", "test", Function, "", false, false, []Argument{{"number", Uint32, false}}, nil),
@@ -181,18 +181,15 @@ func TestConstructor(t *testing.T) {
if err != nil {
t.Error(err)
}
v := struct {
A *big.Int
B *big.Int
}{new(big.Int), new(big.Int)}
//abi.Unpack(&v, "", packed)
if err := abi.Constructor.Inputs.Unpack(&v, packed); err != nil {
unpacked, err := abi.Constructor.Inputs.Unpack(packed)
if err != nil {
t.Error(err)
}
if !reflect.DeepEqual(v.A, big.NewInt(1)) {

if !reflect.DeepEqual(unpacked[0], big.NewInt(1)) {
t.Error("Unable to pack/unpack from constructor")
}
if !reflect.DeepEqual(v.B, big.NewInt(2)) {
if !reflect.DeepEqual(unpacked[1], big.NewInt(2)) {
t.Error("Unable to pack/unpack from constructor")
}
}
@@ -743,7 +740,7 @@ func TestUnpackEvent(t *testing.T) {
}
var ev ReceivedEvent

err = abi.Unpack(&ev, "received", data)
err = abi.UnpackIntoInterface(&ev, "received", data)
if err != nil {
t.Error(err)
}
@@ -752,7 +749,7 @@ func TestUnpackEvent(t *testing.T) {
Sender common.Address
}
var receivedAddrEv ReceivedAddrEvent
err = abi.Unpack(&receivedAddrEv, "receivedAddr", data)
err = abi.UnpackIntoInterface(&receivedAddrEv, "receivedAddr", data)
if err != nil {
t.Error(err)
}
@@ -1092,7 +1089,7 @@ func TestDoubleDuplicateEventNames(t *testing.T) {
}

// TestUnnamedEventParam checks that an event with unnamed parameters is
// correctly handled
// correctly handled.
// The test runs the abi of the following contract.
// contract TestEvent {
// event send(uint256, uint256);
@@ -41,7 +41,7 @@ type ArgumentMarshaling struct {
Indexed bool
}

// UnmarshalJSON implements json.Unmarshaler interface
// UnmarshalJSON implements json.Unmarshaler interface.
func (argument *Argument) UnmarshalJSON(data []byte) error {
var arg ArgumentMarshaling
err := json.Unmarshal(data, &arg)
@@ -59,7 +59,7 @@ func (argument *Argument) UnmarshalJSON(data []byte) error {
return nil
}

// NonIndexed returns the arguments with indexed arguments filtered out
// NonIndexed returns the arguments with indexed arguments filtered out.
func (arguments Arguments) NonIndexed() Arguments {
var ret []Argument
for _, arg := range arguments {
@@ -70,37 +70,29 @@ func (arguments Arguments) NonIndexed() Arguments {
return ret
}

// isTuple returns true for non-atomic constructs, like (uint,uint) or uint[]
// isTuple returns true for non-atomic constructs, like (uint,uint) or uint[].
func (arguments Arguments) isTuple() bool {
return len(arguments) > 1
}

// Unpack performs the operation hexdata -> Go format
func (arguments Arguments) Unpack(v interface{}, data []byte) error {
// Unpack performs the operation hexdata -> Go format.
func (arguments Arguments) Unpack(data []byte) ([]interface{}, error) {
if len(data) == 0 {
if len(arguments) != 0 {
return fmt.Errorf("abi: attempting to unmarshall an empty string while arguments are expected")
return nil, fmt.Errorf("abi: attempting to unmarshall an empty string while arguments are expected")
}
return nil // Nothing to unmarshal, return
// Nothing to unmarshal, return default variables
nonIndexedArgs := arguments.NonIndexed()
defaultVars := make([]interface{}, len(nonIndexedArgs))
for index, arg := range nonIndexedArgs {
defaultVars[index] = reflect.New(arg.Type.GetType())
}
return defaultVars, nil
}
// make sure the passed value is arguments pointer
if reflect.Ptr != reflect.ValueOf(v).Kind() {
return fmt.Errorf("abi: Unpack(non-pointer %T)", v)
}
marshalledValues, err := arguments.UnpackValues(data)
if err != nil {
return err
}
if len(marshalledValues) == 0 {
return fmt.Errorf("abi: Unpack(no-values unmarshalled %T)", v)
}
if arguments.isTuple() {
return arguments.unpackTuple(v, marshalledValues)
}
return arguments.unpackAtomic(v, marshalledValues[0])
return arguments.UnpackValues(data)
}

// UnpackIntoMap performs the operation hexdata -> mapping of argument name to argument value
// UnpackIntoMap performs the operation hexdata -> mapping of argument name to argument value.
func (arguments Arguments) UnpackIntoMap(v map[string]interface{}, data []byte) error {
// Make sure map is not nil
if v == nil {
@@ -122,8 +114,26 @@ func (arguments Arguments) UnpackIntoMap(v map[string]interface{}, data []byte)
return nil
}

// Copy performs the operation go format -> provided struct.
func (arguments Arguments) Copy(v interface{}, values []interface{}) error {
// make sure the passed value is arguments pointer
if reflect.Ptr != reflect.ValueOf(v).Kind() {
return fmt.Errorf("abi: Unpack(non-pointer %T)", v)
}
if len(values) == 0 {
if len(arguments) != 0 {
return fmt.Errorf("abi: attempting to copy no values while %d arguments are expected", len(arguments))
}
return nil // Nothing to copy, return
}
if arguments.isTuple() {
return arguments.copyTuple(v, values)
}
return arguments.copyAtomic(v, values[0])
}

// unpackAtomic unpacks ( hexdata -> go ) a single value
func (arguments Arguments) unpackAtomic(v interface{}, marshalledValues interface{}) error {
func (arguments Arguments) copyAtomic(v interface{}, marshalledValues interface{}) error {
dst := reflect.ValueOf(v).Elem()
src := reflect.ValueOf(marshalledValues)

@@ -133,8 +143,8 @@ func (arguments Arguments) unpackAtomic(v interface{}, marshalledValues interfac
return set(dst, src)
}

// unpackTuple unpacks ( hexdata -> go ) a batch of values.
func (arguments Arguments) unpackTuple(v interface{}, marshalledValues []interface{}) error {
// copyTuple copies a batch of values from marshalledValues to v.
func (arguments Arguments) copyTuple(v interface{}, marshalledValues []interface{}) error {
value := reflect.ValueOf(v).Elem()
nonIndexedArgs := arguments.NonIndexed()

@@ -207,13 +217,13 @@ func (arguments Arguments) UnpackValues(data []byte) ([]interface{}, error) {
return retval, nil
}

// PackValues performs the operation Go format -> Hexdata
// It is the semantic opposite of UnpackValues
// PackValues performs the operation Go format -> Hexdata.
// It is the semantic opposite of UnpackValues.
func (arguments Arguments) PackValues(args []interface{}) ([]byte, error) {
return arguments.Pack(args...)
}

// Pack performs the operation Go format -> Hexdata
// Pack performs the operation Go format -> Hexdata.
func (arguments Arguments) Pack(args ...interface{}) ([]byte, error) {
// Make sure arguments match up and pack them
abiArgs := arguments
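At the Arguments level the same split applies: Unpack now only decodes (hexdata to []interface{}), and the new Copy helper performs the reflection-based assignment that unpackAtomic/unpackTuple used to do. A small round-trip sketch under assumed argument names and values:

```go
package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/accounts/abi"
	"github.com/ethereum/go-ethereum/common"
)

func main() {
	// Two illustrative arguments: (uint256 id, address owner).
	uint256T, _ := abi.NewType("uint256", "", nil)
	addressT, _ := abi.NewType("address", "", nil)
	args := abi.Arguments{
		{Name: "id", Type: uint256T},
		{Name: "owner", Type: addressT},
	}

	// Round-trip: pack Go values, then decode them with the new two-step API.
	packed, err := args.Pack(big.NewInt(7), common.HexToAddress("0x01"))
	if err != nil {
		panic(err)
	}
	values, err := args.Unpack(packed) // hexdata -> []interface{}
	if err != nil {
		panic(err)
	}

	// Copy performs the old reflection-based assignment into a caller struct.
	var out struct {
		Id    *big.Int
		Owner common.Address
	}
	if err := args.Copy(&out, values); err != nil {
		panic(err)
	}
	fmt.Println(out.Id, out.Owner)
}
```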
@@ -45,7 +45,7 @@ func NewTransactor(keyin io.Reader, passphrase string) (*TransactOpts, error) {
}

// NewKeyStoreTransactor is a utility method to easily create a transaction signer from
// an decrypted key from a keystore
// a decrypted key from a keystore.
func NewKeyStoreTransactor(keystore *keystore.KeyStore, account accounts.Account) (*TransactOpts, error) {
return &TransactOpts{
From: account.Address,
@@ -41,7 +41,7 @@ var (
ErrNoCodeAfterDeploy = errors.New("no contract code after deployment")
)

// ContractCaller defines the methods needed to allow operating with contract on a read
// ContractCaller defines the methods needed to allow operating with a contract on a read
// only basis.
type ContractCaller interface {
// CodeAt returns the code of the given account. This is needed to differentiate
@@ -62,8 +62,8 @@ type PendingContractCaller interface {
PendingCallContract(ctx context.Context, call ethereum.CallMsg) ([]byte, error)
}

// ContractTransactor defines the methods needed to allow operating with contract
// on a write only basis. Beside the transacting method, the remainder are helpers
// ContractTransactor defines the methods needed to allow operating with a contract
// on a write only basis. Besides the transacting method, the remainder are helpers
// used when the user does not provide some needed values, but rather leaves it up
// to the transactor to decide.
type ContractTransactor interface {
@@ -45,7 +45,7 @@ import (
"github.com/ethereum/go-ethereum/rpc"
)

// This nil assignment ensures compile time that SimulatedBackend implements bind.ContractBackend.
// This nil assignment ensures at compile time that SimulatedBackend implements bind.ContractBackend.
var _ bind.ContractBackend = (*SimulatedBackend)(nil)

var (
@@ -55,7 +55,7 @@ var (
)

// SimulatedBackend implements bind.ContractBackend, simulating a blockchain in
// the background. Its main purpose is to allow easily testing contract bindings.
// the background. Its main purpose is to allow for easy testing of contract bindings.
// Simulated backend implements the following interfaces:
// ChainReader, ChainStateReader, ContractBackend, ContractCaller, ContractFilterer, ContractTransactor,
// DeployBackend, GasEstimator, GasPricer, LogFilterer, PendingContractCaller, TransactionReader, and TransactionSender
@@ -123,10 +123,10 @@ func (b *SimulatedBackend) Rollback() {

func (b *SimulatedBackend) rollback() {
blocks, _ := core.GenerateChain(b.config, b.blockchain.CurrentBlock(), ethash.NewFaker(), b.database, 1, func(int, *core.BlockGen) {})
statedb, _ := b.blockchain.State()
stateDB, _ := b.blockchain.State()

b.pendingBlock = blocks[0]
b.pendingState, _ = state.New(b.pendingBlock.Root(), statedb.Database(), nil)
b.pendingState, _ = state.New(b.pendingBlock.Root(), stateDB.Database(), nil)
}

// stateByBlockNumber retrieves a state by a given blocknumber.
@@ -146,12 +146,12 @@ func (b *SimulatedBackend) CodeAt(ctx context.Context, contract common.Address,
b.mu.Lock()
defer b.mu.Unlock()

statedb, err := b.stateByBlockNumber(ctx, blockNumber)
stateDB, err := b.stateByBlockNumber(ctx, blockNumber)
if err != nil {
return nil, err
}

return statedb.GetCode(contract), nil
return stateDB.GetCode(contract), nil
}

// BalanceAt returns the wei balance of a certain account in the blockchain.
@@ -159,12 +159,12 @@ func (b *SimulatedBackend) BalanceAt(ctx context.Context, contract common.Addres
b.mu.Lock()
defer b.mu.Unlock()

statedb, err := b.stateByBlockNumber(ctx, blockNumber)
stateDB, err := b.stateByBlockNumber(ctx, blockNumber)
if err != nil {
return nil, err
}

return statedb.GetBalance(contract), nil
return stateDB.GetBalance(contract), nil
}

// NonceAt returns the nonce of a certain account in the blockchain.
@@ -172,12 +172,12 @@ func (b *SimulatedBackend) NonceAt(ctx context.Context, contract common.Address,
b.mu.Lock()
defer b.mu.Unlock()

statedb, err := b.stateByBlockNumber(ctx, blockNumber)
stateDB, err := b.stateByBlockNumber(ctx, blockNumber)
if err != nil {
return 0, err
}

return statedb.GetNonce(contract), nil
return stateDB.GetNonce(contract), nil
}

// StorageAt returns the value of key in the storage of an account in the blockchain.
@@ -185,12 +185,12 @@ func (b *SimulatedBackend) StorageAt(ctx context.Context, contract common.Addres
b.mu.Lock()
defer b.mu.Unlock()

statedb, err := b.stateByBlockNumber(ctx, blockNumber)
stateDB, err := b.stateByBlockNumber(ctx, blockNumber)
if err != nil {
return nil, err
}

val := statedb.GetState(contract, key)
val := stateDB.GetState(contract, key)
return val[:], nil
}

@@ -222,7 +222,7 @@ func (b *SimulatedBackend) TransactionByHash(ctx context.Context, txHash common.
return nil, false, ethereum.NotFound
}

// BlockByHash retrieves a block based on the block hash
// BlockByHash retrieves a block based on the block hash.
func (b *SimulatedBackend) BlockByHash(ctx context.Context, hash common.Hash) (*types.Block, error) {
b.mu.Lock()
defer b.mu.Unlock()
@@ -293,7 +293,7 @@ func (b *SimulatedBackend) HeaderByNumber(ctx context.Context, block *big.Int) (
return b.blockchain.GetHeaderByNumber(uint64(block.Int64())), nil
}

// TransactionCount returns the number of transactions in a given block
// TransactionCount returns the number of transactions in a given block.
func (b *SimulatedBackend) TransactionCount(ctx context.Context, blockHash common.Hash) (uint, error) {
b.mu.Lock()
defer b.mu.Unlock()
@@ -310,7 +310,7 @@ func (b *SimulatedBackend) TransactionCount(ctx context.Context, blockHash commo
return uint(block.Transactions().Len()), nil
}

// TransactionInBlock returns the transaction for a specific block at a specific index
// TransactionInBlock returns the transaction for a specific block at a specific index.
func (b *SimulatedBackend) TransactionInBlock(ctx context.Context, blockHash common.Hash, index uint) (*types.Transaction, error) {
b.mu.Lock()
defer b.mu.Unlock()
@@ -357,14 +357,14 @@ func newRevertError(result *core.ExecutionResult) *revertError {
}
}

// revertError is an API error that encompassas an EVM revertal with JSON error
// revertError is an API error that encompasses an EVM revert with JSON error
// code and a binary data blob.
type revertError struct {
error
reason string // revert reason hex encoded
}

// ErrorCode returns the JSON error code for a revertal.
// ErrorCode returns the JSON error code for a revert.
// See: https://github.com/ethereum/wiki/wiki/JSON-RPC-Error-Codes-Improvement-Proposal
func (e *revertError) ErrorCode() int {
return 3
@@ -383,11 +383,11 @@ func (b *SimulatedBackend) CallContract(ctx context.Context, call ethereum.CallM
if blockNumber != nil && blockNumber.Cmp(b.blockchain.CurrentBlock().Number()) != 0 {
return nil, errBlockNumberUnsupported
}
state, err := b.blockchain.State()
stateDB, err := b.blockchain.State()
if err != nil {
return nil, err
}
res, err := b.callContract(ctx, call, b.blockchain.CurrentBlock(), state)
res, err := b.callContract(ctx, call, b.blockchain.CurrentBlock(), stateDB)
if err != nil {
return nil, err
}
@@ -525,7 +525,7 @@ func (b *SimulatedBackend) EstimateGas(ctx context.Context, call ethereum.CallMs

// callContract implements common code between normal and pending contract calls.
// state is modified during execution, make sure to copy it if necessary.
func (b *SimulatedBackend) callContract(ctx context.Context, call ethereum.CallMsg, block *types.Block, statedb *state.StateDB) (*core.ExecutionResult, error) {
func (b *SimulatedBackend) callContract(ctx context.Context, call ethereum.CallMsg, block *types.Block, stateDB *state.StateDB) (*core.ExecutionResult, error) {
// Ensure message is initialized properly.
if call.GasPrice == nil {
call.GasPrice = big.NewInt(1)
@@ -537,18 +537,18 @@ func (b *SimulatedBackend) callContract(ctx context.Context, call ethereum.CallM
call.Value = new(big.Int)
}
// Set infinite balance to the fake caller account.
from := statedb.GetOrNewStateObject(call.From)
from := stateDB.GetOrNewStateObject(call.From)
from.SetBalance(math.MaxBig256)
// Execute the call.
msg := callmsg{call}
msg := callMsg{call}

evmContext := core.NewEVMContext(msg, block.Header(), b.blockchain, nil)
// Create a new environment which holds all relevant information
// about the transaction and calling mechanisms.
vmenv := vm.NewEVM(evmContext, statedb, b.config, vm.Config{})
gaspool := new(core.GasPool).AddGas(math.MaxUint64)
vmEnv := vm.NewEVM(evmContext, stateDB, b.config, vm.Config{})
gasPool := new(core.GasPool).AddGas(math.MaxUint64)

return core.NewStateTransition(vmenv, msg, gaspool).TransitionDb()
return core.NewStateTransition(vmEnv, msg, gasPool).TransitionDb()
}

// SendTransaction updates the pending block to include the given transaction.
@@ -572,10 +572,10 @@ func (b *SimulatedBackend) SendTransaction(ctx context.Context, tx *types.Transa
}
block.AddTxWithChain(b.blockchain, tx)
})
statedb, _ := b.blockchain.State()
stateDB, _ := b.blockchain.State()

b.pendingBlock = blocks[0]
b.pendingState, _ = state.New(b.pendingBlock.Root(), statedb.Database(), nil)
b.pendingState, _ = state.New(b.pendingBlock.Root(), stateDB.Database(), nil)
return nil
}

@@ -589,7 +589,7 @@ func (b *SimulatedBackend) FilterLogs(ctx context.Context, query ethereum.Filter
// Block filter requested, construct a single-shot filter
filter = filters.NewBlockFilter(&filterBackend{b.database, b.blockchain}, *query.BlockHash, query.Addresses, query.Topics)
} else {
// Initialize unset filter boundaried to run from genesis to chain head
// Initialize unset filter boundaries to run from genesis to chain head
from := int64(0)
if query.FromBlock != nil {
from = query.FromBlock.Int64()
@@ -607,8 +607,8 @@ func (b *SimulatedBackend) FilterLogs(ctx context.Context, query ethereum.Filter
return nil, err
}
res := make([]types.Log, len(logs))
for i, log := range logs {
res[i] = *log
for i, nLog := range logs {
res[i] = *nLog
}
return res, nil
}
@@ -629,9 +629,9 @@ func (b *SimulatedBackend) SubscribeFilterLogs(ctx context.Context, query ethere
for {
select {
case logs := <-sink:
for _, log := range logs {
for _, nlog := range logs {
select {
case ch <- *log:
case ch <- *nlog:
case err := <-sub.Err():
return err
case <-quit:
@@ -647,7 +647,7 @@ func (b *SimulatedBackend) SubscribeFilterLogs(ctx context.Context, query ethere
}), nil
}

// SubscribeNewHead returns an event subscription for a new header
// SubscribeNewHead returns an event subscription for a new header.
func (b *SimulatedBackend) SubscribeNewHead(ctx context.Context, ch chan<- *types.Header) (ethereum.Subscription, error) {
// subscribe to a new head
sink := make(chan *types.Header)
@@ -675,20 +675,22 @@ func (b *SimulatedBackend) SubscribeNewHead(ctx context.Context, ch chan<- *type
}

// AdjustTime adds a time shift to the simulated clock.
// It can only be called on empty blocks.
func (b *SimulatedBackend) AdjustTime(adjustment time.Duration) error {
b.mu.Lock()
defer b.mu.Unlock()

if len(b.pendingBlock.Transactions()) != 0 {
return errors.New("Could not adjust time on non-empty block")
}

blocks, _ := core.GenerateChain(b.config, b.blockchain.CurrentBlock(), ethash.NewFaker(), b.database, 1, func(number int, block *core.BlockGen) {
for _, tx := range b.pendingBlock.Transactions() {
block.AddTx(tx)
}
block.OffsetTime(int64(adjustment.Seconds()))
})
statedb, _ := b.blockchain.State()
stateDB, _ := b.blockchain.State()

b.pendingBlock = blocks[0]
b.pendingState, _ = state.New(b.pendingBlock.Root(), statedb.Database(), nil)
b.pendingState, _ = state.New(b.pendingBlock.Root(), stateDB.Database(), nil)

return nil
}
@@ -698,19 +700,19 @@ func (b *SimulatedBackend) Blockchain() *core.BlockChain {
return b.blockchain
}

// callmsg implements core.Message to allow passing it as a transaction simulator.
type callmsg struct {
// callMsg implements core.Message to allow passing it as a transaction simulator.
type callMsg struct {
ethereum.CallMsg
}

func (m callmsg) From() common.Address { return m.CallMsg.From }
func (m callmsg) Nonce() uint64 { return 0 }
func (m callmsg) CheckNonce() bool { return false }
func (m callmsg) To() *common.Address { return m.CallMsg.To }
func (m callmsg) GasPrice() *big.Int { return m.CallMsg.GasPrice }
func (m callmsg) Gas() uint64 { return m.CallMsg.Gas }
func (m callmsg) Value() *big.Int { return m.CallMsg.Value }
func (m callmsg) Data() []byte { return m.CallMsg.Data }
func (m callMsg) From() common.Address { return m.CallMsg.From }
func (m callMsg) Nonce() uint64 { return 0 }
func (m callMsg) CheckNonce() bool { return false }
func (m callMsg) To() *common.Address { return m.CallMsg.To }
func (m callMsg) GasPrice() *big.Int { return m.CallMsg.GasPrice }
func (m callMsg) Gas() uint64 { return m.CallMsg.Gas }
func (m callMsg) Value() *big.Int { return m.CallMsg.Value }
func (m callMsg) Data() []byte { return m.CallMsg.Data }

// filterBackend implements filters.Backend to support filtering for logs without
// taking bloom-bits acceleration structures into account.
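Besides the statedb/callmsg renames, the hunks above document that SimulatedBackend.AdjustTime only works while the pending block is empty. A minimal usage sketch; the funded key, balance and gas limit are arbitrary illustrative values:

```go
package main

import (
	"log"
	"math/big"
	"time"

	"github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/params"
)

func main() {
	key, err := crypto.GenerateKey()
	if err != nil {
		log.Fatal(err)
	}
	addr := crypto.PubkeyToAddress(key.PublicKey)

	// One pre-funded account; balance and gas limit are arbitrary test values.
	sim := backends.NewSimulatedBackend(core.GenesisAlloc{addr: {Balance: big.NewInt(params.Ether)}}, 8000000)
	defer sim.Close()

	// AdjustTime only succeeds while the pending block is empty, so commit any
	// pending transactions first, then shift the simulated clock and seal a
	// block that carries the adjusted timestamp.
	sim.Commit()
	if err := sim.AdjustTime(time.Hour); err != nil {
		log.Fatal(err)
	}
	sim.Commit()
}
```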
@@ -129,8 +129,8 @@ func TestNewSimulatedBackend(t *testing.T) {
t.Errorf("expected sim blockchain config to equal params.AllEthashProtocolChanges, got %v", sim.config)
}

statedb, _ := sim.blockchain.State()
bal := statedb.GetBalance(testAddr)
stateDB, _ := sim.blockchain.State()
bal := stateDB.GetBalance(testAddr)
if bal.Cmp(expectedBal) != 0 {
t.Errorf("expected balance for test address not received. expected: %v actual: %v", expectedBal, bal)
}
@@ -143,8 +143,7 @@ func TestSimulatedBackend_AdjustTime(t *testing.T) {
defer sim.Close()

prevTime := sim.pendingBlock.Time()
err := sim.AdjustTime(time.Second)
if err != nil {
if err := sim.AdjustTime(time.Second); err != nil {
t.Error(err)
}
newTime := sim.pendingBlock.Time()
@@ -154,6 +153,44 @@ func TestSimulatedBackend_AdjustTime(t *testing.T) {
}
}

func TestNewSimulatedBackend_AdjustTimeFail(t *testing.T) {
testAddr := crypto.PubkeyToAddress(testKey.PublicKey)
sim := simTestBackend(testAddr)
// Create tx and send
tx := types.NewTransaction(0, testAddr, big.NewInt(1000), params.TxGas, big.NewInt(1), nil)
signedTx, err := types.SignTx(tx, types.HomesteadSigner{}, testKey)
if err != nil {
t.Errorf("could not sign tx: %v", err)
}
sim.SendTransaction(context.Background(), signedTx)
// AdjustTime should fail on non-empty block
if err := sim.AdjustTime(time.Second); err == nil {
t.Error("Expected adjust time to error on non-empty block")
}
sim.Commit()

prevTime := sim.pendingBlock.Time()
if err := sim.AdjustTime(time.Minute); err != nil {
t.Error(err)
}
newTime := sim.pendingBlock.Time()
if newTime-prevTime != uint64(time.Minute.Seconds()) {
t.Errorf("adjusted time not equal to a minute. prev: %v, new: %v", prevTime, newTime)
}
// Put a transaction after adjusting time
tx2 := types.NewTransaction(1, testAddr, big.NewInt(1000), params.TxGas, big.NewInt(1), nil)
signedTx2, err := types.SignTx(tx2, types.HomesteadSigner{}, testKey)
if err != nil {
t.Errorf("could not sign tx: %v", err)
}
sim.SendTransaction(context.Background(), signedTx2)
sim.Commit()
newTime = sim.pendingBlock.Time()
if newTime-prevTime >= uint64(time.Minute.Seconds()) {
t.Errorf("time adjusted, but shouldn't be: prev: %v, new: %v", prevTime, newTime)
}
}

func TestSimulatedBackend_BalanceAt(t *testing.T) {
testAddr := crypto.PubkeyToAddress(testKey.PublicKey)
expectedBal := big.NewInt(10000000000)
@@ -484,7 +521,7 @@ func TestSimulatedBackend_EstimateGasWithPrice(t *testing.T) {
sim := NewSimulatedBackend(core.GenesisAlloc{addr: {Balance: big.NewInt(params.Ether*2 + 2e17)}}, 10000000)
defer sim.Close()

receipant := common.HexToAddress("deadbeef")
recipient := common.HexToAddress("deadbeef")
var cases = []struct {
name string
message ethereum.CallMsg
@@ -493,7 +530,7 @@ func TestSimulatedBackend_EstimateGasWithPrice(t *testing.T) {
}{
{"EstimateWithoutPrice", ethereum.CallMsg{
From: addr,
To: &receipant,
To: &recipient,
Gas: 0,
GasPrice: big.NewInt(0),
Value: big.NewInt(1000),
@@ -502,7 +539,7 @@ func TestSimulatedBackend_EstimateGasWithPrice(t *testing.T) {

{"EstimateWithPrice", ethereum.CallMsg{
From: addr,
To: &receipant,
To: &recipient,
Gas: 0,
GasPrice: big.NewInt(1000),
Value: big.NewInt(1000),
@@ -511,7 +548,7 @@ func TestSimulatedBackend_EstimateGasWithPrice(t *testing.T) {

{"EstimateWithVeryHighPrice", ethereum.CallMsg{
From: addr,
To: &receipant,
To: &recipient,
Gas: 0,
GasPrice: big.NewInt(1e14), // gascost = 2.1ether
Value: big.NewInt(1e17), // the remaining balance for fee is 2.1ether
@@ -520,7 +557,7 @@ func TestSimulatedBackend_EstimateGasWithPrice(t *testing.T) {

{"EstimateWithSuperhighPrice", ethereum.CallMsg{
From: addr,
To: &receipant,
To: &recipient,
Gas: 0,
GasPrice: big.NewInt(2e14), // gascost = 4.2ether
Value: big.NewInt(1000),
@@ -117,11 +117,14 @@ func DeployContract(opts *TransactOpts, abi abi.ABI, bytecode []byte, backend Co
// sets the output to result. The result type might be a single field for simple
// returns, a slice of interfaces for anonymous returns and a struct for named
// returns.
func (c *BoundContract) Call(opts *CallOpts, result interface{}, method string, params ...interface{}) error {
func (c *BoundContract) Call(opts *CallOpts, results *[]interface{}, method string, params ...interface{}) error {
// Don't crash on a lazy user
if opts == nil {
opts = new(CallOpts)
}
if results == nil {
results = new([]interface{})
}
// Pack the input, call and unpack the results
input, err := c.abi.Pack(method, params...)
if err != nil {
@@ -158,10 +161,14 @@ func (c *BoundContract) Call(opts *CallOpts, result interface{}, method string,
}
}
}
if err != nil {

if len(*results) == 0 {
res, err := c.abi.Unpack(method, output)
*results = res
return err
}
return c.abi.Unpack(result, method, output)
res := *results
return c.abi.UnpackIntoInterface(res[0], method, output)
}

// Transact invokes the (paid) contract method with params as input values.
@@ -177,7 +184,7 @@ func (c *BoundContract) Transact(opts *TransactOpts, method string, params ...in
}

// RawTransact initiates a transaction with the given raw calldata as the input.
// It's usually used to initiates transaction for invoking **Fallback** function.
// It's usually used to initiate transactions for invoking **Fallback** function.
func (c *BoundContract) RawTransact(opts *TransactOpts, calldata []byte) (*types.Transaction, error) {
// todo(rjl493456442) check the method is payable or not,
// reject invalid transaction at the first place
@@ -339,7 +346,7 @@ func (c *BoundContract) WatchLogs(opts *WatchOpts, name string, query ...[]inter
// UnpackLog unpacks a retrieved log into the provided output structure.
func (c *BoundContract) UnpackLog(out interface{}, event string, log types.Log) error {
if len(log.Data) > 0 {
if err := c.abi.Unpack(out, event, log.Data); err != nil {
if err := c.abi.UnpackIntoInterface(out, event, log.Data); err != nil {
return err
}
}
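The hunks above change BoundContract.Call to collect results into a caller-supplied *[]interface{} (nil is tolerated and a slice is allocated internally); an empty slice is filled via the new abi.Unpack, while a pre-seeded one is decoded into its first element with UnpackIntoInterface. A sketch of a manual call under the new signature; the contract handle and the "balance" method name are hypothetical:

```go
package contractutil

import (
	"math/big"

	"github.com/ethereum/go-ethereum/accounts/abi/bind"
)

// readBalance shows the post-change calling convention of BoundContract.Call:
// results are collected into a caller-supplied []interface{} instead of being
// written through a typed output pointer. The "balance" method name is an
// illustrative assumption about the bound contract's ABI.
func readBalance(contract *bind.BoundContract) (*big.Int, error) {
	var out []interface{}
	if err := contract.Call(&bind.CallOpts{}, &out, "balance"); err != nil {
		return nil, err
	}
	return out[0].(*big.Int), nil
}
```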
@@ -71,11 +71,10 @@ func TestPassingBlockNumber(t *testing.T) {
},
},
}, mc, nil, nil)
var ret string

blockNumber := big.NewInt(42)

bc.Call(&bind.CallOpts{BlockNumber: blockNumber}, &ret, "something")
bc.Call(&bind.CallOpts{BlockNumber: blockNumber}, nil, "something")

if mc.callContractBlockNumber != blockNumber {
t.Fatalf("CallContract() was not passed the block number")
@@ -85,7 +84,7 @@ func TestPassingBlockNumber(t *testing.T) {
t.Fatalf("CodeAt() was not passed the block number")
}

bc.Call(&bind.CallOpts{}, &ret, "something")
bc.Call(&bind.CallOpts{}, nil, "something")

if mc.callContractBlockNumber != nil {
t.Fatalf("CallContract() was passed a block number when it should not have been")
@@ -95,7 +94,7 @@ func TestPassingBlockNumber(t *testing.T) {
t.Fatalf("CodeAt() was passed a block number when it should not have been")
}

bc.Call(&bind.CallOpts{BlockNumber: blockNumber, Pending: true}, &ret, "something")
bc.Call(&bind.CallOpts{BlockNumber: blockNumber, Pending: true}, nil, "something")

if !mc.pendingCallContractCalled {
t.Fatalf("CallContract() was not passed the block number")
@@ -52,7 +52,7 @@ func Bind(types []string, abis []string, bytecodes []string, fsigs []map[string]
// contracts is the map of each individual contract requested binding
contracts = make(map[string]*tmplContract)

// structs is the map of all reclared structs shared by passed contracts.
// structs is the map of all redeclared structs shared by passed contracts.
structs = make(map[string]*tmplStruct)

// isLib is the map used to flag each encountered library as such
@@ -80,10 +80,10 @@ func Bind(types []string, abis []string, bytecodes []string, fsigs []map[string]
fallback *tmplMethod
receive *tmplMethod

// identifiers are used to detect duplicated identifier of function
// and event. For all calls, transacts and events, abigen will generate
// identifiers are used to detect duplicated identifiers of functions
// and events. For all calls, transacts and events, abigen will generate
// corresponding bindings. However we have to ensure there is no
// identifier coliision in the bindings of these categories.
// identifier collisions in the bindings of these categories.
callIdentifiers = make(map[string]bool)
transactIdentifiers = make(map[string]bool)
eventIdentifiers = make(map[string]bool)
@@ -246,7 +246,7 @@ var bindType = map[Lang]func(kind abi.Type, structs map[string]*tmplStruct) stri
LangJava: bindTypeJava,
}

// bindBasicTypeGo converts basic solidity types(except array, slice and tuple) to Go one.
// bindBasicTypeGo converts basic solidity types(except array, slice and tuple) to Go ones.
func bindBasicTypeGo(kind abi.Type) string {
switch kind.T {
case abi.AddressTy:
@@ -286,7 +286,7 @@ func bindTypeGo(kind abi.Type, structs map[string]*tmplStruct) string {
}
}

// bindBasicTypeJava converts basic solidity types(except array, slice and tuple) to Java one.
// bindBasicTypeJava converts basic solidity types(except array, slice and tuple) to Java ones.
func bindBasicTypeJava(kind abi.Type) string {
switch kind.T {
case abi.AddressTy:
@@ -330,7 +330,7 @@ func bindBasicTypeJava(kind abi.Type) string {
}

// pluralizeJavaType explicitly converts multidimensional types to predefined
// type in go side.
// types in go side.
func pluralizeJavaType(typ string) string {
switch typ {
case "boolean":
@@ -369,7 +369,7 @@ var bindTopicType = map[Lang]func(kind abi.Type, structs map[string]*tmplStruct)
}

// bindTopicTypeGo converts a Solidity topic type to a Go one. It is almost the same
// funcionality as for simple types, but dynamic types get converted to hashes.
// functionality as for simple types, but dynamic types get converted to hashes.
func bindTopicTypeGo(kind abi.Type, structs map[string]*tmplStruct) string {
bound := bindTypeGo(kind, structs)

@@ -386,7 +386,7 @@ func bindTopicTypeGo(kind abi.Type, structs map[string]*tmplStruct) string {
}

// bindTopicTypeJava converts a Solidity topic type to a Java one. It is almost the same
// funcionality as for simple types, but dynamic types get converted to hashes.
// functionality as for simple types, but dynamic types get converted to hashes.
func bindTopicTypeJava(kind abi.Type, structs map[string]*tmplStruct) string {
bound := bindTypeJava(kind, structs)

@@ -394,7 +394,7 @@ func bindTopicTypeJava(kind abi.Type, structs map[string]*tmplStruct) string {
// parameters that are not value types i.e. arrays and structs are not
// stored directly but instead a keccak256-hash of an encoding is stored.
//
// We only convert stringS and bytes to hash, still need to deal with
// We only convert strings and bytes to hash, still need to deal with
// array(both fixed-size and dynamic-size) and struct.
if bound == "String" || bound == "byte[]" {
bound = "Hash"
@@ -415,7 +415,7 @@ var bindStructType = map[Lang]func(kind abi.Type, structs map[string]*tmplStruct
func bindStructTypeGo(kind abi.Type, structs map[string]*tmplStruct) string {
switch kind.T {
case abi.TupleTy:
// We compose raw struct name and canonical parameter expression
// We compose a raw struct name and a canonical parameter expression
// together here. The reason is before solidity v0.5.11, kind.TupleRawName
// is empty, so we use canonical parameter expression to distinguish
// different struct definition. From the consideration of backward
@@ -454,7 +454,7 @@ func bindStructTypeGo(kind abi.Type, structs map[string]*tmplStruct) string {
func bindStructTypeJava(kind abi.Type, structs map[string]*tmplStruct) string {
switch kind.T {
case abi.TupleTy:
// We compose raw struct name and canonical parameter expression
// We compose a raw struct name and a canonical parameter expression
// together here. The reason is before solidity v0.5.11, kind.TupleRawName
// is empty, so we use canonical parameter expression to distinguish
// different struct definition. From the consideration of backward
@@ -486,7 +486,7 @@ func bindStructTypeJava(kind abi.Type, structs map[string]*tmplStruct) string {
}

// namedType is a set of functions that transform language specific types to
// named versions that my be used inside method names.
// named versions that may be used inside method names.
var namedType = map[Lang]func(string, abi.Type) string{
LangGo: func(string, abi.Type) string { panic("this shouldn't be needed") },
LangJava: namedTypeJava,
@@ -528,7 +528,7 @@ func alias(aliases map[string]string, n string) string {
}

// methodNormalizer is a name transformer that modifies Solidity method names to
// conform to target language naming concentions.
// conform to target language naming conventions.
var methodNormalizer = map[Lang]func(string) string{
LangGo: abi.ToCamelCase,
LangJava: decapitalise,
@@ -1696,11 +1696,11 @@ func TestGolangBindings(t *testing.T) {
t.Skip("go sdk not found for testing")
}
// Create a temporary workspace for the test suite
ws, err := ioutil.TempDir("", "")
ws, err := ioutil.TempDir("", "binding-test")
if err != nil {
t.Fatalf("failed to create temporary workspace: %v", err)
}
defer os.RemoveAll(ws)
//defer os.RemoveAll(ws)

pkg := filepath.Join(ws, "bindtest")
if err = os.MkdirAll(pkg, 0700); err != nil {
@@ -30,7 +30,7 @@ type tmplData struct {
type tmplContract struct {
Type string // Type name of the main contract binding
InputABI string // JSON ABI used as the input to generate the binding from
InputBin string // Optional EVM bytecode used to denetare deploy code from
InputBin string // Optional EVM bytecode used to generate deploy code from
FuncSigs map[string]string // Optional map: string signature -> 4-byte signature
Constructor abi.Method // Contract constructor for deploy parametrization
Calls map[string]*tmplMethod // Contract calls that only read state data
@@ -50,7 +50,8 @@ type tmplMethod struct {
Structured bool // Whether the returns should be accumulated into a struct
}

// tmplEvent is a wrapper around an a
// tmplEvent is a wrapper around an abi.Event that contains a few preprocessed
// and cached data fields.
type tmplEvent struct {
Original abi.Event // Original event as parsed by the abi package
Normalized abi.Event // Normalized version of the parsed fields
@@ -64,7 +65,7 @@ type tmplField struct {
SolKind abi.Type // Raw abi type information
}

// tmplStruct is a wrapper around an abi.tuple contains an auto-generated
// tmplStruct is a wrapper around an abi.tuple and contains an auto-generated
// struct name.
type tmplStruct struct {
Name string // Auto-generated struct name(before solidity v0.5.11) or raw name.
@@ -78,8 +79,8 @@ var tmplSource = map[Lang]string{
LangJava: tmplSourceJava,
}

// tmplSourceGo is the Go source template use to generate the contract binding
// based on.
// tmplSourceGo is the Go source template that the generated Go contract binding
// is based on.
const tmplSourceGo = `
// Code generated - DO NOT EDIT.
// This file is a generated binding and any manual changes will be lost.
@@ -260,7 +261,7 @@ var (
|
||||
// sets the output to result. The result type might be a single field for simple
|
||||
// returns, a slice of interfaces for anonymous returns and a struct for named
|
||||
// returns.
|
||||
func (_{{$contract.Type}} *{{$contract.Type}}Raw) Call(opts *bind.CallOpts, result interface{}, method string, params ...interface{}) error {
|
||||
func (_{{$contract.Type}} *{{$contract.Type}}Raw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error {
|
||||
return _{{$contract.Type}}.Contract.{{$contract.Type}}Caller.contract.Call(opts, result, method, params...)
|
||||
}
|
||||
|
||||
@@ -279,7 +280,7 @@ var (
|
||||
// sets the output to result. The result type might be a single field for simple
|
||||
// returns, a slice of interfaces for anonymous returns and a struct for named
|
||||
// returns.
|
||||
func (_{{$contract.Type}} *{{$contract.Type}}CallerRaw) Call(opts *bind.CallOpts, result interface{}, method string, params ...interface{}) error {
|
||||
func (_{{$contract.Type}} *{{$contract.Type}}CallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error {
|
||||
return _{{$contract.Type}}.Contract.contract.Call(opts, result, method, params...)
|
||||
}
|
||||
|
||||
@@ -299,19 +300,23 @@ var (
|
||||
//
|
||||
// Solidity: {{.Original.String}}
|
||||
func (_{{$contract.Type}} *{{$contract.Type}}Caller) {{.Normalized.Name}}(opts *bind.CallOpts {{range .Normalized.Inputs}}, {{.Name}} {{bindtype .Type $structs}} {{end}}) ({{if .Structured}}struct{ {{range .Normalized.Outputs}}{{.Name}} {{bindtype .Type $structs}};{{end}} },{{else}}{{range .Normalized.Outputs}}{{bindtype .Type $structs}},{{end}}{{end}} error) {
|
||||
{{if .Structured}}ret := new(struct{
|
||||
{{range .Normalized.Outputs}}{{.Name}} {{bindtype .Type $structs}}
|
||||
{{end}}
|
||||
}){{else}}var (
|
||||
{{range $i, $_ := .Normalized.Outputs}}ret{{$i}} = new({{bindtype .Type $structs}})
|
||||
{{end}}
|
||||
){{end}}
|
||||
out := {{if .Structured}}ret{{else}}{{if eq (len .Normalized.Outputs) 1}}ret0{{else}}&[]interface{}{
|
||||
{{range $i, $_ := .Normalized.Outputs}}ret{{$i}},
|
||||
{{end}}
|
||||
}{{end}}{{end}}
|
||||
err := _{{$contract.Type}}.contract.Call(opts, out, "{{.Original.Name}}" {{range .Normalized.Inputs}}, {{.Name}}{{end}})
|
||||
return {{if .Structured}}*ret,{{else}}{{range $i, $_ := .Normalized.Outputs}}*ret{{$i}},{{end}}{{end}} err
|
||||
var out []interface{}
|
||||
err := _{{$contract.Type}}.contract.Call(opts, &out, "{{.Original.Name}}" {{range .Normalized.Inputs}}, {{.Name}}{{end}})
|
||||
{{if .Structured}}
|
||||
outstruct := new(struct{ {{range .Normalized.Outputs}} {{.Name}} {{bindtype .Type $structs}}; {{end}} })
|
||||
{{range $i, $t := .Normalized.Outputs}}
|
||||
outstruct.{{.Name}} = out[{{$i}}].({{bindtype .Type $structs}}){{end}}
|
||||
|
||||
return *outstruct, err
|
||||
{{else}}
|
||||
if err != nil {
|
||||
return {{range $i, $_ := .Normalized.Outputs}}*new({{bindtype .Type $structs}}), {{end}} err
|
||||
}
|
||||
{{range $i, $t := .Normalized.Outputs}}
|
||||
out{{$i}} := *abi.ConvertType(out[{{$i}}], new({{bindtype .Type $structs}})).(*{{bindtype .Type $structs}}){{end}}
|
||||
|
||||
return {{range $i, $t := .Normalized.Outputs}}out{{$i}}, {{end}} err
|
||||
{{end}}
|
||||
}
|
||||
|
||||
// {{.Normalized.Name}} is a free data retrieval call binding the contract method 0x{{printf "%x" .Original.ID}}.
|
||||
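For orientation, the template hunk above changes what abigen emits for read-only calls: outputs are now collected into a `[]interface{}` and converted with `abi.ConvertType` instead of being decoded into pre-allocated `ret` pointers. Below is a rough, hand-written expansion of the new template for a hypothetical single-return method; the `Token`/`TokenCaller` names are placeholders, not taken from this diff, and it assumes a go-ethereum version that includes these changes.

```go
package token

import (
	"github.com/ethereum/go-ethereum/accounts/abi"
	"github.com/ethereum/go-ethereum/accounts/abi/bind"
	"github.com/ethereum/go-ethereum/common"
)

// TokenCaller stands in for the caller struct abigen would generate.
type TokenCaller struct {
	contract *bind.BoundContract
}

// Owner mirrors the shape of a generated single-return call after this change:
// collect the outputs into a slice, then convert the first element to the
// declared return type.
func (_Token *TokenCaller) Owner(opts *bind.CallOpts) (common.Address, error) {
	var out []interface{}
	err := _Token.contract.Call(opts, &out, "owner")
	if err != nil {
		return *new(common.Address), err
	}
	out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address)
	return out0, err
}
```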
@@ -543,8 +548,8 @@ var (
|
||||
{{end}}
|
||||
`
|
||||
|
||||
// tmplSourceJava is the Java source template use to generate the contract binding
|
||||
// based on.
|
||||
// tmplSourceJava is the Java source template that the generated Java contract binding
|
||||
// is based on.
|
||||
const tmplSourceJava = `
|
||||
// This file is an automatically generated Java binding. Do not modify as any
|
||||
// change will likely be lost upon the next re-generation!
|
||||
|
@@ -52,7 +52,7 @@ func sliceTypeCheck(t Type, val reflect.Value) error {
|
||||
}
|
||||
}
|
||||
|
||||
if elemKind := val.Type().Elem().Kind(); elemKind != t.Elem.GetType().Kind() {
|
||||
if val.Type().Elem().Kind() != t.Elem.GetType().Kind() {
|
||||
return typeErr(formatSliceString(t.Elem.GetType().Kind(), t.Size), val.Type())
|
||||
}
|
||||
return nil
|
||||
|
@@ -32,7 +32,7 @@ type Event struct {
// the raw name and a suffix will be added in the case of a event overload.
//
// e.g.
// There are two events have same name:
// These are two events that have the same name:
// * foo(int,int)
// * foo(uint,uint)
// The event name of the first one wll be resolved as foo while the second one
@@ -147,10 +147,6 @@ func TestEventString(t *testing.T) {
// TestEventMultiValueWithArrayUnpack verifies that array fields will be counted after parsing array.
func TestEventMultiValueWithArrayUnpack(t *testing.T) {
definition := `[{"name": "test", "type": "event", "inputs": [{"indexed": false, "name":"value1", "type":"uint8[2]"},{"indexed": false, "name":"value2", "type":"uint8"}]}]`
type testStruct struct {
Value1 [2]uint8
Value2 uint8
}
abi, err := JSON(strings.NewReader(definition))
require.NoError(t, err)
var b bytes.Buffer
@@ -158,10 +154,10 @@ func TestEventMultiValueWithArrayUnpack(t *testing.T) {
for ; i <= 3; i++ {
b.Write(packNum(reflect.ValueOf(i)))
}
var rst testStruct
require.NoError(t, abi.Unpack(&rst, "test", b.Bytes()))
require.Equal(t, [2]uint8{1, 2}, rst.Value1)
require.Equal(t, uint8(3), rst.Value2)
unpacked, err := abi.Unpack("test", b.Bytes())
require.NoError(t, err)
require.Equal(t, [2]uint8{1, 2}, unpacked[0])
require.Equal(t, uint8(3), unpacked[1])
}
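The test rewrites above reflect the API split driving most of this diff: `ABI.Unpack` now returns the decoded values as a `[]interface{}`, while the old copy-into-a-pointer behaviour is kept under the new name `UnpackIntoInterface`. A minimal sketch of both call styles, assuming a go-ethereum version that includes this change:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/ethereum/go-ethereum/accounts/abi"
	"github.com/ethereum/go-ethereum/common"
)

func main() {
	const def = `[{"name":"get","type":"function","outputs":[{"name":"v","type":"uint8"}]}]`
	parsed, err := abi.JSON(strings.NewReader(def))
	if err != nil {
		panic(err)
	}
	// 32-byte ABI-encoded return value for a uint8 with value 3.
	ret := common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000003")

	// New style: decoded values come back as a []interface{}.
	vals, err := parsed.Unpack("get", ret)
	if err != nil {
		panic(err)
	}
	fmt.Println(vals[0].(uint8)) // 3

	// Old style, now named UnpackIntoInterface: decode into a caller-provided value.
	var v uint8
	if err := parsed.UnpackIntoInterface(&v, "get", ret); err != nil {
		panic(err)
	}
	fmt.Println(v) // 3
}
```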
||||
|
||||
func TestEventTupleUnpack(t *testing.T) {
|
||||
@@ -351,14 +347,14 @@ func unpackTestEventData(dest interface{}, hexData string, jsonEvent []byte, ass
|
||||
var e Event
|
||||
assert.NoError(json.Unmarshal(jsonEvent, &e), "Should be able to unmarshal event ABI")
|
||||
a := ABI{Events: map[string]Event{"e": e}}
|
||||
return a.Unpack(dest, "e", data)
|
||||
return a.UnpackIntoInterface(dest, "e", data)
|
||||
}
|
||||
|
||||
// TestEventUnpackIndexed verifies that indexed field will be skipped by event decoder.
|
||||
func TestEventUnpackIndexed(t *testing.T) {
|
||||
definition := `[{"name": "test", "type": "event", "inputs": [{"indexed": true, "name":"value1", "type":"uint8"},{"indexed": false, "name":"value2", "type":"uint8"}]}]`
|
||||
type testStruct struct {
|
||||
Value1 uint8
|
||||
Value1 uint8 // indexed
|
||||
Value2 uint8
|
||||
}
|
||||
abi, err := JSON(strings.NewReader(definition))
|
||||
@@ -366,16 +362,16 @@ func TestEventUnpackIndexed(t *testing.T) {
|
||||
var b bytes.Buffer
|
||||
b.Write(packNum(reflect.ValueOf(uint8(8))))
|
||||
var rst testStruct
|
||||
require.NoError(t, abi.Unpack(&rst, "test", b.Bytes()))
|
||||
require.NoError(t, abi.UnpackIntoInterface(&rst, "test", b.Bytes()))
|
||||
require.Equal(t, uint8(0), rst.Value1)
|
||||
require.Equal(t, uint8(8), rst.Value2)
|
||||
}
|
||||
|
||||
// TestEventIndexedWithArrayUnpack verifies that decoder will not overlow when static array is indexed input.
|
||||
// TestEventIndexedWithArrayUnpack verifies that decoder will not overflow when static array is indexed input.
|
||||
func TestEventIndexedWithArrayUnpack(t *testing.T) {
|
||||
definition := `[{"name": "test", "type": "event", "inputs": [{"indexed": true, "name":"value1", "type":"uint8[2]"},{"indexed": false, "name":"value2", "type":"string"}]}]`
|
||||
type testStruct struct {
|
||||
Value1 [2]uint8
|
||||
Value1 [2]uint8 // indexed
|
||||
Value2 string
|
||||
}
|
||||
abi, err := JSON(strings.NewReader(definition))
|
||||
@@ -388,7 +384,7 @@ func TestEventIndexedWithArrayUnpack(t *testing.T) {
|
||||
b.Write(common.RightPadBytes([]byte(stringOut), 32))
|
||||
|
||||
var rst testStruct
|
||||
require.NoError(t, abi.Unpack(&rst, "test", b.Bytes()))
|
||||
require.NoError(t, abi.UnpackIntoInterface(&rst, "test", b.Bytes()))
|
||||
require.Equal(t, [2]uint8{0, 0}, rst.Value1)
|
||||
require.Equal(t, stringOut, rst.Value2)
|
||||
}
|
||||
|
@@ -45,7 +45,7 @@ const (
|
||||
// If the method is `Const` no transaction needs to be created for this
|
||||
// particular Method call. It can easily be simulated using a local VM.
|
||||
// For example a `Balance()` method only needs to retrieve something
|
||||
// from the storage and therefore requires no Tx to be send to the
|
||||
// from the storage and therefore requires no Tx to be sent to the
|
||||
// network. A method such as `Transact` does require a Tx and thus will
|
||||
// be flagged `false`.
|
||||
// Input specifies the required input parameters for this gives method.
|
||||
@@ -54,7 +54,7 @@ type Method struct {
|
||||
// the raw name and a suffix will be added in the case of a function overload.
|
||||
//
|
||||
// e.g.
|
||||
// There are two functions have same name:
|
||||
// These are two functions that have the same name:
|
||||
// * foo(int,int)
|
||||
// * foo(uint,uint)
|
||||
// The method name of the first one will be resolved as foo while the second one
|
||||
|
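The comment change above concerns overload handling on the `Method` type: the first declaration keeps its raw name and later overloads get a numeric suffix. A small illustration of that resolution follows; the exact `foo0` key is an assumption based on the behaviour the comment describes, not something shown in this hunk.

```go
package main

import (
	"fmt"
	"strings"

	"github.com/ethereum/go-ethereum/accounts/abi"
)

func main() {
	// Two Solidity functions sharing the name "foo" with different argument types.
	const def = `[
		{"type":"function","name":"foo","inputs":[{"name":"a","type":"int256"},{"name":"b","type":"int256"}]},
		{"type":"function","name":"foo","inputs":[{"name":"a","type":"uint256"},{"name":"b","type":"uint256"}]}
	]`
	parsed, err := abi.JSON(strings.NewReader(def))
	if err != nil {
		panic(err)
	}
	for name := range parsed.Methods {
		fmt.Println(name) // expected keys: "foo" and "foo0"
	}
}
```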
@@ -17,6 +17,8 @@
|
||||
package abi
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"math/big"
|
||||
"reflect"
|
||||
|
||||
@@ -25,7 +27,7 @@ import (
|
||||
)
|
||||
|
||||
// packBytesSlice packs the given bytes as [L, V] as the canonical representation
|
||||
// bytes slice
|
||||
// bytes slice.
|
||||
func packBytesSlice(bytes []byte, l int) []byte {
|
||||
len := packNum(reflect.ValueOf(l))
|
||||
return append(len, common.RightPadBytes(bytes, (l+31)/32*32)...)
|
||||
@@ -33,39 +35,42 @@ func packBytesSlice(bytes []byte, l int) []byte {
|
||||
|
||||
// packElement packs the given reflect value according to the abi specification in
|
||||
// t.
|
||||
func packElement(t Type, reflectValue reflect.Value) []byte {
|
||||
func packElement(t Type, reflectValue reflect.Value) ([]byte, error) {
|
||||
switch t.T {
|
||||
case IntTy, UintTy:
|
||||
return packNum(reflectValue)
|
||||
return packNum(reflectValue), nil
|
||||
case StringTy:
|
||||
return packBytesSlice([]byte(reflectValue.String()), reflectValue.Len())
|
||||
return packBytesSlice([]byte(reflectValue.String()), reflectValue.Len()), nil
|
||||
case AddressTy:
|
||||
if reflectValue.Kind() == reflect.Array {
|
||||
reflectValue = mustArrayToByteSlice(reflectValue)
|
||||
}
|
||||
|
||||
return common.LeftPadBytes(reflectValue.Bytes(), 32)
|
||||
return common.LeftPadBytes(reflectValue.Bytes(), 32), nil
|
||||
case BoolTy:
|
||||
if reflectValue.Bool() {
|
||||
return math.PaddedBigBytes(common.Big1, 32)
|
||||
return math.PaddedBigBytes(common.Big1, 32), nil
|
||||
}
|
||||
return math.PaddedBigBytes(common.Big0, 32)
|
||||
return math.PaddedBigBytes(common.Big0, 32), nil
|
||||
case BytesTy:
|
||||
if reflectValue.Kind() == reflect.Array {
|
||||
reflectValue = mustArrayToByteSlice(reflectValue)
|
||||
}
|
||||
return packBytesSlice(reflectValue.Bytes(), reflectValue.Len())
|
||||
if reflectValue.Type() != reflect.TypeOf([]byte{}) {
|
||||
return []byte{}, errors.New("Bytes type is neither slice nor array")
|
||||
}
|
||||
return packBytesSlice(reflectValue.Bytes(), reflectValue.Len()), nil
|
||||
case FixedBytesTy, FunctionTy:
|
||||
if reflectValue.Kind() == reflect.Array {
|
||||
reflectValue = mustArrayToByteSlice(reflectValue)
|
||||
}
|
||||
return common.RightPadBytes(reflectValue.Bytes(), 32)
|
||||
return common.RightPadBytes(reflectValue.Bytes(), 32), nil
|
||||
default:
|
||||
panic("abi: fatal error")
|
||||
return []byte{}, fmt.Errorf("Could not pack element, unknown type: %v", t.T)
|
||||
}
|
||||
}
|
||||
|
||||
// packNum packs the given number (using the reflect value) and will cast it to appropriate number representation
|
||||
// packNum packs the given number (using the reflect value) and will cast it to appropriate number representation.
|
||||
func packNum(value reflect.Value) []byte {
|
||||
switch kind := value.Kind(); kind {
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
|
||||
@@ -77,5 +82,4 @@ func packNum(value reflect.Value) []byte {
|
||||
default:
|
||||
panic("abi: fatal error")
|
||||
}
|
||||
|
||||
}
|
||||
|
@@ -44,18 +44,7 @@ func TestPack(t *testing.T) {
|
||||
t.Fatalf("invalid ABI definition %s, %v", inDef, err)
|
||||
}
|
||||
var packed []byte
|
||||
if reflect.TypeOf(test.unpacked).Kind() != reflect.Struct {
|
||||
packed, err = inAbi.Pack("method", test.unpacked)
|
||||
} else {
|
||||
// if want is a struct we need to use the components.
|
||||
elem := reflect.ValueOf(test.unpacked)
|
||||
var values []interface{}
|
||||
for i := 0; i < elem.NumField(); i++ {
|
||||
field := elem.Field(i)
|
||||
values = append(values, field.Interface())
|
||||
}
|
||||
packed, err = inAbi.Pack("method", values...)
|
||||
}
|
||||
packed, err = inAbi.Pack("method", test.unpacked)
|
||||
|
||||
if err != nil {
|
||||
t.Fatalf("test %d (%v) failed: %v", i, test.def, err)
|
||||
|
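The simplified TestPack above relies on tuple arguments being packable straight from a Go struct, with field names matched against the tuple's component names. A minimal sketch under that assumption; the `method` name and component names here are illustrative only.

```go
package main

import (
	"fmt"
	"math/big"
	"strings"

	"github.com/ethereum/go-ethereum/accounts/abi"
)

func main() {
	// A single function taking one static tuple (a, b) of uint256.
	const def = `[{"name":"method","type":"function","inputs":[
		{"components":[{"name":"a","type":"uint256"},{"name":"b","type":"uint256"}],"name":"t","type":"tuple"}]}]`
	parsed, err := abi.JSON(strings.NewReader(def))
	if err != nil {
		panic(err)
	}
	// The struct fields A and B map onto the tuple components a and b.
	arg := struct {
		A *big.Int
		B *big.Int
	}{big.NewInt(1), big.NewInt(2)}

	packed, err := parsed.Pack("method", arg)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%x\n", packed) // 4-byte selector followed by two 32-byte words
}
```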
@@ -620,7 +620,7 @@ var packUnpackTests = []packUnpackTest{
|
||||
|
||||
{
|
||||
def: `[{"type": "bytes32[]"}]`,
|
||||
unpacked: []common.Hash{{1}, {2}},
|
||||
unpacked: [][32]byte{{1}, {2}},
|
||||
packed: "0000000000000000000000000000000000000000000000000000000000000020" +
|
||||
"0000000000000000000000000000000000000000000000000000000000000002" +
|
||||
"0100000000000000000000000000000000000000000000000000000000000000" +
|
||||
@@ -722,7 +722,7 @@ var packUnpackTests = []packUnpackTest{
|
||||
},
|
||||
// struct outputs
|
||||
{
|
||||
def: `[{"name":"int1","type":"int256"},{"name":"int2","type":"int256"}]`,
|
||||
def: `[{"components": [{"name":"int1","type":"int256"},{"name":"int2","type":"int256"}], "type":"tuple"}]`,
|
||||
packed: "0000000000000000000000000000000000000000000000000000000000000001" +
|
||||
"0000000000000000000000000000000000000000000000000000000000000002",
|
||||
unpacked: struct {
|
||||
@@ -731,28 +731,28 @@ var packUnpackTests = []packUnpackTest{
|
||||
}{big.NewInt(1), big.NewInt(2)},
|
||||
},
|
||||
{
|
||||
def: `[{"name":"int_one","type":"int256"}]`,
|
||||
def: `[{"components": [{"name":"int_one","type":"int256"}], "type":"tuple"}]`,
|
||||
packed: "0000000000000000000000000000000000000000000000000000000000000001",
|
||||
unpacked: struct {
|
||||
IntOne *big.Int
|
||||
}{big.NewInt(1)},
|
||||
},
|
||||
{
|
||||
def: `[{"name":"int__one","type":"int256"}]`,
|
||||
def: `[{"components": [{"name":"int__one","type":"int256"}], "type":"tuple"}]`,
|
||||
packed: "0000000000000000000000000000000000000000000000000000000000000001",
|
||||
unpacked: struct {
|
||||
IntOne *big.Int
|
||||
}{big.NewInt(1)},
|
||||
},
|
||||
{
|
||||
def: `[{"name":"int_one_","type":"int256"}]`,
|
||||
def: `[{"components": [{"name":"int_one_","type":"int256"}], "type":"tuple"}]`,
|
||||
packed: "0000000000000000000000000000000000000000000000000000000000000001",
|
||||
unpacked: struct {
|
||||
IntOne *big.Int
|
||||
}{big.NewInt(1)},
|
||||
},
|
||||
{
|
||||
def: `[{"name":"int_one","type":"int256"}, {"name":"intone","type":"int256"}]`,
|
||||
def: `[{"components": [{"name":"int_one","type":"int256"}, {"name":"intone","type":"int256"}], "type":"tuple"}]`,
|
||||
packed: "0000000000000000000000000000000000000000000000000000000000000001" +
|
||||
"0000000000000000000000000000000000000000000000000000000000000002",
|
||||
unpacked: struct {
|
||||
@@ -831,11 +831,11 @@ var packUnpackTests = []packUnpackTest{
|
||||
},
|
||||
{
|
||||
// static tuple
|
||||
def: `[{"name":"a","type":"int64"},
|
||||
def: `[{"components": [{"name":"a","type":"int64"},
|
||||
{"name":"b","type":"int256"},
|
||||
{"name":"c","type":"int256"},
|
||||
{"name":"d","type":"bool"},
|
||||
{"name":"e","type":"bytes32[3][2]"}]`,
|
||||
{"name":"e","type":"bytes32[3][2]"}], "type":"tuple"}]`,
|
||||
unpacked: struct {
|
||||
A int64
|
||||
B *big.Int
|
||||
@@ -855,21 +855,22 @@ var packUnpackTests = []packUnpackTest{
|
||||
"0500000000000000000000000000000000000000000000000000000000000000", // struct[e] array[1][2]
|
||||
},
|
||||
{
|
||||
def: `[{"name":"a","type":"string"},
|
||||
def: `[{"components": [{"name":"a","type":"string"},
|
||||
{"name":"b","type":"int64"},
|
||||
{"name":"c","type":"bytes"},
|
||||
{"name":"d","type":"string[]"},
|
||||
{"name":"e","type":"int256[]"},
|
||||
{"name":"f","type":"address[]"}]`,
|
||||
{"name":"f","type":"address[]"}], "type":"tuple"}]`,
|
||||
unpacked: struct {
|
||||
FieldA string `abi:"a"` // Test whether abi tag works
|
||||
FieldB int64 `abi:"b"`
|
||||
C []byte
|
||||
D []string
|
||||
E []*big.Int
|
||||
F []common.Address
|
||||
A string
|
||||
B int64
|
||||
C []byte
|
||||
D []string
|
||||
E []*big.Int
|
||||
F []common.Address
|
||||
}{"foobar", 1, []byte{1}, []string{"foo", "bar"}, []*big.Int{big.NewInt(1), big.NewInt(-1)}, []common.Address{{1}, {2}}},
|
||||
packed: "00000000000000000000000000000000000000000000000000000000000000c0" + // struct[a] offset
|
||||
packed: "0000000000000000000000000000000000000000000000000000000000000020" + // struct a
|
||||
"00000000000000000000000000000000000000000000000000000000000000c0" + // struct[a] offset
|
||||
"0000000000000000000000000000000000000000000000000000000000000001" + // struct[b]
|
||||
"0000000000000000000000000000000000000000000000000000000000000100" + // struct[c] offset
|
||||
"0000000000000000000000000000000000000000000000000000000000000140" + // struct[d] offset
|
||||
@@ -894,23 +895,24 @@ var packUnpackTests = []packUnpackTest{
|
||||
"0000000000000000000000000200000000000000000000000000000000000000", // common.Address{2}
|
||||
},
|
||||
{
|
||||
def: `[{"components": [{"name": "a","type": "uint256"},
|
||||
def: `[{"components": [{ "type": "tuple","components": [{"name": "a","type": "uint256"},
|
||||
{"name": "b","type": "uint256[]"}],
|
||||
"name": "a","type": "tuple"},
|
||||
{"name": "b","type": "uint256[]"}]`,
|
||||
{"name": "b","type": "uint256[]"}], "type": "tuple"}]`,
|
||||
unpacked: struct {
|
||||
A struct {
|
||||
FieldA *big.Int `abi:"a"`
|
||||
B []*big.Int
|
||||
A *big.Int
|
||||
B []*big.Int
|
||||
}
|
||||
B []*big.Int
|
||||
}{
|
||||
A: struct {
|
||||
FieldA *big.Int `abi:"a"` // Test whether abi tag works for nested tuple
|
||||
B []*big.Int
|
||||
A *big.Int
|
||||
B []*big.Int
|
||||
}{big.NewInt(1), []*big.Int{big.NewInt(1), big.NewInt(2)}},
|
||||
B: []*big.Int{big.NewInt(1), big.NewInt(2)}},
|
||||
packed: "0000000000000000000000000000000000000000000000000000000000000040" + // a offset
|
||||
packed: "0000000000000000000000000000000000000000000000000000000000000020" + // struct a
|
||||
"0000000000000000000000000000000000000000000000000000000000000040" + // a offset
|
||||
"00000000000000000000000000000000000000000000000000000000000000e0" + // b offset
|
||||
"0000000000000000000000000000000000000000000000000000000000000001" + // a.a value
|
||||
"0000000000000000000000000000000000000000000000000000000000000040" + // a.b offset
|
||||
|
@@ -24,6 +24,29 @@ import (
|
||||
"strings"
|
||||
)
|
||||
|
||||
// ConvertType converts an interface of a runtime type into a interface of the
// given type
// e.g. turn
// var fields []reflect.StructField
// fields = append(fields, reflect.StructField{
// Name: "X",
// Type: reflect.TypeOf(new(big.Int)),
// Tag: reflect.StructTag("json:\"" + "x" + "\""),
// }
// into
// type TupleT struct { X *big.Int }
func ConvertType(in interface{}, proto interface{}) interface{} {
protoType := reflect.TypeOf(proto)
if reflect.TypeOf(in).ConvertibleTo(protoType) {
return reflect.ValueOf(in).Convert(protoType).Interface()
}
// Use set as a last ditch effort
if err := set(reflect.ValueOf(proto), reflect.ValueOf(in)); err != nil {
panic(err)
}
return proto
}

// indirect recursively dereferences the value until it either gets the value
// or finds a big.Int
func indirect(v reflect.Value) reflect.Value {
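`ConvertType`, added above, is what the regenerated bindings use to turn the anonymous values returned by `Unpack` into the caller's named types. A minimal sketch, assuming the function as introduced here; the `Point` type is a made-up stand-in for a binding's output struct.

```go
package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/accounts/abi"
)

// Point is the named type we want; the anonymous struct below stands in for a
// value assembled at runtime (for example by the binding templates).
type Point struct {
	X *big.Int
	Y *big.Int
}

func main() {
	anon := struct {
		X *big.Int
		Y *big.Int
	}{big.NewInt(1), big.NewInt(2)}

	// Pointer-to-struct converts directly to *Point, since the underlying
	// struct types are identical.
	p := *abi.ConvertType(&anon, new(Point)).(*Point)
	fmt.Println(p.X, p.Y) // 1 2
}
```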
@@ -61,7 +84,7 @@ func reflectIntType(unsigned bool, size int) reflect.Type {
|
||||
return reflect.TypeOf(&big.Int{})
|
||||
}
|
||||
|
||||
// mustArrayToBytesSlice creates a new byte slice with the exact same size as value
|
||||
// mustArrayToByteSlice creates a new byte slice with the exact same size as value
|
||||
// and copies the bytes in value to the new slice.
|
||||
func mustArrayToByteSlice(value reflect.Value) reflect.Value {
|
||||
slice := reflect.MakeSlice(reflect.TypeOf([]byte{}), value.Len(), value.Len())
|
||||
@@ -119,6 +142,9 @@ func setSlice(dst, src reflect.Value) error {
|
||||
}
|
||||
|
||||
func setArray(dst, src reflect.Value) error {
|
||||
if src.Kind() == reflect.Ptr {
|
||||
return set(dst, indirect(src))
|
||||
}
|
||||
array := reflect.New(dst.Type()).Elem()
|
||||
min := src.Len()
|
||||
if src.Len() > dst.Len() {
|
||||
|
@@ -17,6 +17,7 @@
|
||||
package abi
|
||||
|
||||
import (
|
||||
"math/big"
|
||||
"reflect"
|
||||
"testing"
|
||||
)
|
||||
@@ -189,3 +190,72 @@ func TestReflectNameToStruct(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestConvertType(t *testing.T) {
|
||||
// Test Basic Struct
|
||||
type T struct {
|
||||
X *big.Int
|
||||
Y *big.Int
|
||||
}
|
||||
// Create on-the-fly structure
|
||||
var fields []reflect.StructField
|
||||
fields = append(fields, reflect.StructField{
|
||||
Name: "X",
|
||||
Type: reflect.TypeOf(new(big.Int)),
|
||||
Tag: reflect.StructTag("json:\"" + "x" + "\""),
|
||||
})
|
||||
fields = append(fields, reflect.StructField{
|
||||
Name: "Y",
|
||||
Type: reflect.TypeOf(new(big.Int)),
|
||||
Tag: reflect.StructTag("json:\"" + "y" + "\""),
|
||||
})
|
||||
val := reflect.New(reflect.StructOf(fields))
|
||||
val.Elem().Field(0).Set(reflect.ValueOf(big.NewInt(1)))
|
||||
val.Elem().Field(1).Set(reflect.ValueOf(big.NewInt(2)))
|
||||
// ConvertType
|
||||
out := *ConvertType(val.Interface(), new(T)).(*T)
|
||||
if out.X.Cmp(big.NewInt(1)) != 0 {
|
||||
t.Errorf("ConvertType failed, got %v want %v", out.X, big.NewInt(1))
|
||||
}
|
||||
if out.Y.Cmp(big.NewInt(2)) != 0 {
|
||||
t.Errorf("ConvertType failed, got %v want %v", out.Y, big.NewInt(2))
|
||||
}
|
||||
// Slice Type
|
||||
val2 := reflect.MakeSlice(reflect.SliceOf(reflect.StructOf(fields)), 2, 2)
|
||||
val2.Index(0).Field(0).Set(reflect.ValueOf(big.NewInt(1)))
|
||||
val2.Index(0).Field(1).Set(reflect.ValueOf(big.NewInt(2)))
|
||||
val2.Index(1).Field(0).Set(reflect.ValueOf(big.NewInt(3)))
|
||||
val2.Index(1).Field(1).Set(reflect.ValueOf(big.NewInt(4)))
|
||||
out2 := *ConvertType(val2.Interface(), new([]T)).(*[]T)
|
||||
if out2[0].X.Cmp(big.NewInt(1)) != 0 {
|
||||
t.Errorf("ConvertType failed, got %v want %v", out2[0].X, big.NewInt(1))
|
||||
}
|
||||
if out2[0].Y.Cmp(big.NewInt(2)) != 0 {
|
||||
t.Errorf("ConvertType failed, got %v want %v", out2[1].Y, big.NewInt(2))
|
||||
}
|
||||
if out2[1].X.Cmp(big.NewInt(3)) != 0 {
|
||||
t.Errorf("ConvertType failed, got %v want %v", out2[0].X, big.NewInt(1))
|
||||
}
|
||||
if out2[1].Y.Cmp(big.NewInt(4)) != 0 {
|
||||
t.Errorf("ConvertType failed, got %v want %v", out2[1].Y, big.NewInt(2))
|
||||
}
|
||||
// Array Type
|
||||
val3 := reflect.New(reflect.ArrayOf(2, reflect.StructOf(fields)))
|
||||
val3.Elem().Index(0).Field(0).Set(reflect.ValueOf(big.NewInt(1)))
|
||||
val3.Elem().Index(0).Field(1).Set(reflect.ValueOf(big.NewInt(2)))
|
||||
val3.Elem().Index(1).Field(0).Set(reflect.ValueOf(big.NewInt(3)))
|
||||
val3.Elem().Index(1).Field(1).Set(reflect.ValueOf(big.NewInt(4)))
|
||||
out3 := *ConvertType(val3.Interface(), new([2]T)).(*[2]T)
|
||||
if out3[0].X.Cmp(big.NewInt(1)) != 0 {
|
||||
t.Errorf("ConvertType failed, got %v want %v", out3[0].X, big.NewInt(1))
|
||||
}
|
||||
if out3[0].Y.Cmp(big.NewInt(2)) != 0 {
|
||||
t.Errorf("ConvertType failed, got %v want %v", out3[1].Y, big.NewInt(2))
|
||||
}
|
||||
if out3[1].X.Cmp(big.NewInt(3)) != 0 {
|
||||
t.Errorf("ConvertType failed, got %v want %v", out3[0].X, big.NewInt(1))
|
||||
}
|
||||
if out3[1].Y.Cmp(big.NewInt(4)) != 0 {
|
||||
t.Errorf("ConvertType failed, got %v want %v", out3[1].Y, big.NewInt(2))
|
||||
}
|
||||
}
|
||||
|
@@ -102,7 +102,7 @@ func genIntType(rule int64, size uint) []byte {
|
||||
var topic [common.HashLength]byte
|
||||
if rule < 0 {
|
||||
// if a rule is negative, we need to put it into two's complement.
|
||||
// extended to common.Hashlength bytes.
|
||||
// extended to common.HashLength bytes.
|
||||
topic = [common.HashLength]byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}
|
||||
}
|
||||
for i := uint(0); i < size; i++ {
|
||||
@@ -120,7 +120,7 @@ func ParseTopics(out interface{}, fields Arguments, topics []common.Hash) error
|
||||
})
|
||||
}
|
||||
|
||||
// ParseTopicsIntoMap converts the indexed topic field-value pairs into map key-value pairs
|
||||
// ParseTopicsIntoMap converts the indexed topic field-value pairs into map key-value pairs.
|
||||
func ParseTopicsIntoMap(out map[string]interface{}, fields Arguments, topics []common.Hash) error {
|
||||
return parseTopicWithSetter(fields, topics,
|
||||
func(arg Argument, reconstr interface{}) {
|
||||
|
@@ -44,7 +44,7 @@ const (
|
||||
FunctionTy
|
||||
)
|
||||
|
||||
// Type is the reflection of the supported argument type
|
||||
// Type is the reflection of the supported argument type.
|
||||
type Type struct {
|
||||
Elem *Type
|
||||
Size int
|
||||
@@ -264,7 +264,7 @@ func overloadedArgName(rawName string, names map[string]string) (string, error)
|
||||
return fieldName, nil
|
||||
}
|
||||
|
||||
// String implements Stringer
|
||||
// String implements Stringer.
|
||||
func (t Type) String() (out string) {
|
||||
return t.stringKind
|
||||
}
|
||||
@@ -346,7 +346,7 @@ func (t Type) pack(v reflect.Value) ([]byte, error) {
|
||||
return append(ret, tail...), nil
|
||||
|
||||
default:
|
||||
return packElement(t, v), nil
|
||||
return packElement(t, v)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -386,7 +386,7 @@ func isDynamicType(t Type) bool {
|
||||
func getTypeSize(t Type) int {
|
||||
if t.T == ArrayTy && !isDynamicType(*t.Elem) {
|
||||
// Recursively calculate type size if it is a nested array
|
||||
if t.Elem.T == ArrayTy {
|
||||
if t.Elem.T == ArrayTy || t.Elem.T == TupleTy {
|
||||
return t.Size * getTypeSize(*t.Elem)
|
||||
}
|
||||
return t.Size * 32
|
||||
|
@@ -330,3 +330,39 @@ func TestInternalType(t *testing.T) {
|
||||
t.Errorf("type %q: parsed type mismatch:\nGOT %s\nWANT %s ", blob, spew.Sdump(typeWithoutStringer(typ)), spew.Sdump(typeWithoutStringer(kind)))
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetTypeSize(t *testing.T) {
|
||||
var testCases = []struct {
|
||||
typ string
|
||||
components []ArgumentMarshaling
|
||||
typSize int
|
||||
}{
|
||||
// simple array
|
||||
{"uint256[2]", nil, 32 * 2},
|
||||
{"address[3]", nil, 32 * 3},
|
||||
{"bytes32[4]", nil, 32 * 4},
|
||||
// array array
|
||||
{"uint256[2][3][4]", nil, 32 * (2 * 3 * 4)},
|
||||
// array tuple
|
||||
{"tuple[2]", []ArgumentMarshaling{{Name: "x", Type: "bytes32"}, {Name: "y", Type: "bytes32"}}, (32 * 2) * 2},
|
||||
// simple tuple
|
||||
{"tuple", []ArgumentMarshaling{{Name: "x", Type: "uint256"}, {Name: "y", Type: "uint256"}}, 32 * 2},
|
||||
// tuple array
|
||||
{"tuple", []ArgumentMarshaling{{Name: "x", Type: "bytes32[2]"}}, 32 * 2},
|
||||
// tuple tuple
|
||||
{"tuple", []ArgumentMarshaling{{Name: "x", Type: "tuple", Components: []ArgumentMarshaling{{Name: "x", Type: "bytes32"}}}}, 32},
|
||||
{"tuple", []ArgumentMarshaling{{Name: "x", Type: "tuple", Components: []ArgumentMarshaling{{Name: "x", Type: "bytes32[2]"}, {Name: "y", Type: "uint256"}}}}, 32 * (2 + 1)},
|
||||
}
|
||||
|
||||
for i, data := range testCases {
|
||||
typ, err := NewType(data.typ, "", data.components)
|
||||
if err != nil {
|
||||
t.Errorf("type %q: failed to parse type string: %v", data.typ, err)
|
||||
}
|
||||
|
||||
result := getTypeSize(typ)
|
||||
if result != data.typSize {
|
||||
t.Errorf("case %d type %q: get type size error: actual: %d expected: %d", i, data.typ, result, data.typSize)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@@ -26,13 +26,13 @@ import (
|
||||
)
|
||||
|
||||
var (
|
||||
// MaxUint256 is the maximum value that can be represented by a uint256
|
||||
// MaxUint256 is the maximum value that can be represented by a uint256.
|
||||
MaxUint256 = new(big.Int).Sub(new(big.Int).Lsh(common.Big1, 256), common.Big1)
|
||||
// MaxInt256 is the maximum value that can be represented by a int256
|
||||
// MaxInt256 is the maximum value that can be represented by a int256.
|
||||
MaxInt256 = new(big.Int).Sub(new(big.Int).Lsh(common.Big1, 255), common.Big1)
|
||||
)
|
||||
|
||||
// ReadInteger reads the integer based on its kind and returns the appropriate value
|
||||
// ReadInteger reads the integer based on its kind and returns the appropriate value.
|
||||
func ReadInteger(typ Type, b []byte) interface{} {
|
||||
if typ.T == UintTy {
|
||||
switch typ.Size {
|
||||
@@ -73,7 +73,7 @@ func ReadInteger(typ Type, b []byte) interface{} {
|
||||
}
|
||||
}
|
||||
|
||||
// reads a bool
|
||||
// readBool reads a bool.
|
||||
func readBool(word []byte) (bool, error) {
|
||||
for _, b := range word[:31] {
|
||||
if b != 0 {
|
||||
@@ -91,7 +91,8 @@ func readBool(word []byte) (bool, error) {
|
||||
}
|
||||
|
||||
// A function type is simply the address with the function selection signature at the end.
|
||||
// This enforces that standard by always presenting it as a 24-array (address + sig = 24 bytes)
|
||||
//
|
||||
// readFunctionType enforces that standard by always presenting it as a 24-array (address + sig = 24 bytes)
|
||||
func readFunctionType(t Type, word []byte) (funcTy [24]byte, err error) {
|
||||
if t.T != FunctionTy {
|
||||
return [24]byte{}, fmt.Errorf("abi: invalid type in call to make function type byte array")
|
||||
@@ -104,7 +105,7 @@ func readFunctionType(t Type, word []byte) (funcTy [24]byte, err error) {
|
||||
return
|
||||
}
|
||||
|
||||
// ReadFixedBytes uses reflection to create a fixed array to be read from
|
||||
// ReadFixedBytes uses reflection to create a fixed array to be read from.
|
||||
func ReadFixedBytes(t Type, word []byte) (interface{}, error) {
|
||||
if t.T != FixedBytesTy {
|
||||
return nil, fmt.Errorf("abi: invalid type in call to make fixed byte array")
|
||||
@@ -117,7 +118,7 @@ func ReadFixedBytes(t Type, word []byte) (interface{}, error) {
|
||||
|
||||
}
|
||||
|
||||
// iteratively unpack elements
|
||||
// forEachUnpack iteratively unpack elements.
|
||||
func forEachUnpack(t Type, output []byte, start, size int) (interface{}, error) {
|
||||
if size < 0 {
|
||||
return nil, fmt.Errorf("cannot marshal input to array, size is negative (%d)", size)
|
||||
@@ -224,7 +225,10 @@ func toGoType(index int, t Type, output []byte) (interface{}, error) {
|
||||
return forEachUnpack(t, output[begin:], 0, length)
|
||||
case ArrayTy:
|
||||
if isDynamicType(*t.Elem) {
|
||||
offset := int64(binary.BigEndian.Uint64(returnOutput[len(returnOutput)-8:]))
|
||||
offset := binary.BigEndian.Uint64(returnOutput[len(returnOutput)-8:])
|
||||
if offset > uint64(len(output)) {
|
||||
return nil, fmt.Errorf("abi: toGoType offset greater than output length: offset: %d, len(output): %d", offset, len(output))
|
||||
}
|
||||
return forEachUnpack(t, output[offset:], 0, t.Size)
|
||||
}
|
||||
return forEachUnpack(t, output[index:], 0, t.Size)
|
||||
@@ -249,7 +253,7 @@ func toGoType(index int, t Type, output []byte) (interface{}, error) {
|
||||
}
|
||||
}
|
||||
|
||||
// interprets a 32 byte slice as an offset and then determines which indice to look to decode the type.
|
||||
// lengthPrefixPointsTo interprets a 32 byte slice as an offset and then determines which indices to look to decode the type.
|
||||
func lengthPrefixPointsTo(index int, output []byte) (start int, length int, err error) {
|
||||
bigOffsetEnd := big.NewInt(0).SetBytes(output[index : index+32])
|
||||
bigOffsetEnd.Add(bigOffsetEnd, common.Big32)
|
||||
|
@@ -44,15 +44,13 @@ func TestUnpack(t *testing.T) {
|
||||
if err != nil {
|
||||
t.Fatalf("invalid hex %s: %v", test.packed, err)
|
||||
}
|
||||
outptr := reflect.New(reflect.TypeOf(test.unpacked))
|
||||
err = abi.Unpack(outptr.Interface(), "method", encb)
|
||||
out, err := abi.Unpack("method", encb)
|
||||
if err != nil {
|
||||
t.Errorf("test %d (%v) failed: %v", i, test.def, err)
|
||||
return
|
||||
}
|
||||
out := outptr.Elem().Interface()
|
||||
if !reflect.DeepEqual(test.unpacked, out) {
|
||||
t.Errorf("test %d (%v) failed: expected %v, got %v", i, test.def, test.unpacked, out)
|
||||
if !reflect.DeepEqual(test.unpacked, ConvertType(out[0], test.unpacked)) {
|
||||
t.Errorf("test %d (%v) failed: expected %v, got %v", i, test.def, test.unpacked, out[0])
|
||||
}
|
||||
})
|
||||
}
|
||||
@@ -221,7 +219,7 @@ func TestLocalUnpackTests(t *testing.T) {
|
||||
t.Fatalf("invalid hex %s: %v", test.enc, err)
|
||||
}
|
||||
outptr := reflect.New(reflect.TypeOf(test.want))
|
||||
err = abi.Unpack(outptr.Interface(), "method", encb)
|
||||
err = abi.UnpackIntoInterface(outptr.Interface(), "method", encb)
|
||||
if err := test.checkError(err); err != nil {
|
||||
t.Errorf("test %d (%v) failed: %v", i, test.def, err)
|
||||
return
|
||||
@@ -234,7 +232,7 @@ func TestLocalUnpackTests(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestUnpackSetDynamicArrayOutput(t *testing.T) {
|
||||
func TestUnpackIntoInterfaceSetDynamicArrayOutput(t *testing.T) {
|
||||
abi, err := JSON(strings.NewReader(`[{"constant":true,"inputs":[],"name":"testDynamicFixedBytes15","outputs":[{"name":"","type":"bytes15[]"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"testDynamicFixedBytes32","outputs":[{"name":"","type":"bytes32[]"}],"payable":false,"stateMutability":"view","type":"function"}]`))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
@@ -249,7 +247,7 @@ func TestUnpackSetDynamicArrayOutput(t *testing.T) {
|
||||
)
|
||||
|
||||
// test 32
|
||||
err = abi.Unpack(&out32, "testDynamicFixedBytes32", marshalledReturn32)
|
||||
err = abi.UnpackIntoInterface(&out32, "testDynamicFixedBytes32", marshalledReturn32)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@@ -266,7 +264,7 @@ func TestUnpackSetDynamicArrayOutput(t *testing.T) {
|
||||
}
|
||||
|
||||
// test 15
|
||||
err = abi.Unpack(&out15, "testDynamicFixedBytes32", marshalledReturn15)
|
||||
err = abi.UnpackIntoInterface(&out15, "testDynamicFixedBytes32", marshalledReturn15)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@@ -367,7 +365,7 @@ func TestMethodMultiReturn(t *testing.T) {
|
||||
tc := tc
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
require := require.New(t)
|
||||
err := abi.Unpack(tc.dest, "multi", data)
|
||||
err := abi.UnpackIntoInterface(tc.dest, "multi", data)
|
||||
if tc.error == "" {
|
||||
require.Nil(err, "Should be able to unpack method outputs.")
|
||||
require.Equal(tc.expected, tc.dest)
|
||||
@@ -390,7 +388,7 @@ func TestMultiReturnWithArray(t *testing.T) {
|
||||
|
||||
ret1, ret1Exp := new([3]uint64), [3]uint64{9, 9, 9}
|
||||
ret2, ret2Exp := new(uint64), uint64(8)
|
||||
if err := abi.Unpack(&[]interface{}{ret1, ret2}, "multi", buff.Bytes()); err != nil {
|
||||
if err := abi.UnpackIntoInterface(&[]interface{}{ret1, ret2}, "multi", buff.Bytes()); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !reflect.DeepEqual(*ret1, ret1Exp) {
|
||||
@@ -414,7 +412,7 @@ func TestMultiReturnWithStringArray(t *testing.T) {
|
||||
ret2, ret2Exp := new(common.Address), common.HexToAddress("ab1257528b3782fb40d7ed5f72e624b744dffb2f")
|
||||
ret3, ret3Exp := new([2]string), [2]string{"Ethereum", "Hello, Ethereum!"}
|
||||
ret4, ret4Exp := new(bool), false
|
||||
if err := abi.Unpack(&[]interface{}{ret1, ret2, ret3, ret4}, "multi", buff.Bytes()); err != nil {
|
||||
if err := abi.UnpackIntoInterface(&[]interface{}{ret1, ret2, ret3, ret4}, "multi", buff.Bytes()); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !reflect.DeepEqual(*ret1, ret1Exp) {
|
||||
@@ -452,7 +450,7 @@ func TestMultiReturnWithStringSlice(t *testing.T) {
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000065")) // output[1][1] value
|
||||
ret1, ret1Exp := new([]string), []string{"ethereum", "go-ethereum"}
|
||||
ret2, ret2Exp := new([]*big.Int), []*big.Int{big.NewInt(100), big.NewInt(101)}
|
||||
if err := abi.Unpack(&[]interface{}{ret1, ret2}, "multi", buff.Bytes()); err != nil {
|
||||
if err := abi.UnpackIntoInterface(&[]interface{}{ret1, ret2}, "multi", buff.Bytes()); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !reflect.DeepEqual(*ret1, ret1Exp) {
|
||||
@@ -492,7 +490,7 @@ func TestMultiReturnWithDeeplyNestedArray(t *testing.T) {
|
||||
{{0x411, 0x412, 0x413}, {0x421, 0x422, 0x423}},
|
||||
}
|
||||
ret2, ret2Exp := new(uint64), uint64(0x9876)
|
||||
if err := abi.Unpack(&[]interface{}{ret1, ret2}, "multi", buff.Bytes()); err != nil {
|
||||
if err := abi.UnpackIntoInterface(&[]interface{}{ret1, ret2}, "multi", buff.Bytes()); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !reflect.DeepEqual(*ret1, ret1Exp) {
|
||||
@@ -531,7 +529,7 @@ func TestUnmarshal(t *testing.T) {
|
||||
buff.Write(common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000a"))
|
||||
buff.Write(common.Hex2Bytes("0102000000000000000000000000000000000000000000000000000000000000"))
|
||||
|
||||
err = abi.Unpack(&mixedBytes, "mixedBytes", buff.Bytes())
|
||||
err = abi.UnpackIntoInterface(&mixedBytes, "mixedBytes", buff.Bytes())
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
} else {
|
||||
@@ -546,7 +544,7 @@ func TestUnmarshal(t *testing.T) {
|
||||
|
||||
// marshal int
|
||||
var Int *big.Int
|
||||
err = abi.Unpack(&Int, "int", common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001"))
|
||||
err = abi.UnpackIntoInterface(&Int, "int", common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001"))
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
@@ -557,7 +555,7 @@ func TestUnmarshal(t *testing.T) {
|
||||
|
||||
// marshal bool
|
||||
var Bool bool
|
||||
err = abi.Unpack(&Bool, "bool", common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001"))
|
||||
err = abi.UnpackIntoInterface(&Bool, "bool", common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001"))
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
@@ -574,7 +572,7 @@ func TestUnmarshal(t *testing.T) {
|
||||
buff.Write(bytesOut)
|
||||
|
||||
var Bytes []byte
|
||||
err = abi.Unpack(&Bytes, "bytes", buff.Bytes())
|
||||
err = abi.UnpackIntoInterface(&Bytes, "bytes", buff.Bytes())
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
@@ -590,7 +588,7 @@ func TestUnmarshal(t *testing.T) {
|
||||
bytesOut = common.RightPadBytes([]byte("hello"), 64)
|
||||
buff.Write(bytesOut)
|
||||
|
||||
err = abi.Unpack(&Bytes, "bytes", buff.Bytes())
|
||||
err = abi.UnpackIntoInterface(&Bytes, "bytes", buff.Bytes())
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
@@ -606,7 +604,7 @@ func TestUnmarshal(t *testing.T) {
|
||||
bytesOut = common.RightPadBytes([]byte("hello"), 64)
|
||||
buff.Write(bytesOut)
|
||||
|
||||
err = abi.Unpack(&Bytes, "bytes", buff.Bytes())
|
||||
err = abi.UnpackIntoInterface(&Bytes, "bytes", buff.Bytes())
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
@@ -616,7 +614,7 @@ func TestUnmarshal(t *testing.T) {
|
||||
}
|
||||
|
||||
// marshal dynamic bytes output empty
|
||||
err = abi.Unpack(&Bytes, "bytes", nil)
|
||||
err = abi.UnpackIntoInterface(&Bytes, "bytes", nil)
|
||||
if err == nil {
|
||||
t.Error("expected error")
|
||||
}
|
||||
@@ -627,7 +625,7 @@ func TestUnmarshal(t *testing.T) {
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000005"))
|
||||
buff.Write(common.RightPadBytes([]byte("hello"), 32))
|
||||
|
||||
err = abi.Unpack(&Bytes, "bytes", buff.Bytes())
|
||||
err = abi.UnpackIntoInterface(&Bytes, "bytes", buff.Bytes())
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
@@ -641,7 +639,7 @@ func TestUnmarshal(t *testing.T) {
|
||||
buff.Write(common.RightPadBytes([]byte("hello"), 32))
|
||||
|
||||
var hash common.Hash
|
||||
err = abi.Unpack(&hash, "fixed", buff.Bytes())
|
||||
err = abi.UnpackIntoInterface(&hash, "fixed", buff.Bytes())
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
@@ -654,12 +652,12 @@ func TestUnmarshal(t *testing.T) {
|
||||
// marshal error
|
||||
buff.Reset()
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000020"))
|
||||
err = abi.Unpack(&Bytes, "bytes", buff.Bytes())
|
||||
err = abi.UnpackIntoInterface(&Bytes, "bytes", buff.Bytes())
|
||||
if err == nil {
|
||||
t.Error("expected error")
|
||||
}
|
||||
|
||||
err = abi.Unpack(&Bytes, "multi", make([]byte, 64))
|
||||
err = abi.UnpackIntoInterface(&Bytes, "multi", make([]byte, 64))
|
||||
if err == nil {
|
||||
t.Error("expected error")
|
||||
}
|
||||
@@ -670,7 +668,7 @@ func TestUnmarshal(t *testing.T) {
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000003"))
|
||||
// marshal int array
|
||||
var intArray [3]*big.Int
|
||||
err = abi.Unpack(&intArray, "intArraySingle", buff.Bytes())
|
||||
err = abi.UnpackIntoInterface(&intArray, "intArraySingle", buff.Bytes())
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
@@ -691,7 +689,7 @@ func TestUnmarshal(t *testing.T) {
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000100000000000000000000000000000000000000"))
|
||||
|
||||
var outAddr []common.Address
|
||||
err = abi.Unpack(&outAddr, "addressSliceSingle", buff.Bytes())
|
||||
err = abi.UnpackIntoInterface(&outAddr, "addressSliceSingle", buff.Bytes())
|
||||
if err != nil {
|
||||
t.Fatal("didn't expect error:", err)
|
||||
}
|
||||
@@ -718,7 +716,7 @@ func TestUnmarshal(t *testing.T) {
|
||||
A []common.Address
|
||||
B []common.Address
|
||||
}
|
||||
err = abi.Unpack(&outAddrStruct, "addressSliceDouble", buff.Bytes())
|
||||
err = abi.UnpackIntoInterface(&outAddrStruct, "addressSliceDouble", buff.Bytes())
|
||||
if err != nil {
|
||||
t.Fatal("didn't expect error:", err)
|
||||
}
|
||||
@@ -746,7 +744,7 @@ func TestUnmarshal(t *testing.T) {
|
||||
buff.Reset()
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000100"))
|
||||
|
||||
err = abi.Unpack(&outAddr, "addressSliceSingle", buff.Bytes())
|
||||
err = abi.UnpackIntoInterface(&outAddr, "addressSliceSingle", buff.Bytes())
|
||||
if err == nil {
|
||||
t.Fatal("expected error:", err)
|
||||
}
|
||||
@@ -769,7 +767,7 @@ func TestUnpackTuple(t *testing.T) {
|
||||
B *big.Int
|
||||
}{new(big.Int), new(big.Int)}
|
||||
|
||||
err = abi.Unpack(&v, "tuple", buff.Bytes())
|
||||
err = abi.UnpackIntoInterface(&v, "tuple", buff.Bytes())
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
} else {
|
||||
@@ -841,7 +839,7 @@ func TestUnpackTuple(t *testing.T) {
|
||||
A: big.NewInt(1),
|
||||
}
|
||||
|
||||
err = abi.Unpack(&ret, "tuple", buff.Bytes())
|
||||
err = abi.UnpackIntoInterface(&ret, "tuple", buff.Bytes())
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
@@ -88,7 +88,7 @@ type Wallet interface {
|
||||
// to discover non zero accounts and automatically add them to list of tracked
|
||||
// accounts.
|
||||
//
|
||||
// Note, self derivaton will increment the last component of the specified path
|
||||
// Note, self derivation will increment the last component of the specified path
|
||||
// opposed to decending into a child path to allow discovering accounts starting
|
||||
// from non zero components.
|
||||
//
|
||||
|
@@ -32,7 +32,7 @@ import (
|
||||
type fileCache struct {
|
||||
all mapset.Set // Set of all files from the keystore folder
|
||||
lastMod time.Time // Last time instance when a file was modified
|
||||
mu sync.RWMutex
|
||||
mu sync.Mutex
|
||||
}
|
||||
|
||||
// scan performs a new scan on the given directory, compares against the already
|
||||
|
@@ -336,7 +336,9 @@ func TestWalletNotifications(t *testing.T) {
|
||||
|
||||
// Shut down the event collector and check events.
|
||||
sub.Unsubscribe()
|
||||
<-updates
|
||||
for ev := range updates {
|
||||
events = append(events, walletEvent{ev, ev.Wallet.Accounts()[0]})
|
||||
}
|
||||
checkAccounts(t, live, ks.Wallets())
|
||||
checkEvents(t, wantEvents, events)
|
||||
}
|
||||
|
@@ -637,7 +637,7 @@ func (w *Wallet) Derive(path accounts.DerivationPath, pin bool) (accounts.Accoun
|
||||
// to discover non zero accounts and automatically add them to list of tracked
|
||||
// accounts.
|
||||
//
|
||||
// Note, self derivaton will increment the last component of the specified path
|
||||
// Note, self derivation will increment the last component of the specified path
|
||||
// opposed to decending into a child path to allow discovering accounts starting
|
||||
// from non zero components.
|
||||
//
|
||||
|
@@ -368,18 +368,22 @@ func (w *wallet) selfDerive() {
|
||||
w.log.Warn("USB wallet nonce retrieval failed", "err", err)
|
||||
break
|
||||
}
|
||||
// If the next account is empty, stop self-derivation, but add for the last base path
|
||||
// We've just self-derived a new account, start tracking it locally
|
||||
// unless the account was empty.
|
||||
path := make(accounts.DerivationPath, len(nextPaths[i]))
|
||||
copy(path[:], nextPaths[i][:])
|
||||
if balance.Sign() == 0 && nonce == 0 {
|
||||
empty = true
|
||||
// If it indeed was empty, make a log output for it anyway. In the case
|
||||
// of legacy-ledger, the first account on the legacy-path will
|
||||
// be shown to the user, even if we don't actively track it
|
||||
if i < len(nextAddrs)-1 {
|
||||
w.log.Info("Skipping trakcking first account on legacy path, use personal.deriveAccount(<url>,<path>, false) to track",
|
||||
"path", path, "address", nextAddrs[i])
|
||||
break
|
||||
}
|
||||
}
|
||||
// We've just self-derived a new account, start tracking it locally
|
||||
path := make(accounts.DerivationPath, len(nextPaths[i]))
|
||||
copy(path[:], nextPaths[i][:])
|
||||
paths = append(paths, path)
|
||||
|
||||
account := accounts.Account{
|
||||
Address: nextAddrs[i],
|
||||
URL: accounts.URL{Scheme: w.url.Scheme, Path: fmt.Sprintf("%s/%s", w.url.Path, path)},
|
||||
@@ -489,7 +493,7 @@ func (w *wallet) Derive(path accounts.DerivationPath, pin bool) (accounts.Accoun
|
||||
// to discover non zero accounts and automatically add them to list of tracked
|
||||
// accounts.
|
||||
//
|
||||
// Note, self derivaton will increment the last component of the specified path
|
||||
// Note, self derivation will increment the last component of the specified path
|
||||
// opposed to decending into a child path to allow discovering accounts starting
|
||||
// from non zero components.
|
||||
//
|
||||
|
@@ -24,8 +24,8 @@ environment:
|
||||
install:
|
||||
- git submodule update --init
|
||||
- rmdir C:\go /s /q
|
||||
- appveyor DownloadFile https://dl.google.com/go/go1.14.2.windows-%GETH_ARCH%.zip
|
||||
- 7z x go1.14.2.windows-%GETH_ARCH%.zip -y -oC:\ > NUL
|
||||
- appveyor DownloadFile https://dl.google.com/go/go1.15.windows-%GETH_ARCH%.zip
|
||||
- 7z x go1.15.windows-%GETH_ARCH%.zip -y -oC:\ > NUL
|
||||
- go version
|
||||
- gcc --version
|
||||
|
||||
|
@@ -1,6 +1,6 @@
|
||||
# This file contains sha256 checksums of optional build dependencies.
|
||||
|
||||
98de84e69726a66da7b4e58eac41b99cbe274d7e8906eeb8a5b7eb0aadee7f7c go1.14.2.src.tar.gz
|
||||
69438f7ed4f532154ffaf878f3dfd83747e7a00b70b3556eddabf7aaee28ac3a go1.15.src.tar.gz
|
||||
|
||||
d998a84eea42f2271aca792a7b027ca5c1edfcba229e8e5a844c9ac3f336df35 golangci-lint-1.27.0-linux-armv7.tar.gz
|
||||
bf781f05b0d393b4bf0a327d9e62926949a4f14d7774d950c4e009fc766ed1d4 golangci-lint.exe-1.27.0-windows-amd64.zip
|
||||
|
build/ci.go (20 changed lines)
@@ -79,7 +79,6 @@ var (
|
||||
executablePath("geth"),
|
||||
executablePath("puppeth"),
|
||||
executablePath("rlpdump"),
|
||||
executablePath("wnode"),
|
||||
executablePath("clef"),
|
||||
}
|
||||
|
||||
@@ -109,10 +108,6 @@ var (
|
||||
BinaryName: "rlpdump",
|
||||
Description: "Developer utility tool that prints RLP structures.",
|
||||
},
|
||||
{
|
||||
BinaryName: "wnode",
|
||||
Description: "Ethereum Whisper diagnostic tool",
|
||||
},
|
||||
{
|
||||
BinaryName: "clef",
|
||||
Description: "Ethereum account management tool.",
|
||||
@@ -139,13 +134,14 @@ var (
|
||||
// Note: zesty is unsupported because it was officially deprecated on Launchpad.
|
||||
// Note: artful is unsupported because it was officially deprecated on Launchpad.
|
||||
// Note: cosmic is unsupported because it was officially deprecated on Launchpad.
|
||||
// Note: disco is unsupported because it was officially deprecated on Launchpad.
|
||||
debDistroGoBoots = map[string]string{
|
||||
"trusty": "golang-1.11",
|
||||
"xenial": "golang-go",
|
||||
"bionic": "golang-go",
|
||||
"disco": "golang-go",
|
||||
"eoan": "golang-go",
|
||||
"focal": "golang-go",
|
||||
"groovy": "golang-go",
|
||||
}
|
||||
|
||||
debGoBootPaths = map[string]string{
|
||||
@@ -215,9 +211,9 @@ func doInstall(cmdline []string) {
|
||||
var minor int
|
||||
fmt.Sscanf(strings.TrimPrefix(runtime.Version(), "go1."), "%d", &minor)
|
||||
|
||||
if minor < 11 {
|
||||
if minor < 13 {
|
||||
log.Println("You have Go version", runtime.Version())
|
||||
log.Println("go-ethereum requires at least Go version 1.11 and cannot")
|
||||
log.Println("go-ethereum requires at least Go version 1.13 and cannot")
|
||||
log.Println("be compiled with an earlier version. Please upgrade your Go installation.")
|
||||
os.Exit(1)
|
||||
}
|
||||
@@ -233,6 +229,7 @@ func doInstall(cmdline []string) {
|
||||
if runtime.GOARCH == "arm64" {
|
||||
goinstall.Args = append(goinstall.Args, "-p", "1")
|
||||
}
|
||||
goinstall.Args = append(goinstall.Args, "-trimpath")
|
||||
goinstall.Args = append(goinstall.Args, "-v")
|
||||
goinstall.Args = append(goinstall.Args, packages...)
|
||||
build.MustRun(goinstall)
|
||||
@@ -241,6 +238,7 @@ func doInstall(cmdline []string) {
|
||||
|
||||
// Seems we are cross compiling, work around forbidden GOBIN
|
||||
goinstall := goToolArch(*arch, *cc, "install", buildFlags(env)...)
|
||||
goinstall.Args = append(goinstall.Args, "-trimpath")
|
||||
goinstall.Args = append(goinstall.Args, "-v")
|
||||
goinstall.Args = append(goinstall.Args, []string{"-buildmode", "archive"}...)
|
||||
goinstall.Args = append(goinstall.Args, packages...)
|
||||
@@ -838,6 +836,7 @@ func doAndroidArchive(cmdline []string) {
|
||||
if *local {
|
||||
// If we're building locally, copy bundle to build dir and skip Maven
|
||||
os.Rename("geth.aar", filepath.Join(GOBIN, "geth.aar"))
|
||||
os.Rename("geth-sources.jar", filepath.Join(GOBIN, "geth-sources.jar"))
|
||||
return
|
||||
}
|
||||
meta := newMavenMetadata(env)
|
||||
@@ -884,11 +883,12 @@ func gomobileTool(subcmd string, args ...string) *exec.Cmd {
|
||||
"PATH=" + GOBIN + string(os.PathListSeparator) + os.Getenv("PATH"),
|
||||
}
|
||||
for _, e := range os.Environ() {
|
||||
if strings.HasPrefix(e, "GOPATH=") || strings.HasPrefix(e, "PATH=") {
|
||||
if strings.HasPrefix(e, "GOPATH=") || strings.HasPrefix(e, "PATH=") || strings.HasPrefix(e, "GOBIN=") {
|
||||
continue
|
||||
}
|
||||
cmd.Env = append(cmd.Env, e)
|
||||
}
|
||||
cmd.Env = append(cmd.Env, "GOBIN="+GOBIN)
|
||||
return cmd
|
||||
}
|
||||
|
||||
@@ -957,7 +957,7 @@ func doXCodeFramework(cmdline []string) {
|
||||
|
||||
if *local {
|
||||
// If we're building locally, use the build folder and stop afterwards
|
||||
bind.Dir, _ = filepath.Abs(GOBIN)
|
||||
bind.Dir = GOBIN
|
||||
build.MustRun(bind)
|
||||
return
|
||||
}
|
||||
|
@@ -44,7 +44,7 @@ func main() {
|
||||
natdesc = flag.String("nat", "none", "port mapping mechanism (any|none|upnp|pmp|extip:<IP>)")
|
||||
netrestrict = flag.String("netrestrict", "", "restrict network communication to the given IP networks (CIDR masks)")
|
||||
runv5 = flag.Bool("v5", false, "run a v5 topic discovery bootnode")
|
||||
verbosity = flag.Int("verbosity", int(log.LvlInfo), "log verbosity (0-9)")
|
||||
verbosity = flag.Int("verbosity", int(log.LvlInfo), "log verbosity (0-5)")
|
||||
vmodule = flag.String("vmodule", "", "log verbosity pattern")
|
||||
|
||||
nodeKey *ecdsa.PrivateKey
|
||||
|
@@ -94,7 +94,7 @@ with minimal requirements.
On the `client` qube, we need to create a listener which will receive the request from the Dapp, and proxy it.


[qubes-client.py](qubes/client/qubes-client.py):
[qubes-client.py](qubes/qubes-client.py):

```python
|
||||
|
||||
|
@@ -10,6 +10,64 @@ TL;DR: Given a version number MAJOR.MINOR.PATCH, increment the:
|
||||
|
||||
Additional labels for pre-release and build metadata are available as extensions to the MAJOR.MINOR.PATCH format.
|
||||
|
||||
### 6.1.0
|
||||
|
||||
The API-method `account_signGnosisSafeTx` was added. This method takes two parameters,
|
||||
`[address, safeTx]`. The latter, `safeTx`, can be copy-pasted from the gnosis relay. For example:
|
||||
|
||||
```
|
||||
{
|
||||
"jsonrpc": "2.0",
|
||||
"method": "account_signGnosisSafeTx",
|
||||
"params": ["0xfd1c4226bfD1c436672092F4eCbfC270145b7256",
|
||||
{
|
||||
"safe": "0x25a6c4BBd32B2424A9c99aEB0584Ad12045382B3",
|
||||
"to": "0xB372a646f7F05Cc1785018dBDA7EBc734a2A20E2",
|
||||
"value": "20000000000000000",
|
||||
"data": null,
|
||||
"operation": 0,
|
||||
"gasToken": "0x0000000000000000000000000000000000000000",
|
||||
"safeTxGas": 27845,
|
||||
"baseGas": 0,
|
||||
"gasPrice": "0",
|
||||
"refundReceiver": "0x0000000000000000000000000000000000000000",
|
||||
"nonce": 2,
|
||||
"executionDate": null,
|
||||
"submissionDate": "2020-09-15T21:54:49.617634Z",
|
||||
"modified": "2020-09-15T21:54:49.617634Z",
|
||||
"blockNumber": null,
|
||||
"transactionHash": null,
|
||||
"safeTxHash": "0x2edfbd5bc113ff18c0631595db32eb17182872d88d9bf8ee4d8c2dd5db6d95e2",
|
||||
"executor": null,
|
||||
"isExecuted": false,
|
||||
"isSuccessful": null,
|
||||
"ethGasPrice": null,
|
||||
"gasUsed": null,
|
||||
"fee": null,
|
||||
"origin": null,
|
||||
"dataDecoded": null,
|
||||
"confirmationsRequired": null,
|
||||
"confirmations": [
|
||||
{
|
||||
"owner": "0xAd2e180019FCa9e55CADe76E4487F126Fd08DA34",
|
||||
"submissionDate": "2020-09-15T21:54:49.663299Z",
|
||||
"transactionHash": null,
|
||||
"confirmationType": "CONFIRMATION",
|
||||
"signature": "0x95a7250bb645f831c86defc847350e7faff815b2fb586282568e96cc859e39315876db20a2eed5f7a0412906ec5ab57652a6f645ad4833f345bda059b9da2b821c",
|
||||
"signatureType": "EOA"
|
||||
}
|
||||
],
|
||||
"signatures": null
|
||||
}
|
||||
],
|
||||
"id": 67
|
||||
}
|
||||
```
|
||||
|
||||
Not all fields are required, though. This method is really just a UX helper, which massages the
|
||||
input to conform to the `EIP-712` [specification](https://docs.gnosis.io/safe/docs/contracts_tx_execution/#transaction-hash)
|
||||
for the Gnosis Safe, and making the output be directly importable to by a relay service.
|
||||
|
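As a rough illustration, the new method can be invoked against clef's external API like any other JSON-RPC call. The sketch below uses go-ethereum's `rpc` client; the endpoint `http://localhost:8550` and the reduced set of `safeTx` fields are assumptions for illustration, not taken from this changelog.

```go
package main

import (
	"context"
	"encoding/json"
	"fmt"

	"github.com/ethereum/go-ethereum/rpc"
)

func main() {
	// Assumes clef is serving its external API over HTTP on this address.
	client, err := rpc.Dial("http://localhost:8550")
	if err != nil {
		panic(err)
	}
	// safeTx copied from the relay service; only a subset of fields is shown.
	safeTx := json.RawMessage(`{"safe":"0x25a6c4BBd32B2424A9c99aEB0584Ad12045382B3",
		"to":"0xB372a646f7F05Cc1785018dBDA7EBc734a2A20E2","value":"20000000000000000",
		"operation":0,"gasToken":"0x0000000000000000000000000000000000000000",
		"safeTxGas":27845,"baseGas":0,"gasPrice":"0",
		"refundReceiver":"0x0000000000000000000000000000000000000000","nonce":2}`)

	var signed json.RawMessage
	err = client.CallContext(context.Background(), &signed, "account_signGnosisSafeTx",
		"0xfd1c4226bfD1c436672092F4eCbfC270145b7256", safeTx)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(signed))
}
```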
||||
|
||||
### 6.0.0
|
||||
|
||||
|
@@ -29,7 +29,6 @@ import (
|
||||
"math/big"
|
||||
"os"
|
||||
"os/signal"
|
||||
"os/user"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"sort"
|
||||
@@ -666,8 +665,8 @@ func signer(c *cli.Context) error {
|
||||
Version: "1.0"},
|
||||
}
|
||||
if c.GlobalBool(utils.HTTPEnabledFlag.Name) {
|
||||
vhosts := splitAndTrim(c.GlobalString(utils.HTTPVirtualHostsFlag.Name))
|
||||
cors := splitAndTrim(c.GlobalString(utils.HTTPCORSDomainFlag.Name))
|
||||
vhosts := utils.SplitAndTrim(c.GlobalString(utils.HTTPVirtualHostsFlag.Name))
|
||||
cors := utils.SplitAndTrim(c.GlobalString(utils.HTTPCORSDomainFlag.Name))
|
||||
|
||||
srv := rpc.NewServer()
|
||||
err := node.RegisterApisFromWhitelist(rpcAPI, []string{"account"}, srv, false)
|
||||
@@ -736,21 +735,11 @@ func signer(c *cli.Context) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// splitAndTrim splits input separated by a comma
|
||||
// and trims excessive white space from the substrings.
|
||||
func splitAndTrim(input string) []string {
|
||||
result := strings.Split(input, ",")
|
||||
for i, r := range result {
|
||||
result[i] = strings.TrimSpace(r)
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// DefaultConfigDir is the default config directory to use for the vaults and other
|
||||
// persistence requirements.
|
||||
func DefaultConfigDir() string {
|
||||
// Try to place the data folder in the user's home dir
|
||||
home := homeDir()
|
||||
home := utils.HomeDir()
|
||||
if home != "" {
|
||||
if runtime.GOOS == "darwin" {
|
||||
return filepath.Join(home, "Library", "Signer")
|
||||
@@ -769,15 +758,6 @@ func DefaultConfigDir() string {
|
||||
return ""
|
||||
}
|
||||
|
||||
func homeDir() string {
|
||||
if home := os.Getenv("HOME"); home != "" {
|
||||
return home
|
||||
}
|
||||
if usr, err := user.Current(); err == nil {
|
||||
return usr.HomeDir
|
||||
}
|
||||
return ""
|
||||
}
|
||||
func readMasterKey(ctx *cli.Context, ui core.UIClientAPI) ([]byte, error) {
|
||||
var (
|
||||
file string
|
||||
|
86
cmd/devp2p/README.md
Normal file
@@ -0,0 +1,86 @@
|
||||
# The devp2p command
|
||||
|
||||
The devp2p command line tool is a utility for low-level peer-to-peer debugging and
protocol development. Its subcommands cover ENR decoding, node key management, DNS
discovery node lists, the discovery v4/v5 protocols, and interactive discovery test suites.
|
||||
|
||||
### ENR Decoding
|
||||
|
||||
Use `devp2p enrdump <base64>` to verify and display an Ethereum Node Record.
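
The same verification can also be done programmatically. The following is a minimal sketch using
the `p2p/enode` package from this repository; it is illustrative only and expects the record as a
command-line argument.

```go
package main

import (
	"fmt"
	"os"

	"github.com/ethereum/go-ethereum/p2p/enode"
)

func main() {
	if len(os.Args) != 2 {
		fmt.Fprintln(os.Stderr, "usage: enrcheck <enr:... or enode://... record>")
		os.Exit(1)
	}
	// enode.Parse accepts enode:// URLs as well as base64 "enr:" records and
	// verifies the record signature against the known identity schemes.
	n, err := enode.Parse(enode.ValidSchemes, os.Args[1])
	if err != nil {
		fmt.Fprintln(os.Stderr, "INVALID:", err)
		os.Exit(1)
	}
	fmt.Println("Node ID:", n.ID())
	fmt.Println("URLv4:  ", n.URLv4())
}
```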
|
||||
|
||||
### Node Key Management
|
||||
|
||||
The `devp2p key ...` command family deals with node key files.
|
||||
|
||||
Run `devp2p key generate mynode.key` to create a new node key in the `mynode.key` file.
|
||||
|
||||
Run `devp2p key to-enode mynode.key -ip 127.0.0.1 -tcp 30303` to create an enode:// URL
|
||||
corresponding to the given node key and address information.
|
||||
|
||||
### Maintaining DNS Discovery Node Lists
|
||||
|
||||
The devp2p command can create and publish DNS discovery node lists.
|
||||
|
||||
Run `devp2p dns sign <directory>` to update the signature of a DNS discovery tree.
|
||||
|
||||
Run `devp2p dns sync <enrtree-URL>` to download a complete DNS discovery tree.
|
||||
|
||||
Run `devp2p dns to-cloudflare <directory>` to publish a tree to CloudFlare DNS.
|
||||
|
||||
Run `devp2p dns to-route53 <directory>` to publish a tree to Amazon Route53.
|
||||
|
||||
You can find more information about these commands in the [DNS Discovery Setup Guide][dns-tutorial].
|
||||
|
||||
### Discovery v4 Utilities
|
||||
|
||||
The `devp2p discv4 ...` command family deals with the [Node Discovery v4][discv4]
|
||||
protocol.
|
||||
|
||||
Run `devp2p discv4 ping <enode/ENR>` to ping a node.
|
||||
|
||||
Run `devp2p discv4 resolve <enode/ENR>` to find the most recent node record of a node in
|
||||
the DHT.
|
||||
|
||||
Run `devp2p discv4 crawl <nodes.json path>` to create or update a JSON node set.
|
||||
|
||||
### Discovery v5 Utilities
|
||||
|
||||
The `devp2p discv5 ...` command family deals with the [Node Discovery v5][discv5]
|
||||
protocol. This protocol is currently under active development.
|
||||
|
||||
Run `devp2p discv5 ping <ENR>` to ping a node.
|
||||
|
||||
Run `devp2p discv5 resolve <ENR>` to find the most recent node record of a node in
|
||||
the discv5 DHT.
|
||||
|
||||
Run `devp2p discv5 listen` to run a Discovery v5 node.
|
||||
|
||||
Run `devp2p discv5 crawl <nodes.json path>` to create or update a JSON node set containing
|
||||
discv5 nodes.
|
||||
|
||||
### Discovery Test Suites
|
||||
|
||||
The devp2p command also contains interactive test suites for Discovery v4 and Discovery
|
||||
v5.
|
||||
|
||||
To run these tests against your implementation, you need to set up a networking
|
||||
environment where two separate UDP listening addresses are available on the same machine.
|
||||
The two listening addresses must also be routed such that they are able to reach the node
|
||||
you want to test.
|
||||
|
||||
For example, if you want to run the test on your local host, and the node under test is
|
||||
also on the local host, you need to assign two IP addresses (or a larger range) to your
|
||||
loopback interface. On macOS, this can be done by executing the following command:
|
||||
|
||||
sudo ifconfig lo0 add 127.0.0.2
|
||||
|
||||
You can now run either test suite as follows: Start the node under test first, ensuring
|
||||
that it won't talk to the Internet (i.e. disable bootstrapping). An easy way to prevent
unintended connections to the global DHT is to listen on `127.0.0.1`.
|
||||
|
||||
Now get the ENR of your node and store it in the `NODE` environment variable.
|
||||
|
||||
Start the test by running `devp2p discv5 test -listen1 127.0.0.1 -listen2 127.0.0.2 $NODE`.
|
||||
|
||||
[dns-tutorial]: https://geth.ethereum.org/docs/developers/dns-discovery-setup
|
||||
[discv4]: https://github.com/ethereum/devp2p/tree/master/discv4.md
|
||||
[discv5]: https://github.com/ethereum/devp2p/tree/master/discv5/discv5.md
|
@@ -286,7 +286,11 @@ func listen(ln *enode.LocalNode, addr string) *net.UDPConn {
|
||||
}
|
||||
usocket := socket.(*net.UDPConn)
|
||||
uaddr := socket.LocalAddr().(*net.UDPAddr)
|
||||
ln.SetFallbackIP(net.IP{127, 0, 0, 1})
|
||||
if uaddr.IP.IsUnspecified() {
|
||||
ln.SetFallbackIP(net.IP{127, 0, 0, 1})
|
||||
} else {
|
||||
ln.SetFallbackIP(uaddr.IP)
|
||||
}
|
||||
ln.SetFallbackUDP(uaddr.Port)
|
||||
return usocket
|
||||
}
|
||||
@@ -294,7 +298,11 @@ func listen(ln *enode.LocalNode, addr string) *net.UDPConn {
|
||||
func parseBootnodes(ctx *cli.Context) ([]*enode.Node, error) {
|
||||
s := params.RinkebyBootnodes
|
||||
if ctx.IsSet(bootnodesFlag.Name) {
|
||||
s = strings.Split(ctx.String(bootnodesFlag.Name), ",")
|
||||
input := ctx.String(bootnodesFlag.Name)
|
||||
if input == "" {
|
||||
return nil, nil
|
||||
}
|
||||
s = strings.Split(input, ",")
|
||||
}
|
||||
nodes := make([]*enode.Node, len(s))
|
||||
var err error
|
||||
|
@@ -18,9 +18,13 @@ package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/cmd/devp2p/internal/v5test"
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/internal/utesting"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"github.com/ethereum/go-ethereum/p2p/discover"
|
||||
"gopkg.in/urfave/cli.v1"
|
||||
)
|
||||
@@ -33,6 +37,7 @@ var (
|
||||
discv5PingCommand,
|
||||
discv5ResolveCommand,
|
||||
discv5CrawlCommand,
|
||||
discv5TestCommand,
|
||||
discv5ListenCommand,
|
||||
},
|
||||
}
|
||||
@@ -53,6 +58,12 @@ var (
|
||||
Action: discv5Crawl,
|
||||
Flags: []cli.Flag{bootnodesFlag, crawlTimeoutFlag},
|
||||
}
|
||||
discv5TestCommand = cli.Command{
|
||||
Name: "test",
|
||||
Usage: "Runs protocol tests against a node",
|
||||
Action: discv5Test,
|
||||
Flags: []cli.Flag{testPatternFlag, testListen1Flag, testListen2Flag},
|
||||
}
|
||||
discv5ListenCommand = cli.Command{
|
||||
Name: "listen",
|
||||
Usage: "Runs a node",
|
||||
@@ -103,6 +114,30 @@ func discv5Crawl(ctx *cli.Context) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func discv5Test(ctx *cli.Context) error {
|
||||
// Disable logging unless explicitly enabled.
|
||||
if !ctx.GlobalIsSet("verbosity") && !ctx.GlobalIsSet("vmodule") {
|
||||
log.Root().SetHandler(log.DiscardHandler())
|
||||
}
|
||||
|
||||
// Filter and run test cases.
|
||||
suite := &v5test.Suite{
|
||||
Dest: getNodeArg(ctx),
|
||||
Listen1: ctx.String(testListen1Flag.Name),
|
||||
Listen2: ctx.String(testListen2Flag.Name),
|
||||
}
|
||||
tests := suite.AllTests()
|
||||
if ctx.IsSet(testPatternFlag.Name) {
|
||||
tests = utesting.MatchTests(tests, ctx.String(testPatternFlag.Name))
|
||||
}
|
||||
results := utesting.RunTests(tests, os.Stdout)
|
||||
if fails := utesting.CountFailures(results); fails > 0 {
|
||||
return fmt.Errorf("%v/%v tests passed.", len(tests)-fails, len(tests))
|
||||
}
|
||||
fmt.Printf("%v/%v passed\n", len(tests), len(tests))
|
||||
return nil
|
||||
}
|
||||
|
||||
func discv5Listen(ctx *cli.Context) error {
|
||||
disc := startV5(ctx)
|
||||
defer disc.Close()
|
||||
|
@@ -21,6 +21,7 @@ import (
|
||||
"encoding/base64"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net"
|
||||
"os"
|
||||
@@ -69,22 +70,30 @@ func enrdump(ctx *cli.Context) error {
|
||||
if err != nil {
|
||||
return fmt.Errorf("INVALID: %v", err)
|
||||
}
|
||||
fmt.Print(dumpRecord(r))
|
||||
dumpRecord(os.Stdout, r)
|
||||
return nil
|
||||
}
|
||||
|
||||
// dumpRecord creates a human-readable description of the given node record.
|
||||
func dumpRecord(r *enr.Record) string {
|
||||
out := new(bytes.Buffer)
|
||||
if n, err := enode.New(enode.ValidSchemes, r); err != nil {
|
||||
func dumpRecord(out io.Writer, r *enr.Record) {
|
||||
n, err := enode.New(enode.ValidSchemes, r)
|
||||
if err != nil {
|
||||
fmt.Fprintf(out, "INVALID: %v\n", err)
|
||||
} else {
|
||||
fmt.Fprintf(out, "Node ID: %v\n", n.ID())
|
||||
dumpNodeURL(out, n)
|
||||
}
|
||||
kv := r.AppendElements(nil)[1:]
|
||||
fmt.Fprintf(out, "Record has sequence number %d and %d key/value pairs.\n", r.Seq(), len(kv)/2)
|
||||
fmt.Fprint(out, dumpRecordKV(kv, 2))
|
||||
return out.String()
|
||||
}
|
||||
|
||||
func dumpNodeURL(out io.Writer, n *enode.Node) {
|
||||
var key enode.Secp256k1
|
||||
if n.Load(&key) != nil {
|
||||
return // no secp256k1 public key
|
||||
}
|
||||
fmt.Fprintf(out, "URLv4: %s\n", n.URLv4())
|
||||
}
|
||||
|
||||
func dumpRecordKV(kv []interface{}, indent int) string {
|
||||
|
166
cmd/devp2p/internal/ethtest/chain.go
Normal file
@@ -0,0 +1,166 @@
|
||||
// Copyright 2020 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package ethtest
|
||||
|
||||
import (
|
||||
"compress/gzip"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"math/big"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/ethereum/go-ethereum/core"
|
||||
"github.com/ethereum/go-ethereum/core/forkid"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/params"
|
||||
"github.com/ethereum/go-ethereum/rlp"
|
||||
)
|
||||
|
||||
type Chain struct {
|
||||
blocks []*types.Block
|
||||
chainConfig *params.ChainConfig
|
||||
}
|
||||
|
||||
func (c *Chain) WriteTo(writer io.Writer) error {
|
||||
for _, block := range c.blocks {
|
||||
if err := rlp.Encode(writer, block); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Len returns the length of the chain.
|
||||
func (c *Chain) Len() int {
|
||||
return len(c.blocks)
|
||||
}
|
||||
|
||||
// TD calculates the total difficulty of the chain.
|
||||
func (c *Chain) TD(height int) *big.Int { // TODO later on change scheme so that the height is included in range
|
||||
sum := big.NewInt(0)
|
||||
for _, block := range c.blocks[:height] {
|
||||
sum.Add(sum, block.Difficulty())
|
||||
}
|
||||
return sum
|
||||
}
|
||||
|
||||
// ForkID gets the fork id of the chain.
|
||||
func (c *Chain) ForkID() forkid.ID {
|
||||
return forkid.NewID(c.chainConfig, c.blocks[0].Hash(), uint64(c.Len()))
|
||||
}
|
||||
|
||||
// Shorten returns a copy of the imported chain truncated to the desired height.
|
||||
func (c *Chain) Shorten(height int) *Chain {
|
||||
blocks := make([]*types.Block, height)
|
||||
copy(blocks, c.blocks[:height])
|
||||
|
||||
config := *c.chainConfig
|
||||
return &Chain{
|
||||
blocks: blocks,
|
||||
chainConfig: &config,
|
||||
}
|
||||
}
|
||||
|
||||
// Head returns the chain head.
|
||||
func (c *Chain) Head() *types.Block {
|
||||
return c.blocks[c.Len()-1]
|
||||
}
|
||||
|
||||
func (c *Chain) GetHeaders(req GetBlockHeaders) (BlockHeaders, error) {
|
||||
if req.Amount < 1 {
|
||||
return nil, fmt.Errorf("no block headers requested")
|
||||
}
|
||||
|
||||
headers := make(BlockHeaders, req.Amount)
|
||||
var blockNumber uint64
|
||||
|
||||
// range over blocks to check if our chain has the requested header
|
||||
for _, block := range c.blocks {
|
||||
if block.Hash() == req.Origin.Hash || block.Number().Uint64() == req.Origin.Number {
|
||||
headers[0] = block.Header()
|
||||
blockNumber = block.Number().Uint64()
|
||||
}
|
||||
}
|
||||
if headers[0] == nil {
|
||||
return nil, fmt.Errorf("no headers found for given origin number %v, hash %v", req.Origin.Number, req.Origin.Hash)
|
||||
}
|
||||
|
||||
if req.Reverse {
|
||||
for i := 1; i < int(req.Amount); i++ {
|
||||
blockNumber -= (1 + req.Skip)
|
||||
headers[i] = c.blocks[blockNumber].Header()
|
||||
|
||||
}
|
||||
|
||||
return headers, nil
|
||||
}
|
||||
|
||||
for i := 1; i < int(req.Amount); i++ {
|
||||
blockNumber += (1 + req.Skip)
|
||||
headers[i] = c.blocks[blockNumber].Header()
|
||||
}
|
||||
|
||||
return headers, nil
|
||||
}
|
||||
|
||||
// loadChain takes the given chain.rlp file, and decodes and returns
|
||||
// the blocks from the file.
|
||||
func loadChain(chainfile string, genesis string) (*Chain, error) {
|
||||
// Open the file handle and potentially unwrap the gzip stream
|
||||
fh, err := os.Open(chainfile)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer fh.Close()
|
||||
|
||||
var reader io.Reader = fh
|
||||
if strings.HasSuffix(chainfile, ".gz") {
|
||||
if reader, err = gzip.NewReader(reader); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
stream := rlp.NewStream(reader, 0)
|
||||
var blocks []*types.Block
|
||||
for i := 0; ; i++ {
|
||||
var b types.Block
|
||||
if err := stream.Decode(&b); err == io.EOF {
|
||||
break
|
||||
} else if err != nil {
|
||||
return nil, fmt.Errorf("at block %d: %v", i, err)
|
||||
}
|
||||
blocks = append(blocks, &b)
|
||||
}
|
||||
|
||||
// Load the chain configuration from the genesis file
|
||||
chainConfig, err := ioutil.ReadFile(genesis)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var gen core.Genesis
|
||||
if err := json.Unmarshal(chainConfig, &gen); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &Chain{
|
||||
blocks: blocks,
|
||||
chainConfig: gen.Config,
|
||||
}, nil
|
||||
}
|
150
cmd/devp2p/internal/ethtest/chain_test.go
Normal file
@@ -0,0 +1,150 @@
|
||||
// Copyright 2020 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package ethtest
|
||||
|
||||
import (
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"testing"
|
||||
|
||||
"github.com/ethereum/go-ethereum/p2p"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
// TestEthProtocolNegotiation tests whether the test suite
// can negotiate the highest eth protocol version from a peer's advertised capabilities
|
||||
func TestEthProtocolNegotiation(t *testing.T) {
|
||||
var tests = []struct {
|
||||
conn *Conn
|
||||
caps []p2p.Cap
|
||||
expected uint32
|
||||
}{
|
||||
{
|
||||
conn: &Conn{},
|
||||
caps: []p2p.Cap{
|
||||
{Name: "eth", Version: 63},
|
||||
{Name: "eth", Version: 64},
|
||||
{Name: "eth", Version: 65},
|
||||
},
|
||||
expected: uint32(65),
|
||||
},
|
||||
{
|
||||
conn: &Conn{},
|
||||
caps: []p2p.Cap{
|
||||
{Name: "eth", Version: 0},
|
||||
{Name: "eth", Version: 89},
|
||||
{Name: "eth", Version: 65},
|
||||
},
|
||||
expected: uint32(65),
|
||||
},
|
||||
{
|
||||
conn: &Conn{},
|
||||
caps: []p2p.Cap{
|
||||
{Name: "eth", Version: 63},
|
||||
{Name: "eth", Version: 64},
|
||||
{Name: "wrongProto", Version: 65},
|
||||
},
|
||||
expected: uint32(64),
|
||||
},
|
||||
}
|
||||
|
||||
for i, tt := range tests {
|
||||
t.Run(strconv.Itoa(i), func(t *testing.T) {
|
||||
tt.conn.negotiateEthProtocol(tt.caps)
|
||||
assert.Equal(t, tt.expected, uint32(tt.conn.ethProtocolVersion))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// TestChain_GetHeaders tests whether the test suite can correctly
|
||||
// respond to a GetBlockHeaders request from a node.
|
||||
func TestChain_GetHeaders(t *testing.T) {
|
||||
chainFile, err := filepath.Abs("./testdata/chain.rlp.gz")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
genesisFile, err := filepath.Abs("./testdata/genesis.json")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
chain, err := loadChain(chainFile, genesisFile)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
var tests = []struct {
|
||||
req GetBlockHeaders
|
||||
expected BlockHeaders
|
||||
}{
|
||||
{
|
||||
req: GetBlockHeaders{
|
||||
Origin: hashOrNumber{
|
||||
Number: uint64(2),
|
||||
},
|
||||
Amount: uint64(5),
|
||||
Skip: 1,
|
||||
Reverse: false,
|
||||
},
|
||||
expected: BlockHeaders{
|
||||
chain.blocks[2].Header(),
|
||||
chain.blocks[4].Header(),
|
||||
chain.blocks[6].Header(),
|
||||
chain.blocks[8].Header(),
|
||||
chain.blocks[10].Header(),
|
||||
},
|
||||
},
|
||||
{
|
||||
req: GetBlockHeaders{
|
||||
Origin: hashOrNumber{
|
||||
Number: uint64(chain.Len() - 1),
|
||||
},
|
||||
Amount: uint64(3),
|
||||
Skip: 0,
|
||||
Reverse: true,
|
||||
},
|
||||
expected: BlockHeaders{
|
||||
chain.blocks[chain.Len()-1].Header(),
|
||||
chain.blocks[chain.Len()-2].Header(),
|
||||
chain.blocks[chain.Len()-3].Header(),
|
||||
},
|
||||
},
|
||||
{
|
||||
req: GetBlockHeaders{
|
||||
Origin: hashOrNumber{
|
||||
Hash: chain.Head().Hash(),
|
||||
},
|
||||
Amount: uint64(1),
|
||||
Skip: 0,
|
||||
Reverse: false,
|
||||
},
|
||||
expected: BlockHeaders{
|
||||
chain.Head().Header(),
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for i, tt := range tests {
|
||||
t.Run(strconv.Itoa(i), func(t *testing.T) {
|
||||
headers, err := chain.GetHeaders(tt.req)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
assert.Equal(t, headers, tt.expected)
|
||||
})
|
||||
}
|
||||
}
|
216
cmd/devp2p/internal/ethtest/suite.go
Normal file
@@ -0,0 +1,216 @@
|
||||
// Copyright 2020 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package ethtest
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
|
||||
"github.com/ethereum/go-ethereum/crypto"
|
||||
"github.com/ethereum/go-ethereum/internal/utesting"
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
"github.com/ethereum/go-ethereum/p2p/rlpx"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
// Suite represents a structure used to test the eth
// protocol of a node.
|
||||
type Suite struct {
|
||||
Dest *enode.Node
|
||||
|
||||
chain *Chain
|
||||
fullChain *Chain
|
||||
}
|
||||
|
||||
// NewSuite creates and returns a new eth-test suite that can
|
||||
// be used to test the given node against the given blockchain
|
||||
// data.
|
||||
func NewSuite(dest *enode.Node, chainfile string, genesisfile string) *Suite {
|
||||
chain, err := loadChain(chainfile, genesisfile)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return &Suite{
|
||||
Dest: dest,
|
||||
chain: chain.Shorten(1000),
|
||||
fullChain: chain,
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Suite) AllTests() []utesting.Test {
|
||||
return []utesting.Test{
|
||||
{Name: "Status", Fn: s.TestStatus},
|
||||
{Name: "GetBlockHeaders", Fn: s.TestGetBlockHeaders},
|
||||
{Name: "Broadcast", Fn: s.TestBroadcast},
|
||||
{Name: "GetBlockBodies", Fn: s.TestGetBlockBodies},
|
||||
}
|
||||
}
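
As a rough illustration of how this suite is meant to be driven, the sketch below wires `NewSuite` into the `utesting` runner in the same way the `discv5 test` subcommand above runs the discv5 suite. The driver itself, its argument layout, and the file paths are hypothetical, not part of this change set.

```go
// Hypothetical driver for the eth test suite (not part of this change set).
// Because ethtest and utesting are internal packages, a file like this would
// have to live inside the go-ethereum tree, e.g. under cmd/devp2p.
package main

import (
	"fmt"
	"os"

	"github.com/ethereum/go-ethereum/cmd/devp2p/internal/ethtest"
	"github.com/ethereum/go-ethereum/internal/utesting"
	"github.com/ethereum/go-ethereum/p2p/enode"
)

func main() {
	if len(os.Args) != 4 {
		fmt.Fprintln(os.Stderr, "usage: ethtest <enode-URL> <chain.rlp[.gz]> <genesis.json>")
		os.Exit(1)
	}
	// Node under test.
	dest, err := enode.Parse(enode.ValidSchemes, os.Args[1])
	if err != nil {
		fmt.Fprintln(os.Stderr, "invalid node:", err)
		os.Exit(1)
	}
	// Chain data served to (and expected from) the node during the tests.
	suite := ethtest.NewSuite(dest, os.Args[2], os.Args[3])

	results := utesting.RunTests(suite.AllTests(), os.Stdout)
	if fails := utesting.CountFailures(results); fails > 0 {
		fmt.Printf("%v/%v tests passed\n", len(results)-fails, len(results))
		os.Exit(1)
	}
	fmt.Printf("%v/%v passed\n", len(results), len(results))
}
```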
|
||||
|
||||
// TestStatus attempts to connect to the given node and exchange
|
||||
// a status message with it, and then check to make sure
|
||||
// the chain head is correct.
|
||||
func (s *Suite) TestStatus(t *utesting.T) {
|
||||
conn, err := s.dial()
|
||||
if err != nil {
|
||||
t.Fatalf("could not dial: %v", err)
|
||||
}
|
||||
// get protoHandshake
|
||||
conn.handshake(t)
|
||||
// get status
|
||||
switch msg := conn.statusExchange(t, s.chain).(type) {
|
||||
case *Status:
|
||||
t.Logf("%+v\n", msg)
|
||||
default:
|
||||
t.Fatalf("unexpected: %#v", msg)
|
||||
}
|
||||
}
|
||||
|
||||
// TestGetBlockHeaders tests whether the given node can respond to
|
||||
// a `GetBlockHeaders` request and that the response is accurate.
|
||||
func (s *Suite) TestGetBlockHeaders(t *utesting.T) {
|
||||
conn, err := s.dial()
|
||||
if err != nil {
|
||||
t.Fatalf("could not dial: %v", err)
|
||||
}
|
||||
|
||||
conn.handshake(t)
|
||||
conn.statusExchange(t, s.chain)
|
||||
|
||||
// get block headers
|
||||
req := &GetBlockHeaders{
|
||||
Origin: hashOrNumber{
|
||||
Hash: s.chain.blocks[1].Hash(),
|
||||
},
|
||||
Amount: 2,
|
||||
Skip: 1,
|
||||
Reverse: false,
|
||||
}
|
||||
|
||||
if err := conn.Write(req); err != nil {
|
||||
t.Fatalf("could not write to connection: %v", err)
|
||||
}
|
||||
|
||||
switch msg := conn.ReadAndServe(s.chain).(type) {
|
||||
case *BlockHeaders:
|
||||
headers := msg
|
||||
for _, header := range *headers {
|
||||
num := header.Number.Uint64()
|
||||
assert.Equal(t, s.chain.blocks[int(num)].Header(), header)
|
||||
t.Logf("\nHEADER FOR BLOCK NUMBER %d: %+v\n", header.Number, header)
|
||||
}
|
||||
default:
|
||||
t.Fatalf("unexpected: %#v", msg)
|
||||
}
|
||||
}
|
||||
|
||||
// TestGetBlockBodies tests whether the given node can respond to
|
||||
// a `GetBlockBodies` request and that the response is accurate.
|
||||
func (s *Suite) TestGetBlockBodies(t *utesting.T) {
|
||||
conn, err := s.dial()
|
||||
if err != nil {
|
||||
t.Fatalf("could not dial: %v", err)
|
||||
}
|
||||
|
||||
conn.handshake(t)
|
||||
conn.statusExchange(t, s.chain)
|
||||
// create block bodies request
|
||||
req := &GetBlockBodies{s.chain.blocks[54].Hash(), s.chain.blocks[75].Hash()}
|
||||
if err := conn.Write(req); err != nil {
|
||||
t.Fatalf("could not write to connection: %v", err)
|
||||
}
|
||||
|
||||
switch msg := conn.ReadAndServe(s.chain).(type) {
|
||||
case *BlockBodies:
|
||||
bodies := msg
|
||||
for _, body := range *bodies {
|
||||
t.Logf("\nBODY: %+v\n", body)
|
||||
}
|
||||
default:
|
||||
t.Fatalf("unexpected: %#v", msg)
|
||||
}
|
||||
}
|
||||
|
||||
// TestBroadcast tests whether a block announcement is correctly
|
||||
// propagated to the given node's peer(s).
|
||||
func (s *Suite) TestBroadcast(t *utesting.T) {
|
||||
// create conn to send block announcement
|
||||
sendConn, err := s.dial()
|
||||
if err != nil {
|
||||
t.Fatalf("could not dial: %v", err)
|
||||
}
|
||||
// create conn to receive block announcement
|
||||
receiveConn, err := s.dial()
|
||||
if err != nil {
|
||||
t.Fatalf("could not dial: %v", err)
|
||||
}
|
||||
|
||||
sendConn.handshake(t)
|
||||
receiveConn.handshake(t)
|
||||
|
||||
sendConn.statusExchange(t, s.chain)
|
||||
receiveConn.statusExchange(t, s.chain)
|
||||
|
||||
// sendConn sends the block announcement
|
||||
blockAnnouncement := &NewBlock{
|
||||
Block: s.fullChain.blocks[1000],
|
||||
TD: s.fullChain.TD(1001),
|
||||
}
|
||||
if err := sendConn.Write(blockAnnouncement); err != nil {
|
||||
t.Fatalf("could not write to connection: %v", err)
|
||||
}
|
||||
|
||||
switch msg := receiveConn.ReadAndServe(s.chain).(type) {
|
||||
case *NewBlock:
|
||||
assert.Equal(t, blockAnnouncement.Block.Header(), msg.Block.Header(),
|
||||
"wrong block header in announcement")
|
||||
assert.Equal(t, blockAnnouncement.TD, msg.TD,
|
||||
"wrong TD in announcement")
|
||||
case *NewBlockHashes:
|
||||
hashes := *msg
|
||||
assert.Equal(t, blockAnnouncement.Block.Hash(), hashes[0].Hash,
|
||||
"wrong block hash in announcement")
|
||||
default:
|
||||
t.Fatalf("unexpected: %#v", msg)
|
||||
}
|
||||
// update test suite chain
|
||||
s.chain.blocks = append(s.chain.blocks, s.fullChain.blocks[1000])
|
||||
// wait for client to update its chain
|
||||
if err := receiveConn.waitForBlock(s.chain.Head()); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
// dial attempts to dial the given node and perform a handshake,
|
||||
// returning the created Conn if successful.
|
||||
func (s *Suite) dial() (*Conn, error) {
|
||||
var conn Conn
|
||||
|
||||
fd, err := net.Dial("tcp", fmt.Sprintf("%v:%d", s.Dest.IP(), s.Dest.TCP()))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
conn.Conn = rlpx.NewConn(fd, s.Dest.Pubkey())
|
||||
|
||||
// do encHandshake
|
||||
conn.ourKey, _ = crypto.GenerateKey()
|
||||
_, err = conn.Handshake(conn.ourKey)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &conn, nil
|
||||
}
|
BIN
cmd/devp2p/internal/ethtest/testdata/chain.rlp.gz
vendored
Executable file
Binary file not shown.
26
cmd/devp2p/internal/ethtest/testdata/genesis.json
vendored
Normal file
@@ -0,0 +1,26 @@
|
||||
{
|
||||
"config": {
|
||||
"chainId": 1,
|
||||
"homesteadBlock": 0,
|
||||
"eip150Block": 0,
|
||||
"eip155Block": 0,
|
||||
"eip158Block": 0,
|
||||
"byzantiumBlock": 0,
|
||||
"ethash": {}
|
||||
},
|
||||
"nonce": "0xdeadbeefdeadbeef",
|
||||
"timestamp": "0x0",
|
||||
"extraData": "0x0000000000000000000000000000000000000000000000000000000000000000",
|
||||
"gasLimit": "0x8000000",
|
||||
"difficulty": "0x10",
|
||||
"mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
|
||||
"coinbase": "0x0000000000000000000000000000000000000000",
|
||||
"alloc": {
|
||||
"71562b71999873db5b286df957af199ec94617f7": {
|
||||
"balance": "0xf4240"
|
||||
}
|
||||
},
|
||||
"number": "0x0",
|
||||
"gasUsed": "0x0",
|
||||
"parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000"
|
||||
}
|
366
cmd/devp2p/internal/ethtest/types.go
Normal file
@@ -0,0 +1,366 @@
|
||||
// Copyright 2020 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package ethtest
|
||||
|
||||
import (
|
||||
"crypto/ecdsa"
|
||||
"fmt"
|
||||
"io"
|
||||
"math/big"
|
||||
"reflect"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/core/forkid"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/crypto"
|
||||
"github.com/ethereum/go-ethereum/internal/utesting"
|
||||
"github.com/ethereum/go-ethereum/p2p"
|
||||
"github.com/ethereum/go-ethereum/p2p/rlpx"
|
||||
"github.com/ethereum/go-ethereum/rlp"
|
||||
)
|
||||
|
||||
type Message interface {
|
||||
Code() int
|
||||
}
|
||||
|
||||
type Error struct {
|
||||
err error
|
||||
}
|
||||
|
||||
func (e *Error) Unwrap() error { return e.err }
|
||||
func (e *Error) Error() string { return e.err.Error() }
|
||||
func (e *Error) Code() int { return -1 }
|
||||
func (e *Error) GoString() string { return e.Error() }
|
||||
|
||||
// Hello is the RLP structure of the protocol handshake.
|
||||
type Hello struct {
|
||||
Version uint64
|
||||
Name string
|
||||
Caps []p2p.Cap
|
||||
ListenPort uint64
|
||||
ID []byte // secp256k1 public key
|
||||
|
||||
// Ignore additional fields (for forward compatibility).
|
||||
Rest []rlp.RawValue `rlp:"tail"`
|
||||
}
|
||||
|
||||
func (h Hello) Code() int { return 0x00 }
|
||||
|
||||
// Disconnect is the RLP structure for a disconnect message.
|
||||
type Disconnect struct {
|
||||
Reason p2p.DiscReason
|
||||
}
|
||||
|
||||
func (d Disconnect) Code() int { return 0x01 }
|
||||
|
||||
type Ping struct{}
|
||||
|
||||
func (p Ping) Code() int { return 0x02 }
|
||||
|
||||
type Pong struct{}
|
||||
|
||||
func (p Pong) Code() int { return 0x03 }
|
||||
|
||||
// Status is the network packet for the status message for eth/64 and later.
|
||||
type Status struct {
|
||||
ProtocolVersion uint32
|
||||
NetworkID uint64
|
||||
TD *big.Int
|
||||
Head common.Hash
|
||||
Genesis common.Hash
|
||||
ForkID forkid.ID
|
||||
}
|
||||
|
||||
func (s Status) Code() int { return 16 }
|
||||
|
||||
// NewBlockHashes is the network packet for the block announcements.
|
||||
type NewBlockHashes []struct {
|
||||
Hash common.Hash // Hash of one particular block being announced
|
||||
Number uint64 // Number of one particular block being announced
|
||||
}
|
||||
|
||||
func (nbh NewBlockHashes) Code() int { return 17 }
|
||||
|
||||
// NewBlock is the network packet for the block propagation message.
|
||||
type NewBlock struct {
|
||||
Block *types.Block
|
||||
TD *big.Int
|
||||
}
|
||||
|
||||
func (nb NewBlock) Code() int { return 23 }
|
||||
|
||||
// GetBlockHeaders represents a block header query.
|
||||
type GetBlockHeaders struct {
|
||||
Origin hashOrNumber // Block from which to retrieve headers
|
||||
Amount uint64 // Maximum number of headers to retrieve
|
||||
Skip uint64 // Blocks to skip between consecutive headers
|
||||
Reverse bool // Query direction (false = rising towards latest, true = falling towards genesis)
|
||||
}
|
||||
|
||||
func (g GetBlockHeaders) Code() int { return 19 }
|
||||
|
||||
type BlockHeaders []*types.Header
|
||||
|
||||
func (bh BlockHeaders) Code() int { return 20 }
|
||||
|
||||
// hashOrNumber is a combined field for specifying an origin block.
|
||||
type hashOrNumber struct {
|
||||
Hash common.Hash // Block hash from which to retrieve headers (excludes Number)
|
||||
Number uint64 // Block number from which to retrieve headers (excludes Hash)
|
||||
}
|
||||
|
||||
// EncodeRLP is a specialized encoder for hashOrNumber to encode only one of the
|
||||
// two contained union fields.
|
||||
func (hn *hashOrNumber) EncodeRLP(w io.Writer) error {
|
||||
if hn.Hash == (common.Hash{}) {
|
||||
return rlp.Encode(w, hn.Number)
|
||||
}
|
||||
if hn.Number != 0 {
|
||||
return fmt.Errorf("both origin hash (%x) and number (%d) provided", hn.Hash, hn.Number)
|
||||
}
|
||||
return rlp.Encode(w, hn.Hash)
|
||||
}
|
||||
|
||||
// DecodeRLP is a specialized decoder for hashOrNumber to decode the contents
|
||||
// into either a block hash or a block number.
|
||||
func (hn *hashOrNumber) DecodeRLP(s *rlp.Stream) error {
|
||||
_, size, _ := s.Kind()
|
||||
origin, err := s.Raw()
|
||||
if err == nil {
|
||||
switch {
|
||||
case size == 32:
|
||||
err = rlp.DecodeBytes(origin, &hn.Hash)
|
||||
case size <= 8:
|
||||
err = rlp.DecodeBytes(origin, &hn.Number)
|
||||
default:
|
||||
err = fmt.Errorf("invalid input size %d for origin", size)
|
||||
}
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// GetBlockBodies represents a GetBlockBodies request
|
||||
type GetBlockBodies []common.Hash
|
||||
|
||||
func (gbb GetBlockBodies) Code() int { return 21 }
|
||||
|
||||
// BlockBodies is the network packet for block content distribution.
|
||||
type BlockBodies []*types.Body
|
||||
|
||||
func (bb BlockBodies) Code() int { return 22 }
|
||||
|
||||
// Conn represents an individual connection with a peer
|
||||
type Conn struct {
|
||||
*rlpx.Conn
|
||||
ourKey *ecdsa.PrivateKey
|
||||
ethProtocolVersion uint
|
||||
}
|
||||
|
||||
func (c *Conn) Read() Message {
|
||||
code, rawData, _, err := c.Conn.Read()
|
||||
if err != nil {
|
||||
return &Error{fmt.Errorf("could not read from connection: %v", err)}
|
||||
}
|
||||
|
||||
var msg Message
|
||||
switch int(code) {
|
||||
case (Hello{}).Code():
|
||||
msg = new(Hello)
|
||||
case (Ping{}).Code():
|
||||
msg = new(Ping)
|
||||
case (Pong{}).Code():
|
||||
msg = new(Pong)
|
||||
case (Disconnect{}).Code():
|
||||
msg = new(Disconnect)
|
||||
case (Status{}).Code():
|
||||
msg = new(Status)
|
||||
case (GetBlockHeaders{}).Code():
|
||||
msg = new(GetBlockHeaders)
|
||||
case (BlockHeaders{}).Code():
|
||||
msg = new(BlockHeaders)
|
||||
case (GetBlockBodies{}).Code():
|
||||
msg = new(GetBlockBodies)
|
||||
case (BlockBodies{}).Code():
|
||||
msg = new(BlockBodies)
|
||||
case (NewBlock{}).Code():
|
||||
msg = new(NewBlock)
|
||||
case (NewBlockHashes{}).Code():
|
||||
msg = new(NewBlockHashes)
|
||||
default:
|
||||
return &Error{fmt.Errorf("invalid message code: %d", code)}
|
||||
}
|
||||
|
||||
if err := rlp.DecodeBytes(rawData, msg); err != nil {
|
||||
return &Error{fmt.Errorf("could not rlp decode message: %v", err)}
|
||||
}
|
||||
|
||||
return msg
|
||||
}
|
||||
|
||||
// ReadAndServe serves GetBlockHeaders requests while waiting
|
||||
// on another message from the node.
|
||||
func (c *Conn) ReadAndServe(chain *Chain) Message {
|
||||
for {
|
||||
switch msg := c.Read().(type) {
|
||||
case *Ping:
|
||||
c.Write(&Pong{})
|
||||
case *GetBlockHeaders:
|
||||
req := *msg
|
||||
headers, err := chain.GetHeaders(req)
|
||||
if err != nil {
|
||||
return &Error{fmt.Errorf("could not get headers for inbound header request: %v", err)}
|
||||
}
|
||||
|
||||
if err := c.Write(headers); err != nil {
|
||||
return &Error{fmt.Errorf("could not write to connection: %v", err)}
|
||||
}
|
||||
default:
|
||||
return msg
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (c *Conn) Write(msg Message) error {
|
||||
payload, err := rlp.EncodeToBytes(msg)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = c.Conn.Write(uint64(msg.Code()), payload)
|
||||
return err
|
||||
|
||||
}
|
||||
|
||||
// handshake checks to make sure a `HELLO` is received.
|
||||
func (c *Conn) handshake(t *utesting.T) Message {
|
||||
// write protoHandshake to client
|
||||
pub0 := crypto.FromECDSAPub(&c.ourKey.PublicKey)[1:]
|
||||
ourHandshake := &Hello{
|
||||
Version: 5,
|
||||
Caps: []p2p.Cap{
|
||||
{Name: "eth", Version: 64},
|
||||
{Name: "eth", Version: 65},
|
||||
},
|
||||
ID: pub0,
|
||||
}
|
||||
if err := c.Write(ourHandshake); err != nil {
|
||||
t.Fatalf("could not write to connection: %v", err)
|
||||
}
|
||||
// read protoHandshake from client
|
||||
switch msg := c.Read().(type) {
|
||||
case *Hello:
|
||||
// set snappy if version is at least 5
|
||||
if msg.Version >= 5 {
|
||||
c.SetSnappy(true)
|
||||
}
|
||||
|
||||
c.negotiateEthProtocol(msg.Caps)
|
||||
if c.ethProtocolVersion == 0 {
|
||||
t.Fatalf("unexpected eth protocol version")
|
||||
}
|
||||
return msg
|
||||
default:
|
||||
t.Fatalf("bad handshake: %#v", msg)
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// negotiateEthProtocol sets the Conn's eth protocol version
|
||||
// to the highest advertised eth capability from the peer
|
||||
func (c *Conn) negotiateEthProtocol(caps []p2p.Cap) {
|
||||
var highestEthVersion uint
|
||||
for _, capability := range caps {
|
||||
if capability.Name != "eth" {
|
||||
continue
|
||||
}
|
||||
if capability.Version > highestEthVersion && capability.Version <= 65 {
|
||||
highestEthVersion = capability.Version
|
||||
}
|
||||
}
|
||||
c.ethProtocolVersion = highestEthVersion
|
||||
}
|
||||
|
||||
// statusExchange performs a `Status` message exchange with the given
|
||||
// node.
|
||||
func (c *Conn) statusExchange(t *utesting.T, chain *Chain) Message {
|
||||
// read status message from client
|
||||
var message Message
|
||||
|
||||
loop:
|
||||
for {
|
||||
switch msg := c.Read().(type) {
|
||||
case *Status:
|
||||
if msg.Head != chain.blocks[chain.Len()-1].Hash() {
|
||||
t.Fatalf("wrong head in status: %v", msg.Head)
|
||||
}
|
||||
if msg.TD.Cmp(chain.TD(chain.Len())) != 0 {
|
||||
t.Fatalf("wrong TD in status: %v", msg.TD)
|
||||
}
|
||||
if !reflect.DeepEqual(msg.ForkID, chain.ForkID()) {
|
||||
t.Fatalf("wrong fork ID in status: %v", msg.ForkID)
|
||||
}
|
||||
message = msg
|
||||
break loop
|
||||
case *Disconnect:
|
||||
t.Fatalf("disconnect received: %v", msg.Reason)
|
||||
case *Ping:
|
||||
c.Write(&Pong{}) // TODO (renaynay): in the future, this should be an error
|
||||
// (PINGs should not be a response upon fresh connection)
|
||||
default:
|
||||
t.Fatalf("bad status message: %#v", msg)
|
||||
}
|
||||
}
|
||||
// make sure eth protocol version is set for negotiation
|
||||
if c.ethProtocolVersion == 0 {
|
||||
t.Fatalf("eth protocol version must be set in Conn")
|
||||
}
|
||||
// write status message to client
|
||||
status := Status{
|
||||
ProtocolVersion: uint32(c.ethProtocolVersion),
|
||||
NetworkID: 1,
|
||||
TD: chain.TD(chain.Len()),
|
||||
Head: chain.blocks[chain.Len()-1].Hash(),
|
||||
Genesis: chain.blocks[0].Hash(),
|
||||
ForkID: chain.ForkID(),
|
||||
}
|
||||
if err := c.Write(status); err != nil {
|
||||
t.Fatalf("could not write to connection: %v", err)
|
||||
}
|
||||
|
||||
return message
|
||||
}
|
||||
|
||||
// waitForBlock waits for confirmation from the client that it has
|
||||
// imported the given block.
|
||||
func (c *Conn) waitForBlock(block *types.Block) error {
|
||||
for {
|
||||
req := &GetBlockHeaders{Origin: hashOrNumber{Hash: block.Hash()}, Amount: 1}
|
||||
if err := c.Write(req); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
switch msg := c.Read().(type) {
|
||||
case *BlockHeaders:
|
||||
if len(*msg) > 0 {
|
||||
return nil
|
||||
}
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
default:
|
||||
return fmt.Errorf("invalid message: %v", msg)
|
||||
}
|
||||
}
|
||||
}
|
377
cmd/devp2p/internal/v5test/discv5tests.go
Normal file
@@ -0,0 +1,377 @@
|
||||
// Copyright 2020 The go-ethereum Authors
|
||||
// This file is part of go-ethereum.
|
||||
//
|
||||
// go-ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// go-ethereum is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package v5test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"net"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/internal/utesting"
|
||||
"github.com/ethereum/go-ethereum/p2p/discover/v5wire"
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
"github.com/ethereum/go-ethereum/p2p/netutil"
|
||||
)
|
||||
|
||||
// Suite is the discv5 test suite.
|
||||
type Suite struct {
|
||||
Dest *enode.Node
|
||||
Listen1, Listen2 string // listening addresses
|
||||
}
|
||||
|
||||
func (s *Suite) listen1(log logger) (*conn, net.PacketConn) {
|
||||
c := newConn(s.Dest, log)
|
||||
l := c.listen(s.Listen1)
|
||||
return c, l
|
||||
}
|
||||
|
||||
func (s *Suite) listen2(log logger) (*conn, net.PacketConn, net.PacketConn) {
|
||||
c := newConn(s.Dest, log)
|
||||
l1, l2 := c.listen(s.Listen1), c.listen(s.Listen2)
|
||||
return c, l1, l2
|
||||
}
|
||||
|
||||
func (s *Suite) AllTests() []utesting.Test {
|
||||
return []utesting.Test{
|
||||
{Name: "Ping", Fn: s.TestPing},
|
||||
{Name: "PingLargeRequestID", Fn: s.TestPingLargeRequestID},
|
||||
{Name: "PingMultiIP", Fn: s.TestPingMultiIP},
|
||||
{Name: "PingHandshakeInterrupted", Fn: s.TestPingHandshakeInterrupted},
|
||||
{Name: "TalkRequest", Fn: s.TestTalkRequest},
|
||||
{Name: "FindnodeZeroDistance", Fn: s.TestFindnodeZeroDistance},
|
||||
{Name: "FindnodeResults", Fn: s.TestFindnodeResults},
|
||||
}
|
||||
}
|
||||
|
||||
// This test sends PING and expects a PONG response.
|
||||
func (s *Suite) TestPing(t *utesting.T) {
|
||||
conn, l1 := s.listen1(t)
|
||||
defer conn.close()
|
||||
|
||||
ping := &v5wire.Ping{ReqID: conn.nextReqID()}
|
||||
switch resp := conn.reqresp(l1, ping).(type) {
|
||||
case *v5wire.Pong:
|
||||
checkPong(t, resp, ping, l1)
|
||||
default:
|
||||
t.Fatal("expected PONG, got", resp.Name())
|
||||
}
|
||||
}
|
||||
|
||||
func checkPong(t *utesting.T, pong *v5wire.Pong, ping *v5wire.Ping, c net.PacketConn) {
|
||||
if !bytes.Equal(pong.ReqID, ping.ReqID) {
|
||||
t.Fatalf("wrong request ID %x in PONG, want %x", pong.ReqID, ping.ReqID)
|
||||
}
|
||||
if !pong.ToIP.Equal(laddr(c).IP) {
|
||||
t.Fatalf("wrong destination IP %v in PONG, want %v", pong.ToIP, laddr(c).IP)
|
||||
}
|
||||
if int(pong.ToPort) != laddr(c).Port {
|
||||
t.Fatalf("wrong destination port %v in PONG, want %v", pong.ToPort, laddr(c).Port)
|
||||
}
|
||||
}
|
||||
|
||||
// This test sends PING with a 9-byte request ID, which isn't allowed by the spec.
|
||||
// The remote node should not respond.
|
||||
func (s *Suite) TestPingLargeRequestID(t *utesting.T) {
|
||||
conn, l1 := s.listen1(t)
|
||||
defer conn.close()
|
||||
|
||||
ping := &v5wire.Ping{ReqID: make([]byte, 9)}
|
||||
switch resp := conn.reqresp(l1, ping).(type) {
|
||||
case *v5wire.Pong:
|
||||
t.Errorf("PONG response with unknown request ID %x", resp.ReqID)
|
||||
case *readError:
|
||||
if resp.err == v5wire.ErrInvalidReqID {
|
||||
t.Error("response with oversized request ID")
|
||||
} else if !netutil.IsTimeout(resp.err) {
|
||||
t.Error(resp)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// In this test, a session is established from one IP as usual. The session is then reused
|
||||
// on another IP, which shouldn't work. The remote node should respond with WHOAREYOU for
|
||||
// the attempt from a different IP.
|
||||
func (s *Suite) TestPingMultiIP(t *utesting.T) {
|
||||
conn, l1, l2 := s.listen2(t)
|
||||
defer conn.close()
|
||||
|
||||
// Create the session on l1.
|
||||
ping := &v5wire.Ping{ReqID: conn.nextReqID()}
|
||||
resp := conn.reqresp(l1, ping)
|
||||
if resp.Kind() != v5wire.PongMsg {
|
||||
t.Fatal("expected PONG, got", resp)
|
||||
}
|
||||
checkPong(t, resp.(*v5wire.Pong), ping, l1)
|
||||
|
||||
// Send on l2. This reuses the session because there is only one codec.
|
||||
ping2 := &v5wire.Ping{ReqID: conn.nextReqID()}
|
||||
conn.write(l2, ping2, nil)
|
||||
switch resp := conn.read(l2).(type) {
|
||||
case *v5wire.Pong:
|
||||
t.Fatalf("remote responded to PING from %v for session on IP %v", laddr(l2).IP, laddr(l1).IP)
|
||||
case *v5wire.Whoareyou:
|
||||
t.Logf("got WHOAREYOU for new session as expected")
|
||||
resp.Node = s.Dest
|
||||
conn.write(l2, ping2, resp)
|
||||
default:
|
||||
t.Fatal("expected WHOAREYOU, got", resp)
|
||||
}
|
||||
|
||||
// Catch the PONG on l2.
|
||||
switch resp := conn.read(l2).(type) {
|
||||
case *v5wire.Pong:
|
||||
checkPong(t, resp, ping2, l2)
|
||||
default:
|
||||
t.Fatal("expected PONG, got", resp)
|
||||
}
|
||||
|
||||
// Try on l1 again.
|
||||
ping3 := &v5wire.Ping{ReqID: conn.nextReqID()}
|
||||
conn.write(l1, ping3, nil)
|
||||
switch resp := conn.read(l1).(type) {
|
||||
case *v5wire.Pong:
|
||||
t.Fatalf("remote responded to PING from %v for session on IP %v", laddr(l1).IP, laddr(l2).IP)
|
||||
case *v5wire.Whoareyou:
|
||||
t.Logf("got WHOAREYOU for new session as expected")
|
||||
default:
|
||||
t.Fatal("expected WHOAREYOU, got", resp)
|
||||
}
|
||||
}
|
||||
|
||||
// This test starts a handshake, but doesn't finish it and sends a second ordinary message
|
||||
// packet instead of a handshake message packet. The remote node should respond with
|
||||
// another WHOAREYOU challenge for the second packet.
|
||||
func (s *Suite) TestPingHandshakeInterrupted(t *utesting.T) {
|
||||
conn, l1 := s.listen1(t)
|
||||
defer conn.close()
|
||||
|
||||
// First PING triggers challenge.
|
||||
ping := &v5wire.Ping{ReqID: conn.nextReqID()}
|
||||
conn.write(l1, ping, nil)
|
||||
switch resp := conn.read(l1).(type) {
|
||||
case *v5wire.Whoareyou:
|
||||
t.Logf("got WHOAREYOU for PING")
|
||||
default:
|
||||
t.Fatal("expected WHOAREYOU, got", resp)
|
||||
}
|
||||
|
||||
// Send second PING.
|
||||
ping2 := &v5wire.Ping{ReqID: conn.nextReqID()}
|
||||
switch resp := conn.reqresp(l1, ping2).(type) {
|
||||
case *v5wire.Pong:
|
||||
checkPong(t, resp, ping2, l1)
|
||||
default:
|
||||
t.Fatal("expected WHOAREYOU, got", resp)
|
||||
}
|
||||
}
|
||||
|
||||
// This test sends TALKREQ and expects an empty TALKRESP response.
|
||||
func (s *Suite) TestTalkRequest(t *utesting.T) {
|
||||
conn, l1 := s.listen1(t)
|
||||
defer conn.close()
|
||||
|
||||
// Non-empty request ID.
|
||||
id := conn.nextReqID()
|
||||
resp := conn.reqresp(l1, &v5wire.TalkRequest{ReqID: id, Protocol: "test-protocol"})
|
||||
switch resp := resp.(type) {
|
||||
case *v5wire.TalkResponse:
|
||||
if !bytes.Equal(resp.ReqID, id) {
|
||||
t.Fatalf("wrong request ID %x in TALKRESP, want %x", resp.ReqID, id)
|
||||
}
|
||||
if len(resp.Message) > 0 {
|
||||
t.Fatalf("non-empty message %x in TALKRESP", resp.Message)
|
||||
}
|
||||
default:
|
||||
t.Fatal("expected TALKRESP, got", resp.Name())
|
||||
}
|
||||
|
||||
// Empty request ID.
|
||||
resp = conn.reqresp(l1, &v5wire.TalkRequest{Protocol: "test-protocol"})
|
||||
switch resp := resp.(type) {
|
||||
case *v5wire.TalkResponse:
|
||||
if len(resp.ReqID) > 0 {
|
||||
t.Fatalf("wrong request ID %x in TALKRESP, want empty byte array", resp.ReqID)
|
||||
}
|
||||
if len(resp.Message) > 0 {
|
||||
t.Fatalf("non-empty message %x in TALKRESP", resp.Message)
|
||||
}
|
||||
default:
|
||||
t.Fatal("expected TALKRESP, got", resp.Name())
|
||||
}
|
||||
}
|
||||
|
||||
// This test checks that the remote node returns itself for FINDNODE with distance zero.
|
||||
func (s *Suite) TestFindnodeZeroDistance(t *utesting.T) {
|
||||
conn, l1 := s.listen1(t)
|
||||
defer conn.close()
|
||||
|
||||
nodes, err := conn.findnode(l1, []uint{0})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if len(nodes) != 1 {
|
||||
t.Fatalf("remote returned more than one node for FINDNODE [0]")
|
||||
}
|
||||
if nodes[0].ID() != conn.remote.ID() {
|
||||
t.Errorf("ID of response node is %v, want %v", nodes[0].ID(), conn.remote.ID())
|
||||
}
|
||||
}
|
||||
|
||||
// In this test, multiple nodes ping the node under test. After waiting for them to be
|
||||
// accepted into the remote table, the test checks that they are returned by FINDNODE.
|
||||
func (s *Suite) TestFindnodeResults(t *utesting.T) {
|
||||
// Create bystanders.
|
||||
nodes := make([]*bystander, 5)
|
||||
added := make(chan enode.ID, len(nodes))
|
||||
for i := range nodes {
|
||||
nodes[i] = newBystander(t, s, added)
|
||||
defer nodes[i].close()
|
||||
}
|
||||
|
||||
// Get them added to the remote table.
|
||||
timeout := 60 * time.Second
|
||||
timeoutCh := time.After(timeout)
|
||||
for count := 0; count < len(nodes); {
|
||||
select {
|
||||
case id := <-added:
|
||||
t.Logf("bystander node %v added to remote table", id)
|
||||
count++
|
||||
case <-timeoutCh:
|
||||
t.Errorf("remote added %d bystander nodes in %v, need %d to continue", count, timeout, len(nodes))
|
||||
t.Logf("this can happen if the node has a non-empty table from previous runs")
|
||||
return
|
||||
}
|
||||
}
|
||||
t.Logf("all %d bystander nodes were added", len(nodes))
|
||||
|
||||
// Collect our nodes by distance.
|
||||
var dists []uint
|
||||
expect := make(map[enode.ID]*enode.Node)
|
||||
for _, bn := range nodes {
|
||||
n := bn.conn.localNode.Node()
|
||||
expect[n.ID()] = n
|
||||
d := uint(enode.LogDist(n.ID(), s.Dest.ID()))
|
||||
if !containsUint(dists, d) {
|
||||
dists = append(dists, d)
|
||||
}
|
||||
}
|
||||
|
||||
// Send FINDNODE for all distances.
|
||||
conn, l1 := s.listen1(t)
|
||||
defer conn.close()
|
||||
foundNodes, err := conn.findnode(l1, dists)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
t.Logf("remote returned %d nodes for distance list %v", len(foundNodes), dists)
|
||||
for _, n := range foundNodes {
|
||||
delete(expect, n.ID())
|
||||
}
|
||||
if len(expect) > 0 {
|
||||
t.Errorf("missing %d nodes in FINDNODE result", len(expect))
|
||||
t.Logf("this can happen if the test is run multiple times in quick succession")
|
||||
t.Logf("and the remote node hasn't removed dead nodes from previous runs yet")
|
||||
} else {
|
||||
t.Logf("all %d expected nodes were returned", len(nodes))
|
||||
}
|
||||
}
|
||||
|
||||
// A bystander is a node whose only purpose is filling a spot in the remote table.
|
||||
type bystander struct {
|
||||
dest *enode.Node
|
||||
conn *conn
|
||||
l net.PacketConn
|
||||
|
||||
addedCh chan enode.ID
|
||||
done sync.WaitGroup
|
||||
}
|
||||
|
||||
func newBystander(t *utesting.T, s *Suite, added chan enode.ID) *bystander {
|
||||
conn, l := s.listen1(t)
|
||||
conn.setEndpoint(l) // bystander nodes need IP/port to get pinged
|
||||
bn := &bystander{
|
||||
conn: conn,
|
||||
l: l,
|
||||
dest: s.Dest,
|
||||
addedCh: added,
|
||||
}
|
||||
bn.done.Add(1)
|
||||
go bn.loop()
|
||||
return bn
|
||||
}
|
||||
|
||||
// id returns the node ID of the bystander.
|
||||
func (bn *bystander) id() enode.ID {
|
||||
return bn.conn.localNode.ID()
|
||||
}
|
||||
|
||||
// close shuts down loop.
|
||||
func (bn *bystander) close() {
|
||||
bn.conn.close()
|
||||
bn.done.Wait()
|
||||
}
|
||||
|
||||
// loop answers packets from the remote node until quit.
|
||||
func (bn *bystander) loop() {
|
||||
defer bn.done.Done()
|
||||
|
||||
var (
|
||||
lastPing time.Time
|
||||
wasAdded bool
|
||||
)
|
||||
for {
|
||||
// Ping the remote node.
|
||||
if !wasAdded && time.Since(lastPing) > 10*time.Second {
|
||||
bn.conn.reqresp(bn.l, &v5wire.Ping{
|
||||
ReqID: bn.conn.nextReqID(),
|
||||
ENRSeq: bn.dest.Seq(),
|
||||
})
|
||||
lastPing = time.Now()
|
||||
}
|
||||
// Answer packets.
|
||||
switch p := bn.conn.read(bn.l).(type) {
|
||||
case *v5wire.Ping:
|
||||
bn.conn.write(bn.l, &v5wire.Pong{
|
||||
ReqID: p.ReqID,
|
||||
ENRSeq: bn.conn.localNode.Seq(),
|
||||
ToIP: bn.dest.IP(),
|
||||
ToPort: uint16(bn.dest.UDP()),
|
||||
}, nil)
|
||||
wasAdded = true
|
||||
bn.notifyAdded()
|
||||
case *v5wire.Findnode:
|
||||
bn.conn.write(bn.l, &v5wire.Nodes{ReqID: p.ReqID, Total: 1}, nil)
|
||||
wasAdded = true
|
||||
bn.notifyAdded()
|
||||
case *v5wire.TalkRequest:
|
||||
bn.conn.write(bn.l, &v5wire.TalkResponse{ReqID: p.ReqID}, nil)
|
||||
case *readError:
|
||||
if !netutil.IsTemporaryError(p.err) {
|
||||
bn.conn.logf("shutting down: %v", p.err)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (bn *bystander) notifyAdded() {
|
||||
if bn.addedCh != nil {
|
||||
bn.addedCh <- bn.id()
|
||||
bn.addedCh = nil
|
||||
}
|
||||
}
|
263
cmd/devp2p/internal/v5test/framework.go
Normal file
@@ -0,0 +1,263 @@
// Copyright 2020 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.

package v5test

import (
	"bytes"
	"crypto/ecdsa"
	"encoding/binary"
	"fmt"
	"net"
	"time"

	"github.com/ethereum/go-ethereum/common/mclock"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/p2p/discover/v5wire"
	"github.com/ethereum/go-ethereum/p2p/enode"
	"github.com/ethereum/go-ethereum/p2p/enr"
)

// readError represents an error during packet reading.
// This exists to facilitate type-switching on the result of conn.read.
type readError struct {
	err error
}

func (p *readError) Kind() byte          { return 99 }
func (p *readError) Name() string        { return fmt.Sprintf("error: %v", p.err) }
func (p *readError) Error() string       { return p.err.Error() }
func (p *readError) Unwrap() error       { return p.err }
func (p *readError) RequestID() []byte   { return nil }
func (p *readError) SetRequestID([]byte) {}

// readErrorf creates a readError with the given text.
func readErrorf(format string, args ...interface{}) *readError {
	return &readError{fmt.Errorf(format, args...)}
}

// This is the response timeout used in tests.
const waitTime = 300 * time.Millisecond

// conn is a connection to the node under test.
type conn struct {
	localNode  *enode.LocalNode
	localKey   *ecdsa.PrivateKey
	remote     *enode.Node
	remoteAddr *net.UDPAddr
	listeners  []net.PacketConn

	log           logger
	codec         *v5wire.Codec
	lastRequest   v5wire.Packet
	lastChallenge *v5wire.Whoareyou
	idCounter     uint32
}

type logger interface {
	Logf(string, ...interface{})
}

// newConn sets up a connection to the given node.
func newConn(dest *enode.Node, log logger) *conn {
	key, err := crypto.GenerateKey()
	if err != nil {
		panic(err)
	}
	db, err := enode.OpenDB("")
	if err != nil {
		panic(err)
	}
	ln := enode.NewLocalNode(db, key)

	return &conn{
		localKey:   key,
		localNode:  ln,
		remote:     dest,
		remoteAddr: &net.UDPAddr{IP: dest.IP(), Port: dest.UDP()},
		codec:      v5wire.NewCodec(ln, key, mclock.System{}),
		log:        log,
	}
}

func (tc *conn) setEndpoint(c net.PacketConn) {
	tc.localNode.SetStaticIP(laddr(c).IP)
	tc.localNode.SetFallbackUDP(laddr(c).Port)
}

func (tc *conn) listen(ip string) net.PacketConn {
	l, err := net.ListenPacket("udp", fmt.Sprintf("%v:0", ip))
	if err != nil {
		panic(err)
	}
	tc.listeners = append(tc.listeners, l)
	return l
}

// close shuts down all listeners and the local node.
func (tc *conn) close() {
	for _, l := range tc.listeners {
		l.Close()
	}
	tc.localNode.Database().Close()
}

// nextReqID creates a request id.
func (tc *conn) nextReqID() []byte {
	id := make([]byte, 4)
	tc.idCounter++
	binary.BigEndian.PutUint32(id, tc.idCounter)
	return id
}

// reqresp performs a request/response interaction on the given connection.
// The request is retried if a handshake is requested.
func (tc *conn) reqresp(c net.PacketConn, req v5wire.Packet) v5wire.Packet {
	reqnonce := tc.write(c, req, nil)
	switch resp := tc.read(c).(type) {
	case *v5wire.Whoareyou:
		if resp.Nonce != reqnonce {
			return readErrorf("wrong nonce %x in WHOAREYOU (want %x)", resp.Nonce[:], reqnonce[:])
		}
		resp.Node = tc.remote
		tc.write(c, req, resp)
		return tc.read(c)
	default:
		return resp
	}
}

// findnode sends a FINDNODE request and waits for its responses.
func (tc *conn) findnode(c net.PacketConn, dists []uint) ([]*enode.Node, error) {
	var (
		findnode = &v5wire.Findnode{ReqID: tc.nextReqID(), Distances: dists}
		reqnonce = tc.write(c, findnode, nil)
		first    = true
		total    uint8
		results  []*enode.Node
	)
	for n := 1; n > 0; {
		switch resp := tc.read(c).(type) {
		case *v5wire.Whoareyou:
			// Handle handshake.
			if resp.Nonce == reqnonce {
				resp.Node = tc.remote
				tc.write(c, findnode, resp)
			} else {
				return nil, fmt.Errorf("unexpected WHOAREYOU (nonce %x), waiting for NODES", resp.Nonce[:])
			}
		case *v5wire.Ping:
			// Handle ping from remote.
			tc.write(c, &v5wire.Pong{
				ReqID:  resp.ReqID,
				ENRSeq: tc.localNode.Seq(),
			}, nil)
		case *v5wire.Nodes:
			// Got NODES! Check request ID.
			if !bytes.Equal(resp.ReqID, findnode.ReqID) {
				return nil, fmt.Errorf("NODES response has wrong request id %x", resp.ReqID)
			}
			// Check total count. It should be greater than one
			// and needs to be the same across all responses.
			if first {
				if resp.Total == 0 || resp.Total > 6 {
					return nil, fmt.Errorf("invalid NODES response 'total' %d (not in (0,7))", resp.Total)
				}
				total = resp.Total
				n = int(total) - 1
				first = false
			} else {
				n--
				if resp.Total != total {
					return nil, fmt.Errorf("invalid NODES response 'total' %d (!= %d)", resp.Total, total)
				}
			}
			// Check nodes.
			nodes, err := checkRecords(resp.Nodes)
			if err != nil {
				return nil, fmt.Errorf("invalid node in NODES response: %v", err)
			}
			results = append(results, nodes...)
		default:
			return nil, fmt.Errorf("expected NODES, got %v", resp)
		}
	}
	return results, nil
}

// write sends a packet on the given connection.
func (tc *conn) write(c net.PacketConn, p v5wire.Packet, challenge *v5wire.Whoareyou) v5wire.Nonce {
	packet, nonce, err := tc.codec.Encode(tc.remote.ID(), tc.remoteAddr.String(), p, challenge)
	if err != nil {
		panic(fmt.Errorf("can't encode %v packet: %v", p.Name(), err))
	}
	if _, err := c.WriteTo(packet, tc.remoteAddr); err != nil {
		tc.logf("Can't send %s: %v", p.Name(), err)
	} else {
		tc.logf(">> %s", p.Name())
	}
	return nonce
}

// read waits for an incoming packet on the given connection.
func (tc *conn) read(c net.PacketConn) v5wire.Packet {
	buf := make([]byte, 1280)
	if err := c.SetReadDeadline(time.Now().Add(waitTime)); err != nil {
		return &readError{err}
	}
	n, fromAddr, err := c.ReadFrom(buf)
	if err != nil {
		return &readError{err}
	}
	_, _, p, err := tc.codec.Decode(buf[:n], fromAddr.String())
	if err != nil {
		return &readError{err}
	}
	tc.logf("<< %s", p.Name())
	return p
}

// logf prints to the test log.
func (tc *conn) logf(format string, args ...interface{}) {
	if tc.log != nil {
		tc.log.Logf("(%s) %s", tc.localNode.ID().TerminalString(), fmt.Sprintf(format, args...))
	}
}

func laddr(c net.PacketConn) *net.UDPAddr {
	return c.LocalAddr().(*net.UDPAddr)
}

func checkRecords(records []*enr.Record) ([]*enode.Node, error) {
	nodes := make([]*enode.Node, len(records))
	for i := range records {
		n, err := enode.New(enode.ValidSchemes, records[i])
		if err != nil {
			return nil, err
		}
		nodes[i] = n
	}
	return nodes, nil
}

func containsUint(ints []uint, x uint) bool {
	for i := range ints {
		if ints[i] == x {
			return true
		}
	}
	return false
}
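The helpers above are unexported, so they can only be used from inside the v5test package. As a rough illustration of how they fit together, here is a minimal, hypothetical test sketch (not part of the diff): the helper names come from framework.go above, while the test name and the ENR string are placeholders that would need a real record of the node under test.

```go
package v5test

import (
	"testing"

	"github.com/ethereum/go-ethereum/p2p/enode"
)

// TestFindnodeSketch is a hypothetical example of driving the conn helper.
func TestFindnodeSketch(t *testing.T) {
	// Placeholder: replace with the real ENR/enode URL of the node under test.
	dest := enode.MustParse("enr:-PLACEHOLDER")

	tc := newConn(dest, t) // *testing.T satisfies the logger interface via Logf
	defer tc.close()

	// Bind a UDP socket and advertise its address in our local ENR.
	l := tc.listen("127.0.0.1")
	tc.setEndpoint(l)

	// findnode handles the WHOAREYOU handshake retry and NODES pagination.
	nodes, err := tc.findnode(l, []uint{256})
	if err != nil {
		t.Fatal(err)
	}
	t.Logf("received %d nodes", len(nodes))
}
```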
@@ -63,6 +63,7 @@ func init() {
		discv5Command,
		dnsCommand,
		nodesetCommand,
		rlpxCommand,
	}
}

@@ -80,7 +81,7 @@ func commandHasFlag(ctx *cli.Context, flag cli.Flag) bool {

// getNodeArg handles the common case of a single node descriptor argument.
func getNodeArg(ctx *cli.Context) *enode.Node {
	if ctx.NArg() != 1 {
	if ctx.NArg() < 1 {
		exit("missing node as command-line argument")
	}
	n, err := parseNode(ctx.Args()[0])
109
cmd/devp2p/rlpxcmd.go
Normal file
@@ -0,0 +1,109 @@
// Copyright 2020 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.

package main

import (
	"fmt"
	"net"
	"os"

	"github.com/ethereum/go-ethereum/cmd/devp2p/internal/ethtest"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/internal/utesting"
	"github.com/ethereum/go-ethereum/p2p"
	"github.com/ethereum/go-ethereum/p2p/rlpx"
	"github.com/ethereum/go-ethereum/rlp"
	"gopkg.in/urfave/cli.v1"
)

var (
	rlpxCommand = cli.Command{
		Name: "rlpx",
		Usage: "RLPx Commands",
		Subcommands: []cli.Command{
			rlpxPingCommand,
			rlpxEthTestCommand,
		},
	}
	rlpxPingCommand = cli.Command{
		Name: "ping",
		Usage: "ping <node>",
		Action: rlpxPing,
	}
	rlpxEthTestCommand = cli.Command{
		Name: "eth-test",
		Usage: "Runs tests against a node",
		ArgsUsage: "<node> <path_to_chain.rlp_file>",
		Action: rlpxEthTest,
		Flags: []cli.Flag{testPatternFlag},
	}
)

func rlpxPing(ctx *cli.Context) error {
	n := getNodeArg(ctx)
	fd, err := net.Dial("tcp", fmt.Sprintf("%v:%d", n.IP(), n.TCP()))
	if err != nil {
		return err
	}
	conn := rlpx.NewConn(fd, n.Pubkey())
	ourKey, _ := crypto.GenerateKey()
	_, err = conn.Handshake(ourKey)
	if err != nil {
		return err
	}
	code, data, _, err := conn.Read()
	if err != nil {
		return err
	}
	switch code {
	case 0:
		var h ethtest.Hello
		if err := rlp.DecodeBytes(data, &h); err != nil {
			return fmt.Errorf("invalid handshake: %v", err)
		}
		fmt.Printf("%+v\n", h)
	case 1:
		var msg []p2p.DiscReason
		if rlp.DecodeBytes(data, &msg); len(msg) == 0 {
			return fmt.Errorf("invalid disconnect message")
		}
		return fmt.Errorf("received disconnect message: %v", msg[0])
	default:
		return fmt.Errorf("invalid message code %d, expected handshake (code zero)", code)
	}
	return nil
}

func rlpxEthTest(ctx *cli.Context) error {
	if ctx.NArg() < 3 {
		exit("missing path to chain.rlp as command-line argument")
	}

	suite := ethtest.NewSuite(getNodeArg(ctx), ctx.Args()[1], ctx.Args()[2])

	// Filter and run test cases.
	tests := suite.AllTests()
	if ctx.IsSet(testPatternFlag.Name) {
		tests = utesting.MatchTests(tests, ctx.String(testPatternFlag.Name))
	}
	results := utesting.RunTests(tests, os.Stdout)
	if fails := utesting.CountFailures(results); fails > 0 {
		return fmt.Errorf("%v of %v tests passed.", len(tests)-fails, len(tests))
	}
	fmt.Printf("all tests passed\n")
	return nil
}
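rlpxEthTest delegates the heavy lifting to the internal utesting package: build a list of utesting.Test values, optionally filter them with MatchTests, run them with RunTests, and count failures. The sketch below shows that pattern in isolation; it is an assumption-laden illustration, not part of the diff: the Test struct fields (Name, Fn) and the T methods are assumed to mirror the standard testing package, and since internal/utesting cannot be imported from outside go-ethereum, such code would have to live inside the module.

```go
package main

import (
	"fmt"
	"os"

	"github.com/ethereum/go-ethereum/internal/utesting"
)

func runSelected(pattern string) error {
	// Declare the test cases (assumed field names Name/Fn, as used by the ethtest suite).
	tests := []utesting.Test{
		{Name: "Ping", Fn: func(t *utesting.T) { t.Logf("ping ok") }},
		{Name: "Status", Fn: func(t *utesting.T) { t.Fatalf("not implemented") }},
	}
	// Optionally narrow the run down to tests matching a pattern, like --run does.
	if pattern != "" {
		tests = utesting.MatchTests(tests, pattern)
	}
	// Run everything, streaming progress to stdout, then summarise.
	results := utesting.RunTests(tests, os.Stdout)
	if fails := utesting.CountFailures(results); fails > 0 {
		return fmt.Errorf("%d of %d tests failed", fails, len(tests))
	}
	return nil
}

func main() {
	if err := runSelected("Ping"); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}
```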
@@ -29,6 +29,8 @@ Command line params that has to be supported are
--trace Output full trace logs to files <txhash>.jsonl
--trace.nomemory Disable full memory dump in traces
--trace.nostack Disable stack output in traces
--trace.noreturndata Disable return data output in traces
--output.basedir value Specifies where output files are placed. Will be created if it does not exist. (default: ".")
--output.alloc alloc Determines where to put the alloc of the post-state.
`stdout` - into the stdout output
`stderr` - into the stderr output
@@ -232,13 +234,13 @@ Example where blockhashes are provided:
./evm t8n --input.alloc=./testdata/3/alloc.json --input.txs=./testdata/3/txs.json --input.env=./testdata/3/env.json --trace
```
```
cat trace-0.jsonl | grep BLOCKHASH -C2
cat trace-0-0x72fadbef39cd251a437eea619cfeda752271a5faaaa2147df012e112159ffb81.jsonl | grep BLOCKHASH -C2
```
```
{"pc":0,"op":96,"gas":"0x5f58ef8","gasCost":"0x3","memory":"0x","memSize":0,"stack":[],"returnStack":[],"depth":1,"refund":0,"opName":"PUSH1","error":""}
{"pc":2,"op":64,"gas":"0x5f58ef5","gasCost":"0x14","memory":"0x","memSize":0,"stack":["0x1"],"returnStack":[],"depth":1,"refund":0,"opName":"BLOCKHASH","error":""}
{"pc":3,"op":0,"gas":"0x5f58ee1","gasCost":"0x0","memory":"0x","memSize":0,"stack":["0xdac58aa524e50956d0c0bae7f3f8bb9d35381365d07804dd5b48a5a297c06af4"],"returnStack":[],"depth":1,"refund":0,"opName":"STOP","error":""}
{"output":"","gasUsed":"0x17","time":155861}
{"pc":0,"op":96,"gas":"0x5f58ef8","gasCost":"0x3","memory":"0x","memSize":0,"stack":[],"returnStack":[],"returnData":null,"depth":1,"refund":0,"opName":"PUSH1","error":""}
{"pc":2,"op":64,"gas":"0x5f58ef5","gasCost":"0x14","memory":"0x","memSize":0,"stack":["0x1"],"returnStack":[],"returnData":null,"depth":1,"refund":0,"opName":"BLOCKHASH","error":""}
{"pc":3,"op":0,"gas":"0x5f58ee1","gasCost":"0x0","memory":"0x","memSize":0,"stack":["0xdac58aa524e50956d0c0bae7f3f8bb9d35381365d07804dd5b48a5a297c06af4"],"returnStack":[],"returnData":null,"depth":1,"refund":0,"opName":"STOP","error":""}
{"output":"","gasUsed":"0x17","time":112885}
```

In this example, the caller has not provided the required blockhash:
@@ -254,9 +256,9 @@ Error code: 4
Another thing that can be done, is to chain invocations:
```
./evm t8n --input.alloc=./testdata/1/alloc.json --input.txs=./testdata/1/txs.json --input.env=./testdata/1/env.json --output.alloc=stdout | ./evm t8n --input.alloc=stdin --input.env=./testdata/1/env.json --input.txs=./testdata/1/txs.json
INFO [06-29|11:52:04.934] rejected tx index=1 hash="0557ba…18d673" from=0x8A8eAFb1cf62BfBeb1741769DAE1a9dd47996192 error="nonce too low"
INFO [06-29|11:52:04.936] rejected tx index=0 hash="0557ba…18d673" from=0x8A8eAFb1cf62BfBeb1741769DAE1a9dd47996192 error="nonce too low"
INFO [06-29|11:52:04.936] rejected tx index=1 hash="0557ba…18d673" from=0x8A8eAFb1cf62BfBeb1741769DAE1a9dd47996192 error="nonce too low"
INFO [08-03|15:25:15.168] rejected tx index=1 hash="0557ba…18d673" from=0x8A8eAFb1cf62BfBeb1741769DAE1a9dd47996192 error="nonce too low"
INFO [08-03|15:25:15.169] rejected tx index=0 hash="0557ba…18d673" from=0x8A8eAFb1cf62BfBeb1741769DAE1a9dd47996192 error="nonce too low"
INFO [08-03|15:25:15.169] rejected tx index=1 hash="0557ba…18d673" from=0x8A8eAFb1cf62BfBeb1741769DAE1a9dd47996192 error="nonce too low"

```
What happened here, is that we first applied two identical transactions, so the second one was rejected.
@@ -34,6 +34,7 @@ import (
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"github.com/ethereum/go-ethereum/params"
|
||||
"github.com/ethereum/go-ethereum/rlp"
|
||||
"github.com/ethereum/go-ethereum/trie"
|
||||
"golang.org/x/crypto/sha3"
|
||||
)
|
||||
|
||||
@@ -81,7 +82,7 @@ type stEnvMarshaling struct {
|
||||
// Apply applies a set of transactions to a pre-state
|
||||
func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig,
|
||||
txs types.Transactions, miningReward int64,
|
||||
getTracerFn func(txIndex int) (tracer vm.Tracer, err error)) (*state.StateDB, *ExecutionResult, error) {
|
||||
getTracerFn func(txIndex int, txHash common.Hash) (tracer vm.Tracer, err error)) (*state.StateDB, *ExecutionResult, error) {
|
||||
|
||||
// Capture errors for BLOCKHASH operation, if we haven't been supplied the
|
||||
// required blockhashes
|
||||
@@ -135,7 +136,7 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig,
|
||||
rejectedTxs = append(rejectedTxs, i)
|
||||
continue
|
||||
}
|
||||
tracer, err := getTracerFn(txIndex)
|
||||
tracer, err := getTracerFn(txIndex, tx.Hash())
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
@@ -220,8 +221,8 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig,
|
||||
}
|
||||
execRs := &ExecutionResult{
|
||||
StateRoot: root,
|
||||
TxRoot: types.DeriveSha(includedTxs),
|
||||
ReceiptRoot: types.DeriveSha(receipts),
|
||||
TxRoot: types.DeriveSha(includedTxs, new(trie.Trie)),
|
||||
ReceiptRoot: types.DeriveSha(receipts, new(trie.Trie)),
|
||||
Bloom: types.CreateBloom(receipts),
|
||||
LogsHash: rlpHash(statedb.Logs()),
|
||||
Receipts: receipts,
|
||||
|
@@ -42,6 +42,11 @@ var (
|
||||
Name: "trace.noreturndata",
|
||||
Usage: "Disable return data output in traces",
|
||||
}
|
||||
OutputBasedir = cli.StringFlag{
|
||||
Name: "output.basedir",
|
||||
Usage: "Specifies where output files are placed. Will be created if it does not exist.",
|
||||
Value: "",
|
||||
}
|
||||
OutputAllocFlag = cli.StringFlag{
|
||||
Name: "output.alloc",
|
||||
Usage: "Determines where to put the `alloc` of the post-state.\n" +
|
||||
|
@@ -22,6 +22,7 @@ import (
|
||||
"io/ioutil"
|
||||
"math/big"
|
||||
"os"
|
||||
"path"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/core"
|
||||
@@ -75,11 +76,22 @@ func Main(ctx *cli.Context) error {
|
||||
log.Root().SetHandler(glogger)
|
||||
|
||||
var (
|
||||
err error
|
||||
tracer vm.Tracer
|
||||
err error
|
||||
tracer vm.Tracer
|
||||
baseDir = ""
|
||||
)
|
||||
var getTracer func(txIndex int) (vm.Tracer, error)
|
||||
var getTracer func(txIndex int, txHash common.Hash) (vm.Tracer, error)
|
||||
|
||||
// If user specified a basedir, make sure it exists
|
||||
if ctx.IsSet(OutputBasedir.Name) {
|
||||
if base := ctx.String(OutputBasedir.Name); len(base) > 0 {
|
||||
err := os.MkdirAll(base, 0755) // //rw-r--r--
|
||||
if err != nil {
|
||||
return NewError(ErrorIO, fmt.Errorf("failed creating output basedir: %v", err))
|
||||
}
|
||||
baseDir = base
|
||||
}
|
||||
}
|
||||
if ctx.Bool(TraceFlag.Name) {
|
||||
// Configure the EVM logger
|
||||
logConfig := &vm.LogConfig{
|
||||
@@ -95,11 +107,11 @@ func Main(ctx *cli.Context) error {
|
||||
prevFile.Close()
|
||||
}
|
||||
}()
|
||||
getTracer = func(txIndex int) (vm.Tracer, error) {
|
||||
getTracer = func(txIndex int, txHash common.Hash) (vm.Tracer, error) {
|
||||
if prevFile != nil {
|
||||
prevFile.Close()
|
||||
}
|
||||
traceFile, err := os.Create(fmt.Sprintf("trace-%d.jsonl", txIndex))
|
||||
traceFile, err := os.Create(path.Join(baseDir, fmt.Sprintf("trace-%d-%v.jsonl", txIndex, txHash.String())))
|
||||
if err != nil {
|
||||
return nil, NewError(ErrorIO, fmt.Errorf("failed creating trace-file: %v", err))
|
||||
}
|
||||
@@ -107,7 +119,7 @@ func Main(ctx *cli.Context) error {
|
||||
return vm.NewJSONLogger(logConfig, traceFile), nil
|
||||
}
|
||||
} else {
|
||||
getTracer = func(txIndex int) (tracer vm.Tracer, err error) {
|
||||
getTracer = func(txIndex int, txHash common.Hash) (tracer vm.Tracer, err error) {
|
||||
return nil, nil
|
||||
}
|
||||
}
|
||||
@@ -197,7 +209,7 @@ func Main(ctx *cli.Context) error {
|
||||
//postAlloc := state.DumpGenesisFormat(false, false, false)
|
||||
collector := make(Alloc)
|
||||
state.DumpToCollector(collector, false, false, false, nil, -1)
|
||||
return dispatchOutput(ctx, result, collector)
|
||||
return dispatchOutput(ctx, baseDir, result, collector)
|
||||
|
||||
}
|
||||
|
||||
@@ -224,12 +236,12 @@ func (g Alloc) OnAccount(addr common.Address, dumpAccount state.DumpAccount) {
|
||||
}
|
||||
|
||||
// saveFile marshalls the object to the given file
|
||||
func saveFile(filename string, data interface{}) error {
|
||||
func saveFile(baseDir, filename string, data interface{}) error {
|
||||
b, err := json.MarshalIndent(data, "", " ")
|
||||
if err != nil {
|
||||
return NewError(ErrorJson, fmt.Errorf("failed marshalling output: %v", err))
|
||||
}
|
||||
if err = ioutil.WriteFile(filename, b, 0644); err != nil {
|
||||
if err = ioutil.WriteFile(path.Join(baseDir, filename), b, 0644); err != nil {
|
||||
return NewError(ErrorIO, fmt.Errorf("failed writing output: %v", err))
|
||||
}
|
||||
return nil
|
||||
@@ -237,26 +249,26 @@ func saveFile(filename string, data interface{}) error {
|
||||
|
||||
// dispatchOutput writes the output data to either stderr or stdout, or to the specified
|
||||
// files
|
||||
func dispatchOutput(ctx *cli.Context, result *ExecutionResult, alloc Alloc) error {
|
||||
func dispatchOutput(ctx *cli.Context, baseDir string, result *ExecutionResult, alloc Alloc) error {
|
||||
stdOutObject := make(map[string]interface{})
|
||||
stdErrObject := make(map[string]interface{})
|
||||
dispatch := func(fName, name string, obj interface{}) error {
|
||||
dispatch := func(baseDir, fName, name string, obj interface{}) error {
|
||||
switch fName {
|
||||
case "stdout":
|
||||
stdOutObject[name] = obj
|
||||
case "stderr":
|
||||
stdErrObject[name] = obj
|
||||
default: // save to file
|
||||
if err := saveFile(fName, obj); err != nil {
|
||||
if err := saveFile(baseDir, fName, obj); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
if err := dispatch(ctx.String(OutputAllocFlag.Name), "alloc", alloc); err != nil {
|
||||
if err := dispatch(baseDir, ctx.String(OutputAllocFlag.Name), "alloc", alloc); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := dispatch(ctx.String(OutputResultFlag.Name), "result", result); err != nil {
|
||||
if err := dispatch(baseDir, ctx.String(OutputResultFlag.Name), "result", result); err != nil {
|
||||
return err
|
||||
}
|
||||
if len(stdOutObject) > 0 {
|
||||
|
@@ -146,6 +146,7 @@ var stateTransitionCommand = cli.Command{
|
||||
t8ntool.TraceDisableMemoryFlag,
|
||||
t8ntool.TraceDisableStackFlag,
|
||||
t8ntool.TraceDisableReturnDataFlag,
|
||||
t8ntool.OutputBasedir,
|
||||
t8ntool.OutputAllocFlag,
|
||||
t8ntool.OutputResultFlag,
|
||||
t8ntool.InputAllocFlag,
|
||||
|
@@ -155,10 +155,10 @@ echo "Example where blockhashes are provided: "
|
||||
cmd="./evm t8n --input.alloc=./testdata/3/alloc.json --input.txs=./testdata/3/txs.json --input.env=./testdata/3/env.json --trace"
|
||||
tick && echo $cmd && tick
|
||||
$cmd 2>&1 >/dev/null
|
||||
cmd="cat trace-0.jsonl | grep BLOCKHASH -C2"
|
||||
cmd="cat trace-0-0x72fadbef39cd251a437eea619cfeda752271a5faaaa2147df012e112159ffb81.jsonl | grep BLOCKHASH -C2"
|
||||
tick && echo $cmd && tick
|
||||
echo "$ticks"
|
||||
cat trace-0.jsonl | grep BLOCKHASH -C2
|
||||
cat trace-0-0x72fadbef39cd251a437eea619cfeda752271a5faaaa2147df012e112159ffb81.jsonl | grep BLOCKHASH -C2
|
||||
echo "$ticks"
|
||||
echo ""
|
||||
|
||||
|
@@ -43,6 +43,7 @@ import (
|
||||
|
||||
"github.com/ethereum/go-ethereum/accounts"
|
||||
"github.com/ethereum/go-ethereum/accounts/keystore"
|
||||
"github.com/ethereum/go-ethereum/cmd/utils"
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/core"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
@@ -235,23 +236,21 @@ func newFaucet(genesis *core.Genesis, port int, enodes []*discv5.Node, network u
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Assemble the Ethereum light client protocol
|
||||
if err := stack.Register(func(ctx *node.ServiceContext) (node.Service, error) {
|
||||
cfg := eth.DefaultConfig
|
||||
cfg.SyncMode = downloader.LightSync
|
||||
cfg.NetworkId = network
|
||||
cfg.Genesis = genesis
|
||||
return les.New(ctx, &cfg)
|
||||
}); err != nil {
|
||||
return nil, err
|
||||
cfg := eth.DefaultConfig
|
||||
cfg.SyncMode = downloader.LightSync
|
||||
cfg.NetworkId = network
|
||||
cfg.Genesis = genesis
|
||||
utils.SetDNSDiscoveryDefaults(&cfg, genesis.ToBlock(nil).Hash())
|
||||
lesBackend, err := les.New(stack, &cfg)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Failed to register the Ethereum service: %w", err)
|
||||
}
|
||||
|
||||
// Assemble the ethstats monitoring and reporting service'
|
||||
if stats != "" {
|
||||
if err := stack.Register(func(ctx *node.ServiceContext) (node.Service, error) {
|
||||
var serv *les.LightEthereum
|
||||
ctx.Service(&serv)
|
||||
return ethstats.New(stats, nil, serv)
|
||||
}); err != nil {
|
||||
if err := ethstats.New(stack, lesBackend.ApiBackend, lesBackend.Engine(), stats); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
@@ -268,7 +267,7 @@ func newFaucet(genesis *core.Genesis, port int, enodes []*discv5.Node, network u
|
||||
// Attach to the client and retrieve and interesting metadatas
|
||||
api, err := stack.Attach()
|
||||
if err != nil {
|
||||
stack.Stop()
|
||||
stack.Close()
|
||||
return nil, err
|
||||
}
|
||||
client := ethclient.NewClient(api)
|
||||
|
@@ -49,7 +49,7 @@
|
||||
<div class="row">
|
||||
<div class="col-lg-8 col-lg-offset-2">
|
||||
<div class="input-group">
|
||||
<input id="url" name="url" type="text" class="form-control" placeholder="Social network URL containing your Ethereum address...">
|
||||
<input id="url" name="url" type="text" class="form-control" placeholder="Social network URL containing your Ethereum address..."/>
|
||||
<span class="input-group-btn">
|
||||
<button class="btn btn-default dropdown-toggle" type="button" data-toggle="dropdown" aria-haspopup="true" aria-expanded="false">Give me Ether <i class="fa fa-caret-down" aria-hidden="true"></i></button>
|
||||
<ul class="dropdown-menu dropdown-menu-right">{{range $idx, $amount := .Amounts}}
|
||||
|
@@ -239,8 +239,8 @@ func initGenesis(ctx *cli.Context) error {
|
||||
if err := json.NewDecoder(file).Decode(genesis); err != nil {
|
||||
utils.Fatalf("invalid genesis file: %v", err)
|
||||
}
|
||||
// Open an initialise both full and light databases
|
||||
stack := makeFullNode(ctx)
|
||||
// Open and initialise both full and light databases
|
||||
stack, _ := makeConfigNode(ctx)
|
||||
defer stack.Close()
|
||||
|
||||
for _, name := range []string{"chaindata", "lightchaindata"} {
|
||||
@@ -277,7 +277,8 @@ func importChain(ctx *cli.Context) error {
|
||||
utils.SetupMetrics(ctx)
|
||||
// Start system runtime metrics collection
|
||||
go metrics.CollectProcessMetrics(3 * time.Second)
|
||||
stack := makeFullNode(ctx)
|
||||
|
||||
stack, _ := makeConfigNode(ctx)
|
||||
defer stack.Close()
|
||||
|
||||
chain, db := utils.MakeChain(ctx, stack, false)
|
||||
@@ -371,7 +372,8 @@ func exportChain(ctx *cli.Context) error {
|
||||
if len(ctx.Args()) < 1 {
|
||||
utils.Fatalf("This command requires an argument.")
|
||||
}
|
||||
stack := makeFullNode(ctx)
|
||||
|
||||
stack, _ := makeConfigNode(ctx)
|
||||
defer stack.Close()
|
||||
|
||||
chain, _ := utils.MakeChain(ctx, stack, true)
|
||||
@@ -406,7 +408,8 @@ func importPreimages(ctx *cli.Context) error {
|
||||
if len(ctx.Args()) < 1 {
|
||||
utils.Fatalf("This command requires an argument.")
|
||||
}
|
||||
stack := makeFullNode(ctx)
|
||||
|
||||
stack, _ := makeConfigNode(ctx)
|
||||
defer stack.Close()
|
||||
|
||||
db := utils.MakeChainDatabase(ctx, stack)
|
||||
@@ -424,7 +427,8 @@ func exportPreimages(ctx *cli.Context) error {
|
||||
if len(ctx.Args()) < 1 {
|
||||
utils.Fatalf("This command requires an argument.")
|
||||
}
|
||||
stack := makeFullNode(ctx)
|
||||
|
||||
stack, _ := makeConfigNode(ctx)
|
||||
defer stack.Close()
|
||||
|
||||
db := utils.MakeChainDatabase(ctx, stack)
|
||||
@@ -446,7 +450,7 @@ func copyDb(ctx *cli.Context) error {
|
||||
utils.Fatalf("Source ancient chain directory path argument missing")
|
||||
}
|
||||
// Initialize a new chain for the running node to sync into
|
||||
stack := makeFullNode(ctx)
|
||||
stack, _ := makeConfigNode(ctx)
|
||||
defer stack.Close()
|
||||
|
||||
chain, chainDb := utils.MakeChain(ctx, stack, false)
|
||||
@@ -554,7 +558,7 @@ func confirmAndRemoveDB(database string, kind string) {
|
||||
}
|
||||
|
||||
func dump(ctx *cli.Context) error {
|
||||
stack := makeFullNode(ctx)
|
||||
stack, _ := makeConfigNode(ctx)
|
||||
defer stack.Close()
|
||||
|
||||
chain, chainDb := utils.MakeChain(ctx, stack, true)
|
||||
|
@@ -28,9 +28,10 @@ import (
|
||||
|
||||
"github.com/ethereum/go-ethereum/cmd/utils"
|
||||
"github.com/ethereum/go-ethereum/eth"
|
||||
"github.com/ethereum/go-ethereum/internal/ethapi"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"github.com/ethereum/go-ethereum/node"
|
||||
"github.com/ethereum/go-ethereum/params"
|
||||
whisper "github.com/ethereum/go-ethereum/whisper/whisperv6"
|
||||
"github.com/naoina/toml"
|
||||
)
|
||||
|
||||
@@ -72,9 +73,19 @@ type ethstatsConfig struct {
	URL string `toml:",omitempty"`
}

// whisper has been deprecated, but clients out there might still have [Shh]
// in their config, which will crash. Cut them some slack by keeping the
// config, and displaying a message that those config switches are ineffectual.
// To be removed circa Q1 2021 -- @gballet.
type whisperDeprecatedConfig struct {
	MaxMessageSize uint32 `toml:",omitempty"`
	MinimumAcceptedPOW float64 `toml:",omitempty"`
	RestrictConnectionBetweenLightClients bool `toml:",omitempty"`
}

type gethConfig struct {
	Eth eth.Config
	Shh whisper.Config
	Shh whisperDeprecatedConfig
	Node node.Config
	Ethstats ethstatsConfig
}
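To see what the deprecated struct buys users: an old geth TOML config that still carries a [Shh] section now decodes into whisperDeprecatedConfig and only triggers a warning instead of breaking config loading. Below is a rough, standalone sketch of that behaviour (not part of the diff); it assumes naoina/toml's package-level Unmarshal (the library the config file already imports) and re-declares the deprecated struct locally for illustration.

```go
package main

import (
	"fmt"

	"github.com/naoina/toml"
)

// Local copy of the deprecated section, for illustration only.
type shhDeprecatedConfig struct {
	MaxMessageSize                        uint32  `toml:",omitempty"`
	MinimumAcceptedPOW                    float64 `toml:",omitempty"`
	RestrictConnectionBetweenLightClients bool    `toml:",omitempty"`
}

type config struct {
	Shh shhDeprecatedConfig
}

func main() {
	// A legacy config fragment that older geth versions accepted.
	legacy := []byte("[Shh]\nMaxMessageSize = 1048576\nMinimumAcceptedPOW = 0.2\n")

	var c config
	if err := toml.Unmarshal(legacy, &c); err != nil {
		panic(err)
	}
	// Mirrors the check added in makeConfigNode: warn, but keep going.
	if c.Shh != (shhDeprecatedConfig{}) {
		fmt.Println("Deprecated whisper config detected. Whisper has been moved to github.com/ethereum/whisper")
	}
}
```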
|
||||
@@ -104,11 +115,11 @@ func defaultNodeConfig() node.Config {
|
||||
return cfg
|
||||
}
|
||||
|
||||
// makeConfigNode loads geth configuration and creates a blank node instance.
|
||||
func makeConfigNode(ctx *cli.Context) (*node.Node, gethConfig) {
|
||||
// Load defaults.
|
||||
cfg := gethConfig{
|
||||
Eth: eth.DefaultConfig,
|
||||
Shh: whisper.DefaultConfig,
|
||||
Node: defaultNodeConfig(),
|
||||
}
|
||||
|
||||
@@ -117,6 +128,10 @@ func makeConfigNode(ctx *cli.Context) (*node.Node, gethConfig) {
|
||||
if err := loadConfig(file, &cfg); err != nil {
|
||||
utils.Fatalf("%v", err)
|
||||
}
|
||||
|
||||
if cfg.Shh != (whisperDeprecatedConfig{}) {
|
||||
log.Warn("Deprecated whisper config detected. Whisper has been moved to github.com/ethereum/whisper")
|
||||
}
|
||||
}
|
||||
|
||||
// Apply flags.
|
||||
@@ -129,49 +144,36 @@ func makeConfigNode(ctx *cli.Context) (*node.Node, gethConfig) {
|
||||
if ctx.GlobalIsSet(utils.EthStatsURLFlag.Name) {
|
||||
cfg.Ethstats.URL = ctx.GlobalString(utils.EthStatsURLFlag.Name)
|
||||
}
|
||||
utils.SetShhConfig(ctx, stack, &cfg.Shh)
|
||||
utils.SetShhConfig(ctx, stack)
|
||||
|
||||
return stack, cfg
|
||||
}
|
||||
|
||||
// enableWhisper returns true in case one of the whisper flags is set.
|
||||
func enableWhisper(ctx *cli.Context) bool {
|
||||
func checkWhisper(ctx *cli.Context) {
|
||||
for _, flag := range whisperFlags {
|
||||
if ctx.GlobalIsSet(flag.GetName()) {
|
||||
return true
|
||||
log.Warn("deprecated whisper flag detected. Whisper has been moved to github.com/ethereum/whisper")
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func makeFullNode(ctx *cli.Context) *node.Node {
|
||||
// makeFullNode loads geth configuration and creates the Ethereum backend.
|
||||
func makeFullNode(ctx *cli.Context) (*node.Node, ethapi.Backend) {
|
||||
stack, cfg := makeConfigNode(ctx)
|
||||
utils.RegisterEthService(stack, &cfg.Eth)
|
||||
|
||||
// Whisper must be explicitly enabled by specifying at least 1 whisper flag or in dev mode
|
||||
shhEnabled := enableWhisper(ctx)
|
||||
shhAutoEnabled := !ctx.GlobalIsSet(utils.WhisperEnabledFlag.Name) && ctx.GlobalIsSet(utils.DeveloperFlag.Name)
|
||||
if shhEnabled || shhAutoEnabled {
|
||||
if ctx.GlobalIsSet(utils.WhisperMaxMessageSizeFlag.Name) {
|
||||
cfg.Shh.MaxMessageSize = uint32(ctx.Int(utils.WhisperMaxMessageSizeFlag.Name))
|
||||
}
|
||||
if ctx.GlobalIsSet(utils.WhisperMinPOWFlag.Name) {
|
||||
cfg.Shh.MinimumAcceptedPOW = ctx.Float64(utils.WhisperMinPOWFlag.Name)
|
||||
}
|
||||
if ctx.GlobalIsSet(utils.WhisperRestrictConnectionBetweenLightClientsFlag.Name) {
|
||||
cfg.Shh.RestrictConnectionBetweenLightClients = true
|
||||
}
|
||||
utils.RegisterShhService(stack, &cfg.Shh)
|
||||
}
|
||||
backend := utils.RegisterEthService(stack, &cfg.Eth)
|
||||
|
||||
checkWhisper(ctx)
|
||||
// Configure GraphQL if requested
|
||||
if ctx.GlobalIsSet(utils.GraphQLEnabledFlag.Name) {
|
||||
utils.RegisterGraphQLService(stack, cfg.Node.GraphQLEndpoint(), cfg.Node.GraphQLCors, cfg.Node.GraphQLVirtualHosts, cfg.Node.HTTPTimeouts)
|
||||
utils.RegisterGraphQLService(stack, backend, cfg.Node)
|
||||
}
|
||||
// Add the Ethereum Stats daemon if requested.
|
||||
if cfg.Ethstats.URL != "" {
|
||||
utils.RegisterEthStatsService(stack, cfg.Ethstats.URL)
|
||||
utils.RegisterEthStatsService(stack, backend, cfg.Ethstats.URL)
|
||||
}
|
||||
return stack
|
||||
return stack, backend
|
||||
}
|
||||
|
||||
// dumpConfig is the dumpconfig command.
|
||||
|
@@ -78,12 +78,12 @@ JavaScript API. See https://github.com/ethereum/go-ethereum/wiki/JavaScript-Cons
|
||||
func localConsole(ctx *cli.Context) error {
|
||||
// Create and start the node based on the CLI flags
|
||||
prepare(ctx)
|
||||
node := makeFullNode(ctx)
|
||||
startNode(ctx, node)
|
||||
defer node.Close()
|
||||
stack, backend := makeFullNode(ctx)
|
||||
startNode(ctx, stack, backend)
|
||||
defer stack.Close()
|
||||
|
||||
// Attach to the newly started node and start the JavaScript console
|
||||
client, err := node.Attach()
|
||||
client, err := stack.Attach()
|
||||
if err != nil {
|
||||
utils.Fatalf("Failed to attach to the inproc geth: %v", err)
|
||||
}
|
||||
@@ -190,12 +190,12 @@ func dialRPC(endpoint string) (*rpc.Client, error) {
|
||||
// everything down.
|
||||
func ephemeralConsole(ctx *cli.Context) error {
|
||||
// Create and start the node based on the CLI flags
|
||||
node := makeFullNode(ctx)
|
||||
startNode(ctx, node)
|
||||
defer node.Close()
|
||||
stack, backend := makeFullNode(ctx)
|
||||
startNode(ctx, stack, backend)
|
||||
defer stack.Close()
|
||||
|
||||
// Attach to the newly started node and start the JavaScript console
|
||||
client, err := node.Attach()
|
||||
client, err := stack.Attach()
|
||||
if err != nil {
|
||||
utils.Fatalf("Failed to attach to the inproc geth: %v", err)
|
||||
}
|
||||
|
@@ -31,7 +31,7 @@ import (
|
||||
)
|
||||
|
||||
const (
|
||||
ipcAPIs = "admin:1.0 debug:1.0 eth:1.0 ethash:1.0 miner:1.0 net:1.0 personal:1.0 rpc:1.0 shh:1.0 txpool:1.0 web3:1.0"
|
||||
ipcAPIs = "admin:1.0 debug:1.0 eth:1.0 ethash:1.0 miner:1.0 net:1.0 personal:1.0 rpc:1.0 txpool:1.0 web3:1.0"
|
||||
httpAPIs = "eth:1.0 net:1.0 rpc:1.0 web3:1.0"
|
||||
)
|
||||
|
||||
@@ -43,7 +43,7 @@ func TestConsoleWelcome(t *testing.T) {
|
||||
// Start a geth console, make sure it's cleaned up and terminate the console
|
||||
geth := runGeth(t,
|
||||
"--port", "0", "--maxpeers", "0", "--nodiscover", "--nat", "none",
|
||||
"--etherbase", coinbase, "--shh",
|
||||
"--etherbase", coinbase,
|
||||
"console")
|
||||
|
||||
// Gather all the infos the welcome message needs to contain
|
||||
@@ -83,11 +83,9 @@ func TestIPCAttachWelcome(t *testing.T) {
|
||||
defer os.RemoveAll(ws)
|
||||
ipc = filepath.Join(ws, "geth.ipc")
|
||||
}
|
||||
// Note: we need --shh because testAttachWelcome checks for default
|
||||
// list of ipc modules and shh is included there.
|
||||
geth := runGeth(t,
|
||||
"--port", "0", "--maxpeers", "0", "--nodiscover", "--nat", "none",
|
||||
"--etherbase", coinbase, "--shh", "--ipcpath", ipc)
|
||||
"--etherbase", coinbase, "--ipcpath", ipc)
|
||||
|
||||
defer func() {
|
||||
geth.Interrupt()
|
||||
|
@@ -119,8 +119,7 @@ func testDAOForkBlockNewChain(t *testing.T, test int, genesis string, expectBloc
|
||||
} else {
|
||||
// Force chain initialization
|
||||
args := []string{"--port", "0", "--maxpeers", "0", "--nodiscover", "--nat", "none", "--ipcdisable", "--datadir", datadir}
|
||||
geth := runGeth(t, append(args, []string{"--exec", "2+2", "console"}...)...)
|
||||
geth.WaitExit()
|
||||
runGeth(t, append(args, []string{"--exec", "2+2", "console"}...)...).WaitExit()
|
||||
}
|
||||
// Retrieve the DAO config flag from the database
|
||||
path := filepath.Join(datadir, "geth", "chaindata")
|
||||
|
@@ -95,9 +95,9 @@ func (g *gethrpc) waitSynced() {
|
||||
}
|
||||
}
|
||||
|
||||
func startGethWithRpc(t *testing.T, name string, args ...string) *gethrpc {
|
||||
func startGethWithIpc(t *testing.T, name string, args ...string) *gethrpc {
|
||||
g := &gethrpc{name: name}
|
||||
args = append([]string{"--networkid=42", "--port=0", "--nousb", "--http", "--http.port=0", "--http.api=admin,eth,les"}, args...)
|
||||
args = append([]string{"--networkid=42", "--port=0", "--nousb"}, args...)
|
||||
t.Logf("Starting %v with rpc: %v", name, args)
|
||||
g.geth = runGeth(t, args...)
|
||||
// wait before we can attach to it. TODO: probe for it properly
|
||||
@@ -112,7 +112,7 @@ func startGethWithRpc(t *testing.T, name string, args ...string) *gethrpc {
|
||||
}
|
||||
|
||||
func initGeth(t *testing.T) string {
|
||||
g := runGeth(t, "--networkid=42", "init", "./testdata/clique.json")
|
||||
g := runGeth(t, "--nousb", "--networkid=42", "init", "./testdata/clique.json")
|
||||
datadir := g.Datadir
|
||||
g.WaitExit()
|
||||
return datadir
|
||||
@@ -120,15 +120,15 @@ func initGeth(t *testing.T) string {
|
||||
|
||||
func startLightServer(t *testing.T) *gethrpc {
|
||||
datadir := initGeth(t)
|
||||
runGeth(t, "--datadir", datadir, "--password", "./testdata/password.txt", "account", "import", "./testdata/key.prv").WaitExit()
|
||||
runGeth(t, "--nousb", "--datadir", datadir, "--password", "./testdata/password.txt", "account", "import", "./testdata/key.prv").WaitExit()
|
||||
account := "0x02f0d131f1f97aef08aec6e3291b957d9efe7105"
|
||||
server := startGethWithRpc(t, "lightserver", "--allow-insecure-unlock", "--datadir", datadir, "--password", "./testdata/password.txt", "--unlock", account, "--mine", "--light.serve=100", "--light.maxpeers=1", "--nodiscover", "--nat=extip:127.0.0.1")
|
||||
server := startGethWithIpc(t, "lightserver", "--allow-insecure-unlock", "--datadir", datadir, "--password", "./testdata/password.txt", "--unlock", account, "--mine", "--light.serve=100", "--light.maxpeers=1", "--nodiscover", "--nat=extip:127.0.0.1")
|
||||
return server
|
||||
}
|
||||
|
||||
func startClient(t *testing.T, name string) *gethrpc {
|
||||
datadir := initGeth(t)
|
||||
return startGethWithRpc(t, name, "--datadir", datadir, "--nodiscover", "--syncmode=light", "--nat=extip:127.0.0.1")
|
||||
return startGethWithIpc(t, name, "--datadir", datadir, "--nodiscover", "--syncmode=light", "--nat=extip:127.0.0.1")
|
||||
}
|
||||
|
||||
func TestPriorityClient(t *testing.T) {
|
||||
@@ -152,7 +152,7 @@ func TestPriorityClient(t *testing.T) {
|
||||
defer prioCli.killAndWait()
|
||||
// 3_000_000_000 once we move to Go 1.13
|
||||
tokens := 3000000000
|
||||
lightServer.callRPC(nil, "les_addBalance", prioCli.getNodeInfo().ID, tokens, "foobar")
|
||||
lightServer.callRPC(nil, "les_addBalance", prioCli.getNodeInfo().ID, tokens)
|
||||
prioCli.addPeer(lightServer)
|
||||
|
||||
// Check if priority client is actually syncing and the regular client got kicked out
|
||||
@@ -166,6 +166,7 @@ func TestPriorityClient(t *testing.T) {
|
||||
freeCli.getNodeInfo().ID: freeCli,
|
||||
prioCli.getNodeInfo().ID: prioCli,
|
||||
}
|
||||
time.Sleep(1 * time.Second)
|
||||
lightServer.callRPC(&peers, "admin_peers")
|
||||
peersWithNames := make(map[string]string)
|
||||
for _, p := range peers {
|
||||
|
@@ -36,8 +36,8 @@ import (
|
||||
"github.com/ethereum/go-ethereum/eth/downloader"
|
||||
"github.com/ethereum/go-ethereum/ethclient"
|
||||
"github.com/ethereum/go-ethereum/internal/debug"
|
||||
"github.com/ethereum/go-ethereum/internal/ethapi"
|
||||
"github.com/ethereum/go-ethereum/internal/flags"
|
||||
"github.com/ethereum/go-ethereum/les"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"github.com/ethereum/go-ethereum/metrics"
|
||||
"github.com/ethereum/go-ethereum/node"
|
||||
@@ -108,6 +108,8 @@ var (
|
||||
utils.CacheFlag,
|
||||
utils.CacheDatabaseFlag,
|
||||
utils.CacheTrieFlag,
|
||||
utils.CacheTrieJournalFlag,
|
||||
utils.CacheTrieRejournalFlag,
|
||||
utils.CacheGCFlag,
|
||||
utils.CacheSnapshotFlag,
|
||||
utils.CacheNoPrefetchFlag,
|
||||
@@ -152,6 +154,7 @@ var (
|
||||
utils.LegacyGpoBlocksFlag,
|
||||
utils.GpoPercentileFlag,
|
||||
utils.LegacyGpoPercentileFlag,
|
||||
utils.GpoMaxGasPriceFlag,
|
||||
utils.EWASMInterpreterFlag,
|
||||
utils.EVMInterpreterFlag,
|
||||
configFileFlag,
|
||||
@@ -169,8 +172,6 @@ var (
|
||||
utils.LegacyRPCCORSDomainFlag,
|
||||
utils.LegacyRPCVirtualHostsFlag,
|
||||
utils.GraphQLEnabledFlag,
|
||||
utils.GraphQLListenAddrFlag,
|
||||
utils.GraphQLPortFlag,
|
||||
utils.GraphQLCORSDomainFlag,
|
||||
utils.GraphQLVirtualHostsFlag,
|
||||
utils.HTTPApiFlag,
|
||||
@@ -187,8 +188,8 @@ var (
|
||||
utils.IPCDisabledFlag,
|
||||
utils.IPCPathFlag,
|
||||
utils.InsecureUnlockAllowedFlag,
|
||||
utils.RPCGlobalGasCap,
|
||||
utils.RPCGlobalTxFeeCap,
|
||||
utils.RPCGlobalGasCapFlag,
|
||||
utils.RPCGlobalTxFeeCapFlag,
|
||||
}
|
||||
|
||||
whisperFlags = []cli.Flag{
|
||||
@@ -348,18 +349,20 @@ func geth(ctx *cli.Context) error {
|
||||
if args := ctx.Args(); len(args) > 0 {
|
||||
return fmt.Errorf("invalid command: %q", args[0])
|
||||
}
|
||||
|
||||
prepare(ctx)
|
||||
node := makeFullNode(ctx)
|
||||
defer node.Close()
|
||||
startNode(ctx, node)
|
||||
node.Wait()
|
||||
stack, backend := makeFullNode(ctx)
|
||||
defer stack.Close()
|
||||
|
||||
startNode(ctx, stack, backend)
|
||||
stack.Wait()
|
||||
return nil
|
||||
}
|
||||
|
||||
// startNode boots up the system node and all registered protocols, after which
|
||||
// it unlocks any requested accounts, and starts the RPC/IPC interfaces and the
|
||||
// miner.
|
||||
func startNode(ctx *cli.Context, stack *node.Node) {
|
||||
func startNode(ctx *cli.Context, stack *node.Node, backend ethapi.Backend) {
|
||||
debug.Memsize.Add("node", stack)
|
||||
|
||||
// Start up the node itself
|
||||
@@ -379,25 +382,6 @@ func startNode(ctx *cli.Context, stack *node.Node) {
|
||||
}
|
||||
ethClient := ethclient.NewClient(rpcClient)
|
||||
|
||||
// Set contract backend for ethereum service if local node
|
||||
// is serving LES requests.
|
||||
if ctx.GlobalInt(utils.LegacyLightServFlag.Name) > 0 || ctx.GlobalInt(utils.LightServeFlag.Name) > 0 {
|
||||
var ethService *eth.Ethereum
|
||||
if err := stack.Service(ðService); err != nil {
|
||||
utils.Fatalf("Failed to retrieve ethereum service: %v", err)
|
||||
}
|
||||
ethService.SetContractBackend(ethClient)
|
||||
}
|
||||
// Set contract backend for les service if local node is
|
||||
// running as a light client.
|
||||
if ctx.GlobalString(utils.SyncModeFlag.Name) == "light" {
|
||||
var lesService *les.LightEthereum
|
||||
if err := stack.Service(&lesService); err != nil {
|
||||
utils.Fatalf("Failed to retrieve light ethereum service: %v", err)
|
||||
}
|
||||
lesService.SetContractBackend(ethClient)
|
||||
}
|
||||
|
||||
go func() {
|
||||
// Open any wallets already attached
|
||||
for _, wallet := range stack.AccountManager().Wallets() {
|
||||
@@ -449,7 +433,7 @@ func startNode(ctx *cli.Context, stack *node.Node) {
|
||||
if timestamp := time.Unix(int64(done.Latest.Time), 0); time.Since(timestamp) < 10*time.Minute {
|
||||
log.Info("Synchronisation completed", "latestnum", done.Latest.Number, "latesthash", done.Latest.Hash(),
|
||||
"age", common.PrettyAge(timestamp))
|
||||
stack.Stop()
|
||||
stack.Close()
|
||||
}
|
||||
}
|
||||
}()
|
||||
@@ -461,24 +445,24 @@ func startNode(ctx *cli.Context, stack *node.Node) {
|
||||
if ctx.GlobalString(utils.SyncModeFlag.Name) == "light" {
|
||||
utils.Fatalf("Light clients do not support mining")
|
||||
}
|
||||
var ethereum *eth.Ethereum
|
||||
if err := stack.Service(ðereum); err != nil {
|
||||
ethBackend, ok := backend.(*eth.EthAPIBackend)
|
||||
if !ok {
|
||||
utils.Fatalf("Ethereum service not running: %v", err)
|
||||
}
|
||||
|
||||
// Set the gas price to the limits from the CLI and start mining
|
||||
gasprice := utils.GlobalBig(ctx, utils.MinerGasPriceFlag.Name)
|
||||
if ctx.GlobalIsSet(utils.LegacyMinerGasPriceFlag.Name) && !ctx.GlobalIsSet(utils.MinerGasPriceFlag.Name) {
|
||||
gasprice = utils.GlobalBig(ctx, utils.LegacyMinerGasPriceFlag.Name)
|
||||
}
|
||||
ethereum.TxPool().SetGasPrice(gasprice)
|
||||
|
||||
ethBackend.TxPool().SetGasPrice(gasprice)
|
||||
// start mining
|
||||
threads := ctx.GlobalInt(utils.MinerThreadsFlag.Name)
|
||||
if ctx.GlobalIsSet(utils.LegacyMinerThreadsFlag.Name) && !ctx.GlobalIsSet(utils.MinerThreadsFlag.Name) {
|
||||
threads = ctx.GlobalInt(utils.LegacyMinerThreadsFlag.Name)
|
||||
log.Warn("The flag --minerthreads is deprecated and will be removed in the future, please use --miner.threads")
|
||||
}
|
||||
|
||||
if err := ethereum.StartMining(threads); err != nil {
|
||||
if err := ethBackend.StartMining(threads); err != nil {
|
||||
utils.Fatalf("Failed to start mining: %v", err)
|
||||
}
|
||||
}
|
||||
|
@@ -23,7 +23,6 @@ import (
|
||||
"math/big"
|
||||
"os"
|
||||
"os/signal"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/cmd/utils"
|
||||
@@ -200,11 +199,11 @@ func (e *NoRewardEngine) Author(header *types.Header) (common.Address, error) {
|
||||
return e.inner.Author(header)
|
||||
}
|
||||
|
||||
func (e *NoRewardEngine) VerifyHeader(chain consensus.ChainReader, header *types.Header, seal bool) error {
|
||||
func (e *NoRewardEngine) VerifyHeader(chain consensus.ChainHeaderReader, header *types.Header, seal bool) error {
|
||||
return e.inner.VerifyHeader(chain, header, seal)
|
||||
}
|
||||
|
||||
func (e *NoRewardEngine) VerifyHeaders(chain consensus.ChainReader, headers []*types.Header, seals []bool) (chan<- struct{}, <-chan error) {
|
||||
func (e *NoRewardEngine) VerifyHeaders(chain consensus.ChainHeaderReader, headers []*types.Header, seals []bool) (chan<- struct{}, <-chan error) {
|
||||
return e.inner.VerifyHeaders(chain, headers, seals)
|
||||
}
|
||||
|
||||
@@ -212,11 +211,11 @@ func (e *NoRewardEngine) VerifyUncles(chain consensus.ChainReader, block *types.
|
||||
return e.inner.VerifyUncles(chain, block)
|
||||
}
|
||||
|
||||
func (e *NoRewardEngine) VerifySeal(chain consensus.ChainReader, header *types.Header) error {
|
||||
func (e *NoRewardEngine) VerifySeal(chain consensus.ChainHeaderReader, header *types.Header) error {
|
||||
return e.inner.VerifySeal(chain, header)
|
||||
}
|
||||
|
||||
func (e *NoRewardEngine) Prepare(chain consensus.ChainReader, header *types.Header) error {
|
||||
func (e *NoRewardEngine) Prepare(chain consensus.ChainHeaderReader, header *types.Header) error {
|
||||
return e.inner.Prepare(chain, header)
|
||||
}
|
||||
|
||||
@@ -229,7 +228,7 @@ func (e *NoRewardEngine) accumulateRewards(config *params.ChainConfig, state *st
|
||||
state.AddBalance(header.Coinbase, reward)
|
||||
}
|
||||
|
||||
func (e *NoRewardEngine) Finalize(chain consensus.ChainReader, header *types.Header, statedb *state.StateDB, txs []*types.Transaction,
|
||||
func (e *NoRewardEngine) Finalize(chain consensus.ChainHeaderReader, header *types.Header, statedb *state.StateDB, txs []*types.Transaction,
|
||||
uncles []*types.Header) {
|
||||
if e.rewardsOn {
|
||||
e.inner.Finalize(chain, header, statedb, txs, uncles)
|
||||
@@ -239,7 +238,7 @@ func (e *NoRewardEngine) Finalize(chain consensus.ChainReader, header *types.Hea
|
||||
}
|
||||
}
|
||||
|
||||
func (e *NoRewardEngine) FinalizeAndAssemble(chain consensus.ChainReader, header *types.Header, statedb *state.StateDB, txs []*types.Transaction,
|
||||
func (e *NoRewardEngine) FinalizeAndAssemble(chain consensus.ChainHeaderReader, header *types.Header, statedb *state.StateDB, txs []*types.Transaction,
|
||||
uncles []*types.Header, receipts []*types.Receipt) (*types.Block, error) {
|
||||
if e.rewardsOn {
|
||||
return e.inner.FinalizeAndAssemble(chain, header, statedb, txs, uncles, receipts)
|
||||
@@ -248,11 +247,11 @@ func (e *NoRewardEngine) FinalizeAndAssemble(chain consensus.ChainReader, header
|
||||
header.Root = statedb.IntermediateRoot(chain.Config().IsEIP158(header.Number))
|
||||
|
||||
// Header seems complete, assemble into a block and return
|
||||
return types.NewBlock(header, txs, uncles, receipts), nil
|
||||
return types.NewBlock(header, txs, uncles, receipts, new(trie.Trie)), nil
|
||||
}
|
||||
}
|
||||
|
||||
func (e *NoRewardEngine) Seal(chain consensus.ChainReader, block *types.Block, results chan<- *types.Block, stop <-chan struct{}) error {
|
||||
func (e *NoRewardEngine) Seal(chain consensus.ChainHeaderReader, block *types.Block, results chan<- *types.Block, stop <-chan struct{}) error {
|
||||
return e.inner.Seal(chain, block, results, stop)
|
||||
}
|
||||
|
||||
@@ -260,11 +259,11 @@ func (e *NoRewardEngine) SealHash(header *types.Header) common.Hash {
|
||||
return e.inner.SealHash(header)
|
||||
}
|
||||
|
||||
func (e *NoRewardEngine) CalcDifficulty(chain consensus.ChainReader, time uint64, parent *types.Header) *big.Int {
|
||||
func (e *NoRewardEngine) CalcDifficulty(chain consensus.ChainHeaderReader, time uint64, parent *types.Header) *big.Int {
|
||||
return e.inner.CalcDifficulty(chain, time, parent)
|
||||
}
|
||||
|
||||
func (e *NoRewardEngine) APIs(chain consensus.ChainReader) []rpc.API {
|
||||
func (e *NoRewardEngine) APIs(chain consensus.ChainHeaderReader) []rpc.API {
|
||||
return e.inner.APIs(chain)
|
||||
}
|
||||
|
||||
@@ -840,16 +839,6 @@ func (api *RetestethAPI) ClientVersion(ctx context.Context) (string, error) {
|
||||
return "Geth-" + params.VersionWithCommit(gitCommit, gitDate), nil
|
||||
}
|
||||
|
||||
// splitAndTrim splits input separated by a comma
|
||||
// and trims excessive white space from the substrings.
|
||||
func splitAndTrim(input string) []string {
|
||||
result := strings.Split(input, ",")
|
||||
for i, r := range result {
|
||||
result[i] = strings.TrimSpace(r)
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
func retesteth(ctx *cli.Context) error {
|
||||
log.Info("Welcome to retesteth!")
|
||||
// register signer API with server
|
||||
@@ -887,8 +876,8 @@ func retesteth(ctx *cli.Context) error {
|
||||
Version: "1.0",
|
||||
},
|
||||
}
|
||||
vhosts := splitAndTrim(ctx.GlobalString(utils.HTTPVirtualHostsFlag.Name))
|
||||
cors := splitAndTrim(ctx.GlobalString(utils.HTTPCORSDomainFlag.Name))
|
||||
vhosts := utils.SplitAndTrim(ctx.GlobalString(utils.HTTPVirtualHostsFlag.Name))
|
||||
cors := utils.SplitAndTrim(ctx.GlobalString(utils.HTTPCORSDomainFlag.Name))
|
||||
|
||||
// register apis and create handler stack
|
||||
srv := rpc.NewServer()
|
||||
|
@@ -109,6 +109,8 @@ var AppHelpFlagGroups = []flags.FlagGroup{
|
||||
utils.CacheFlag,
|
||||
utils.CacheDatabaseFlag,
|
||||
utils.CacheTrieFlag,
|
||||
utils.CacheTrieJournalFlag,
|
||||
utils.CacheTrieRejournalFlag,
|
||||
utils.CacheGCFlag,
|
||||
utils.CacheSnapshotFlag,
|
||||
utils.CacheNoPrefetchFlag,
|
||||
@@ -140,12 +142,10 @@ var AppHelpFlagGroups = []flags.FlagGroup{
|
||||
utils.WSApiFlag,
|
||||
utils.WSAllowedOriginsFlag,
|
||||
utils.GraphQLEnabledFlag,
|
||||
utils.GraphQLListenAddrFlag,
|
||||
utils.GraphQLPortFlag,
|
||||
utils.GraphQLCORSDomainFlag,
|
||||
utils.GraphQLVirtualHostsFlag,
|
||||
utils.RPCGlobalGasCap,
|
||||
utils.RPCGlobalTxFeeCap,
|
||||
utils.RPCGlobalGasCapFlag,
|
||||
utils.RPCGlobalTxFeeCapFlag,
|
||||
utils.JSpathFlag,
|
||||
utils.ExecFlag,
|
||||
utils.PreloadJSFlag,
|
||||
@@ -189,6 +189,7 @@ var AppHelpFlagGroups = []flags.FlagGroup{
|
||||
Flags: []cli.Flag{
|
||||
utils.GpoBlocksFlag,
|
||||
utils.GpoPercentileFlag,
|
||||
utils.GpoMaxGasPriceFlag,
|
||||
},
|
||||
},
|
||||
{
|
||||
@@ -211,7 +212,7 @@ var AppHelpFlagGroups = []flags.FlagGroup{
|
||||
Flags: metricsFlags,
|
||||
},
|
||||
{
|
||||
Name: "WHISPER (EXPERIMENTAL)",
|
||||
Name: "WHISPER (deprecated)",
|
||||
Flags: whisperFlags,
|
||||
},
|
||||
{
|
||||
@@ -229,6 +230,8 @@ var AppHelpFlagGroups = []flags.FlagGroup{
|
||||
utils.LegacyWSApiFlag,
|
||||
utils.LegacyGpoBlocksFlag,
|
||||
utils.LegacyGpoPercentileFlag,
|
||||
utils.LegacyGraphQLListenAddrFlag,
|
||||
utils.LegacyGraphQLPortFlag,
|
||||
}, debug.DeprecatedFlags...),
|
||||
},
|
||||
{
|
||||
|
@@ -289,7 +289,7 @@ func createNode(ctx *cli.Context) error {
|
||||
config.PrivateKey = privKey
|
||||
}
|
||||
if services := ctx.String("services"); services != "" {
|
||||
config.Services = strings.Split(services, ",")
|
||||
config.Lifecycles = strings.Split(services, ",")
|
||||
}
|
||||
node, err := client.CreateNode(config)
|
||||
if err != nil {
|
||||
|
@@ -73,7 +73,7 @@ func StartNode(stack *node.Node) {
|
||||
defer signal.Stop(sigc)
|
||||
<-sigc
|
||||
log.Info("Got interrupt, shutting down...")
|
||||
go stack.Stop()
|
||||
go stack.Close()
|
||||
for i := 10; i > 0; i-- {
|
||||
<-sigc
|
||||
if i > 1 {
|
||||
|
@@ -192,14 +192,14 @@ func GlobalBig(ctx *cli.Context, name string) *big.Int {
|
||||
// Note, it has limitations, e.g. ~someuser/tmp will not be expanded
|
||||
func expandPath(p string) string {
|
||||
if strings.HasPrefix(p, "~/") || strings.HasPrefix(p, "~\\") {
|
||||
if home := homeDir(); home != "" {
|
||||
if home := HomeDir(); home != "" {
|
||||
p = home + p[1:]
|
||||
}
|
||||
}
|
||||
return path.Clean(os.ExpandEnv(p))
|
||||
}
|
||||
|
||||
func homeDir() string {
|
||||
func HomeDir() string {
|
||||
if home := os.Getenv("HOME"); home != "" {
|
||||
return home
|
||||
}
|
||||
|
@@ -19,7 +19,6 @@ package utils
|
||||
|
||||
import (
|
||||
"crypto/ecdsa"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
@@ -40,6 +39,7 @@ import (
|
||||
"github.com/ethereum/go-ethereum/consensus/clique"
|
||||
"github.com/ethereum/go-ethereum/consensus/ethash"
|
||||
"github.com/ethereum/go-ethereum/core"
|
||||
"github.com/ethereum/go-ethereum/core/rawdb"
|
||||
"github.com/ethereum/go-ethereum/core/vm"
|
||||
"github.com/ethereum/go-ethereum/crypto"
|
||||
"github.com/ethereum/go-ethereum/eth"
|
||||
@@ -48,6 +48,7 @@ import (
|
||||
"github.com/ethereum/go-ethereum/ethdb"
|
||||
"github.com/ethereum/go-ethereum/ethstats"
|
||||
"github.com/ethereum/go-ethereum/graphql"
|
||||
"github.com/ethereum/go-ethereum/internal/ethapi"
|
||||
"github.com/ethereum/go-ethereum/internal/flags"
|
||||
"github.com/ethereum/go-ethereum/les"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
@@ -62,8 +63,6 @@ import (
|
||||
"github.com/ethereum/go-ethereum/p2p/nat"
|
||||
"github.com/ethereum/go-ethereum/p2p/netutil"
|
||||
"github.com/ethereum/go-ethereum/params"
|
||||
"github.com/ethereum/go-ethereum/rpc"
|
||||
whisper "github.com/ethereum/go-ethereum/whisper/whisperv6"
|
||||
pcsclite "github.com/gballet/go-libpcsclite"
|
||||
cli "gopkg.in/urfave/cli.v1"
|
||||
)
|
||||
@@ -129,7 +128,7 @@ var (
|
||||
}
|
||||
NetworkIdFlag = cli.Uint64Flag{
|
||||
Name: "networkid",
|
||||
Usage: "Network identifier (integer, 1=Frontier, 3=Ropsten, 4=Rinkeby, 5=Görli)",
|
||||
Usage: "Explicitly set network id (integer)(For testnets: use --ropsten, --rinkeby, --goerli instead)",
|
||||
Value: eth.DefaultConfig.NetworkId,
|
||||
}
|
||||
GoerliFlag = cli.BoolFlag{
|
||||
@@ -163,7 +162,7 @@ var (
|
||||
DocRootFlag = DirectoryFlag{
|
||||
Name: "docroot",
|
||||
Usage: "Document Root for HTTPClient file scheme",
|
||||
Value: DirectoryString(homeDir()),
|
||||
Value: DirectoryString(HomeDir()),
|
||||
}
|
||||
ExitWhenSyncedFlag = cli.BoolFlag{
|
||||
Name: "exitwhensynced",
|
||||
@@ -360,6 +359,16 @@ var (
|
||||
Usage: "Percentage of cache memory allowance to use for trie caching (default = 15% full mode, 30% archive mode)",
|
||||
Value: 15,
|
||||
}
|
||||
CacheTrieJournalFlag = cli.StringFlag{
|
||||
Name: "cache.trie.journal",
|
||||
Usage: "Disk journal directory for trie cache to survive node restarts",
|
||||
Value: eth.DefaultConfig.TrieCleanCacheJournal,
|
||||
}
|
||||
CacheTrieRejournalFlag = cli.DurationFlag{
|
||||
Name: "cache.trie.rejournal",
|
||||
Usage: "Time interval to regenerate the trie cache journal",
|
||||
Value: eth.DefaultConfig.TrieCleanCacheRejournal,
|
||||
}
|
||||
CacheGCFlag = cli.IntFlag{
|
||||
Name: "cache.gc",
|
||||
Usage: "Percentage of cache memory allowance to use for trie pruning (default = 25% full mode, 0% archive mode)",
|
||||
@@ -445,12 +454,12 @@ var (
|
||||
Name: "allow-insecure-unlock",
|
||||
Usage: "Allow insecure account unlocking when account-related RPCs are exposed by http",
|
||||
}
|
||||
RPCGlobalGasCap = cli.Uint64Flag{
|
||||
RPCGlobalGasCapFlag = cli.Uint64Flag{
|
||||
Name: "rpc.gascap",
|
||||
Usage: "Sets a cap on gas that can be used in eth_call/estimateGas (0=infinite)",
|
||||
Value: eth.DefaultConfig.RPCGasCap,
|
||||
}
|
||||
RPCGlobalTxFeeCap = cli.Float64Flag{
|
||||
RPCGlobalTxFeeCapFlag = cli.Float64Flag{
|
||||
Name: "rpc.txfeecap",
|
||||
Usage: "Sets a cap on transaction fee (in ether) that can be sent via the RPC APIs (0 = no cap)",
|
||||
Value: eth.DefaultConfig.RPCTxFeeCap,
|
||||
@@ -506,6 +515,20 @@ var (
|
||||
Usage: "API's offered over the HTTP-RPC interface",
|
||||
Value: "",
|
||||
}
|
||||
GraphQLEnabledFlag = cli.BoolFlag{
|
||||
Name: "graphql",
|
||||
Usage: "Enable GraphQL on the HTTP-RPC server. Note that GraphQL can only be started if an HTTP server is started as well.",
|
||||
}
|
||||
GraphQLCORSDomainFlag = cli.StringFlag{
|
||||
Name: "graphql.corsdomain",
|
||||
Usage: "Comma separated list of domains from which to accept cross origin requests (browser enforced)",
|
||||
Value: "",
|
||||
}
|
||||
GraphQLVirtualHostsFlag = cli.StringFlag{
|
||||
Name: "graphql.vhosts",
|
||||
Usage: "Comma separated list of virtual hostnames from which to accept requests (server enforced). Accepts '*' wildcard.",
|
||||
Value: strings.Join(node.DefaultConfig.GraphQLVirtualHosts, ","),
|
||||
}
|
||||
WSEnabledFlag = cli.BoolFlag{
|
||||
Name: "ws",
|
||||
Usage: "Enable the WS-RPC server",
|
||||
@@ -530,30 +553,6 @@ var (
|
||||
Usage: "Origins from which to accept websockets requests",
|
||||
Value: "",
|
||||
}
|
||||
GraphQLEnabledFlag = cli.BoolFlag{
|
||||
Name: "graphql",
|
||||
Usage: "Enable the GraphQL server",
|
||||
}
|
||||
GraphQLListenAddrFlag = cli.StringFlag{
|
||||
Name: "graphql.addr",
|
||||
Usage: "GraphQL server listening interface",
|
||||
Value: node.DefaultGraphQLHost,
|
||||
}
|
||||
GraphQLPortFlag = cli.IntFlag{
|
||||
Name: "graphql.port",
|
||||
Usage: "GraphQL server listening port",
|
||||
Value: node.DefaultGraphQLPort,
|
||||
}
|
||||
GraphQLCORSDomainFlag = cli.StringFlag{
|
||||
Name: "graphql.corsdomain",
|
||||
Usage: "Comma separated list of domains from which to accept cross origin requests (browser enforced)",
|
||||
Value: "",
|
||||
}
|
||||
GraphQLVirtualHostsFlag = cli.StringFlag{
|
||||
Name: "graphql.vhosts",
|
||||
Usage: "Comma separated list of virtual hostnames from which to accept requests (server enforced). Accepts '*' wildcard.",
|
||||
Value: strings.Join(node.DefaultConfig.GraphQLVirtualHosts, ","),
|
||||
}
|
||||
ExecFlag = cli.StringFlag{
|
||||
Name: "exec",
|
||||
Usage: "Execute JavaScript statement",
|
||||
@@ -632,6 +631,11 @@ var (
|
||||
Usage: "Suggested gas price is the given percentile of a set of recent transaction gas prices",
|
||||
Value: eth.DefaultConfig.GPO.Percentile,
|
||||
}
|
||||
GpoMaxGasPriceFlag = cli.Int64Flag{
|
||||
Name: "gpo.maxprice",
|
||||
Usage: "Maximum gas price will be recommended by gpo",
|
||||
Value: eth.DefaultConfig.GPO.MaxPrice.Int64(),
|
||||
}
|
||||
WhisperEnabledFlag = cli.BoolFlag{
|
||||
Name: "shh",
|
||||
Usage: "Enable Whisper",
|
||||
@@ -639,12 +643,12 @@ var (
|
||||
WhisperMaxMessageSizeFlag = cli.IntFlag{
|
||||
Name: "shh.maxmessagesize",
|
||||
Usage: "Max message size accepted",
|
||||
Value: int(whisper.DefaultMaxMessageSize),
|
||||
Value: 1024 * 1024,
|
||||
}
|
||||
WhisperMinPOWFlag = cli.Float64Flag{
|
||||
Name: "shh.pow",
|
||||
Usage: "Minimum POW accepted",
|
||||
Value: whisper.DefaultMinimumPoW,
|
||||
Value: 0.2,
|
||||
}
|
||||
WhisperRestrictConnectionBetweenLightClientsFlag = cli.BoolFlag{
|
||||
Name: "shh.restrict-light",
|
||||
@@ -789,9 +793,9 @@ func setBootstrapNodes(ctx *cli.Context, cfg *p2p.Config) {
|
||||
switch {
|
||||
case ctx.GlobalIsSet(BootnodesFlag.Name) || ctx.GlobalIsSet(LegacyBootnodesV4Flag.Name):
|
||||
if ctx.GlobalIsSet(LegacyBootnodesV4Flag.Name) {
|
||||
urls = splitAndTrim(ctx.GlobalString(LegacyBootnodesV4Flag.Name))
|
||||
urls = SplitAndTrim(ctx.GlobalString(LegacyBootnodesV4Flag.Name))
|
||||
} else {
|
||||
urls = splitAndTrim(ctx.GlobalString(BootnodesFlag.Name))
|
||||
urls = SplitAndTrim(ctx.GlobalString(BootnodesFlag.Name))
|
||||
}
|
||||
case ctx.GlobalBool(LegacyTestnetFlag.Name) || ctx.GlobalBool(RopstenFlag.Name):
|
||||
urls = params.RopstenBootnodes
|
||||
@@ -825,9 +829,9 @@ func setBootstrapNodesV5(ctx *cli.Context, cfg *p2p.Config) {
|
||||
switch {
|
||||
case ctx.GlobalIsSet(BootnodesFlag.Name) || ctx.GlobalIsSet(LegacyBootnodesV5Flag.Name):
|
||||
if ctx.GlobalIsSet(LegacyBootnodesV5Flag.Name) {
|
||||
urls = splitAndTrim(ctx.GlobalString(LegacyBootnodesV5Flag.Name))
|
||||
urls = SplitAndTrim(ctx.GlobalString(LegacyBootnodesV5Flag.Name))
|
||||
} else {
|
||||
urls = splitAndTrim(ctx.GlobalString(BootnodesFlag.Name))
|
||||
urls = SplitAndTrim(ctx.GlobalString(BootnodesFlag.Name))
|
||||
}
|
||||
case ctx.GlobalBool(RopstenFlag.Name):
|
||||
urls = params.RopstenBootnodes
|
||||
@@ -873,13 +877,12 @@ func setNAT(ctx *cli.Context, cfg *p2p.Config) {
|
||||
}
|
||||
}
|
||||
|
||||
// splitAndTrim splits input separated by a comma
// SplitAndTrim splits input separated by a comma
// and trims excessive white space from the substrings.
func splitAndTrim(input string) (ret []string) {
func SplitAndTrim(input string) (ret []string) {
l := strings.Split(input, ",")
for _, r := range l {
r = strings.TrimSpace(r)
if len(r) > 0 {
if r = strings.TrimSpace(r); r != "" {
ret = append(ret, r)
}
}
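The helper above is exported so other command packages can reuse it. A minimal, self-contained sketch of its behaviour (a local copy of the function rather than an import of cmd/utils, purely for illustration):

```go
package main

import (
	"fmt"
	"strings"
)

// splitAndTrim mirrors the exported SplitAndTrim shown in the diff:
// comma-split the input, trim surrounding whitespace, drop empty entries.
func splitAndTrim(input string) (ret []string) {
	for _, r := range strings.Split(input, ",") {
		if r = strings.TrimSpace(r); r != "" {
			ret = append(ret, r)
		}
	}
	return ret
}

func main() {
	fmt.Println(splitAndTrim(" eth, net ,web3,, debug "))
	// Prints: [eth net web3 debug]
}
```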
@@ -913,45 +916,38 @@ func setHTTP(ctx *cli.Context, cfg *node.Config) {
|
||||
}
|
||||
|
||||
if ctx.GlobalIsSet(LegacyRPCCORSDomainFlag.Name) {
|
||||
cfg.HTTPCors = splitAndTrim(ctx.GlobalString(LegacyRPCCORSDomainFlag.Name))
|
||||
cfg.HTTPCors = SplitAndTrim(ctx.GlobalString(LegacyRPCCORSDomainFlag.Name))
|
||||
log.Warn("The flag --rpccorsdomain is deprecated and will be removed in the future, please use --http.corsdomain")
|
||||
}
|
||||
if ctx.GlobalIsSet(HTTPCORSDomainFlag.Name) {
|
||||
cfg.HTTPCors = splitAndTrim(ctx.GlobalString(HTTPCORSDomainFlag.Name))
|
||||
cfg.HTTPCors = SplitAndTrim(ctx.GlobalString(HTTPCORSDomainFlag.Name))
|
||||
}
|
||||
|
||||
if ctx.GlobalIsSet(LegacyRPCApiFlag.Name) {
|
||||
cfg.HTTPModules = splitAndTrim(ctx.GlobalString(LegacyRPCApiFlag.Name))
|
||||
cfg.HTTPModules = SplitAndTrim(ctx.GlobalString(LegacyRPCApiFlag.Name))
|
||||
log.Warn("The flag --rpcapi is deprecated and will be removed in the future, please use --http.api")
|
||||
}
|
||||
if ctx.GlobalIsSet(HTTPApiFlag.Name) {
|
||||
cfg.HTTPModules = splitAndTrim(ctx.GlobalString(HTTPApiFlag.Name))
|
||||
cfg.HTTPModules = SplitAndTrim(ctx.GlobalString(HTTPApiFlag.Name))
|
||||
}
|
||||
|
||||
if ctx.GlobalIsSet(LegacyRPCVirtualHostsFlag.Name) {
|
||||
cfg.HTTPVirtualHosts = splitAndTrim(ctx.GlobalString(LegacyRPCVirtualHostsFlag.Name))
|
||||
cfg.HTTPVirtualHosts = SplitAndTrim(ctx.GlobalString(LegacyRPCVirtualHostsFlag.Name))
|
||||
log.Warn("The flag --rpcvhosts is deprecated and will be removed in the future, please use --http.vhosts")
|
||||
}
|
||||
if ctx.GlobalIsSet(HTTPVirtualHostsFlag.Name) {
|
||||
cfg.HTTPVirtualHosts = splitAndTrim(ctx.GlobalString(HTTPVirtualHostsFlag.Name))
|
||||
cfg.HTTPVirtualHosts = SplitAndTrim(ctx.GlobalString(HTTPVirtualHostsFlag.Name))
|
||||
}
|
||||
}
|
||||
|
||||
// setGraphQL creates the GraphQL listener interface string from the set
|
||||
// command line flags, returning empty if the GraphQL endpoint is disabled.
|
||||
func setGraphQL(ctx *cli.Context, cfg *node.Config) {
|
||||
if ctx.GlobalBool(GraphQLEnabledFlag.Name) && cfg.GraphQLHost == "" {
|
||||
cfg.GraphQLHost = "127.0.0.1"
|
||||
if ctx.GlobalIsSet(GraphQLListenAddrFlag.Name) {
|
||||
cfg.GraphQLHost = ctx.GlobalString(GraphQLListenAddrFlag.Name)
|
||||
}
|
||||
}
|
||||
cfg.GraphQLPort = ctx.GlobalInt(GraphQLPortFlag.Name)
|
||||
if ctx.GlobalIsSet(GraphQLCORSDomainFlag.Name) {
|
||||
cfg.GraphQLCors = splitAndTrim(ctx.GlobalString(GraphQLCORSDomainFlag.Name))
|
||||
cfg.GraphQLCors = SplitAndTrim(ctx.GlobalString(GraphQLCORSDomainFlag.Name))
|
||||
}
|
||||
if ctx.GlobalIsSet(GraphQLVirtualHostsFlag.Name) {
|
||||
cfg.GraphQLVirtualHosts = splitAndTrim(ctx.GlobalString(GraphQLVirtualHostsFlag.Name))
|
||||
cfg.GraphQLVirtualHosts = SplitAndTrim(ctx.GlobalString(GraphQLVirtualHostsFlag.Name))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -977,19 +973,19 @@ func setWS(ctx *cli.Context, cfg *node.Config) {
|
||||
}
|
||||
|
||||
if ctx.GlobalIsSet(LegacyWSAllowedOriginsFlag.Name) {
|
||||
cfg.WSOrigins = splitAndTrim(ctx.GlobalString(LegacyWSAllowedOriginsFlag.Name))
|
||||
cfg.WSOrigins = SplitAndTrim(ctx.GlobalString(LegacyWSAllowedOriginsFlag.Name))
|
||||
log.Warn("The flag --wsorigins is deprecated and will be removed in the future, please use --ws.origins")
|
||||
}
|
||||
if ctx.GlobalIsSet(WSAllowedOriginsFlag.Name) {
|
||||
cfg.WSOrigins = splitAndTrim(ctx.GlobalString(WSAllowedOriginsFlag.Name))
|
||||
cfg.WSOrigins = SplitAndTrim(ctx.GlobalString(WSAllowedOriginsFlag.Name))
|
||||
}
|
||||
|
||||
if ctx.GlobalIsSet(LegacyWSApiFlag.Name) {
|
||||
cfg.WSModules = splitAndTrim(ctx.GlobalString(LegacyWSApiFlag.Name))
|
||||
cfg.WSModules = SplitAndTrim(ctx.GlobalString(LegacyWSApiFlag.Name))
|
||||
log.Warn("The flag --wsapi is deprecated and will be removed in the future, please use --ws.api")
|
||||
}
|
||||
if ctx.GlobalIsSet(WSApiFlag.Name) {
|
||||
cfg.WSModules = splitAndTrim(ctx.GlobalString(WSApiFlag.Name))
|
||||
cfg.WSModules = SplitAndTrim(ctx.GlobalString(WSApiFlag.Name))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1299,6 +1295,9 @@ func setGPO(ctx *cli.Context, cfg *gasprice.Config, light bool) {
|
||||
if ctx.GlobalIsSet(GpoPercentileFlag.Name) {
|
||||
cfg.Percentile = ctx.GlobalInt(GpoPercentileFlag.Name)
|
||||
}
|
||||
if ctx.GlobalIsSet(GpoMaxGasPriceFlag.Name) {
|
||||
cfg.MaxPrice = big.NewInt(ctx.GlobalInt64(GpoMaxGasPriceFlag.Name))
|
||||
}
|
||||
}
|
||||
|
||||
func setTxPool(ctx *cli.Context, cfg *core.TxPoolConfig) {
|
||||
@@ -1472,15 +1471,12 @@ func CheckExclusive(ctx *cli.Context, args ...interface{}) {
|
||||
}
|
||||
|
||||
// SetShhConfig applies shh-related command line flags to the config.
|
||||
func SetShhConfig(ctx *cli.Context, stack *node.Node, cfg *whisper.Config) {
|
||||
if ctx.GlobalIsSet(WhisperMaxMessageSizeFlag.Name) {
|
||||
cfg.MaxMessageSize = uint32(ctx.GlobalUint(WhisperMaxMessageSizeFlag.Name))
|
||||
}
|
||||
if ctx.GlobalIsSet(WhisperMinPOWFlag.Name) {
|
||||
cfg.MinimumAcceptedPOW = ctx.GlobalFloat64(WhisperMinPOWFlag.Name)
|
||||
}
|
||||
if ctx.GlobalIsSet(WhisperRestrictConnectionBetweenLightClientsFlag.Name) {
|
||||
cfg.RestrictConnectionBetweenLightClients = true
|
||||
func SetShhConfig(ctx *cli.Context, stack *node.Node) {
|
||||
if ctx.GlobalIsSet(WhisperEnabledFlag.Name) ||
|
||||
ctx.GlobalIsSet(WhisperMaxMessageSizeFlag.Name) ||
|
||||
ctx.GlobalIsSet(WhisperMinPOWFlag.Name) ||
|
||||
ctx.GlobalIsSet(WhisperRestrictConnectionBetweenLightClientsFlag.Name) {
|
||||
log.Warn("Whisper support has been deprecated and the code has been moved to github.com/ethereum/whisper")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1536,6 +1532,12 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *eth.Config) {
|
||||
if ctx.GlobalIsSet(CacheFlag.Name) || ctx.GlobalIsSet(CacheTrieFlag.Name) {
|
||||
cfg.TrieCleanCache = ctx.GlobalInt(CacheFlag.Name) * ctx.GlobalInt(CacheTrieFlag.Name) / 100
|
||||
}
|
||||
if ctx.GlobalIsSet(CacheTrieJournalFlag.Name) {
|
||||
cfg.TrieCleanCacheJournal = ctx.GlobalString(CacheTrieJournalFlag.Name)
|
||||
}
|
||||
if ctx.GlobalIsSet(CacheTrieRejournalFlag.Name) {
|
||||
cfg.TrieCleanCacheRejournal = ctx.GlobalDuration(CacheTrieRejournalFlag.Name)
|
||||
}
|
||||
if ctx.GlobalIsSet(CacheFlag.Name) || ctx.GlobalIsSet(CacheGCFlag.Name) {
|
||||
cfg.TrieDirtyCache = ctx.GlobalInt(CacheFlag.Name) * ctx.GlobalInt(CacheGCFlag.Name) / 100
|
||||
}
|
||||
@@ -1543,6 +1545,7 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *eth.Config) {
|
||||
cfg.SnapshotCache = ctx.GlobalInt(CacheFlag.Name) * ctx.GlobalInt(CacheSnapshotFlag.Name) / 100
|
||||
}
|
||||
if !ctx.GlobalIsSet(SnapshotFlag.Name) {
|
||||
cfg.TrieCleanCache += cfg.SnapshotCache
|
||||
cfg.SnapshotCache = 0 // Disabled
|
||||
}
|
||||
if ctx.GlobalIsSet(DocRootFlag.Name) {
|
||||
@@ -1560,23 +1563,23 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *eth.Config) {
|
||||
if ctx.GlobalIsSet(EVMInterpreterFlag.Name) {
|
||||
cfg.EVMInterpreter = ctx.GlobalString(EVMInterpreterFlag.Name)
|
||||
}
|
||||
if ctx.GlobalIsSet(RPCGlobalGasCap.Name) {
|
||||
cfg.RPCGasCap = ctx.GlobalUint64(RPCGlobalGasCap.Name)
|
||||
if ctx.GlobalIsSet(RPCGlobalGasCapFlag.Name) {
|
||||
cfg.RPCGasCap = ctx.GlobalUint64(RPCGlobalGasCapFlag.Name)
|
||||
}
|
||||
if cfg.RPCGasCap != 0 {
|
||||
log.Info("Set global gas cap", "cap", cfg.RPCGasCap)
|
||||
} else {
|
||||
log.Info("Global gas cap disabled")
|
||||
}
|
||||
if ctx.GlobalIsSet(RPCGlobalTxFeeCap.Name) {
|
||||
cfg.RPCTxFeeCap = ctx.GlobalFloat64(RPCGlobalTxFeeCap.Name)
|
||||
if ctx.GlobalIsSet(RPCGlobalTxFeeCapFlag.Name) {
|
||||
cfg.RPCTxFeeCap = ctx.GlobalFloat64(RPCGlobalTxFeeCapFlag.Name)
|
||||
}
|
||||
if ctx.GlobalIsSet(DNSDiscoveryFlag.Name) {
|
||||
urls := ctx.GlobalString(DNSDiscoveryFlag.Name)
|
||||
if urls == "" {
|
||||
cfg.DiscoveryURLs = []string{}
|
||||
} else {
|
||||
cfg.DiscoveryURLs = splitAndTrim(urls)
|
||||
cfg.DiscoveryURLs = SplitAndTrim(urls)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1587,19 +1590,19 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *eth.Config) {
|
||||
cfg.NetworkId = 3
|
||||
}
|
||||
cfg.Genesis = core.DefaultRopstenGenesisBlock()
|
||||
setDNSDiscoveryDefaults(cfg, params.RopstenGenesisHash)
|
||||
SetDNSDiscoveryDefaults(cfg, params.RopstenGenesisHash)
|
||||
case ctx.GlobalBool(RinkebyFlag.Name):
|
||||
if !ctx.GlobalIsSet(NetworkIdFlag.Name) {
|
||||
cfg.NetworkId = 4
|
||||
}
|
||||
cfg.Genesis = core.DefaultRinkebyGenesisBlock()
|
||||
setDNSDiscoveryDefaults(cfg, params.RinkebyGenesisHash)
|
||||
SetDNSDiscoveryDefaults(cfg, params.RinkebyGenesisHash)
|
||||
case ctx.GlobalBool(GoerliFlag.Name):
|
||||
if !ctx.GlobalIsSet(NetworkIdFlag.Name) {
|
||||
cfg.NetworkId = 5
|
||||
}
|
||||
cfg.Genesis = core.DefaultGoerliGenesisBlock()
|
||||
setDNSDiscoveryDefaults(cfg, params.GoerliGenesisHash)
|
||||
SetDNSDiscoveryDefaults(cfg, params.GoerliGenesisHash)
|
||||
case ctx.GlobalBool(YoloV1Flag.Name):
|
||||
if !ctx.GlobalIsSet(NetworkIdFlag.Name) {
|
||||
cfg.NetworkId = 133519467574833 // "yolov1"
|
||||
@@ -1611,36 +1614,56 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *eth.Config) {
|
||||
}
|
||||
// Create new developer account or reuse existing one
|
||||
var (
|
||||
developer accounts.Account
|
||||
err error
|
||||
developer accounts.Account
|
||||
passphrase string
|
||||
err error
|
||||
)
|
||||
if accs := ks.Accounts(); len(accs) > 0 {
|
||||
if list := MakePasswordList(ctx); len(list) > 0 {
|
||||
// Just take the first value. Although the function can return multiple values,
// and some callers iterate through them as unlock attempts, that doesn't make
// sense here, where we are only concerned with a single account.
|
||||
passphrase = list[0]
|
||||
}
|
||||
// setEtherbase has been called above, configuring the miner address from command line flags.
|
||||
if cfg.Miner.Etherbase != (common.Address{}) {
|
||||
developer = accounts.Account{Address: cfg.Miner.Etherbase}
|
||||
} else if accs := ks.Accounts(); len(accs) > 0 {
|
||||
developer = ks.Accounts()[0]
|
||||
} else {
|
||||
developer, err = ks.NewAccount("")
|
||||
developer, err = ks.NewAccount(passphrase)
|
||||
if err != nil {
|
||||
Fatalf("Failed to create developer account: %v", err)
|
||||
}
|
||||
}
|
||||
if err := ks.Unlock(developer, ""); err != nil {
|
||||
if err := ks.Unlock(developer, passphrase); err != nil {
|
||||
Fatalf("Failed to unlock developer account: %v", err)
|
||||
}
|
||||
log.Info("Using developer account", "address", developer.Address)
|
||||
|
||||
// Create a new developer genesis block or reuse existing one
|
||||
cfg.Genesis = core.DeveloperGenesisBlock(uint64(ctx.GlobalInt(DeveloperPeriodFlag.Name)), developer.Address)
|
||||
if ctx.GlobalIsSet(DataDirFlag.Name) {
|
||||
// Check if we have an already initialized chain and fall back to
|
||||
// that if so. Otherwise we need to generate a new genesis spec.
|
||||
chaindb := MakeChainDatabase(ctx, stack)
|
||||
if rawdb.ReadCanonicalHash(chaindb, 0) != (common.Hash{}) {
|
||||
cfg.Genesis = nil // fallback to db content
|
||||
}
|
||||
chaindb.Close()
|
||||
}
|
||||
if !ctx.GlobalIsSet(MinerGasPriceFlag.Name) && !ctx.GlobalIsSet(LegacyMinerGasPriceFlag.Name) {
|
||||
cfg.Miner.GasPrice = big.NewInt(1)
|
||||
}
|
||||
default:
|
||||
if cfg.NetworkId == 1 {
|
||||
setDNSDiscoveryDefaults(cfg, params.MainnetGenesisHash)
|
||||
SetDNSDiscoveryDefaults(cfg, params.MainnetGenesisHash)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// setDNSDiscoveryDefaults configures DNS discovery with the given URL if
|
||||
// SetDNSDiscoveryDefaults configures DNS discovery with the given URL if
|
||||
// no URLs are set.
|
||||
func setDNSDiscoveryDefaults(cfg *eth.Config, genesis common.Hash) {
|
||||
func SetDNSDiscoveryDefaults(cfg *eth.Config, genesis common.Hash) {
|
||||
if cfg.DiscoveryURLs != nil {
|
||||
return // already set through flags/config
|
||||
}
|
||||
@@ -1655,70 +1678,39 @@ func setDNSDiscoveryDefaults(cfg *eth.Config, genesis common.Hash) {
|
||||
}
|
||||
|
||||
// RegisterEthService adds an Ethereum client to the stack.
func RegisterEthService(stack *node.Node, cfg *eth.Config) {
var err error
func RegisterEthService(stack *node.Node, cfg *eth.Config) ethapi.Backend {
if cfg.SyncMode == downloader.LightSync {
err = stack.Register(func(ctx *node.ServiceContext) (node.Service, error) {
return les.New(ctx, cfg)
})
backend, err := les.New(stack, cfg)
if err != nil {
Fatalf("Failed to register the Ethereum service: %v", err)
}
return backend.ApiBackend
} else {
err = stack.Register(func(ctx *node.ServiceContext) (node.Service, error) {
fullNode, err := eth.New(ctx, cfg)
if fullNode != nil && cfg.LightServ > 0 {
ls, _ := les.NewLesServer(fullNode, cfg)
fullNode.AddLesServer(ls)
backend, err := eth.New(stack, cfg)
if err != nil {
Fatalf("Failed to register the Ethereum service: %v", err)
}
if cfg.LightServ > 0 {
_, err := les.NewLesServer(stack, backend, cfg)
if err != nil {
Fatalf("Failed to create the LES server: %v", err)
}
return fullNode, err
})
}
if err != nil {
Fatalf("Failed to register the Ethereum service: %v", err)
}
}

// RegisterShhService configures Whisper and adds it to the given node.
func RegisterShhService(stack *node.Node, cfg *whisper.Config) {
if err := stack.Register(func(n *node.ServiceContext) (node.Service, error) {
return whisper.New(cfg), nil
}); err != nil {
Fatalf("Failed to register the Whisper service: %v", err)
}
return backend.APIBackend
}
}

// RegisterEthStatsService configures the Ethereum Stats daemon and adds it to
// the given node.
func RegisterEthStatsService(stack *node.Node, url string) {
if err := stack.Register(func(ctx *node.ServiceContext) (node.Service, error) {
// Retrieve both eth and les services
var ethServ *eth.Ethereum
ctx.Service(&ethServ)

var lesServ *les.LightEthereum
ctx.Service(&lesServ)

// Let ethstats use whichever is not nil
return ethstats.New(url, ethServ, lesServ)
}); err != nil {
func RegisterEthStatsService(stack *node.Node, backend ethapi.Backend, url string) {
if err := ethstats.New(stack, backend, backend.Engine(), url); err != nil {
Fatalf("Failed to register the Ethereum Stats service: %v", err)
}
}

// RegisterGraphQLService is a utility function to construct a new service and register it against a node.
func RegisterGraphQLService(stack *node.Node, endpoint string, cors, vhosts []string, timeouts rpc.HTTPTimeouts) {
if err := stack.Register(func(ctx *node.ServiceContext) (node.Service, error) {
// Try to construct the GraphQL service backed by a full node
var ethServ *eth.Ethereum
if err := ctx.Service(&ethServ); err == nil {
return graphql.New(ethServ.APIBackend, endpoint, cors, vhosts, timeouts)
}
// Try to construct the GraphQL service backed by a light node
var lesServ *les.LightEthereum
if err := ctx.Service(&lesServ); err == nil {
return graphql.New(lesServ.ApiBackend, endpoint, cors, vhosts, timeouts)
}
// Well, this should not have happened, bail out
return nil, errors.New("no Ethereum service")
}); err != nil {
func RegisterGraphQLService(stack *node.Node, backend ethapi.Backend, cfg node.Config) {
if err := graphql.New(stack, backend, cfg.GraphQLCors, cfg.GraphQLVirtualHosts); err != nil {
Fatalf("Failed to register the GraphQL service: %v", err)
}
}
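With the registration helpers now building services directly on the node and returning an ethapi.Backend, the caller wires everything together roughly as below. A minimal sketch assuming the default configs; the ethstats URL is a placeholder and most error handling is trimmed:

```go
package main

import (
	"github.com/ethereum/go-ethereum/cmd/utils"
	"github.com/ethereum/go-ethereum/eth"
	"github.com/ethereum/go-ethereum/node"
)

func main() {
	// The node is created first; services no longer go through ServiceContext
	// constructors but are registered against the stack directly.
	stack, err := node.New(&node.DefaultConfig)
	if err != nil {
		utils.Fatalf("Failed to create the protocol stack: %v", err)
	}
	ethCfg := eth.DefaultConfig

	// RegisterEthService now hands back a shared ethapi.Backend...
	backend := utils.RegisterEthService(stack, &ethCfg)

	// ...which the auxiliary services consume instead of digging the eth/les
	// service out of a ServiceContext themselves.
	utils.RegisterEthStatsService(stack, backend, "mynode:secret@ethstats.example.org")
	utils.RegisterGraphQLService(stack, backend, node.DefaultConfig)

	if err := stack.Start(); err != nil {
		utils.Fatalf("Error starting protocol stack: %v", err)
	}
	defer stack.Close()
	stack.Wait()
}
```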
|
@@ -89,6 +89,8 @@ var (
|
||||
Name: "testnet",
|
||||
Usage: "Pre-configured test network (Deprecated: Please choose one of --goerli, --rinkeby, or --ropsten.)",
|
||||
}
|
||||
|
||||
// (Deprecated May 2020, shown in aliased flags section)
|
||||
LegacyRPCEnabledFlag = cli.BoolFlag{
|
||||
Name: "rpc",
|
||||
Usage: "Enable the HTTP-RPC server (deprecated, use --http)",
|
||||
@@ -158,6 +160,17 @@ var (
|
||||
Usage: "Comma separated enode URLs for P2P v5 discovery bootstrap (light server, light nodes) (deprecated, use --bootnodes)",
|
||||
Value: "",
|
||||
}
|
||||
|
||||
// (Deprecated July 2020, shown in aliased flags section)
|
||||
LegacyGraphQLListenAddrFlag = cli.StringFlag{
|
||||
Name: "graphql.addr",
|
||||
Usage: "GraphQL server listening interface (deprecated, graphql can only be enabled on the HTTP-RPC server endpoint, use --graphql)",
|
||||
}
|
||||
LegacyGraphQLPortFlag = cli.IntFlag{
|
||||
Name: "graphql.port",
|
||||
Usage: "GraphQL server listening port (deprecated, graphql can only be enabled on the HTTP-RPC server endpoint, use --graphql)",
|
||||
Value: node.DefaultHTTPPort,
|
||||
}
|
||||
)
|
||||
|
||||
// showDeprecated displays deprecated flags that will be soon removed from the codebase.
|
||||
|
@@ -1,774 +0,0 @@
|
||||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of go-ethereum.
|
||||
//
|
||||
// go-ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// go-ethereum is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
// This is a simple Whisper node. It could be used as a stand-alone bootstrap node.
|
||||
// Also, could be used for different test and diagnostics purposes.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"crypto/ecdsa"
|
||||
crand "crypto/rand"
|
||||
"crypto/sha512"
|
||||
"encoding/binary"
|
||||
"encoding/hex"
|
||||
"flag"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/cmd/utils"
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/ethereum/go-ethereum/console/prompt"
|
||||
"github.com/ethereum/go-ethereum/crypto"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"github.com/ethereum/go-ethereum/p2p"
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
"github.com/ethereum/go-ethereum/p2p/nat"
|
||||
"github.com/ethereum/go-ethereum/whisper/mailserver"
|
||||
whisper "github.com/ethereum/go-ethereum/whisper/whisperv6"
|
||||
"golang.org/x/crypto/pbkdf2"
|
||||
)
|
||||
|
||||
const quitCommand = "~Q"
|
||||
const entropySize = 32
|
||||
|
||||
// singletons
|
||||
var (
|
||||
server *p2p.Server
|
||||
shh *whisper.Whisper
|
||||
done chan struct{}
|
||||
mailServer mailserver.WMailServer
|
||||
entropy [entropySize]byte
|
||||
|
||||
input = bufio.NewReader(os.Stdin)
|
||||
)
|
||||
|
||||
// encryption
|
||||
var (
|
||||
symKey []byte
|
||||
pub *ecdsa.PublicKey
|
||||
asymKey *ecdsa.PrivateKey
|
||||
nodeid *ecdsa.PrivateKey
|
||||
topic whisper.TopicType
|
||||
|
||||
asymKeyID string
|
||||
asymFilterID string
|
||||
symFilterID string
|
||||
symPass string
|
||||
msPassword string
|
||||
)
|
||||
|
||||
// cmd arguments
|
||||
var (
|
||||
bootstrapMode = flag.Bool("standalone", false, "bootstrap node: don't initiate connection to peers, just wait for incoming connections")
|
||||
forwarderMode = flag.Bool("forwarder", false, "forwarder mode: only forward messages, neither encrypt nor decrypt messages")
|
||||
mailServerMode = flag.Bool("mailserver", false, "mail server mode: delivers expired messages on demand")
|
||||
requestMail = flag.Bool("mailclient", false, "request expired messages from the bootstrap server")
|
||||
asymmetricMode = flag.Bool("asym", false, "use asymmetric encryption")
|
||||
generateKey = flag.Bool("generatekey", false, "generate and show the private key")
|
||||
fileExMode = flag.Bool("fileexchange", false, "file exchange mode")
|
||||
fileReader = flag.Bool("filereader", false, "load and decrypt messages saved as files, display as plain text")
|
||||
testMode = flag.Bool("test", false, "use of predefined parameters for diagnostics (password, etc.)")
|
||||
echoMode = flag.Bool("echo", false, "echo mode: prints some arguments for diagnostics")
|
||||
|
||||
argVerbosity = flag.Int("verbosity", int(log.LvlError), "log verbosity level")
|
||||
argTTL = flag.Uint("ttl", 30, "time-to-live for messages in seconds")
|
||||
argWorkTime = flag.Uint("work", 5, "work time in seconds")
|
||||
argMaxSize = flag.Uint("maxsize", uint(whisper.DefaultMaxMessageSize), "max size of message")
|
||||
argPoW = flag.Float64("pow", whisper.DefaultMinimumPoW, "PoW for normal messages in float format (e.g. 2.7)")
|
||||
argServerPoW = flag.Float64("mspow", whisper.DefaultMinimumPoW, "PoW requirement for Mail Server request")
|
||||
|
||||
argIP = flag.String("ip", "", "IP address and port of this node (e.g. 127.0.0.1:30303)")
|
||||
argPub = flag.String("pub", "", "public key for asymmetric encryption")
|
||||
argDBPath = flag.String("dbpath", "", "path to the server's DB directory")
|
||||
argIDFile = flag.String("idfile", "", "file name with node id (private key)")
|
||||
argEnode = flag.String("boot", "", "bootstrap node you want to connect to (e.g. enode://e454......08d50@52.176.211.200:16428)")
|
||||
argTopic = flag.String("topic", "", "topic in hexadecimal format (e.g. 70a4beef)")
|
||||
argSaveDir = flag.String("savedir", "", "directory where all incoming messages will be saved as files")
|
||||
)
|
||||
|
||||
func main() {
|
||||
processArgs()
|
||||
initialize()
|
||||
run()
|
||||
shutdown()
|
||||
}
|
||||
|
||||
func processArgs() {
|
||||
flag.Parse()
|
||||
|
||||
if len(*argIDFile) > 0 {
|
||||
var err error
|
||||
nodeid, err = crypto.LoadECDSA(*argIDFile)
|
||||
if err != nil {
|
||||
utils.Fatalf("Failed to load file [%s]: %s.", *argIDFile, err)
|
||||
}
|
||||
}
|
||||
|
||||
const enodePrefix = "enode://"
|
||||
if len(*argEnode) > 0 {
|
||||
if (*argEnode)[:len(enodePrefix)] != enodePrefix {
|
||||
*argEnode = enodePrefix + *argEnode
|
||||
}
|
||||
}
|
||||
|
||||
if len(*argTopic) > 0 {
|
||||
x, err := hex.DecodeString(*argTopic)
|
||||
if err != nil {
|
||||
utils.Fatalf("Failed to parse the topic: %s", err)
|
||||
}
|
||||
topic = whisper.BytesToTopic(x)
|
||||
}
|
||||
|
||||
if *asymmetricMode && len(*argPub) > 0 {
|
||||
var err error
|
||||
if pub, err = crypto.UnmarshalPubkey(common.FromHex(*argPub)); err != nil {
|
||||
utils.Fatalf("invalid public key")
|
||||
}
|
||||
}
|
||||
|
||||
if len(*argSaveDir) > 0 {
|
||||
if _, err := os.Stat(*argSaveDir); os.IsNotExist(err) {
|
||||
utils.Fatalf("Download directory '%s' does not exist", *argSaveDir)
|
||||
}
|
||||
} else if *fileExMode {
|
||||
utils.Fatalf("Parameter 'savedir' is mandatory for file exchange mode")
|
||||
}
|
||||
|
||||
if *echoMode {
|
||||
echo()
|
||||
}
|
||||
}
|
||||
|
||||
func echo() {
|
||||
fmt.Printf("ttl = %d \n", *argTTL)
|
||||
fmt.Printf("workTime = %d \n", *argWorkTime)
|
||||
fmt.Printf("pow = %f \n", *argPoW)
|
||||
fmt.Printf("mspow = %f \n", *argServerPoW)
|
||||
fmt.Printf("ip = %s \n", *argIP)
|
||||
fmt.Printf("pub = %s \n", hexutil.Encode(crypto.FromECDSAPub(pub)))
|
||||
fmt.Printf("idfile = %s \n", *argIDFile)
|
||||
fmt.Printf("dbpath = %s \n", *argDBPath)
|
||||
fmt.Printf("boot = %s \n", *argEnode)
|
||||
}
|
||||
|
||||
func initialize() {
|
||||
log.Root().SetHandler(log.LvlFilterHandler(log.Lvl(*argVerbosity), log.StreamHandler(os.Stderr, log.TerminalFormat(false))))
|
||||
|
||||
done = make(chan struct{})
|
||||
var peers []*enode.Node
|
||||
var err error
|
||||
|
||||
if *generateKey {
|
||||
key, err := crypto.GenerateKey()
|
||||
if err != nil {
|
||||
utils.Fatalf("Failed to generate private key: %s", err)
|
||||
}
|
||||
k := hex.EncodeToString(crypto.FromECDSA(key))
|
||||
fmt.Printf("Random private key: %s \n", k)
|
||||
os.Exit(0)
|
||||
}
|
||||
|
||||
if *testMode {
|
||||
symPass = "wwww" // ascii code: 0x77777777
|
||||
msPassword = "wwww"
|
||||
}
|
||||
|
||||
if *bootstrapMode {
|
||||
if len(*argIP) == 0 {
|
||||
argIP = scanLineA("Please enter your IP and port (e.g. 127.0.0.1:30348): ")
|
||||
}
|
||||
} else if *fileReader {
|
||||
*bootstrapMode = true
|
||||
} else {
|
||||
if len(*argEnode) == 0 {
|
||||
argEnode = scanLineA("Please enter the peer's enode: ")
|
||||
}
|
||||
peer := enode.MustParse(*argEnode)
|
||||
peers = append(peers, peer)
|
||||
}
|
||||
|
||||
if *mailServerMode {
|
||||
if len(msPassword) == 0 {
|
||||
msPassword, err = prompt.Stdin.PromptPassword("Please enter the Mail Server password: ")
|
||||
if err != nil {
|
||||
utils.Fatalf("Failed to read Mail Server password: %s", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
cfg := &whisper.Config{
|
||||
MaxMessageSize: uint32(*argMaxSize),
|
||||
MinimumAcceptedPOW: *argPoW,
|
||||
}
|
||||
|
||||
shh = whisper.New(cfg)
|
||||
|
||||
if *argPoW != whisper.DefaultMinimumPoW {
|
||||
err := shh.SetMinimumPoW(*argPoW)
|
||||
if err != nil {
|
||||
utils.Fatalf("Failed to set PoW: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
if uint32(*argMaxSize) != whisper.DefaultMaxMessageSize {
|
||||
err := shh.SetMaxMessageSize(uint32(*argMaxSize))
|
||||
if err != nil {
|
||||
utils.Fatalf("Failed to set max message size: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
asymKeyID, err = shh.NewKeyPair()
|
||||
if err != nil {
|
||||
utils.Fatalf("Failed to generate a new key pair: %s", err)
|
||||
}
|
||||
|
||||
asymKey, err = shh.GetPrivateKey(asymKeyID)
|
||||
if err != nil {
|
||||
utils.Fatalf("Failed to retrieve a new key pair: %s", err)
|
||||
}
|
||||
|
||||
if nodeid == nil {
|
||||
tmpID, err := shh.NewKeyPair()
|
||||
if err != nil {
|
||||
utils.Fatalf("Failed to generate a new key pair: %s", err)
|
||||
}
|
||||
|
||||
nodeid, err = shh.GetPrivateKey(tmpID)
|
||||
if err != nil {
|
||||
utils.Fatalf("Failed to retrieve a new key pair: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
maxPeers := 80
|
||||
if *bootstrapMode {
|
||||
maxPeers = 800
|
||||
}
|
||||
|
||||
_, err = crand.Read(entropy[:])
|
||||
if err != nil {
|
||||
utils.Fatalf("crypto/rand failed: %s", err)
|
||||
}
|
||||
|
||||
if *mailServerMode {
|
||||
shh.RegisterServer(&mailServer)
|
||||
if err := mailServer.Init(shh, *argDBPath, msPassword, *argServerPoW); err != nil {
|
||||
utils.Fatalf("Failed to init MailServer: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
server = &p2p.Server{
|
||||
Config: p2p.Config{
|
||||
PrivateKey: nodeid,
|
||||
MaxPeers: maxPeers,
|
||||
Name: common.MakeName("wnode", "6.0"),
|
||||
Protocols: shh.Protocols(),
|
||||
ListenAddr: *argIP,
|
||||
NAT: nat.Any(),
|
||||
BootstrapNodes: peers,
|
||||
StaticNodes: peers,
|
||||
TrustedNodes: peers,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func startServer() error {
|
||||
err := server.Start()
|
||||
if err != nil {
|
||||
fmt.Printf("Failed to start Whisper peer: %s.", err)
|
||||
return err
|
||||
}
|
||||
|
||||
fmt.Printf("my public key: %s \n", hexutil.Encode(crypto.FromECDSAPub(&asymKey.PublicKey)))
|
||||
fmt.Println(server.NodeInfo().Enode)
|
||||
|
||||
if *bootstrapMode {
|
||||
configureNode()
|
||||
fmt.Println("Bootstrap Whisper node started")
|
||||
} else {
|
||||
fmt.Println("Whisper node started")
|
||||
// first see if we can establish connection, then ask for user input
|
||||
waitForConnection(true)
|
||||
configureNode()
|
||||
}
|
||||
|
||||
if *fileExMode {
|
||||
fmt.Printf("Please type the file name to be sent. To quit type: '%s'\n", quitCommand)
|
||||
} else if *fileReader {
|
||||
fmt.Printf("Please type the file name to be decrypted. To quit type: '%s'\n", quitCommand)
|
||||
} else if !*forwarderMode {
|
||||
fmt.Printf("Please type the message. To quit type: '%s'\n", quitCommand)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func configureNode() {
|
||||
var err error
|
||||
var p2pAccept bool
|
||||
|
||||
if *forwarderMode {
|
||||
return
|
||||
}
|
||||
|
||||
if *asymmetricMode {
|
||||
if len(*argPub) == 0 {
|
||||
s := scanLine("Please enter the peer's public key: ")
|
||||
b := common.FromHex(s)
|
||||
if b == nil {
|
||||
utils.Fatalf("Error: can not convert hexadecimal string")
|
||||
}
|
||||
if pub, err = crypto.UnmarshalPubkey(b); err != nil {
|
||||
utils.Fatalf("Error: invalid peer public key")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if *requestMail {
|
||||
p2pAccept = true
|
||||
if len(msPassword) == 0 {
|
||||
msPassword, err = prompt.Stdin.PromptPassword("Please enter the Mail Server password: ")
|
||||
if err != nil {
|
||||
utils.Fatalf("Failed to read Mail Server password: %s", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if !*asymmetricMode && !*forwarderMode {
|
||||
if len(symPass) == 0 {
|
||||
symPass, err = prompt.Stdin.PromptPassword("Please enter the password for symmetric encryption: ")
|
||||
if err != nil {
|
||||
utils.Fatalf("Failed to read password: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
symKeyID, err := shh.AddSymKeyFromPassword(symPass)
|
||||
if err != nil {
|
||||
utils.Fatalf("Failed to create symmetric key: %s", err)
|
||||
}
|
||||
symKey, err = shh.GetSymKey(symKeyID)
|
||||
if err != nil {
|
||||
utils.Fatalf("Failed to save symmetric key: %s", err)
|
||||
}
|
||||
if len(*argTopic) == 0 {
|
||||
generateTopic([]byte(symPass))
|
||||
}
|
||||
|
||||
fmt.Printf("Filter is configured for the topic: %x \n", topic)
|
||||
}
|
||||
|
||||
if *mailServerMode {
|
||||
if len(*argDBPath) == 0 {
|
||||
argDBPath = scanLineA("Please enter the path to DB file: ")
|
||||
}
|
||||
}
|
||||
|
||||
symFilter := whisper.Filter{
|
||||
KeySym: symKey,
|
||||
Topics: [][]byte{topic[:]},
|
||||
AllowP2P: p2pAccept,
|
||||
}
|
||||
symFilterID, err = shh.Subscribe(&symFilter)
|
||||
if err != nil {
|
||||
utils.Fatalf("Failed to install filter: %s", err)
|
||||
}
|
||||
|
||||
asymFilter := whisper.Filter{
|
||||
KeyAsym: asymKey,
|
||||
Topics: [][]byte{topic[:]},
|
||||
AllowP2P: p2pAccept,
|
||||
}
|
||||
asymFilterID, err = shh.Subscribe(&asymFilter)
|
||||
if err != nil {
|
||||
utils.Fatalf("Failed to install filter: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
func generateTopic(password []byte) {
|
||||
x := pbkdf2.Key(password, password, 4096, 128, sha512.New)
|
||||
for i := 0; i < len(x); i++ {
|
||||
topic[i%whisper.TopicLength] ^= x[i]
|
||||
}
|
||||
}
|
||||
|
||||
func waitForConnection(timeout bool) {
|
||||
var cnt int
|
||||
var connected bool
|
||||
for !connected {
|
||||
time.Sleep(time.Millisecond * 50)
|
||||
connected = server.PeerCount() > 0
|
||||
if timeout {
|
||||
cnt++
|
||||
if cnt > 1000 {
|
||||
utils.Fatalf("Timeout expired, failed to connect")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fmt.Println("Connected to peer.")
|
||||
}
|
||||
|
||||
func run() {
|
||||
err := startServer()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
defer server.Stop()
|
||||
shh.Start(nil)
|
||||
defer shh.Stop()
|
||||
|
||||
if !*forwarderMode {
|
||||
go messageLoop()
|
||||
}
|
||||
|
||||
if *requestMail {
|
||||
requestExpiredMessagesLoop()
|
||||
} else if *fileExMode {
|
||||
sendFilesLoop()
|
||||
} else if *fileReader {
|
||||
fileReaderLoop()
|
||||
} else {
|
||||
sendLoop()
|
||||
}
|
||||
}
|
||||
|
||||
func shutdown() {
|
||||
close(done)
|
||||
mailServer.Close()
|
||||
}
|
||||
|
||||
func sendLoop() {
|
||||
for {
|
||||
s := scanLine("")
|
||||
if s == quitCommand {
|
||||
fmt.Println("Quit command received")
|
||||
return
|
||||
}
|
||||
sendMsg([]byte(s))
|
||||
if *asymmetricMode {
|
||||
// print your own message for convenience,
|
||||
// because in asymmetric mode it is impossible to decrypt it
|
||||
timestamp := time.Now().Unix()
|
||||
from := crypto.PubkeyToAddress(asymKey.PublicKey)
|
||||
fmt.Printf("\n%d <%x>: %s\n", timestamp, from, s)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func sendFilesLoop() {
|
||||
for {
|
||||
s := scanLine("")
|
||||
if s == quitCommand {
|
||||
fmt.Println("Quit command received")
|
||||
return
|
||||
}
|
||||
b, err := ioutil.ReadFile(s)
|
||||
if err != nil {
|
||||
fmt.Printf(">>> Error: %s \n", err)
|
||||
} else {
|
||||
h := sendMsg(b)
|
||||
if (h == common.Hash{}) {
|
||||
fmt.Printf(">>> Error: message was not sent \n")
|
||||
} else {
|
||||
timestamp := time.Now().Unix()
|
||||
from := crypto.PubkeyToAddress(asymKey.PublicKey)
|
||||
fmt.Printf("\n%d <%x>: sent message with hash %x\n", timestamp, from, h)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func fileReaderLoop() {
|
||||
watcher1 := shh.GetFilter(symFilterID)
|
||||
watcher2 := shh.GetFilter(asymFilterID)
|
||||
if watcher1 == nil && watcher2 == nil {
|
||||
fmt.Println("Error: neither symmetric nor asymmetric filter is installed")
|
||||
return
|
||||
}
|
||||
|
||||
for {
|
||||
s := scanLine("")
|
||||
if s == quitCommand {
|
||||
fmt.Println("Quit command received")
|
||||
return
|
||||
}
|
||||
raw, err := ioutil.ReadFile(s)
|
||||
if err != nil {
|
||||
fmt.Printf(">>> Error: %s \n", err)
|
||||
} else {
|
||||
env := whisper.Envelope{Data: raw} // the topic is zero
|
||||
msg := env.Open(watcher1) // force-open envelope regardless of the topic
|
||||
if msg == nil {
|
||||
msg = env.Open(watcher2)
|
||||
}
|
||||
if msg == nil {
|
||||
fmt.Printf(">>> Error: failed to decrypt the message \n")
|
||||
} else {
|
||||
printMessageInfo(msg)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func scanLine(prompt string) string {
|
||||
if len(prompt) > 0 {
|
||||
fmt.Print(prompt)
|
||||
}
|
||||
txt, err := input.ReadString('\n')
|
||||
if err != nil {
|
||||
utils.Fatalf("input error: %s", err)
|
||||
}
|
||||
txt = strings.TrimRight(txt, "\n\r")
|
||||
return txt
|
||||
}
|
||||
|
||||
func scanLineA(prompt string) *string {
|
||||
s := scanLine(prompt)
|
||||
return &s
|
||||
}
|
||||
|
||||
func scanUint(prompt string) uint32 {
|
||||
s := scanLine(prompt)
|
||||
i, err := strconv.Atoi(s)
|
||||
if err != nil {
|
||||
utils.Fatalf("Failed to parse the time limit: %s", err)
|
||||
}
|
||||
return uint32(i)
|
||||
}
|
||||
|
||||
func sendMsg(payload []byte) common.Hash {
|
||||
params := whisper.MessageParams{
|
||||
Src: asymKey,
|
||||
Dst: pub,
|
||||
KeySym: symKey,
|
||||
Payload: payload,
|
||||
Topic: topic,
|
||||
TTL: uint32(*argTTL),
|
||||
PoW: *argPoW,
|
||||
WorkTime: uint32(*argWorkTime),
|
||||
}
|
||||
|
||||
msg, err := whisper.NewSentMessage(¶ms)
|
||||
if err != nil {
|
||||
utils.Fatalf("failed to create new message: %s", err)
|
||||
}
|
||||
|
||||
envelope, err := msg.Wrap(¶ms)
|
||||
if err != nil {
|
||||
fmt.Printf("failed to seal message: %v \n", err)
|
||||
return common.Hash{}
|
||||
}
|
||||
|
||||
err = shh.Send(envelope)
|
||||
if err != nil {
|
||||
fmt.Printf("failed to send message: %v \n", err)
|
||||
return common.Hash{}
|
||||
}
|
||||
|
||||
return envelope.Hash()
|
||||
}
|
||||
|
||||
func messageLoop() {
|
||||
sf := shh.GetFilter(symFilterID)
|
||||
if sf == nil {
|
||||
utils.Fatalf("symmetric filter is not installed")
|
||||
}
|
||||
|
||||
af := shh.GetFilter(asymFilterID)
|
||||
if af == nil {
|
||||
utils.Fatalf("asymmetric filter is not installed")
|
||||
}
|
||||
|
||||
ticker := time.NewTicker(time.Millisecond * 50)
|
||||
defer ticker.Stop()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-ticker.C:
|
||||
m1 := sf.Retrieve()
|
||||
m2 := af.Retrieve()
|
||||
messages := append(m1, m2...)
|
||||
for _, msg := range messages {
|
||||
reportedOnce := false
|
||||
if !*fileExMode && len(msg.Payload) <= 2048 {
|
||||
printMessageInfo(msg)
|
||||
reportedOnce = true
|
||||
}
|
||||
|
||||
// All messages are saved upon specifying argSaveDir.
|
||||
// fileExMode only specifies how messages are displayed on the console after they are saved.
|
||||
// if fileExMode == true, only the hashes are displayed, since messages might be too big.
|
||||
if len(*argSaveDir) > 0 {
|
||||
writeMessageToFile(*argSaveDir, msg, !reportedOnce)
|
||||
}
|
||||
}
|
||||
case <-done:
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func printMessageInfo(msg *whisper.ReceivedMessage) {
|
||||
timestamp := fmt.Sprintf("%d", msg.Sent) // unix timestamp for diagnostics
|
||||
text := string(msg.Payload)
|
||||
|
||||
var address common.Address
|
||||
if msg.Src != nil {
|
||||
address = crypto.PubkeyToAddress(*msg.Src)
|
||||
}
|
||||
|
||||
if whisper.IsPubKeyEqual(msg.Src, &asymKey.PublicKey) {
|
||||
fmt.Printf("\n%s <%x>: %s\n", timestamp, address, text) // message from myself
|
||||
} else {
|
||||
fmt.Printf("\n%s [%x]: %s\n", timestamp, address, text) // message from a peer
|
||||
}
|
||||
}
|
||||
|
||||
func writeMessageToFile(dir string, msg *whisper.ReceivedMessage, show bool) {
|
||||
if len(dir) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
timestamp := fmt.Sprintf("%d", msg.Sent)
|
||||
name := fmt.Sprintf("%x", msg.EnvelopeHash)
|
||||
|
||||
var address common.Address
|
||||
if msg.Src != nil {
|
||||
address = crypto.PubkeyToAddress(*msg.Src)
|
||||
}
|
||||
|
||||
env := shh.GetEnvelope(msg.EnvelopeHash)
|
||||
if env == nil {
|
||||
fmt.Printf("\nUnexpected error: envelope not found: %x\n", msg.EnvelopeHash)
|
||||
return
|
||||
}
|
||||
|
||||
// This is sample code; uncomment it if you don't want to save your own messages.
|
||||
//if whisper.IsPubKeyEqual(msg.Src, &asymKey.PublicKey) {
|
||||
// fmt.Printf("\n%s <%x>: message from myself received, not saved: '%s'\n", timestamp, address, name)
|
||||
// return
|
||||
//}
|
||||
|
||||
fullpath := filepath.Join(dir, name)
|
||||
err := ioutil.WriteFile(fullpath, env.Data, 0644)
|
||||
if err != nil {
|
||||
fmt.Printf("\n%s {%x}: message received but not saved: %s\n", timestamp, address, err)
|
||||
} else if show {
|
||||
fmt.Printf("\n%s {%x}: message received and saved as '%s' (%d bytes)\n", timestamp, address, name, len(env.Data))
|
||||
}
|
||||
}
|
||||
|
||||
func requestExpiredMessagesLoop() {
|
||||
var key, peerID, bloom []byte
|
||||
var timeLow, timeUpp uint32
|
||||
var t string
|
||||
var xt whisper.TopicType
|
||||
|
||||
keyID, err := shh.AddSymKeyFromPassword(msPassword)
|
||||
if err != nil {
|
||||
utils.Fatalf("Failed to create symmetric key for mail request: %s", err)
|
||||
}
|
||||
key, err = shh.GetSymKey(keyID)
|
||||
if err != nil {
|
||||
utils.Fatalf("Failed to save symmetric key for mail request: %s", err)
|
||||
}
|
||||
peerID = extractIDFromEnode(*argEnode)
|
||||
shh.AllowP2PMessagesFromPeer(peerID)
|
||||
|
||||
for {
|
||||
timeLow = scanUint("Please enter the lower limit of the time range (unix timestamp): ")
|
||||
timeUpp = scanUint("Please enter the upper limit of the time range (unix timestamp): ")
|
||||
t = scanLine("Enter the topic (hex). Press enter to request all messages, regardless of the topic: ")
|
||||
if len(t) == whisper.TopicLength*2 {
|
||||
x, err := hex.DecodeString(t)
|
||||
if err != nil {
|
||||
fmt.Printf("Failed to parse the topic: %s \n", err)
|
||||
continue
|
||||
}
|
||||
xt = whisper.BytesToTopic(x)
|
||||
bloom = whisper.TopicToBloom(xt)
|
||||
obfuscateBloom(bloom)
|
||||
} else if len(t) == 0 {
|
||||
bloom = whisper.MakeFullNodeBloom()
|
||||
} else {
|
||||
fmt.Println("Error: topic is invalid, request aborted")
|
||||
continue
|
||||
}
|
||||
|
||||
if timeUpp == 0 {
|
||||
timeUpp = 0xFFFFFFFF
|
||||
}
|
||||
|
||||
data := make([]byte, 8, 8+whisper.BloomFilterSize)
|
||||
binary.BigEndian.PutUint32(data, timeLow)
|
||||
binary.BigEndian.PutUint32(data[4:], timeUpp)
|
||||
data = append(data, bloom...)
|
||||
|
||||
var params whisper.MessageParams
|
||||
params.PoW = *argServerPoW
|
||||
params.Payload = data
|
||||
params.KeySym = key
|
||||
params.Src = asymKey
|
||||
params.WorkTime = 5
|
||||
|
||||
msg, err := whisper.NewSentMessage(¶ms)
|
||||
if err != nil {
|
||||
utils.Fatalf("failed to create new message: %s", err)
|
||||
}
|
||||
env, err := msg.Wrap(¶ms)
|
||||
if err != nil {
|
||||
utils.Fatalf("Wrap failed: %s", err)
|
||||
}
|
||||
|
||||
err = shh.RequestHistoricMessages(peerID, env)
|
||||
if err != nil {
|
||||
utils.Fatalf("Failed to send P2P message: %s", err)
|
||||
}
|
||||
|
||||
time.Sleep(time.Second * 5)
|
||||
}
|
||||
}
|
||||
|
||||
func extractIDFromEnode(s string) []byte {
|
||||
n, err := enode.Parse(enode.ValidSchemes, s)
|
||||
if err != nil {
|
||||
utils.Fatalf("Failed to parse node: %s", err)
|
||||
}
|
||||
return n.ID().Bytes()
|
||||
}
|
||||
|
||||
// obfuscateBloom adds 16 random bits to the bloom
// filter, in order to obfuscate the contained topics.
// It does so deterministically within every session.
// Despite the additional bits, it will match on average
// 32000 times fewer messages than a full node's bloom filter.
|
||||
func obfuscateBloom(bloom []byte) {
|
||||
const half = entropySize / 2
|
||||
for i := 0; i < half; i++ {
|
||||
x := int(entropy[i])
|
||||
if entropy[half+i] < 128 {
|
||||
x += 256
|
||||
}
|
||||
|
||||
bloom[x/8] = 1 << uint(x%8) // set the bit number X
|
||||
}
|
||||
}
|
@@ -67,6 +67,40 @@ func (i *HexOrDecimal256) MarshalText() ([]byte, error) {
|
||||
return []byte(fmt.Sprintf("%#x", (*big.Int)(i))), nil
|
||||
}
|
||||
|
||||
// Decimal256 marshals big.Int as a decimal string. When unmarshalling, it
// accepts either "0x"-prefixed (hex encoded) or non-prefixed (decimal) input.
type Decimal256 big.Int

// NewDecimal256 creates a new Decimal256
func NewDecimal256(x int64) *Decimal256 {
b := big.NewInt(x)
d := Decimal256(*b)
return &d
}

// UnmarshalText implements encoding.TextUnmarshaler.
func (i *Decimal256) UnmarshalText(input []byte) error {
bigint, ok := ParseBig256(string(input))
if !ok {
return fmt.Errorf("invalid hex or decimal integer %q", input)
}
*i = Decimal256(*bigint)
return nil
}

// MarshalText implements encoding.TextMarshaler.
func (i *Decimal256) MarshalText() ([]byte, error) {
return []byte(i.String()), nil
}

// String implements Stringer.
func (i *Decimal256) String() string {
if i == nil {
return "0"
}
return fmt.Sprintf("%#d", (*big.Int)(i))
}

// ParseBig256 parses s as a 256 bit integer in decimal or hexadecimal syntax.
// Leading zeros are accepted. The empty string parses as zero.
func ParseBig256(s string) (*big.Int, bool) {
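The new type is the decimal-only counterpart of HexOrDecimal256: it still accepts both encodings on input but always renders decimal on output. A small usage sketch (the struct and field names here are made up for illustration):

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/ethereum/go-ethereum/common/math"
)

// chainStatus is a hypothetical container; only the Decimal256 field matters.
type chainStatus struct {
	Difficulty *math.Decimal256 `json:"difficulty"`
}

func main() {
	for _, blob := range []string{`{"difficulty":"1024"}`, `{"difficulty":"0x400"}`} {
		var s chainStatus
		if err := json.Unmarshal([]byte(blob), &s); err != nil {
			panic(err)
		}
		out, _ := json.Marshal(&s)
		// Both inputs round-trip to the decimal form: {"difficulty":"1024"}
		fmt.Printf("%s -> %s\n", blob, out)
	}
}
```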
|
@@ -36,14 +36,15 @@ type LazyQueue struct {
|
||||
// Items are stored in one of two internal queues ordered by estimated max
|
||||
// priority until the next and the next-after-next refresh. Update and Refresh
|
||||
// always places items in queue[1].
|
||||
queue [2]*sstack
|
||||
popQueue *sstack
|
||||
period time.Duration
|
||||
maxUntil mclock.AbsTime
|
||||
indexOffset int
|
||||
setIndex SetIndexCallback
|
||||
priority PriorityCallback
|
||||
maxPriority MaxPriorityCallback
|
||||
queue [2]*sstack
|
||||
popQueue *sstack
|
||||
period time.Duration
|
||||
maxUntil mclock.AbsTime
|
||||
indexOffset int
|
||||
setIndex SetIndexCallback
|
||||
priority PriorityCallback
|
||||
maxPriority MaxPriorityCallback
|
||||
lastRefresh1, lastRefresh2 mclock.AbsTime
|
||||
}
|
||||
|
||||
type (
|
||||
@@ -54,14 +55,17 @@ type (
|
||||
// NewLazyQueue creates a new lazy queue
|
||||
func NewLazyQueue(setIndex SetIndexCallback, priority PriorityCallback, maxPriority MaxPriorityCallback, clock mclock.Clock, refreshPeriod time.Duration) *LazyQueue {
|
||||
q := &LazyQueue{
|
||||
popQueue: newSstack(nil),
|
||||
setIndex: setIndex,
|
||||
priority: priority,
|
||||
maxPriority: maxPriority,
|
||||
clock: clock,
|
||||
period: refreshPeriod}
|
||||
popQueue: newSstack(nil),
|
||||
setIndex: setIndex,
|
||||
priority: priority,
|
||||
maxPriority: maxPriority,
|
||||
clock: clock,
|
||||
period: refreshPeriod,
|
||||
lastRefresh1: clock.Now(),
|
||||
lastRefresh2: clock.Now(),
|
||||
}
|
||||
q.Reset()
|
||||
q.Refresh()
|
||||
q.refresh(clock.Now())
|
||||
return q
|
||||
}
|
||||
|
||||
@@ -71,9 +75,19 @@ func (q *LazyQueue) Reset() {
|
||||
q.queue[1] = newSstack(q.setIndex1)
|
||||
}
|
||||
|
||||
// Refresh should be called at least with the frequency specified by the refreshPeriod parameter
|
||||
// Refresh performs queue re-evaluation if necessary
|
||||
func (q *LazyQueue) Refresh() {
|
||||
q.maxUntil = q.clock.Now() + mclock.AbsTime(q.period)
|
||||
now := q.clock.Now()
|
||||
for time.Duration(now-q.lastRefresh2) >= q.period*2 {
|
||||
q.refresh(now)
|
||||
q.lastRefresh2 = q.lastRefresh1
|
||||
q.lastRefresh1 = now
|
||||
}
|
||||
}
|
||||
|
||||
// refresh re-evaluates items in the older queue and swaps the two queues
|
||||
func (q *LazyQueue) refresh(now mclock.AbsTime) {
|
||||
q.maxUntil = now + mclock.AbsTime(q.period)
|
||||
for q.queue[0].Len() != 0 {
|
||||
q.Push(heap.Pop(q.queue[0]).(*item).value)
|
||||
}
|
||||
@@ -139,6 +153,7 @@ func (q *LazyQueue) MultiPop(callback func(data interface{}, priority int64) boo
|
||||
}
|
||||
return
|
||||
}
|
||||
nextIndex = q.peekIndex() // re-check because callback is allowed to push items back
|
||||
}
|
||||
}
|
||||
}
|
||||
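The Refresh change above decouples the call frequency from the actual re-evaluation: callers may now invoke Refresh on every pass, and work is only done once enough time has elapsed since the refresh before last. A self-contained sketch of that rate-limiting bookkeeping (not the prque API itself, whose callback types are not shown here):

```go
package main

import (
	"fmt"
	"time"
)

// refresher mirrors the lastRefresh1/lastRefresh2 bookkeeping from the diff:
// a refresh is only performed when two periods' worth of time has passed
// since the refresh before last, so callers may invoke Refresh as often as
// they like.
type refresher struct {
	period                     time.Duration
	lastRefresh1, lastRefresh2 time.Time
	refreshes                  int
}

func newRefresher(period time.Duration) *refresher {
	now := time.Now()
	return &refresher{period: period, lastRefresh1: now, lastRefresh2: now}
}

func (r *refresher) Refresh() {
	now := time.Now()
	for now.Sub(r.lastRefresh2) >= r.period*2 {
		r.refreshes++ // stand-in for the real queue re-evaluation
		r.lastRefresh2 = r.lastRefresh1
		r.lastRefresh1 = now
	}
}

func main() {
	r := newRefresher(100 * time.Millisecond)
	for i := 0; i < 10; i++ {
		r.Refresh() // cheap to call on every iteration
		time.Sleep(50 * time.Millisecond)
	}
	fmt.Println("refreshes performed:", r.refreshes)
}
```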
|
@@ -28,7 +28,7 @@ import (
|
||||
// API is a user facing RPC API to allow controlling the signer and voting
|
||||
// mechanisms of the proof-of-authority scheme.
|
||||
type API struct {
|
||||
chain consensus.ChainReader
|
||||
chain consensus.ChainHeaderReader
|
||||
clique *Clique
|
||||
}
|
||||
|
||||
|
@@ -39,6 +39,7 @@ import (
|
||||
"github.com/ethereum/go-ethereum/params"
|
||||
"github.com/ethereum/go-ethereum/rlp"
|
||||
"github.com/ethereum/go-ethereum/rpc"
|
||||
"github.com/ethereum/go-ethereum/trie"
|
||||
lru "github.com/hashicorp/golang-lru"
|
||||
"golang.org/x/crypto/sha3"
|
||||
)
|
||||
@@ -137,9 +138,8 @@ var (
|
||||
errRecentlySigned = errors.New("recently signed")
|
||||
)
|
||||
|
||||
// SignerFn is a signer callback function to request a header to be signed by a
// backing account.
type SignerFn func(accounts.Account, string, []byte) ([]byte, error)
// SignerFn hashes and signs the data to be signed by a backing account.
type SignerFn func(signer accounts.Account, mimeType string, message []byte) ([]byte, error)

// ecrecover extracts the Ethereum account address from a signed header.
func ecrecover(header *types.Header, sigcache *lru.ARCCache) (common.Address, error) {
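The new signature lets the engine pass a MIME type along with the raw payload, so the wallet decides how to hash and sign it; in geth a wallet's SignData method is what normally gets plugged in via Authorize. A throwaway sketch of a callback matching the new shape (the key and message below are placeholders, not real sealing input):

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/accounts"
	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	// A scratch key standing in for the sealer's account; real geth code
	// passes a keystore wallet's SignData here instead.
	key, _ := crypto.GenerateKey()
	sealer := accounts.Account{Address: crypto.PubkeyToAddress(key.PublicKey)}

	// Matches the new SignerFn: the callback hashes and signs the payload itself.
	signFn := func(signer accounts.Account, mimeType string, message []byte) ([]byte, error) {
		return crypto.Sign(crypto.Keccak256(message), key)
	}

	sig, err := signFn(sealer, accounts.MimetypeClique, []byte("clique header RLP placeholder"))
	if err != nil {
		panic(err)
	}
	fmt.Printf("sealed by %s with a %d-byte signature\n", sealer.Address.Hex(), len(sig))
}
```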
@@ -213,14 +213,14 @@ func (c *Clique) Author(header *types.Header) (common.Address, error) {
|
||||
}
|
||||
|
||||
// VerifyHeader checks whether a header conforms to the consensus rules.
|
||||
func (c *Clique) VerifyHeader(chain consensus.ChainReader, header *types.Header, seal bool) error {
|
||||
func (c *Clique) VerifyHeader(chain consensus.ChainHeaderReader, header *types.Header, seal bool) error {
|
||||
return c.verifyHeader(chain, header, nil)
|
||||
}
|
||||
|
||||
// VerifyHeaders is similar to VerifyHeader, but verifies a batch of headers. The
|
||||
// method returns a quit channel to abort the operations and a results channel to
|
||||
// retrieve the async verifications (the order is that of the input slice).
|
||||
func (c *Clique) VerifyHeaders(chain consensus.ChainReader, headers []*types.Header, seals []bool) (chan<- struct{}, <-chan error) {
|
||||
func (c *Clique) VerifyHeaders(chain consensus.ChainHeaderReader, headers []*types.Header, seals []bool) (chan<- struct{}, <-chan error) {
|
||||
abort := make(chan struct{})
|
||||
results := make(chan error, len(headers))
|
||||
|
||||
@@ -242,7 +242,7 @@ func (c *Clique) VerifyHeaders(chain consensus.ChainReader, headers []*types.Hea
|
||||
// caller may optionally pass in a batch of parents (ascending order) to avoid
|
||||
// looking those up from the database. This is useful for concurrently verifying
|
||||
// a batch of new headers.
|
||||
func (c *Clique) verifyHeader(chain consensus.ChainReader, header *types.Header, parents []*types.Header) error {
|
||||
func (c *Clique) verifyHeader(chain consensus.ChainHeaderReader, header *types.Header, parents []*types.Header) error {
|
||||
if header.Number == nil {
|
||||
return errUnknownBlock
|
||||
}
|
||||
@@ -305,7 +305,7 @@ func (c *Clique) verifyHeader(chain consensus.ChainReader, header *types.Header,
|
||||
// rather depend on a batch of previous headers. The caller may optionally pass
// in a batch of parents (ascending order) to avoid looking those up from the
// database. This is useful for concurrently verifying a batch of new headers.
func (c *Clique) verifyCascadingFields(chain consensus.ChainReader, header *types.Header, parents []*types.Header) error {
func (c *Clique) verifyCascadingFields(chain consensus.ChainHeaderReader, header *types.Header, parents []*types.Header) error {
// The genesis block is the always valid dead-end
number := header.Number.Uint64()
if number == 0 {
@@ -345,7 +345,7 @@ func (c *Clique) verifyCascadingFields(chain consensus.ChainReader, header *type
}

// snapshot retrieves the authorization snapshot at a given point in time.
func (c *Clique) snapshot(chain consensus.ChainReader, number uint64, hash common.Hash, parents []*types.Header) (*Snapshot, error) {
func (c *Clique) snapshot(chain consensus.ChainHeaderReader, number uint64, hash common.Hash, parents []*types.Header) (*Snapshot, error) {
// Search for a snapshot in memory or on disk for checkpoints
var (
headers []*types.Header
@@ -436,7 +436,7 @@ func (c *Clique) VerifyUncles(chain consensus.ChainReader, block *types.Block) e

// VerifySeal implements consensus.Engine, checking whether the signature contained
// in the header satisfies the consensus protocol requirements.
func (c *Clique) VerifySeal(chain consensus.ChainReader, header *types.Header) error {
func (c *Clique) VerifySeal(chain consensus.ChainHeaderReader, header *types.Header) error {
return c.verifySeal(chain, header, nil)
}

@@ -444,7 +444,7 @@ func (c *Clique) VerifySeal(chain consensus.ChainReader, header *types.Header) e
// consensus protocol requirements. The method accepts an optional list of parent
// headers that aren't yet part of the local blockchain to generate the snapshots
// from.
func (c *Clique) verifySeal(chain consensus.ChainReader, header *types.Header, parents []*types.Header) error {
func (c *Clique) verifySeal(chain consensus.ChainHeaderReader, header *types.Header, parents []*types.Header) error {
// Verifying the genesis block is not supported
number := header.Number.Uint64()
if number == 0 {
@@ -487,7 +487,7 @@ func (c *Clique) verifySeal(chain consensus.ChainReader, header *types.Header, p

// Prepare implements consensus.Engine, preparing all the consensus fields of the
// header for running the transactions on top.
func (c *Clique) Prepare(chain consensus.ChainReader, header *types.Header) error {
func (c *Clique) Prepare(chain consensus.ChainHeaderReader, header *types.Header) error {
// If the block isn't a checkpoint, cast a random vote (good enough for now)
header.Coinbase = common.Address{}
header.Nonce = types.BlockNonce{}
@@ -520,7 +520,7 @@ func (c *Clique) Prepare(chain consensus.ChainReader, header *types.Header) erro
c.lock.RUnlock()
}
// Set the correct difficulty
header.Difficulty = CalcDifficulty(snap, c.signer)
header.Difficulty = calcDifficulty(snap, c.signer)

// Ensure the extra data has all its components
if len(header.Extra) < extraVanity {
@@ -552,7 +552,7 @@ func (c *Clique) Prepare(chain consensus.ChainReader, header *types.Header) erro

// Finalize implements consensus.Engine, ensuring no uncles are set, nor block
// rewards given.
func (c *Clique) Finalize(chain consensus.ChainReader, header *types.Header, state *state.StateDB, txs []*types.Transaction, uncles []*types.Header) {
func (c *Clique) Finalize(chain consensus.ChainHeaderReader, header *types.Header, state *state.StateDB, txs []*types.Transaction, uncles []*types.Header) {
// No block rewards in PoA, so the state remains as is and uncles are dropped
header.Root = state.IntermediateRoot(chain.Config().IsEIP158(header.Number))
header.UncleHash = types.CalcUncleHash(nil)
@@ -560,13 +560,13 @@ func (c *Clique) Finalize(chain consensus.ChainReader, header *types.Header, sta

// FinalizeAndAssemble implements consensus.Engine, ensuring no uncles are set,
// nor block rewards given, and returns the final block.
func (c *Clique) FinalizeAndAssemble(chain consensus.ChainReader, header *types.Header, state *state.StateDB, txs []*types.Transaction, uncles []*types.Header, receipts []*types.Receipt) (*types.Block, error) {
func (c *Clique) FinalizeAndAssemble(chain consensus.ChainHeaderReader, header *types.Header, state *state.StateDB, txs []*types.Transaction, uncles []*types.Header, receipts []*types.Receipt) (*types.Block, error) {
// No block rewards in PoA, so the state remains as is and uncles are dropped
header.Root = state.IntermediateRoot(chain.Config().IsEIP158(header.Number))
header.UncleHash = types.CalcUncleHash(nil)

// Assemble and return the final block for sealing
return types.NewBlock(header, txs, nil, receipts), nil
return types.NewBlock(header, txs, nil, receipts, new(trie.Trie)), nil
}

// Authorize injects a private key into the consensus engine to mint new blocks
@@ -581,7 +581,7 @@ func (c *Clique) Authorize(signer common.Address, signFn SignerFn) {

// Seal implements consensus.Engine, attempting to create a sealed block using
// the local signing credentials.
func (c *Clique) Seal(chain consensus.ChainReader, block *types.Block, results chan<- *types.Block, stop <-chan struct{}) error {
func (c *Clique) Seal(chain consensus.ChainHeaderReader, block *types.Block, results chan<- *types.Block, stop <-chan struct{}) error {
header := block.Header()

// Sealing the genesis block is not supported
@@ -652,20 +652,18 @@ func (c *Clique) Seal(chain consensus.ChainReader, block *types.Block, results c
}

// CalcDifficulty is the difficulty adjustment algorithm. It returns the difficulty
// that a new block should have based on the previous blocks in the chain and the
// current signer.
func (c *Clique) CalcDifficulty(chain consensus.ChainReader, time uint64, parent *types.Header) *big.Int {
// that a new block should have:
// * DIFF_NOTURN(2) if BLOCK_NUMBER % SIGNER_COUNT != SIGNER_INDEX
// * DIFF_INTURN(1) if BLOCK_NUMBER % SIGNER_COUNT == SIGNER_INDEX
func (c *Clique) CalcDifficulty(chain consensus.ChainHeaderReader, time uint64, parent *types.Header) *big.Int {
snap, err := c.snapshot(chain, parent.Number.Uint64(), parent.Hash(), nil)
if err != nil {
return nil
}
return CalcDifficulty(snap, c.signer)
return calcDifficulty(snap, c.signer)
}

// CalcDifficulty is the difficulty adjustment algorithm. It returns the difficulty
// that a new block should have based on the previous blocks in the chain and the
// current signer.
func CalcDifficulty(snap *Snapshot, signer common.Address) *big.Int {
func calcDifficulty(snap *Snapshot, signer common.Address) *big.Int {
if snap.inturn(snap.Number+1, signer) {
return new(big.Int).Set(diffInTurn)
}
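The renamed calcDifficulty helper keeps the rule spelled out in the comment above: the in-turn signer for a block is the one whose index matches BLOCK_NUMBER % SIGNER_COUNT, and every other signer proposes with the no-turn difficulty. A minimal, self-contained sketch of that selection rule; the signer count and block numbers are made up, and the real constants live in the clique package:

```go
package main

import "fmt"

// inturn mirrors the rule documented above: the signer whose index equals
// BLOCK_NUMBER % SIGNER_COUNT is the in-turn signer for that block.
func inturn(number, signerIndex, signerCount uint64) bool {
	return number%signerCount == signerIndex
}

func main() {
	const signers = 4 // illustrative signer count, not from the patch
	for number := uint64(100); number < 102; number++ {
		for idx := uint64(0); idx < signers; idx++ {
			turn := "DIFF_NOTURN"
			if inturn(number, idx, signers) {
				turn = "DIFF_INTURN"
			}
			fmt.Printf("block %d, signer %d -> %s\n", number, idx, turn)
		}
	}
}
```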
@@ -684,7 +682,7 @@ func (c *Clique) Close() error {

// APIs implements consensus.Engine, returning the user facing RPC API to allow
// controlling the signer voting.
func (c *Clique) APIs(chain consensus.ChainReader) []rpc.API {
func (c *Clique) APIs(chain consensus.ChainHeaderReader) []rpc.API {
return []rpc.API{{
Namespace: "clique",
Version:   "1.0",

@@ -423,7 +423,7 @@ func TestClique(t *testing.T) {
})
// Iterate through the blocks and seal them individually
for j, block := range blocks {
// Geth the header and prepare it for signing
// Get the header and prepare it for signing
header := block.Header()
if j > 0 {
header.ParentHash = blocks[j-1].Hash()

@@ -27,9 +27,9 @@ import (
"github.com/ethereum/go-ethereum/rpc"
)

// ChainReader defines a small collection of methods needed to access the local
// blockchain during header and/or uncle verification.
type ChainReader interface {
// ChainHeaderReader defines a small collection of methods needed to access the local
// blockchain during header verification.
type ChainHeaderReader interface {
// Config retrieves the blockchain's chain configuration.
Config() *params.ChainConfig

@@ -44,6 +44,12 @@ type ChainReader interface {

// GetHeaderByHash retrieves a block header from the database by its hash.
GetHeaderByHash(hash common.Hash) *types.Header
}

// ChainReader defines a small collection of methods needed to access the local
// blockchain during header and/or uncle verification.
type ChainReader interface {
ChainHeaderReader

// GetBlock retrieves a block from the database by hash and number.
GetBlock(hash common.Hash, number uint64) *types.Block
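The split above is the heart of this change: ChainHeaderReader carries the header-only lookups, and ChainReader embeds it and adds block (body) access. A hedged sketch of how a consumer picks the narrower interface when it can; the helper names are made up, while the method signatures are the ones shown in the hunks:

```go
package sketch

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/consensus"
)

// headerKnown only needs header lookups, so the narrower ChainHeaderReader is enough.
func headerKnown(chain consensus.ChainHeaderReader, hash common.Hash) bool {
	return chain.GetHeaderByHash(hash) != nil
}

// bodyKnown also needs the block body, so it asks for the full ChainReader,
// which embeds ChainHeaderReader and adds GetBlock.
func bodyKnown(chain consensus.ChainReader, hash common.Hash, number uint64) bool {
	return chain.GetBlock(hash, number) != nil
}
```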
@@ -59,13 +65,13 @@ type Engine interface {
// VerifyHeader checks whether a header conforms to the consensus rules of a
// given engine. Verifying the seal may be done optionally here, or explicitly
// via the VerifySeal method.
VerifyHeader(chain ChainReader, header *types.Header, seal bool) error
VerifyHeader(chain ChainHeaderReader, header *types.Header, seal bool) error

// VerifyHeaders is similar to VerifyHeader, but verifies a batch of headers
// concurrently. The method returns a quit channel to abort the operations and
// a results channel to retrieve the async verifications (the order is that of
// the input slice).
VerifyHeaders(chain ChainReader, headers []*types.Header, seals []bool) (chan<- struct{}, <-chan error)
VerifyHeaders(chain ChainHeaderReader, headers []*types.Header, seals []bool) (chan<- struct{}, <-chan error)

// VerifyUncles verifies that the given block's uncles conform to the consensus
// rules of a given engine.
@@ -73,18 +79,18 @@ type Engine interface {

// VerifySeal checks whether the crypto seal on a header is valid according to
// the consensus rules of the given engine.
VerifySeal(chain ChainReader, header *types.Header) error
VerifySeal(chain ChainHeaderReader, header *types.Header) error

// Prepare initializes the consensus fields of a block header according to the
// rules of a particular engine. The changes are executed inline.
Prepare(chain ChainReader, header *types.Header) error
Prepare(chain ChainHeaderReader, header *types.Header) error

// Finalize runs any post-transaction state modifications (e.g. block rewards)
// but does not assemble the block.
//
// Note: The block header and state database might be updated to reflect any
// consensus rules that happen at finalization (e.g. block rewards).
Finalize(chain ChainReader, header *types.Header, state *state.StateDB, txs []*types.Transaction,
Finalize(chain ChainHeaderReader, header *types.Header, state *state.StateDB, txs []*types.Transaction,
uncles []*types.Header)

// FinalizeAndAssemble runs any post-transaction state modifications (e.g. block
@@ -92,7 +98,7 @@ type Engine interface {
//
// Note: The block header and state database might be updated to reflect any
// consensus rules that happen at finalization (e.g. block rewards).
FinalizeAndAssemble(chain ChainReader, header *types.Header, state *state.StateDB, txs []*types.Transaction,
FinalizeAndAssemble(chain ChainHeaderReader, header *types.Header, state *state.StateDB, txs []*types.Transaction,
uncles []*types.Header, receipts []*types.Receipt) (*types.Block, error)

// Seal generates a new sealing request for the given input block and pushes
@@ -100,17 +106,17 @@ type Engine interface {
//
// Note, the method returns immediately and will send the result async. More
// than one result may also be returned depending on the consensus algorithm.
Seal(chain ChainReader, block *types.Block, results chan<- *types.Block, stop <-chan struct{}) error
Seal(chain ChainHeaderReader, block *types.Block, results chan<- *types.Block, stop <-chan struct{}) error

// SealHash returns the hash of a block prior to it being sealed.
SealHash(header *types.Header) common.Hash

// CalcDifficulty is the difficulty adjustment algorithm. It returns the difficulty
// that a new block should have.
CalcDifficulty(chain ChainReader, time uint64, parent *types.Header) *big.Int
CalcDifficulty(chain ChainHeaderReader, time uint64, parent *types.Header) *big.Int

// APIs returns the RPC APIs this consensus engine provides.
APIs(chain ChainReader) []rpc.API
APIs(chain ChainHeaderReader) []rpc.API

// Close terminates any background threads maintained by the consensus engine.
Close() error

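As a usage note for the batch API above: VerifyHeaders hands back a quit channel and a results channel whose errors arrive in input order, so a caller drains one error per header and closes the quit channel when it is done. A minimal sketch under those assumptions; the function and variable names are illustrative, not part of the patch:

```go
package sketch

import (
	"github.com/ethereum/go-ethereum/consensus"
	"github.com/ethereum/go-ethereum/core/types"
)

// verifyAll drains the asynchronous results in input order and stops at the first error.
func verifyAll(engine consensus.Engine, chain consensus.ChainHeaderReader, headers []*types.Header) error {
	seals := make([]bool, len(headers)) // skip seal checks in this sketch
	abort, results := engine.VerifyHeaders(chain, headers, seals)
	defer close(abort) // tell the engine to drop any outstanding work

	for range headers {
		if err := <-results; err != nil {
			return err
		}
	}
	return nil
}
```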
@@ -33,6 +33,7 @@ import (
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/trie"
"golang.org/x/crypto/sha3"
)

@@ -86,7 +87,7 @@ func (ethash *Ethash) Author(header *types.Header) (common.Address, error) {

// VerifyHeader checks whether a header conforms to the consensus rules of the
// stock Ethereum ethash engine.
func (ethash *Ethash) VerifyHeader(chain consensus.ChainReader, header *types.Header, seal bool) error {
func (ethash *Ethash) VerifyHeader(chain consensus.ChainHeaderReader, header *types.Header, seal bool) error {
// If we're running a full engine faking, accept any input as valid
if ethash.config.PowMode == ModeFullFake {
return nil
@@ -107,7 +108,7 @@ func (ethash *Ethash) VerifyHeader(chain consensus.ChainReader, header *types.He
// VerifyHeaders is similar to VerifyHeader, but verifies a batch of headers
// concurrently. The method returns a quit channel to abort the operations and
// a results channel to retrieve the async verifications.
func (ethash *Ethash) VerifyHeaders(chain consensus.ChainReader, headers []*types.Header, seals []bool) (chan<- struct{}, <-chan error) {
func (ethash *Ethash) VerifyHeaders(chain consensus.ChainHeaderReader, headers []*types.Header, seals []bool) (chan<- struct{}, <-chan error) {
// If we're running a full engine faking, accept any input as valid
if ethash.config.PowMode == ModeFullFake || len(headers) == 0 {
abort, results := make(chan struct{}), make(chan error, len(headers))
@@ -169,7 +170,7 @@ func (ethash *Ethash) VerifyHeaders(chain consensus.ChainReader, headers []*type
return abort, errorsOut
}

func (ethash *Ethash) verifyHeaderWorker(chain consensus.ChainReader, headers []*types.Header, seals []bool, index int) error {
func (ethash *Ethash) verifyHeaderWorker(chain consensus.ChainHeaderReader, headers []*types.Header, seals []bool, index int) error {
var parent *types.Header
if index == 0 {
parent = chain.GetHeader(headers[0].ParentHash, headers[0].Number.Uint64()-1)
@@ -243,7 +244,7 @@ func (ethash *Ethash) VerifyUncles(chain consensus.ChainReader, block *types.Blo
// verifyHeader checks whether a header conforms to the consensus rules of the
// stock Ethereum ethash engine.
// See YP section 4.3.4. "Block Header Validity"
func (ethash *Ethash) verifyHeader(chain consensus.ChainReader, header, parent *types.Header, uncle bool, seal bool) error {
func (ethash *Ethash) verifyHeader(chain consensus.ChainHeaderReader, header, parent *types.Header, uncle bool, seal bool) error {
// Ensure that the header's extra-data section is of a reasonable size
if uint64(len(header.Extra)) > params.MaximumExtraDataSize {
return fmt.Errorf("extra-data too long: %d > %d", len(header.Extra), params.MaximumExtraDataSize)
@@ -306,7 +307,7 @@ func (ethash *Ethash) verifyHeader(chain consensus.ChainReader, header, parent *
// CalcDifficulty is the difficulty adjustment algorithm. It returns
// the difficulty that a new block should have when created at time
// given the parent block's time and difficulty.
func (ethash *Ethash) CalcDifficulty(chain consensus.ChainReader, time uint64, parent *types.Header) *big.Int {
func (ethash *Ethash) CalcDifficulty(chain consensus.ChainHeaderReader, time uint64, parent *types.Header) *big.Int {
return CalcDifficulty(chain.Config(), time, parent)
}

@@ -486,14 +487,14 @@ func calcDifficultyFrontier(time uint64, parent *types.Header) *big.Int {

// VerifySeal implements consensus.Engine, checking whether the given block satisfies
// the PoW difficulty requirements.
func (ethash *Ethash) VerifySeal(chain consensus.ChainReader, header *types.Header) error {
func (ethash *Ethash) VerifySeal(chain consensus.ChainHeaderReader, header *types.Header) error {
return ethash.verifySeal(chain, header, false)
}

// verifySeal checks whether a block satisfies the PoW difficulty requirements,
// either using the usual ethash cache for it, or alternatively using a full DAG
// to make remote mining fast.
func (ethash *Ethash) verifySeal(chain consensus.ChainReader, header *types.Header, fulldag bool) error {
func (ethash *Ethash) verifySeal(chain consensus.ChainHeaderReader, header *types.Header, fulldag bool) error {
// If we're running a fake PoW, accept any seal as valid
if ethash.config.PowMode == ModeFake || ethash.config.PowMode == ModeFullFake {
time.Sleep(ethash.fakeDelay)
@@ -558,7 +559,7 @@ func (ethash *Ethash) verifySeal(chain consensus.ChainReader, header *types.Head

// Prepare implements consensus.Engine, initializing the difficulty field of a
// header to conform to the ethash protocol. The changes are done inline.
func (ethash *Ethash) Prepare(chain consensus.ChainReader, header *types.Header) error {
func (ethash *Ethash) Prepare(chain consensus.ChainHeaderReader, header *types.Header) error {
parent := chain.GetHeader(header.ParentHash, header.Number.Uint64()-1)
if parent == nil {
return consensus.ErrUnknownAncestor
@@ -569,7 +570,7 @@ func (ethash *Ethash) Prepare(chain consensus.ChainReader, header *types.Header)

// Finalize implements consensus.Engine, accumulating the block and uncle rewards,
// setting the final state on the header
func (ethash *Ethash) Finalize(chain consensus.ChainReader, header *types.Header, state *state.StateDB, txs []*types.Transaction, uncles []*types.Header) {
func (ethash *Ethash) Finalize(chain consensus.ChainHeaderReader, header *types.Header, state *state.StateDB, txs []*types.Transaction, uncles []*types.Header) {
// Accumulate any block and uncle rewards and commit the final state root
accumulateRewards(chain.Config(), state, header, uncles)
header.Root = state.IntermediateRoot(chain.Config().IsEIP158(header.Number))
@@ -577,13 +578,13 @@ func (ethash *Ethash) Finalize(chain consensus.ChainReader, header *types.Header

// FinalizeAndAssemble implements consensus.Engine, accumulating the block and
// uncle rewards, setting the final state and assembling the block.
func (ethash *Ethash) FinalizeAndAssemble(chain consensus.ChainReader, header *types.Header, state *state.StateDB, txs []*types.Transaction, uncles []*types.Header, receipts []*types.Receipt) (*types.Block, error) {
func (ethash *Ethash) FinalizeAndAssemble(chain consensus.ChainHeaderReader, header *types.Header, state *state.StateDB, txs []*types.Transaction, uncles []*types.Header, receipts []*types.Receipt) (*types.Block, error) {
// Accumulate any block and uncle rewards and commit the final state root
accumulateRewards(chain.Config(), state, header, uncles)
header.Root = state.IntermediateRoot(chain.Config().IsEIP158(header.Number))

// Header seems complete, assemble into a block and return
return types.NewBlock(header, txs, uncles, receipts), nil
return types.NewBlock(header, txs, uncles, receipts, new(trie.Trie)), nil
}

// SealHash returns the hash of a block prior to it being sealed.

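Both the clique and ethash hunks show the other API change rippling through this diff: types.NewBlock now takes an explicit hasher used to derive the transaction and receipt roots, with new(trie.Trie) passed at these call sites. A small sketch of the new call shape; the wrapper function is invented for illustration:

```go
package sketch

import (
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/trie"
)

// assembleBlock shows the extra argument: the caller now supplies the trie
// implementation used for root derivation instead of NewBlock choosing one.
func assembleBlock(header *types.Header, txs []*types.Transaction, uncles []*types.Header, receipts []*types.Receipt) *types.Block {
	return types.NewBlock(header, txs, uncles, receipts, new(trie.Trie))
}
```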
@@ -656,7 +656,7 @@ func (ethash *Ethash) Hashrate() float64 {
}

// APIs implements consensus.Engine, returning the user facing RPC APIs.
func (ethash *Ethash) APIs(chain consensus.ChainReader) []rpc.API {
func (ethash *Ethash) APIs(chain consensus.ChainHeaderReader) []rpc.API {
// In order to ensure backward compatibility, we exposes ethash RPC APIs
// to both eth and ethash namespaces.
return []rpc.API{

@@ -48,7 +48,7 @@ var (

// Seal implements consensus.Engine, attempting to find a nonce that satisfies
// the block's difficulty requirements.
func (ethash *Ethash) Seal(chain consensus.ChainReader, block *types.Block, results chan<- *types.Block, stop <-chan struct{}) error {
func (ethash *Ethash) Seal(chain consensus.ChainHeaderReader, block *types.Block, results chan<- *types.Block, stop <-chan struct{}) error {
// If we're running a fake PoW, simply return a 0 nonce immediately
if ethash.config.PowMode == ModeFake || ethash.config.PowMode == ModeFullFake {
header := block.Header()

@@ -306,9 +306,9 @@ func (b *bridge) Sign(call jsre.Call) (goja.Value, error) {
}

// Send the request to the backend and return
sign, callable := goja.AssertFunction(getJeth(call.VM).Get("unlockAccount"))
sign, callable := goja.AssertFunction(getJeth(call.VM).Get("sign"))
if !callable {
return nil, fmt.Errorf("jeth.unlockAccount is not callable")
return nil, fmt.Errorf("jeth.sign is not callable")
}
return sign(goja.Null(), message, account, passwd)
}
@@ -353,14 +353,14 @@ func (b *bridge) SleepBlocks(call jsre.Call) (goja.Value, error) {
}

// Poll the current block number until either it or a timeout is reached.
var (
deadline = time.Now().Add(time.Duration(sleep) * time.Second)
lastNumber = ^hexutil.Uint64(0)
)
deadline := time.Now().Add(time.Duration(sleep) * time.Second)
var lastNumber hexutil.Uint64
if err := b.client.Call(&lastNumber, "eth_blockNumber"); err != nil {
return nil, err
}
for time.Now().Before(deadline) {
var number hexutil.Uint64
err := b.client.Call(&number, "eth_blockNumber")
if err != nil {
if err := b.client.Call(&number, "eth_blockNumber"); err != nil {
return nil, err
}
if number != lastNumber {

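The reworked SleepBlocks loop above seeds lastNumber with a real eth_blockNumber call instead of a sentinel value before polling. A hedged, stand-alone version of the same polling pattern over an rpc.Client; names and the one-second pause are illustrative:

```go
package sketch

import (
	"time"

	"github.com/ethereum/go-ethereum/common/hexutil"
	"github.com/ethereum/go-ethereum/rpc"
)

// waitForNewBlock polls eth_blockNumber until it changes or the deadline passes.
func waitForNewBlock(client *rpc.Client, timeout time.Duration) (hexutil.Uint64, error) {
	deadline := time.Now().Add(timeout)

	var last hexutil.Uint64
	if err := client.Call(&last, "eth_blockNumber"); err != nil {
		return 0, err
	}
	for time.Now().Before(deadline) {
		var number hexutil.Uint64
		if err := client.Call(&number, "eth_blockNumber"); err != nil {
			return 0, err
		}
		if number != last {
			return number, nil
		}
		time.Sleep(time.Second) // avoid a busy loop between polls
	}
	return last, nil
}
```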
@@ -109,7 +109,8 @@ func newTester(t *testing.T, confOverride func(*eth.Config)) *tester {
if confOverride != nil {
confOverride(ethConf)
}
if err = stack.Register(func(ctx *node.ServiceContext) (node.Service, error) { return eth.New(ctx, ethConf) }); err != nil {
ethBackend, err := eth.New(stack, ethConf)
if err != nil {
t.Fatalf("failed to register Ethereum protocol: %v", err)
}
// Start the node and assemble the JavaScript console around it
@@ -135,13 +136,10 @@ func newTester(t *testing.T, confOverride func(*eth.Config)) *tester {
t.Fatalf("failed to create JavaScript console: %v", err)
}
// Create the final tester and return
var ethereum *eth.Ethereum
stack.Service(&ethereum)

return &tester{
workspace: workspace,
stack:     stack,
ethereum:  ethereum,
ethereum:  ethBackend,
console:   console,
input:     prompter,
output:    printer,

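The test change above reflects the new service wiring: eth.New is called directly with the node stack and returns the backend, rather than being registered through a node.ServiceContext callback. A rough sketch of that pattern, assuming node.New from the same tree and leaving all configuration at defaults:

```go
package sketch

import (
	"github.com/ethereum/go-ethereum/eth"
	"github.com/ethereum/go-ethereum/node"
)

// newBackend wires an Ethereum backend onto a fresh node stack the way the
// updated test does; error handling is kept minimal for brevity.
func newBackend() (*node.Node, *eth.Ethereum, error) {
	stack, err := node.New(&node.Config{}) // assumed constructor from the same tree
	if err != nil {
		return nil, nil, err
	}
	ethBackend, err := eth.New(stack, &eth.Config{})
	if err != nil {
		return nil, nil, err
	}
	return stack, ethBackend, nil
}
```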
@@ -27,10 +27,17 @@ var (
)

// CheckpointOracleABI is the input ABI used to generate the binding from.
const CheckpointOracleABI = "[{\"constant\":true,\"inputs\":[],\"name\":\"GetAllAdmin\",\"outputs\":[{\"name\":\"\",\"type\":\"address[]\"}],\"payable\":false,\"stateMutability\":\"view\",\"type\":\"function\"},{\"constant\":true,\"inputs\":[],\"name\":\"GetLatestCheckpoint\",\"outputs\":[{\"name\":\"\",\"type\":\"uint64\"},{\"name\":\"\",\"type\":\"bytes32\"},{\"name\":\"\",\"type\":\"uint256\"}],\"payable\":false,\"stateMutability\":\"view\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"_recentNumber\",\"type\":\"uint256\"},{\"name\":\"_recentHash\",\"type\":\"bytes32\"},{\"name\":\"_hash\",\"type\":\"bytes32\"},{\"name\":\"_sectionIndex\",\"type\":\"uint64\"},{\"name\":\"v\",\"type\":\"uint8[]\"},{\"name\":\"r\",\"type\":\"bytes32[]\"},{\"name\":\"s\",\"type\":\"bytes32[]\"}],\"name\":\"SetCheckpoint\",\"outputs\":[{\"name\":\"\",\"type\":\"bool\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"name\":\"_adminlist\",\"type\":\"address[]\"},{\"name\":\"_sectionSize\",\"type\":\"uint256\"},{\"name\":\"_processConfirms\",\"type\":\"uint256\"},{\"name\":\"_threshold\",\"type\":\"uint256\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"name\":\"index\",\"type\":\"uint64\"},{\"indexed\":false,\"name\":\"checkpointHash\",\"type\":\"bytes32\"},{\"indexed\":false,\"name\":\"v\",\"type\":\"uint8\"},{\"indexed\":false,\"name\":\"r\",\"type\":\"bytes32\"},{\"indexed\":false,\"name\":\"s\",\"type\":\"bytes32\"}],\"name\":\"NewCheckpointVote\",\"type\":\"event\"}]"
const CheckpointOracleABI = "[{\"inputs\":[{\"internalType\":\"address[]\",\"name\":\"_adminlist\",\"type\":\"address[]\"},{\"internalType\":\"uint256\",\"name\":\"_sectionSize\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"_processConfirms\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"_threshold\",\"type\":\"uint256\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint64\",\"name\":\"index\",\"type\":\"uint64\"},{\"indexed\":false,\"internalType\":\"bytes32\",\"name\":\"checkpointHash\",\"type\":\"bytes32\"},{\"indexed\":false,\"internalType\":\"uint8\",\"name\":\"v\",\"type\":\"uint8\"},{\"indexed\":false,\"internalType\":\"bytes32\",\"name\":\"r\",\"type\":\"bytes32\"},{\"indexed\":false,\"internalType\":\"bytes32\",\"name\":\"s\",\"type\":\"bytes32\"}],\"name\":\"NewCheckpointVote\",\"type\":\"event\"},{\"inputs\":[],\"name\":\"GetAllAdmin\",\"outputs\":[{\"internalType\":\"address[]\",\"name\":\"\",\"type\":\"address[]\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"GetLatestCheckpoint\",\"outputs\":[{\"internalType\":\"uint64\",\"name\":\"\",\"type\":\"uint64\"},{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"},{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"_recentNumber\",\"type\":\"uint256\"},{\"internalType\":\"bytes32\",\"name\":\"_recentHash\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"_hash\",\"type\":\"bytes32\"},{\"internalType\":\"uint64\",\"name\":\"_sectionIndex\",\"type\":\"uint64\"},{\"internalType\":\"uint8[]\",\"name\":\"v\",\"type\":\"uint8[]\"},{\"internalType\":\"bytes32[]\",\"name\":\"r\",\"type\":\"bytes32[]\"},{\"internalType\":\"bytes32[]\",\"name\":\"s\",\"type\":\"bytes32[]\"}],\"name\":\"SetCheckpoint\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]"

// CheckpointOracleFuncSigs maps the 4-byte function signature to its string representation.
var CheckpointOracleFuncSigs = map[string]string{
"45848dfc": "GetAllAdmin()",
"4d6a304c": "GetLatestCheckpoint()",
"d459fc46": "SetCheckpoint(uint256,bytes32,bytes32,uint64,uint8[],bytes32[],bytes32[])",
}

// CheckpointOracleBin is the compiled bytecode used for deploying new contracts.
const CheckpointOracleBin = `0x608060405234801561001057600080fd5b506040516108153803806108158339818101604052608081101561003357600080fd5b81019080805164010000000081111561004b57600080fd5b8201602081018481111561005e57600080fd5b815185602082028301116401000000008211171561007b57600080fd5b505060208201516040830151606090930151919450925060005b84518110156101415760016000808784815181106100af57fe5b60200260200101516001600160a01b03166001600160a01b0316815260200190815260200160002060006101000a81548160ff02191690831515021790555060018582815181106100fc57fe5b60209081029190910181015182546001808201855560009485529290932090920180546001600160a01b0319166001600160a01b039093169290921790915501610095565b50600592909255600655600755506106b78061015e6000396000f3fe608060405234801561001057600080fd5b50600436106100415760003560e01c806345848dfc146100465780634d6a304c1461009e578063d459fc46146100cf575b600080fd5b61004e6102b0565b60408051602080825283518183015283519192839290830191858101910280838360005b8381101561008a578181015183820152602001610072565b505050509050019250505060405180910390f35b6100a661034f565b6040805167ffffffffffffffff9094168452602084019290925282820152519081900360600190f35b61029c600480360360e08110156100e557600080fd5b81359160208101359160408201359167ffffffffffffffff6060820135169181019060a08101608082013564010000000081111561012257600080fd5b82018360208201111561013457600080fd5b8035906020019184602083028401116401000000008311171561015657600080fd5b91908080602002602001604051908101604052809392919081815260200183836020028082843760009201919091525092959493602081019350359150506401000000008111156101a657600080fd5b8201836020820111156101b857600080fd5b803590602001918460208302840111640100000000831117156101da57600080fd5b919080806020026020016040519081016040528093929190818152602001838360200280828437600092019190915250929594936020810193503591505064010000000081111561022a57600080fd5b82018360208201111561023c57600080fd5b8035906020019184602083028401116401000000008311171561025e57600080fd5b91908080602002602001604051908101604052809392919081815260200183836020028082843760009201919091525092955061036a945050505050565b604080519115158252519081900360200190f35b6060806001805490506040519080825280602002602001820160405280156102e2578160200160208202803883390190505b50905060005b60015481101561034957600181815481106102ff57fe5b9060005260206000200160009054906101000a90046001600160a01b031682828151811061032957fe5b6001600160a01b03909216602092830291909101909101526001016102e8565b50905090565b60025460045460035467ffffffffffffffff90921691909192565b3360009081526020819052604081205460ff1661038657600080fd5b8688401461039357600080fd5b82518451146103a157600080fd5b81518451146103af57600080fd5b6006546005548660010167ffffffffffffffff1602014310156103d457506000610677565b60025467ffffffffffffffff90811690861610156103f457506000610677565b60025467ffffffffffffffff8681169116148015610426575067ffffffffffffffff8516151580610426575060035415155b1561043357506000610677565b8561044057506000610677565b60408051601960f81b6020808301919091526000602183018190523060601b60228401526001600160c01b031960c08a901b166036840152603e8084018b905284518085039091018152605e909301909352815191012090805b86518110156106715760006001848984815181106104b457fe5b60200260200101518985815181106104c857fe5b60200260200101518986815181106104dc57fe5b602002602001015160405160008152602001604052604051808581526020018460ff1660ff1681526020018381526020018281526020019450505050506020604051602081039080840390855afa15801561053b573d6000803e3d6000fd5b505060408051601f1901516001600160a01b03811660009081526020819052919091205490925060ff16905061057057600080fd5b826001600160a01b0316816001600160a01b03
161161058e57600080fd5b8092508867ffffffffffffffff167fce51ffa16246bcaf0899f6504f473cd0114f430f566cef71ab7e03d3dde42a418b8a85815181106105ca57fe5b60200260200101518a86815181106105de57fe5b60200260200101518a87815181106105f257fe5b6020026020010151604051808581526020018460ff1660ff16815260200183815260200182815260200194505050505060405180910390a260075482600101106106685750505060048790555050436003556002805467ffffffffffffffff191667ffffffffffffffff86161790556001610677565b5060010161049a565b50600080fd5b97965050505050505056fea265627a7a723058207f6a191ce575596a2f1e907c8c0a01003d16b69fb2c4f432d10878e8c0a99a0264736f6c634300050a0032`
var CheckpointOracleBin = "0x608060405234801561001057600080fd5b506040516108703803806108708339818101604052608081101561003357600080fd5b810190808051604051939291908464010000000082111561005357600080fd5b90830190602082018581111561006857600080fd5b825186602082028301116401000000008211171561008557600080fd5b82525081516020918201928201910280838360005b838110156100b257818101518382015260200161009a565b50505050919091016040908152602083015190830151606090930151909450919250600090505b84518110156101855760016000808784815181106100f357fe5b60200260200101516001600160a01b03166001600160a01b0316815260200190815260200160002060006101000a81548160ff021916908315150217905550600185828151811061014057fe5b60209081029190910181015182546001808201855560009485529290932090920180546001600160a01b0319166001600160a01b0390931692909217909155016100d9565b50600592909255600655600755506106ce806101a26000396000f3fe608060405234801561001057600080fd5b50600436106100415760003560e01c806345848dfc146100465780634d6a304c1461009e578063d459fc46146100cf575b600080fd5b61004e6102b0565b60408051602080825283518183015283519192839290830191858101910280838360005b8381101561008a578181015183820152602001610072565b505050509050019250505060405180910390f35b6100a6610365565b6040805167ffffffffffffffff9094168452602084019290925282820152519081900360600190f35b61029c600480360360e08110156100e557600080fd5b81359160208101359160408201359167ffffffffffffffff6060820135169181019060a08101608082013564010000000081111561012257600080fd5b82018360208201111561013457600080fd5b8035906020019184602083028401116401000000008311171561015657600080fd5b91908080602002602001604051908101604052809392919081815260200183836020028082843760009201919091525092959493602081019350359150506401000000008111156101a657600080fd5b8201836020820111156101b857600080fd5b803590602001918460208302840111640100000000831117156101da57600080fd5b919080806020026020016040519081016040528093929190818152602001838360200280828437600092019190915250929594936020810193503591505064010000000081111561022a57600080fd5b82018360208201111561023c57600080fd5b8035906020019184602083028401116401000000008311171561025e57600080fd5b919080806020026020016040519081016040528093929190818152602001838360200280828437600092019190915250929550610380945050505050565b604080519115158252519081900360200190f35b600154606090819067ffffffffffffffff811180156102ce57600080fd5b506040519080825280602002602001820160405280156102f8578160200160208202803683370190505b50905060005b60015481101561035f576001818154811061031557fe5b9060005260206000200160009054906101000a90046001600160a01b031682828151811061033f57fe5b6001600160a01b03909216602092830291909101909101526001016102fe565b50905090565b60025460045460035467ffffffffffffffff90921691909192565b3360009081526020819052604081205460ff1661039c57600080fd5b868840146103a957600080fd5b82518451146103b757600080fd5b81518451146103c557600080fd5b6006546005548660010167ffffffffffffffff1602014310156103ea5750600061068d565b60025467ffffffffffffffff908116908616101561040a5750600061068d565b60025467ffffffffffffffff868116911614801561043c575067ffffffffffffffff851615158061043c575060035415155b156104495750600061068d565b856104565750600061068d565b60408051601960f81b6020808301919091526000602183018190523060601b60228401526001600160c01b031960c08a901b166036840152603e8084018b905284518085039091018152605e909301909352815191012090805b86518110156106875760006001848984815181106104ca57fe5b60200260200101518985815181106104de57fe5b60200260200101518986815181106104f257fe5b602002602001015160405160008152602001604052604051808581526020018460ff1660ff1681526020018381526020018281526020019450505050506020604051602081039080840390855afa
158015610551573d6000803e3d6000fd5b505060408051601f1901516001600160a01b03811660009081526020819052919091205490925060ff16905061058657600080fd5b826001600160a01b0316816001600160a01b0316116105a457600080fd5b8092508867ffffffffffffffff167fce51ffa16246bcaf0899f6504f473cd0114f430f566cef71ab7e03d3dde42a418b8a85815181106105e057fe5b60200260200101518a86815181106105f457fe5b60200260200101518a878151811061060857fe5b6020026020010151604051808581526020018460ff1660ff16815260200183815260200182815260200194505050505060405180910390a2600754826001011061067e5750505060048790555050436003556002805467ffffffffffffffff191667ffffffffffffffff8616179055600161068d565b506001016104b0565b50600080fd5b97965050505050505056fea26469706673582212202ddf9eda76bf59c0fc65584c0b22d84ecef2c703765de60439596d6ac34c2b7264736f6c634300060b0033"

// DeployCheckpointOracle deploys a new Ethereum contract, binding an instance of CheckpointOracle to it.
func DeployCheckpointOracle(auth *bind.TransactOpts, backend bind.ContractBackend, _adminlist []common.Address, _sectionSize *big.Int, _processConfirms *big.Int, _threshold *big.Int) (common.Address, *types.Transaction, *CheckpointOracle, error) {
@@ -38,6 +45,7 @@ func DeployCheckpointOracle(auth *bind.TransactOpts, backend bind.ContractBacken
if err != nil {
return common.Address{}, nil, nil, err
}

address, tx, contract, err := bind.DeployContract(auth, parsed, common.FromHex(CheckpointOracleBin), backend, _adminlist, _sectionSize, _processConfirms, _threshold)
if err != nil {
return common.Address{}, nil, nil, err
@@ -153,7 +161,7 @@ func bindCheckpointOracle(address common.Address, caller bind.ContractCaller, tr
// sets the output to result. The result type might be a single field for simple
// returns, a slice of interfaces for anonymous returns and a struct for named
// returns.
func (_CheckpointOracle *CheckpointOracleRaw) Call(opts *bind.CallOpts, result interface{}, method string, params ...interface{}) error {
func (_CheckpointOracle *CheckpointOracleRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error {
return _CheckpointOracle.Contract.CheckpointOracleCaller.contract.Call(opts, result, method, params...)
}

@@ -172,7 +180,7 @@ func (_CheckpointOracle *CheckpointOracleRaw) Transact(opts *bind.TransactOpts,
// sets the output to result. The result type might be a single field for simple
// returns, a slice of interfaces for anonymous returns and a struct for named
// returns.
func (_CheckpointOracle *CheckpointOracleCallerRaw) Call(opts *bind.CallOpts, result interface{}, method string, params ...interface{}) error {
func (_CheckpointOracle *CheckpointOracleCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error {
return _CheckpointOracle.Contract.contract.Call(opts, result, method, params...)
}

@@ -189,58 +197,64 @@ func (_CheckpointOracle *CheckpointOracleTransactorRaw) Transact(opts *bind.Tran

// GetAllAdmin is a free data retrieval call binding the contract method 0x45848dfc.
//
// Solidity: function GetAllAdmin() constant returns(address[])
// Solidity: function GetAllAdmin() view returns(address[])
func (_CheckpointOracle *CheckpointOracleCaller) GetAllAdmin(opts *bind.CallOpts) ([]common.Address, error) {
var (
ret0 = new([]common.Address)
)
out := ret0
err := _CheckpointOracle.contract.Call(opts, out, "GetAllAdmin")
return *ret0, err
var out []interface{}
err := _CheckpointOracle.contract.Call(opts, &out, "GetAllAdmin")

if err != nil {
return *new([]common.Address), err
}

out0 := *abi.ConvertType(out[0], new([]common.Address)).(*[]common.Address)

return out0, err

}

// GetAllAdmin is a free data retrieval call binding the contract method 0x45848dfc.
//
// Solidity: function GetAllAdmin() constant returns(address[])
// Solidity: function GetAllAdmin() view returns(address[])
func (_CheckpointOracle *CheckpointOracleSession) GetAllAdmin() ([]common.Address, error) {
return _CheckpointOracle.Contract.GetAllAdmin(&_CheckpointOracle.CallOpts)
}

// GetAllAdmin is a free data retrieval call binding the contract method 0x45848dfc.
//
// Solidity: function GetAllAdmin() constant returns(address[])
// Solidity: function GetAllAdmin() view returns(address[])
func (_CheckpointOracle *CheckpointOracleCallerSession) GetAllAdmin() ([]common.Address, error) {
return _CheckpointOracle.Contract.GetAllAdmin(&_CheckpointOracle.CallOpts)
}

// GetLatestCheckpoint is a free data retrieval call binding the contract method 0x4d6a304c.
//
// Solidity: function GetLatestCheckpoint() constant returns(uint64, bytes32, uint256)
// Solidity: function GetLatestCheckpoint() view returns(uint64, bytes32, uint256)
func (_CheckpointOracle *CheckpointOracleCaller) GetLatestCheckpoint(opts *bind.CallOpts) (uint64, [32]byte, *big.Int, error) {
var (
ret0 = new(uint64)
ret1 = new([32]byte)
ret2 = new(*big.Int)
)
out := &[]interface{}{
ret0,
ret1,
ret2,
var out []interface{}
err := _CheckpointOracle.contract.Call(opts, &out, "GetLatestCheckpoint")

if err != nil {
return *new(uint64), *new([32]byte), *new(*big.Int), err
}
err := _CheckpointOracle.contract.Call(opts, out, "GetLatestCheckpoint")
return *ret0, *ret1, *ret2, err

out0 := *abi.ConvertType(out[0], new(uint64)).(*uint64)
out1 := *abi.ConvertType(out[1], new([32]byte)).(*[32]byte)
out2 := *abi.ConvertType(out[2], new(*big.Int)).(**big.Int)

return out0, out1, out2, err

}

// GetLatestCheckpoint is a free data retrieval call binding the contract method 0x4d6a304c.
//
// Solidity: function GetLatestCheckpoint() constant returns(uint64, bytes32, uint256)
// Solidity: function GetLatestCheckpoint() view returns(uint64, bytes32, uint256)
func (_CheckpointOracle *CheckpointOracleSession) GetLatestCheckpoint() (uint64, [32]byte, *big.Int, error) {
return _CheckpointOracle.Contract.GetLatestCheckpoint(&_CheckpointOracle.CallOpts)
}

// GetLatestCheckpoint is a free data retrieval call binding the contract method 0x4d6a304c.
//
// Solidity: function GetLatestCheckpoint() constant returns(uint64, bytes32, uint256)
// Solidity: function GetLatestCheckpoint() view returns(uint64, bytes32, uint256)
func (_CheckpointOracle *CheckpointOracleCallerSession) GetLatestCheckpoint() (uint64, [32]byte, *big.Int, error) {
return _CheckpointOracle.Contract.GetLatestCheckpoint(&_CheckpointOracle.CallOpts)
}

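From a user's point of view the regenerated binding is called the same way as before; only the internal unpacking changed to the *[]interface{} plus abi.ConvertType scheme shown above. A hedged sketch of reading the oracle through the binding, assuming the usual abigen constructor NewCheckpointOracle exists alongside the methods in this diff and that the snippet lives next to the generated package:

```go
package sketch

import (
	"github.com/ethereum/go-ethereum/accounts/abi/bind"
	"github.com/ethereum/go-ethereum/common"
)

// latestCheckpoint returns the section index and checkpoint hash published by
// the oracle at addr; backend is any bind.ContractBackend (e.g. an ethclient).
func latestCheckpoint(addr common.Address, backend bind.ContractBackend) (uint64, [32]byte, error) {
	oracle, err := NewCheckpointOracle(addr, backend) // assumed abigen-generated constructor
	if err != nil {
		return 0, [32]byte{}, err
	}
	index, hash, _, err := oracle.GetLatestCheckpoint(&bind.CallOpts{})
	return index, hash, err
}
```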
@@ -1,4 +1,4 @@
pragma solidity ^0.5.10;
pragma solidity ^0.6.0;

/**
 * @title CheckpointOracle

@@ -23,6 +23,7 @@ import (
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/trie"
)

// BlockValidator is responsible for validating block headers, uncles and
@@ -61,7 +62,7 @@ func (v *BlockValidator) ValidateBody(block *types.Block) error {
if hash := types.CalcUncleHash(block.Uncles()); hash != header.UncleHash {
return fmt.Errorf("uncle root hash mismatch: have %x, want %x", hash, header.UncleHash)
}
if hash := types.DeriveSha(block.Transactions()); hash != header.TxHash {
if hash := types.DeriveSha(block.Transactions(), trie.NewStackTrie(nil)); hash != header.TxHash {
return fmt.Errorf("transaction root hash mismatch: have %x, want %x", hash, header.TxHash)
}
if !v.bc.HasBlockAndState(block.ParentHash(), block.NumberU64()-1) {
@@ -88,8 +89,8 @@ func (v *BlockValidator) ValidateState(block *types.Block, statedb *state.StateD
if rbloom != header.Bloom {
return fmt.Errorf("invalid bloom (remote: %x local: %x)", header.Bloom, rbloom)
}
// Tre receipt Trie's root (R = (Tr [[H1, R1], ... [Hn, R1]]))
receiptSha := types.DeriveSha(receipts)
// Tre receipt Trie's root (R = (Tr [[H1, R1], ... [Hn, Rn]]))
receiptSha := types.DeriveSha(receipts, trie.NewStackTrie(nil))
if receiptSha != header.ReceiptHash {
return fmt.Errorf("invalid receipt root hash (remote: %x local: %x)", header.ReceiptHash, receiptSha)
}

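DeriveSha now takes the trie implementation explicitly, and the validator passes a StackTrie because only the root hash is needed rather than a full node database. A small sketch of recomputing a block's transaction root the same way:

```go
package sketch

import (
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/trie"
)

// txRootMatches recomputes the transaction root with a throwaway StackTrie and
// compares it against the value committed in the block header.
func txRootMatches(block *types.Block) bool {
	return types.DeriveSha(block.Transactions(), trie.NewStackTrie(nil)) == block.TxHash()
}
```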
@@ -70,8 +70,11 @@ var (
blockValidationTimer = metrics.NewRegisteredTimer("chain/validation", nil)
blockExecutionTimer = metrics.NewRegisteredTimer("chain/execution", nil)
blockWriteTimer = metrics.NewRegisteredTimer("chain/write", nil)
blockReorgAddMeter = metrics.NewRegisteredMeter("chain/reorg/drop", nil)
blockReorgDropMeter = metrics.NewRegisteredMeter("chain/reorg/add", nil)

blockReorgMeter = metrics.NewRegisteredMeter("chain/reorg/executes", nil)
blockReorgAddMeter = metrics.NewRegisteredMeter("chain/reorg/add", nil)
blockReorgDropMeter = metrics.NewRegisteredMeter("chain/reorg/drop", nil)
blockReorgInvalidatedTx = metrics.NewRegisteredMeter("chain/reorg/invalidTx", nil)

blockPrefetchExecuteTimer = metrics.NewRegisteredTimer("chain/prefetch/executes", nil)
blockPrefetchInterruptMeter = metrics.NewRegisteredMeter("chain/prefetch/interrupts", nil)
@@ -109,13 +112,18 @@ const (
// - Version 7
// The following incompatible database changes were added:
// * Use freezer as the ancient database to maintain all ancient data
BlockChainVersion uint64 = 7
// - Version 8
// The following incompatible database changes were added:
// * New scheme for contract code in order to separate the codes and trie nodes
BlockChainVersion uint64 = 8
)

// CacheConfig contains the configuration values for the trie caching/pruning
// that's resident in a blockchain.
type CacheConfig struct {
TrieCleanLimit int // Memory allowance (MB) to use for caching trie nodes in memory
TrieCleanJournal string // Disk journal for saving clean cache entries.
TrieCleanRejournal time.Duration // Time interval to dump clean cache to disk periodically
TrieCleanNoPrefetch bool // Whether to disable heuristic state prefetching for followup blocks
TrieDirtyLimit int // Memory limit (MB) at which to start flushing dirty trie nodes to disk
TrieDirtyDisabled bool // Whether to disable trie write caching and GC altogether (archive node)
@@ -125,6 +133,16 @@ type CacheConfig struct {
SnapshotWait bool // Wait for snapshot construction on startup. TODO(karalabe): This is a dirty hack for testing, nuke it
}

// defaultCacheConfig are the default caching values if none are specified by the
// user (also used during testing).
var defaultCacheConfig = &CacheConfig{
TrieCleanLimit: 256,
TrieDirtyLimit: 256,
TrieTimeLimit:  5 * time.Minute,
SnapshotLimit:  256,
SnapshotWait:   true,
}

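The new TrieCleanJournal and TrieCleanRejournal fields opt a node into persisting its clean trie cache and re-dumping it on a timer (anything under a minute is sanitized up to a minute by the constructor further down). A hedged example of a config that enables the journal; the path and interval are illustrative:

```go
package sketch

import (
	"time"

	"github.com/ethereum/go-ethereum/core"
)

// journallingCacheConfig mirrors defaultCacheConfig but persists the clean
// cache to a journal directory and refreshes it every hour.
func journallingCacheConfig() *core.CacheConfig {
	return &core.CacheConfig{
		TrieCleanLimit:     256,
		TrieCleanJournal:   "triecache", // directory for the persisted clean cache
		TrieCleanRejournal: time.Hour,
		TrieDirtyLimit:     256,
		TrieTimeLimit:      5 * time.Minute,
		SnapshotLimit:      256,
		SnapshotWait:       true,
	}
}
```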
// BlockChain represents the canonical chain given a database with a genesis
// block. The Blockchain manages chain imports, reverts, chain reorganisations.
//
@@ -199,13 +217,7 @@ type BlockChain struct {
// Processor.
func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *params.ChainConfig, engine consensus.Engine, vmConfig vm.Config, shouldPreserve func(block *types.Block) bool, txLookupLimit *uint64) (*BlockChain, error) {
if cacheConfig == nil {
cacheConfig = &CacheConfig{
TrieCleanLimit: 256,
TrieDirtyLimit: 256,
TrieTimeLimit:  5 * time.Minute,
SnapshotLimit:  256,
SnapshotWait:   true,
}
cacheConfig = defaultCacheConfig
}
bodyCache, _ := lru.New(bodyCacheLimit)
bodyRLPCache, _ := lru.New(bodyCacheLimit)
@@ -220,7 +232,7 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *par
cacheConfig: cacheConfig,
db: db,
triegc: prque.New(nil),
stateCache: state.NewDatabaseWithCache(db, cacheConfig.TrieCleanLimit),
stateCache: state.NewDatabaseWithCache(db, cacheConfig.TrieCleanLimit, cacheConfig.TrieCleanJournal),
quit: make(chan struct{}),
shouldPreserve: shouldPreserve,
bodyCache: bodyCache,
@@ -263,15 +275,18 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *par
txIndexBlock = frozen
}
}

if err := bc.loadLastState(); err != nil {
return nil, err
}
// The first thing the node will do is reconstruct the verification data for
// the head block (ethash cache or clique voting snapshot). Might as well do
// it in advance.
bc.engine.VerifyHeader(bc, bc.CurrentHeader(), true)

// Make sure the state associated with the block is available
head := bc.CurrentBlock()
if _, err := state.New(head.Root(), bc.stateCache, bc.snaps); err != nil {
log.Warn("Head state missing, repairing", "number", head.Number(), "hash", head.Hash())
if err := bc.SetHead(head.NumberU64()); err != nil {
return nil, err
}
}
// Ensure that a previous crash in SetHead doesn't leave extra ancients
if frozen, err := bc.db.Ancients(); err == nil && frozen > 0 {
var (
needRewind bool
@@ -281,7 +296,7 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *par
// blockchain repair. If the head full block is even lower than the ancient
// chain, truncate the ancient store.
fullBlock := bc.CurrentBlock()
if fullBlock != nil && fullBlock != bc.genesisBlock && fullBlock.NumberU64() < frozen-1 {
if fullBlock != nil && fullBlock.Hash() != bc.genesisBlock.Hash() && fullBlock.NumberU64() < frozen-1 {
needRewind = true
low = fullBlock.NumberU64()
}
@@ -296,15 +311,17 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *par
}
}
if needRewind {
var hashes []common.Hash
previous := bc.CurrentHeader().Number.Uint64()
for i := low + 1; i <= bc.CurrentHeader().Number.Uint64(); i++ {
hashes = append(hashes, rawdb.ReadCanonicalHash(bc.db, i))
log.Error("Truncating ancient chain", "from", bc.CurrentHeader().Number.Uint64(), "to", low)
if err := bc.SetHead(low); err != nil {
return nil, err
}
bc.Rollback(hashes)
log.Warn("Truncate ancient chain", "from", previous, "to", low)
}
}
// The first thing the node will do is reconstruct the verification data for
// the head block (ethash cache or clique voting snapshot). Might as well do
// it in advance.
bc.engine.VerifyHeader(bc, bc.CurrentHeader(), true)

// Check the current state of the block hashes and make sure that we do not have any of the bad blocks in our chain
for hash := range BadHashes {
if header := bc.GetHeaderByHash(hash); header != nil {
@@ -313,7 +330,9 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *par
// make sure the headerByNumber (if present) is in our current canonical chain
if headerByNumber != nil && headerByNumber.Hash() == header.Hash() {
log.Error("Found bad hash, rewinding chain", "number", header.Number, "hash", header.ParentHash)
bc.SetHead(header.Number.Uint64() - 1)
if err := bc.SetHead(header.Number.Uint64() - 1); err != nil {
return nil, err
}
log.Error("Chain rewind was successful, resuming normal operation")
}
}
@@ -328,6 +347,19 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *par
bc.txLookupLimit = *txLookupLimit
go bc.maintainTxIndex(txIndexBlock)
}
// If periodic cache journal is required, spin it up.
if bc.cacheConfig.TrieCleanRejournal > 0 {
if bc.cacheConfig.TrieCleanRejournal < time.Minute {
log.Warn("Sanitizing invalid trie cache journal time", "provided", bc.cacheConfig.TrieCleanRejournal, "updated", time.Minute)
bc.cacheConfig.TrieCleanRejournal = time.Minute
}
triedb := bc.stateCache.TrieDB()
bc.wg.Add(1)
go func() {
defer bc.wg.Done()
triedb.SaveCachePeriodically(bc.cacheConfig.TrieCleanJournal, bc.cacheConfig.TrieCleanRejournal, bc.quit)
}()
}
return bc, nil
}

@@ -367,15 +399,6 @@ func (bc *BlockChain) loadLastState() error {
|
||||
log.Warn("Head block missing, resetting chain", "hash", head)
|
||||
return bc.Reset()
|
||||
}
|
||||
// Make sure the state associated with the block is available
|
||||
if _, err := state.New(currentBlock.Root(), bc.stateCache, bc.snaps); err != nil {
|
||||
// Dangling block without a state associated, init from scratch
|
||||
log.Warn("Head state missing, repairing chain", "number", currentBlock.Number(), "hash", currentBlock.Hash())
|
||||
if err := bc.repair(¤tBlock); err != nil {
|
||||
return err
|
||||
}
|
||||
rawdb.WriteHeadBlockHash(bc.db, currentBlock.Hash())
|
||||
}
|
||||
// Everything seems to be fine, set as the head block
|
||||
bc.currentBlock.Store(currentBlock)
|
||||
headBlockGauge.Update(int64(currentBlock.NumberU64()))
|
||||
@@ -409,30 +432,48 @@ func (bc *BlockChain) loadLastState() error {
|
||||
log.Info("Loaded most recent local header", "number", currentHeader.Number, "hash", currentHeader.Hash(), "td", headerTd, "age", common.PrettyAge(time.Unix(int64(currentHeader.Time), 0)))
|
||||
log.Info("Loaded most recent local full block", "number", currentBlock.Number(), "hash", currentBlock.Hash(), "td", blockTd, "age", common.PrettyAge(time.Unix(int64(currentBlock.Time()), 0)))
|
||||
log.Info("Loaded most recent local fast block", "number", currentFastBlock.Number(), "hash", currentFastBlock.Hash(), "td", fastTd, "age", common.PrettyAge(time.Unix(int64(currentFastBlock.Time()), 0)))
|
||||
|
||||
if pivot := rawdb.ReadLastPivotNumber(bc.db); pivot != nil {
|
||||
log.Info("Loaded last fast-sync pivot marker", "number", *pivot)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetHead rewinds the local chain to a new head. In the case of headers, everything
|
||||
// above the new head will be deleted and the new one set. In the case of blocks
|
||||
// though, the head may be further rewound if block bodies are missing (non-archive
|
||||
// nodes after a fast sync).
|
||||
// SetHead rewinds the local chain to a new head. Depending on whether the node
|
||||
// was fast synced or full synced and in which state, the method will try to
|
||||
// delete minimal data from disk whilst retaining chain consistency.
|
||||
func (bc *BlockChain) SetHead(head uint64) error {
|
||||
log.Warn("Rewinding blockchain", "target", head)
|
||||
|
||||
bc.chainmu.Lock()
|
||||
defer bc.chainmu.Unlock()
|
||||
|
||||
updateFn := func(db ethdb.KeyValueWriter, header *types.Header) {
|
||||
// Rewind the block chain, ensuring we don't end up with a stateless head block
|
||||
if currentBlock := bc.CurrentBlock(); currentBlock != nil && header.Number.Uint64() < currentBlock.NumberU64() {
|
||||
// Retrieve the last pivot block to short circuit rollbacks beyond it and the
|
||||
// current freezer limit to start nuking id underflown
|
||||
pivot := rawdb.ReadLastPivotNumber(bc.db)
|
||||
frozen, _ := bc.db.Ancients()
|
||||
|
||||
updateFn := func(db ethdb.KeyValueWriter, header *types.Header) (uint64, bool) {
|
||||
// Rewind the block chain, ensuring we don't end up with a stateless head
|
||||
// block. Note, depth equality is permitted to allow using SetHead as a
|
||||
// chain reparation mechanism without deleting any data!
|
||||
if currentBlock := bc.CurrentBlock(); currentBlock != nil && header.Number.Uint64() <= currentBlock.NumberU64() {
|
||||
newHeadBlock := bc.GetBlock(header.Hash(), header.Number.Uint64())
|
||||
if newHeadBlock == nil {
|
||||
log.Error("Gap in the chain, rewinding to genesis", "number", header.Number, "hash", header.Hash())
|
||||
newHeadBlock = bc.genesisBlock
|
||||
} else {
|
||||
if _, err := state.New(newHeadBlock.Root(), bc.stateCache, bc.snaps); err != nil {
|
||||
// Rewound state missing, rolled back to before pivot, reset to genesis
|
||||
newHeadBlock = bc.genesisBlock
|
||||
// Block exists, keep rewinding until we find one with state
|
||||
for {
|
||||
if _, err := state.New(newHeadBlock.Root(), bc.stateCache, bc.snaps); err != nil {
|
||||
log.Trace("Block state missing, rewinding further", "number", newHeadBlock.NumberU64(), "hash", newHeadBlock.Hash())
|
||||
if pivot == nil || newHeadBlock.NumberU64() > *pivot {
|
||||
newHeadBlock = bc.GetBlock(newHeadBlock.ParentHash(), newHeadBlock.NumberU64()-1)
|
||||
continue
|
||||
} else {
|
||||
log.Trace("Rewind passed pivot, aiming genesis", "number", newHeadBlock.NumberU64(), "hash", newHeadBlock.Hash(), "pivot", *pivot)
|
||||
newHeadBlock = bc.genesisBlock
|
||||
}
|
||||
}
|
||||
log.Debug("Rewound to block with state", "number", newHeadBlock.NumberU64(), "hash", newHeadBlock.Hash())
|
||||
break
|
||||
}
|
||||
}
|
||||
rawdb.WriteHeadBlockHash(db, newHeadBlock.Hash())
|
||||
@@ -444,7 +485,6 @@ func (bc *BlockChain) SetHead(head uint64) error {
bc.currentBlock.Store(newHeadBlock)
headBlockGauge.Update(int64(newHeadBlock.NumberU64()))
}

// Rewind the fast block in a simpleton way to the target head
if currentFastBlock := bc.CurrentFastBlock(); currentFastBlock != nil && header.Number.Uint64() < currentFastBlock.NumberU64() {
newHeadFastBlock := bc.GetBlock(header.Hash(), header.Number.Uint64())
@@ -461,8 +501,17 @@ func (bc *BlockChain) SetHead(head uint64) error {
bc.currentFastBlock.Store(newHeadFastBlock)
headFastBlockGauge.Update(int64(newHeadFastBlock.NumberU64()))
}
}
head := bc.CurrentBlock().NumberU64()

// If setHead underflown the freezer threshold and the block processing
// intent afterwards is full block importing, delete the chain segment
// between the stateful-block and the sethead target.
var wipe bool
if head+1 < frozen {
wipe = pivot == nil || head >= *pivot
}
return head, wipe // Only force wipe if full synced
}
// Rewind the header chain, deleting all block bodies until then
delFn := func(db ethdb.KeyValueWriter, hash common.Hash, num uint64) {
// Ignore the error here since light client won't hit this path
@@ -470,10 +519,9 @@ func (bc *BlockChain) SetHead(head uint64) error {
if num+1 <= frozen {
// Truncate all relative data(header, total difficulty, body, receipt
// and canonical hash) from ancient store.
if err := bc.db.TruncateAncients(num + 1); err != nil {
if err := bc.db.TruncateAncients(num); err != nil {
log.Crit("Failed to truncate ancient data", "number", num, "err", err)
}

// Remove the hash <-> number mapping from the active store.
rawdb.DeleteHeaderNumber(db, hash)
} else {
@@ -485,8 +533,18 @@ func (bc *BlockChain) SetHead(head uint64) error {
}
// Todo(rjl493456442) txlookup, bloombits, etc
}
bc.hc.SetHead(head, updateFn, delFn)

// If SetHead was only called as a chain reparation method, try to skip
// touching the header chain altogether, unless the freezer is broken
if block := bc.CurrentBlock(); block.NumberU64() == head {
if target, force := updateFn(bc.db, block.Header()); force {
bc.hc.SetHead(target, updateFn, delFn)
}
} else {
// Rewind the chain to the requested head and keep going backwards until a
// block with a state is found or fast sync pivot is passed
log.Warn("Rewinding blockchain", "target", head)
bc.hc.SetHead(head, updateFn, delFn)
}
// Clear out any stale content from the caches
bc.bodyCache.Purge()
bc.bodyRLPCache.Purge()
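The `(head, wipe)` pair returned by `updateFn` drives whether the segment between the frozen chain and the new head gets deleted: only when the rewind dropped below the freezer threshold and the node is (or will be) full syncing. A standalone sketch of that decision, extracted from the code above (the helper name and parameters are illustrative, not go-ethereum API):

```go
// Sketch: the (head, wipe) decision made by updateFn above, extracted into a
// standalone helper. The names headAfterRewind, frozenBlocks and pivotBlock
// are illustrative, not part of the go-ethereum API.
package main

import "fmt"

// shouldWipe reports whether the segment between the frozen (ancient) chain
// and the new head must be deleted: only when the rewind went below the
// freezer threshold and the head is at or past the fast-sync pivot (or there
// is no pivot at all, i.e. the node is full syncing).
func shouldWipe(headAfterRewind, frozenBlocks uint64, pivotBlock *uint64) bool {
	if headAfterRewind+1 >= frozenBlocks {
		return false // still above the freezer threshold, nothing to wipe
	}
	return pivotBlock == nil || headAfterRewind >= *pivotBlock
}

func main() {
	pivot := uint64(90)
	fmt.Println(shouldWipe(100, 95, &pivot)) // false: head above the frozen count
	fmt.Println(shouldWipe(50, 95, &pivot))  // false: below pivot, fast sync will refill
	fmt.Println(shouldWipe(92, 95, &pivot))  // true: full sync, wipe the gap
}
```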
@@ -609,28 +667,6 @@ func (bc *BlockChain) ResetWithGenesisBlock(genesis *types.Block) error {
return nil
}

// repair tries to repair the current blockchain by rolling back the current block
// until one with associated state is found. This is needed to fix incomplete db
// writes caused either by crashes/power outages, or simply non-committed tries.
//
// This method only rolls back the current block. The current header and current
// fast block are left intact.
func (bc *BlockChain) repair(head **types.Block) error {
for {
// Abort if we've rewound to a head block that does have associated state
if _, err := state.New((*head).Root(), bc.stateCache, bc.snaps); err == nil {
log.Info("Rewound blockchain to past state", "number", (*head).Number(), "hash", (*head).Hash())
return nil
}
// Otherwise rewind one block and recheck state availability there
block := bc.GetBlock((*head).ParentHash(), (*head).NumberU64()-1)
if block == nil {
return fmt.Errorf("missing block %d [%x]", (*head).NumberU64()-1, (*head).ParentHash())
}
*head = block
}
}

// Export writes the active chain to the given writer.
func (bc *BlockChain) Export(w io.Writer) error {
return bc.ExportN(w, uint64(0), bc.CurrentBlock().NumberU64())
@@ -676,7 +712,7 @@ func (bc *BlockChain) writeHeadBlock(block *types.Block) {
// Add the block to the canonical chain number scheme and mark as the head
batch := bc.db.NewBatch()
rawdb.WriteCanonicalHash(batch, block.Hash(), block.NumberU64())
rawdb.WriteTxLookupEntries(batch, block)
rawdb.WriteTxLookupEntriesByBlock(batch, block)
rawdb.WriteHeadBlockHash(batch, block.Hash())

// If the block is better than our head or is on a different chain, force update heads
@@ -862,12 +898,30 @@ func (bc *BlockChain) GetUnclesInChain(block *types.Block, length int) []*types.
return uncles
}

// TrieNode retrieves a blob of data associated with a trie node (or code hash)
// TrieNode retrieves a blob of data associated with a trie node
// either from ephemeral in-memory cache, or from persistent storage.
func (bc *BlockChain) TrieNode(hash common.Hash) ([]byte, error) {
return bc.stateCache.TrieDB().Node(hash)
}

// ContractCode retrieves a blob of data associated with a contract hash
// either from ephemeral in-memory cache, or from persistent storage.
func (bc *BlockChain) ContractCode(hash common.Hash) ([]byte, error) {
return bc.stateCache.ContractCode(common.Hash{}, hash)
}

// ContractCodeWithPrefix retrieves a blob of data associated with a contract
// hash either from ephemeral in-memory cache, or from persistent storage.
//
// If the code doesn't exist in the in-memory cache, check the storage with
// new code scheme.
func (bc *BlockChain) ContractCodeWithPrefix(hash common.Hash) ([]byte, error) {
type codeReader interface {
ContractCodeWithPrefix(addrHash, codeHash common.Hash) ([]byte, error)
}
return bc.stateCache.(codeReader).ContractCodeWithPrefix(common.Hash{}, hash)
}

// Stop stops the blockchain service. If any imports are currently in progress
// it will abort them using the procInterrupt.
func (bc *BlockChain) Stop() {
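ContractCodeWithPrefix differs from ContractCode only in which key scheme is consulted: the prefixed ("new scheme") entry only, versus prefixed first with a legacy fallback. A minimal sketch of the two lookups over a plain map, assuming a single-byte prefix; the "c" prefix and the map store are illustrative assumptions, not go-ethereum's actual key layout:

```go
// Minimal sketch of a prefixed contract-code scheme: legacy entries are keyed
// by the raw code hash, newer entries by prefix+hash. The "c" prefix and the
// map-backed store are assumptions for illustration only.
package main

import "fmt"

var codePrefix = []byte("c")

type kv map[string][]byte

// readCodeWithPrefix only consults the prefixed key (the new scheme).
func readCodeWithPrefix(db kv, hash []byte) []byte {
	return db[string(append(append([]byte{}, codePrefix...), hash...))]
}

// readCode tries the prefixed key first and falls back to the legacy key.
func readCode(db kv, hash []byte) []byte {
	if code := readCodeWithPrefix(db, hash); len(code) > 0 {
		return code
	}
	return db[string(hash)]
}

func main() {
	db := kv{}
	hash := []byte{0xab, 0xcd}
	db[string(hash)] = []byte("legacy bytecode") // stored under the old scheme

	fmt.Printf("%s\n", readCode(db, hash))           // found via the fallback
	fmt.Printf("%q\n", readCodeWithPrefix(db, hash)) // "" - prefixed key absent
}
```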
@@ -919,6 +973,12 @@ func (bc *BlockChain) Stop() {
log.Error("Dangling trie nodes after full cleanup")
}
}
// Ensure all live cached entries be saved into disk, so that we can skip
// cache warmup when node restarts.
if bc.cacheConfig.TrieCleanJournal != "" {
triedb := bc.stateCache.TrieDB()
triedb.SaveCache(bc.cacheConfig.TrieCleanJournal)
}
log.Info("Blockchain stopped")
}

@@ -961,52 +1021,6 @@ const (
SideStatTy
)

// Rollback is designed to remove a chain of links from the database that aren't
// certain enough to be valid.
func (bc *BlockChain) Rollback(chain []common.Hash) {
bc.chainmu.Lock()
defer bc.chainmu.Unlock()

batch := bc.db.NewBatch()
for i := len(chain) - 1; i >= 0; i-- {
hash := chain[i]

// Degrade the chain markers if they are explicitly reverted.
// In theory we should update all in-memory markers in the
// last step, however the direction of rollback is from high
// to low, so it's safe the update in-memory markers directly.
currentHeader := bc.hc.CurrentHeader()
if currentHeader.Hash() == hash {
newHeadHeader := bc.GetHeader(currentHeader.ParentHash, currentHeader.Number.Uint64()-1)
rawdb.WriteHeadHeaderHash(batch, currentHeader.ParentHash)
bc.hc.SetCurrentHeader(newHeadHeader)
}
if currentFastBlock := bc.CurrentFastBlock(); currentFastBlock.Hash() == hash {
newFastBlock := bc.GetBlock(currentFastBlock.ParentHash(), currentFastBlock.NumberU64()-1)
rawdb.WriteHeadFastBlockHash(batch, currentFastBlock.ParentHash())
bc.currentFastBlock.Store(newFastBlock)
headFastBlockGauge.Update(int64(newFastBlock.NumberU64()))
}
if currentBlock := bc.CurrentBlock(); currentBlock.Hash() == hash {
newBlock := bc.GetBlock(currentBlock.ParentHash(), currentBlock.NumberU64()-1)
rawdb.WriteHeadBlockHash(batch, currentBlock.ParentHash())
bc.currentBlock.Store(newBlock)
headBlockGauge.Update(int64(newBlock.NumberU64()))
}
}
if err := batch.Write(); err != nil {
log.Crit("Failed to rollback chain markers", "err", err)
}
// Truncate ancient data which exceeds the current header.
//
// Notably, it can happen that system crashes without truncating the ancient data
// but the head indicator has been updated in the active store. Regarding this issue,
// system will self recovery by truncating the extra data during the setup phase.
if err := bc.truncateAncient(bc.hc.CurrentHeader().Number.Uint64()); err != nil {
log.Crit("Truncate ancient store failed", "err", err)
}
}

// truncateAncient rewinds the blockchain to the specified header and deletes all
// data in the ancient store that exceeds the specified header.
func (bc *BlockChain) truncateAncient(head uint64) error {
@@ -1203,9 +1217,9 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [
// range. In this case, all tx indices of newly imported blocks should be
// generated.
if bc.txLookupLimit == 0 || ancientLimit <= bc.txLookupLimit || block.NumberU64() >= ancientLimit-bc.txLookupLimit {
rawdb.WriteTxLookupEntries(batch, block)
rawdb.WriteTxLookupEntriesByBlock(batch, block)
} else if rawdb.ReadTxIndexTail(bc.db) != nil {
rawdb.WriteTxLookupEntries(batch, block)
rawdb.WriteTxLookupEntriesByBlock(batch, block)
}
stats.processed++
}
@@ -1263,6 +1277,7 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [
}
// writeLive writes blockchain and corresponding receipt chain into active store.
writeLive := func(blockChain types.Blocks, receiptChain []types.Receipts) (int, error) {
skipPresenceCheck := false
batch := bc.db.NewBatch()
for i, block := range blockChain {
// Short circuit insertion if shutting down or processing failed
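The ancient-import branch above only writes a transaction-lookup index for blocks that fall inside the configured retention window. A standalone sketch of that predicate, copied from the condition in the hunk (the helper name is illustrative):

```go
// Sketch of the "should this ancient block get a tx-lookup index?" predicate
// used above. txLookupLimit == 0 means "index everything"; otherwise only the
// most recent txLookupLimit blocks below ancientLimit are indexed.
package main

import "fmt"

func wantTxIndex(blockNumber, ancientLimit, txLookupLimit uint64) bool {
	if txLookupLimit == 0 || ancientLimit <= txLookupLimit {
		return true // unlimited indexing, or the whole range fits the limit
	}
	return blockNumber >= ancientLimit-txLookupLimit
}

func main() {
	fmt.Println(wantTxIndex(10, 1000, 0))    // true: no limit configured
	fmt.Println(wantTxIndex(10, 1000, 100))  // false: outside the last 100 blocks
	fmt.Println(wantTxIndex(950, 1000, 100)) // true: inside the retention window
}
```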
@@ -1273,14 +1288,22 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [
if !bc.HasHeader(block.Hash(), block.NumberU64()) {
return i, fmt.Errorf("containing header #%d [%x…] unknown", block.Number(), block.Hash().Bytes()[:4])
}
if bc.HasBlock(block.Hash(), block.NumberU64()) {
stats.ignored++
continue
if !skipPresenceCheck {
// Ignore if the entire data is already known
if bc.HasBlock(block.Hash(), block.NumberU64()) {
stats.ignored++
continue
} else {
// If block N is not present, neither are the later blocks.
// This should be true, but if we are mistaken, the shortcut
// here will only cause overwriting of some existing data
skipPresenceCheck = true
}
}
// Write all the data out into the database
rawdb.WriteBody(batch, block.Hash(), block.NumberU64(), block.Body())
rawdb.WriteReceipts(batch, block.Hash(), block.NumberU64(), receiptChain[i])
rawdb.WriteTxLookupEntries(batch, block) // Always write tx indices for live blocks, we assume they are needed
rawdb.WriteTxLookupEntriesByBlock(batch, block) // Always write tx indices for live blocks, we assume they are needed

// Write everything belongs to the blocks into the database. So that
// we can ensure all components of body is completed(body, receipts,
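The skipPresenceCheck shortcut above relies on the batch being contiguous and ordered: once one block is found missing, no later block can already be present, so the presence lookups can stop. A stdlib-only sketch of the same pattern, with a map lookup standing in for HasBlock (the store and the counting helper are illustrative):

```go
// Sketch of the presence-check shortcut used in writeLive above. The store map
// and countWrites are illustrative; the point is that lookups stop after the
// first miss in an ordered, contiguous batch.
package main

import "fmt"

func countWrites(store map[uint64]bool, batch []uint64) (written, ignored, lookups int) {
	skipPresenceCheck := false
	for _, n := range batch {
		if !skipPresenceCheck {
			lookups++
			if store[n] {
				ignored++ // already known, skip it
				continue
			}
			// Block n is missing, so every later block must be missing too.
			skipPresenceCheck = true
		}
		written++
	}
	return
}

func main() {
	store := map[uint64]bool{1: true, 2: true}
	w, i, l := countWrites(store, []uint64{1, 2, 3, 4, 5})
	fmt.Println(w, i, l) // 3 written, 2 ignored, only 3 presence lookups
}
```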
@@ -1685,13 +1708,13 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, er
}
switch {
// First block is pruned, insert as sidechain and reorg only if TD grows enough
case err == consensus.ErrPrunedAncestor:
case errors.Is(err, consensus.ErrPrunedAncestor):
log.Debug("Pruned ancestor, inserting as sidechain", "number", block.Number(), "hash", block.Hash())
return bc.insertSideChain(block, it)

// First block is future, shove it (and all children) to the future queue (unknown ancestor)
case err == consensus.ErrFutureBlock || (err == consensus.ErrUnknownAncestor && bc.futureBlocks.Contains(it.first().ParentHash())):
for block != nil && (it.index == 0 || err == consensus.ErrUnknownAncestor) {
case errors.Is(err, consensus.ErrFutureBlock) || (errors.Is(err, consensus.ErrUnknownAncestor) && bc.futureBlocks.Contains(it.first().ParentHash())):
for block != nil && (it.index == 0 || errors.Is(err, consensus.ErrUnknownAncestor)) {
log.Debug("Future block, postponing import", "number", block.Number(), "hash", block.Hash())
if err := bc.addFutureBlock(block); err != nil {
return it.index, err
@@ -1874,13 +1897,13 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, er
stats.report(chain, it.index, dirty)
}
// Any blocks remaining here? The only ones we care about are the future ones
if block != nil && err == consensus.ErrFutureBlock {
if block != nil && errors.Is(err, consensus.ErrFutureBlock) {
if err := bc.addFutureBlock(block); err != nil {
return it.index, err
}
block, err = it.next()

for ; block != nil && err == consensus.ErrUnknownAncestor; block, err = it.next() {
for ; block != nil && errors.Is(err, consensus.ErrUnknownAncestor); block, err = it.next() {
if err := bc.addFutureBlock(block); err != nil {
return it.index, err
}
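The == to errors.Is migration in these hunks matters once consensus errors can arrive wrapped (for example via fmt.Errorf with %w): a plain comparison stops matching, while errors.Is unwraps. A self-contained sketch with a stand-in sentinel error:

```go
// Sketch of why the comparisons above were switched to errors.Is. The sentinel
// errPrunedAncestor is a stand-in for consensus.ErrPrunedAncestor.
package main

import (
	"errors"
	"fmt"
)

var errPrunedAncestor = errors.New("pruned ancestor")

func main() {
	wrapped := fmt.Errorf("block 42: %w", errPrunedAncestor)

	fmt.Println(wrapped == errPrunedAncestor)          // false: wrapping breaks ==
	fmt.Println(errors.Is(wrapped, errPrunedAncestor)) // true: errors.Is unwraps
}
```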
@@ -1908,7 +1931,7 @@ func (bc *BlockChain) insertSideChain(block *types.Block, it *insertIterator) (i
// ones. Any other errors means that the block is invalid, and should not be written
// to disk.
err := consensus.ErrPrunedAncestor
for ; block != nil && (err == consensus.ErrPrunedAncestor); block, err = it.next() {
for ; block != nil && errors.Is(err, consensus.ErrPrunedAncestor); block, err = it.next() {
// Check the canonical state root for that number
if number := block.NumberU64(); current.NumberU64() >= number {
canonical := bc.GetBlockByNumber(number)
@@ -2131,6 +2154,7 @@ func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
"drop", len(oldChain), "dropfrom", oldChain[0].Hash(), "add", len(newChain), "addfrom", newChain[0].Hash())
blockReorgAddMeter.Mark(int64(len(newChain)))
blockReorgDropMeter.Mark(int64(len(oldChain)))
blockReorgMeter.Mark(1)
} else {
log.Error("Impossible reorg, please file an issue", "oldnum", oldBlock.Number(), "oldhash", oldBlock.Hash(), "newnum", newBlock.Number(), "newhash", newBlock.Hash())
}
core/blockchain_repair_test.go (new file, 1653 lines): file diff suppressed because it is too large.
core/blockchain_sethead_test.go (new file, 1949 lines): file diff suppressed because it is too large.
@@ -36,6 +36,7 @@ import (
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/trie"
)

// So we can deterministically seed different blockchains
@@ -681,12 +682,12 @@ func TestFastVsFullChains(t *testing.T) {
}
if fblock, arblock, anblock := fast.GetBlockByHash(hash), archive.GetBlockByHash(hash), ancient.GetBlockByHash(hash); fblock.Hash() != arblock.Hash() || anblock.Hash() != arblock.Hash() {
t.Errorf("block #%d [%x]: block mismatch: fastdb %v, ancientdb %v, archivedb %v", num, hash, fblock, anblock, arblock)
} else if types.DeriveSha(fblock.Transactions()) != types.DeriveSha(arblock.Transactions()) || types.DeriveSha(anblock.Transactions()) != types.DeriveSha(arblock.Transactions()) {
} else if types.DeriveSha(fblock.Transactions(), new(trie.Trie)) != types.DeriveSha(arblock.Transactions(), new(trie.Trie)) || types.DeriveSha(anblock.Transactions(), new(trie.Trie)) != types.DeriveSha(arblock.Transactions(), new(trie.Trie)) {
t.Errorf("block #%d [%x]: transactions mismatch: fastdb %v, ancientdb %v, archivedb %v", num, hash, fblock.Transactions(), anblock.Transactions(), arblock.Transactions())
} else if types.CalcUncleHash(fblock.Uncles()) != types.CalcUncleHash(arblock.Uncles()) || types.CalcUncleHash(anblock.Uncles()) != types.CalcUncleHash(arblock.Uncles()) {
t.Errorf("block #%d [%x]: uncles mismatch: fastdb %v, ancientdb %v, archivedb %v", num, hash, fblock.Uncles(), anblock, arblock.Uncles())
}
if freceipts, anreceipts, areceipts := rawdb.ReadReceipts(fastDb, hash, *rawdb.ReadHeaderNumber(fastDb, hash), fast.Config()), rawdb.ReadReceipts(ancientDb, hash, *rawdb.ReadHeaderNumber(ancientDb, hash), fast.Config()), rawdb.ReadReceipts(archiveDb, hash, *rawdb.ReadHeaderNumber(archiveDb, hash), fast.Config()); types.DeriveSha(freceipts) != types.DeriveSha(areceipts) {
if freceipts, anreceipts, areceipts := rawdb.ReadReceipts(fastDb, hash, *rawdb.ReadHeaderNumber(fastDb, hash), fast.Config()), rawdb.ReadReceipts(ancientDb, hash, *rawdb.ReadHeaderNumber(ancientDb, hash), fast.Config()), rawdb.ReadReceipts(archiveDb, hash, *rawdb.ReadHeaderNumber(archiveDb, hash), fast.Config()); types.DeriveSha(freceipts, new(trie.Trie)) != types.DeriveSha(areceipts, new(trie.Trie)) {
t.Errorf("block #%d [%x]: receipts mismatch: fastdb %v, ancientdb %v, archivedb %v", num, hash, freceipts, anreceipts, areceipts)
}
}
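The test updates in this hunk track an API change where the hasher used to derive the transaction and receipt roots is passed in explicitly (here a fresh trie.Trie) instead of being created inside DeriveSha. A stdlib-only sketch of that dependency-injection pattern, with toy types in place of go-ethereum's list and trie (the sha256-based digest is purely illustrative, not how the real roots are computed):

```go
// Toy sketch of the "pass the hasher explicitly" pattern behind the
// types.DeriveSha signature change. The list/hasher types and the sha256-based
// digest are illustrative only.
package main

import (
	"crypto/sha256"
	"encoding/binary"
	"fmt"
)

type hasher interface {
	Update(key, value []byte)
	Hash() [32]byte
}

type concatHasher struct{ data []byte }

func (h *concatHasher) Update(key, value []byte) {
	h.data = append(append(h.data, key...), value...)
}

func (h *concatHasher) Hash() [32]byte { return sha256.Sum256(h.data) }

// deriveSha feeds index-keyed items into the caller-supplied hasher, so the
// caller (for example a test) controls exactly which hasher is used.
func deriveSha(items [][]byte, h hasher) [32]byte {
	for i, item := range items {
		var key [8]byte
		binary.BigEndian.PutUint64(key[:], uint64(i))
		h.Update(key[:], item)
	}
	return h.Hash()
}

func main() {
	txs := [][]byte{[]byte("tx0"), []byte("tx1")}
	fmt.Printf("%x\n", deriveSha(txs, new(concatHasher)))
}
```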
@@ -731,12 +732,12 @@ func TestLightVsFastVsFullChainHeads(t *testing.T) {
return db, func() { os.RemoveAll(dir) }
}
// Configure a subchain to roll back
remove := []common.Hash{}
for _, block := range blocks[height/2:] {
remove = append(remove, block.Hash())
}
remove := blocks[height/2].NumberU64()

// Create a small assertion method to check the three heads
assert := func(t *testing.T, kind string, chain *BlockChain, header uint64, fast uint64, block uint64) {
t.Helper()

if num := chain.CurrentBlock().NumberU64(); num != block {
t.Errorf("%s head block mismatch: have #%v, want #%v", kind, num, block)
}
@@ -750,14 +751,18 @@ func TestLightVsFastVsFullChainHeads(t *testing.T) {
// Import the chain as an archive node and ensure all pointers are updated
archiveDb, delfn := makeDb()
defer delfn()
archive, _ := NewBlockChain(archiveDb, nil, gspec.Config, ethash.NewFaker(), vm.Config{}, nil, nil)

archiveCaching := *defaultCacheConfig
archiveCaching.TrieDirtyDisabled = true

archive, _ := NewBlockChain(archiveDb, &archiveCaching, gspec.Config, ethash.NewFaker(), vm.Config{}, nil, nil)
if n, err := archive.InsertChain(blocks); err != nil {
t.Fatalf("failed to process block %d: %v", n, err)
}
defer archive.Stop()

assert(t, "archive", archive, height, height, height)
archive.Rollback(remove)
archive.SetHead(remove - 1)
assert(t, "archive", archive, height/2, height/2, height/2)

// Import the chain as a non-archive node and ensure all pointers are updated
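Across these test hunks the hash-list based Rollback calls are replaced by number-based SetHead calls: instead of enumerating every block hash from blocks[height/2:] onward, the tests rewind to the block just below the first one being dropped. A small sketch of that equivalence, with a hypothetical chain interface standing in for *core.BlockChain:

```go
// Sketch of the Rollback -> SetHead equivalence used by the updated tests.
// The rewinder interface, fakeChain and toy block type are hypothetical; only
// the arithmetic (new head = first dropped block number - 1) mirrors the change.
package main

import "fmt"

type block struct{ number uint64 }

type rewinder interface {
	SetHead(head uint64) error
}

type fakeChain struct{ head uint64 }

func (c *fakeChain) SetHead(head uint64) error { c.head = head; return nil }

// dropFrom rewinds so that blocks[from:] are no longer part of the chain,
// the same effect the old Rollback(hashes of blocks[from:]) call had.
func dropFrom(c rewinder, blocks []block, from int) error {
	return c.SetHead(blocks[from].number - 1)
}

func main() {
	blocks := make([]block, 8)
	for i := range blocks {
		blocks[i] = block{number: uint64(i + 1)} // block #0 is genesis
	}
	chain := &fakeChain{head: 8}
	_ = dropFrom(chain, blocks, len(blocks)/2)
	fmt.Println(chain.head) // 4: everything above block #4 is dropped
}
```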
@@ -777,7 +782,7 @@ func TestLightVsFastVsFullChainHeads(t *testing.T) {
t.Fatalf("failed to insert receipt %d: %v", n, err)
}
assert(t, "fast", fast, height, height, 0)
fast.Rollback(remove)
fast.SetHead(remove - 1)
assert(t, "fast", fast, height/2, height/2, 0)

// Import the chain as a ancient-first node and ensure all pointers are updated
@@ -793,12 +798,12 @@ func TestLightVsFastVsFullChainHeads(t *testing.T) {
t.Fatalf("failed to insert receipt %d: %v", n, err)
}
assert(t, "ancient", ancient, height, height, 0)
ancient.Rollback(remove)
assert(t, "ancient", ancient, height/2, height/2, 0)
if frozen, err := ancientDb.Ancients(); err != nil || frozen != height/2+1 {
t.Fatalf("failed to truncate ancient store, want %v, have %v", height/2+1, frozen)
}
ancient.SetHead(remove - 1)
assert(t, "ancient", ancient, 0, 0, 0)

if frozen, err := ancientDb.Ancients(); err != nil || frozen != 1 {
t.Fatalf("failed to truncate ancient store, want %v, have %v", 1, frozen)
}
// Import the chain as a light node and ensure all pointers are updated
lightDb, delfn := makeDb()
defer delfn()
@@ -809,7 +814,7 @@ func TestLightVsFastVsFullChainHeads(t *testing.T) {
defer light.Stop()

assert(t, "light", light, height, 0, 0)
light.Rollback(remove)
light.SetHead(remove - 1)
assert(t, "light", light, height/2, 0, 0)
}

@@ -1585,6 +1590,7 @@ func TestBlockchainRecovery(t *testing.T) {
t.Fatalf("failed to create temp freezer dir: %v", err)
}
defer os.Remove(frdir)

ancientDb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), frdir, "")
if err != nil {
t.Fatalf("failed to create temp freezer db: %v", err)
@@ -1602,6 +1608,7 @@ func TestBlockchainRecovery(t *testing.T) {
if n, err := ancient.InsertReceiptChain(blocks, receipts, uint64(3*len(blocks)/4)); err != nil {
t.Fatalf("failed to insert receipt %d: %v", n, err)
}
rawdb.WriteLastPivotNumber(ancientDb, blocks[len(blocks)-1].NumberU64()) // Force fast sync behavior
ancient.Stop()

// Destroy head fast block manually
@@ -1912,11 +1919,9 @@ func testInsertKnownChainData(t *testing.T, typ string) {
asserter(t, blocks[len(blocks)-1])

// Import a long canonical chain with some known data as prefix.
var rollback []common.Hash
for i := len(blocks) / 2; i < len(blocks); i++ {
rollback = append(rollback, blocks[i].Hash())
}
chain.Rollback(rollback)
rollback := blocks[len(blocks)/2].NumberU64()

chain.SetHead(rollback - 1)
if err := inserter(append(blocks, blocks2...), append(receipts, receipts2...)); err != nil {
t.Fatalf("failed to insert chain data: %v", err)
}
@@ -1936,11 +1941,7 @@ func testInsertKnownChainData(t *testing.T, typ string) {
asserter(t, blocks3[len(blocks3)-1])

// Rollback the heavier chain and re-insert the longer chain again
for i := 0; i < len(blocks3); i++ {
rollback = append(rollback, blocks3[i].Hash())
}
chain.Rollback(rollback)

chain.SetHead(rollback - 1)
if err := inserter(append(blocks, blocks2...), append(receipts, receipts2...)); err != nil {
t.Fatalf("failed to insert chain data: %v", err)
}
@@ -65,18 +65,23 @@ func (b *Generator) AddBloom(index uint, bloom types.Bloom) error {
}
// Rotate the bloom and insert into our collection
byteIndex := b.nextSec / 8
bitMask := byte(1) << byte(7-b.nextSec%8)

for i := 0; i < types.BloomBitLength; i++ {
bloomByteIndex := types.BloomByteLength - 1 - i/8
bloomBitMask := byte(1) << byte(i%8)

if (bloom[bloomByteIndex] & bloomBitMask) != 0 {
b.blooms[i][byteIndex] |= bitMask
bitIndex := byte(7 - b.nextSec%8)
for byt := 0; byt < types.BloomByteLength; byt++ {
bloomByte := bloom[types.BloomByteLength-1-byt]
if bloomByte == 0 {
continue
}
base := 8 * byt
b.blooms[base+7][byteIndex] |= ((bloomByte >> 7) & 1) << bitIndex
b.blooms[base+6][byteIndex] |= ((bloomByte >> 6) & 1) << bitIndex
b.blooms[base+5][byteIndex] |= ((bloomByte >> 5) & 1) << bitIndex
b.blooms[base+4][byteIndex] |= ((bloomByte >> 4) & 1) << bitIndex
b.blooms[base+3][byteIndex] |= ((bloomByte >> 3) & 1) << bitIndex
b.blooms[base+2][byteIndex] |= ((bloomByte >> 2) & 1) << bitIndex
b.blooms[base+1][byteIndex] |= ((bloomByte >> 1) & 1) << bitIndex
b.blooms[base][byteIndex] |= (bloomByte & 1) << bitIndex
}
b.nextSec++

return nil
}
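The rewritten AddBloom above is a bit transposition: bloom bit i of the current section is copied into bit (7 - nextSec%8) of byte nextSec/8 of row blooms[i], and the new version unrolls that per source byte and skips zero bytes. A standalone sketch comparing the naive per-bit loop with the unrolled per-byte version on a toy bloom size (rows here are a single byte, whereas the real generator indexes into a per-row byte slice):

```go
// Sketch of the bloom bit-transposition performed by AddBloom, on a toy
// 4-byte (32-bit) bloom instead of go-ethereum's 256-byte one. Both functions
// compute identical rows; the unrolled one skips zero bytes, which is the
// optimization in the diff above.
package main

import "fmt"

const bloomBytes = 4
const bloomBits = 8 * bloomBytes

// naive sets, for every set bit i of the bloom, bit bitIndex of rows[i].
func naive(rows *[bloomBits]byte, bloom [bloomBytes]byte, bitIndex byte) {
	for i := 0; i < bloomBits; i++ {
		if bloom[bloomBytes-1-i/8]&(byte(1)<<byte(i%8)) != 0 {
			rows[i] |= byte(1) << bitIndex
		}
	}
}

// unrolled does the same work one source byte at a time, skipping zero bytes.
func unrolled(rows *[bloomBits]byte, bloom [bloomBytes]byte, bitIndex byte) {
	for byt := 0; byt < bloomBytes; byt++ {
		b := bloom[bloomBytes-1-byt]
		if b == 0 {
			continue
		}
		base := 8 * byt
		for bit := 0; bit < 8; bit++ {
			rows[base+bit] |= ((b >> uint(bit)) & 1) << bitIndex
		}
	}
}

func main() {
	bloom := [bloomBytes]byte{0x80, 0x00, 0x03, 0x10}
	var a, b [bloomBits]byte
	naive(&a, bloom, 7)
	unrolled(&b, bloom, 7)
	fmt.Println(a == b) // true: both transpositions agree
}
```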
Some files were not shown because too many files have changed in this diff.