Compare commits
Comparing v1.9.4...ChrisChinc (290 commits)

Commit SHA1s:
8b6f5d4f72 2e1ecc02bd 6df973df27 4be8840120 529b81dadb 6a62fe399b dae3aee5ff 05ccbb5edd 4f55e24c02 91b228966e
1b9c5b393b 57d4898e29 c2117982b8 1c4c486a85 ac72787768 26284ec3cc fef8c985bc 36a1e0b67d 37531b1884 855690523a
38d1b0cba2 eddcecc160 9938d954c8 90caa2cabb 5f2002bbcc a9614c3c91 46c4b699c8 1821328162 8045504abf c22fdec3c7
049e17116e dcffb7777f 8694d14e65 172f7778fe 34bb132b10 675f4e75b8 4a231cd951 976a0f5558 a1313b5b1e 711ed74e09
058a4ac5f1 33791dbeb5 5a9c96454e 4cc89a5a32 15d09038a6 3c776c7199 24cab2d535 594e038e75 a903912b96 44c365c3e2
7b68975a00 60deeb103e 0b284f6c6c 8a5c81349e 33c56ebc67 31baf3a9af ad2fc7c6a6 58cf5686ea b4aa4a6965 b88b4632c2
1f1cefc036 4c8fcd93da fcc84c38dd 6d200efe72 92956e2930 9b09c0fc83 770316dc20 0af96d2556 d5acc5ed9e fcafa0baa5
1ee754b056 b3b8d36995 9b32f592dc 3e97b04a3d f20c8d495a 8704e8a8fc 94e8418939 feda78e052 8592a57553 b2de0bd87b
e9e69d6e29 a90cc66f3c 8bd37a1d91 b5c4ea56b8 fc392395fb b211742e5f 0218d7001d 4d663d57d6 8a63f7f504 c49a4165d0
a1bc0e3cb6 a013f02df2 50be790869 9e0f934e2b 4f7b7f84ae c6285e6437 35f95aef6f 7a509b4732 433937fb42 2eeb8dd271
b7cf41e4b3 3bb6815fc1 a67fe48b43 93b1171316 6ae9dc15cc 49cf000df7 c4b7fdd27e 275cd4988d f51cf573b5 191364c350
d90d1db609 b8bc9b3d8e f383eaa102 cecc7230c0 4b40b5377b 370cb95b7f 017449971e bc01593afb c9dce0bfd7 e78f631dfc
6b6882f08b c2d65d34d5 13ccf6016e 7ce7c3967c fc7e0fe6c7 5cc6e7a71e d556d39a2c 54d332e1db e0bf5f0ccb 1ff3d7c2d4
08611cfd75 9a529d64d1 a91b704b01 c9f28ca8e5 58e33d9e5a 7800ba978d 717f8a4e8f 7b189d6f1f c4844e9ee2 23c8c74131
0676320169 d62e9b2857 878e35bfde 2e98706a99 8c1e8de839 b26eedf9e9 44b41641f8 9ef90dbf30 d9d2a4eef9 9d67222f4e
f5a68a40bf f06ae5ca6a 3a0480e07d 5d21667587 fdff182f11 0abcf03fde 58f2ce8671 dd21f079e8 36a684ca1e bcc1234778
c1db636fb3 5b558ad936 b6d4f6b66e 0ec5ab4175 0754100464 475ae8bd93 89ab8a74c0 72e62efc76 f56f969dd3 216ff5a952
7be89a7a01 59177bc8c0 c5b46a79c1 b4bc3b3c35 75e029db8b b8ced9e00b f8790b9482 8bd5bb8918 a7dfaa0bda e1dcea8bf0
b3d6304f1e f4ec85486a dfdb204b48 15fb780de6 3a4a3d080b b7ba944e88 9b59c75405 f71e85b8e2 8008c5b1fa 9c6cf960b4
df206d2513 9e8cc00b73 ac5e28ea38 3b0f3483c4 7f70a70106 94e8250983 c013192ba7 0b6338321f b9c90c5581 5fefe39ba5
dfe891270a c5c5e0dbe8 3f4a875bf6 a3d263dd3a 190fb8180a b02afb6b3d 422604b438 57d697629d 11d09fd3ba 689486449d
7c4a4eb58a 9e71f55bfa 51c3290bee 738b51ae31 f03b2db7db 49d1a032da 765fe446cf afe0b65405 987648b0ad 9504c5c360
f8a95d996f bf5c6b29fa 22e3bbbf0a 4ea9b62b5c 6f1a600f6c de2259d27c adf007dadc 4b8f56cf98 a718daa674 b9bac1f384
fc3661f89c 9948724deb c702bd70ed 734e00af9e b566cfdffd 7a6d5d0cce 0ff7380465 0ce5e113be 86fe283d19 44b74cfc40
9278951a62 12f2a25d5e 8927f7724a 93422e9d15 5d91acccd5 9641cacea8 64571f9379 e306304414 2c37142d2f 3eca7b5d27
b0b277525c ecdbb402ee 9c81387bef 72617a0742 db79143a13 538f763fdc d4bb3798d8 08953e42c1 b9299bbc46 9a77065948
a28093ced4 d5b79e752e 7300365956 c476460cb2 028af3457d a73f3f4518 bd05968077 6e730915bd c713ea7c22 7f5f62aaa0
b2f696e025 62b43ee0d5 a2a60869c8 df89233b57 ead711779d 2133f18f15 1a6ef5ae58 ad03d9801c 62391ddbeb 0568e81701
32b07e8b1f aca39a6498 be500b57d2 a308f012ba 311419c7d6 63b18027dc a1c09b9387 05347b3d98 75aec8a28d 24ef83518c
.github/CODEOWNERS (vendored, 6 lines changed)

```diff
@@ -16,8 +16,8 @@ light/ @zsfelfoldi @rjl493456442
 mobile/ @karalabe @ligi
 p2p/ @fjl @zsfelfoldi
 rpc/ @fjl @holiman
-p2p/simulations @zelig @nonsense @janos @justelad
-p2p/protocols @zelig @nonsense @janos @justelad
-p2p/testing @zelig @nonsense @janos @justelad
+p2p/simulations @zelig @janos @justelad
+p2p/protocols @zelig @janos @justelad
+p2p/testing @zelig @janos @justelad
 signer/ @holiman
 whisper/ @gballet @gluk256
```
.github/ISSUE_TEMPLATE.md (vendored, 2 lines changed)

```diff
@@ -1,6 +1,6 @@
 Hi there,

-please note that this is an issue tracker reserved for bug reports and feature requests.
+Please note that this is an issue tracker reserved for bug reports and feature requests.

 For general questions please use the gitter channel or the Ethereum stack exchange at https://ethereum.stackexchange.com.

```
.gitignore (vendored, 1 line added)

```diff
@@ -24,6 +24,7 @@ build/_vendor/pkg

 # used by the Makefile
 /build/_workspace/
+/build/cache/
 /build/bin/
 /geth*.zip

```
.golangci.yml (new file, 50 lines added)

```yaml
# This file configures github.com/golangci/golangci-lint.

run:
  timeout: 2m
  tests: true
  # default is true. Enables skipping of directories:
  #   vendor$, third_party$, testdata$, examples$, Godeps$, builtin$
  skip-dirs-use-default: true
  skip-files:
    - core/genesis_alloc.go

linters:
  disable-all: true
  enable:
    - deadcode
    - goconst
    - goimports
    - gosimple
    - govet
    - ineffassign
    - misspell
    # - staticcheck
    - unconvert
    # - unused
    - varcheck

linters-settings:
  gofmt:
    simplify: true
  goconst:
    min-len: 3 # minimum length of string constant
    min-occurrences: 6 # minimum number of occurrences

issues:
  exclude-rules:
    - path: crypto/blake2b/
      linters:
        - deadcode
    - path: crypto/bn256/cloudflare
      linters:
        - deadcode
    - path: p2p/discv5/
      linters:
        - deadcode
    - path: core/vm/instructions_test.go
      linters:
        - goconst
    - path: cmd/faucet/
      linters:
        - deadcode
```
.travis.yml (22 lines changed)

```diff
@@ -7,7 +7,7 @@ jobs:
     - stage: lint
       os: linux
       dist: xenial
-      go: 1.12.x
+      go: 1.13.x
       env:
         - lint
       git:
@@ -19,6 +19,8 @@ jobs:
       os: linux
       dist: xenial
       go: 1.11.x
+      env:
+        - GO111MODULE=on
       script:
         - go run build/ci.go install
        - go run build/ci.go test -coverage $TEST_PACKAGES
@@ -27,6 +29,8 @@ jobs:
       os: linux
       dist: xenial
       go: 1.12.x
+      env:
+        - GO111MODULE=on
       script:
         - go run build/ci.go install
        - go run build/ci.go test -coverage $TEST_PACKAGES
@@ -34,6 +38,17 @@ jobs:
     # These are the latest Go versions.
     - stage: build
       os: linux
+      arch: amd64
       dist: xenial
       go: 1.13.x
       script:
+        - go run build/ci.go install
+        - go run build/ci.go test -coverage $TEST_PACKAGES
+
+    - stage: build
+      if: type = pull_request
+      os: linux
+      arch: arm64
+      dist: xenial
+      go: 1.13.x
+      script:
@@ -42,6 +57,7 @@ jobs:

     - stage: build
       os: osx
+      osx_image: xcode11.3
       go: 1.13.x
       script:
         - echo "Increase the maximum number of open file descriptors on macOS"
@@ -77,7 +93,7 @@ jobs:
             - python-paramiko
       script:
         - echo '|1|7SiYPr9xl3uctzovOTj4gMwAC1M=|t6ReES75Bo/PxlOPJ6/GsGbTrM0= ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA0aKz5UTUndYgIGG7dQBV+HaeuEZJ2xPHo2DS2iSKvUL4xNMSAY4UguNW+pX56nAQmZKIZZ8MaEvSj6zMEDiq6HFfn5JcTlM80UwlnyKe8B8p7Nk06PPQLrnmQt5fh0HmEcZx+JU9TZsfCHPnX7MNz4ELfZE6cFsclClrKim3BHUIGq//t93DllB+h4O9LHjEUsQ1Sr63irDLSutkLJD6RXchjROXkNirlcNVHH/jwLWR5RcYilNX7S5bIkK8NlWPjsn/8Ua5O7I9/YoE97PpO6i73DTGLh5H9JN/SITwCKBkgSDWUt61uPK3Y11Gty7o2lWsBjhBUm2Y38CBsoGmBw==' >> ~/.ssh/known_hosts
-        - go run build/ci.go debsrc -upload ethereum/ethereum -sftp-user geth-ci -signer "Go Ethereum Linux Builder <geth-ci@ethereum.org>"
+        - go run build/ci.go debsrc -goversion 1.13.8 -upload ethereum/ethereum -sftp-user geth-ci -signer "Go Ethereum Linux Builder <geth-ci@ethereum.org>"

       # This builder does the Linux Azure uploads
       - stage: build
@@ -167,7 +183,7 @@ jobs:
       git:
         submodules: false # avoid cloning ethereum/tests
       before_install:
-        - curl https://dl.google.com/go/go1.13.linux-amd64.tar.gz | tar -xz
+        - curl https://dl.google.com/go/go1.13.8.linux-amd64.tar.gz | tar -xz
         - export PATH=`pwd`/go/bin:$PATH
         - export GOROOT=`pwd`/go
         - export GOPATH=$HOME/go
```
Makefile (43 lines changed)

```diff
@@ -10,33 +10,34 @@

 GOBIN = ./build/bin
 GO ?= latest
+GORUN = env GO111MODULE=on go run

 geth:
-	build/env.sh go run build/ci.go install ./cmd/geth
+	$(GORUN) build/ci.go install ./cmd/geth
 	@echo "Done building."
 	@echo "Run \"$(GOBIN)/geth\" to launch geth."

 all:
-	build/env.sh go run build/ci.go install
+	$(GORUN) build/ci.go install

 android:
-	build/env.sh go run build/ci.go aar --local
+	$(GORUN) build/ci.go aar --local
 	@echo "Done building."
 	@echo "Import \"$(GOBIN)/geth.aar\" to use the library."

 ios:
-	build/env.sh go run build/ci.go xcode --local
+	$(GORUN) build/ci.go xcode --local
 	@echo "Done building."
 	@echo "Import \"$(GOBIN)/Geth.framework\" to use the library."

 test: all
-	build/env.sh go run build/ci.go test
+	$(GORUN) build/ci.go test

 lint: ## Run linters.
-	build/env.sh go run build/ci.go lint
+	$(GORUN) build/ci.go lint

 clean:
-	./build/clean_go_build_cache.sh
+	env GO111MODULE=on go clean -cache
 	rm -fr build/_workspace/pkg/ $(GOBIN)/*

 # The devtools target installs tools required for 'go generate'.
@@ -63,12 +64,12 @@ geth-linux: geth-linux-386 geth-linux-amd64 geth-linux-arm geth-linux-mips64 get
 	@ls -ld $(GOBIN)/geth-linux-*

 geth-linux-386:
-	build/env.sh go run build/ci.go xgo -- --go=$(GO) --targets=linux/386 -v ./cmd/geth
+	$(GORUN) build/ci.go xgo -- --go=$(GO) --targets=linux/386 -v ./cmd/geth
 	@echo "Linux 386 cross compilation done:"
 	@ls -ld $(GOBIN)/geth-linux-* | grep 386

 geth-linux-amd64:
-	build/env.sh go run build/ci.go xgo -- --go=$(GO) --targets=linux/amd64 -v ./cmd/geth
+	$(GORUN) build/ci.go xgo -- --go=$(GO) --targets=linux/amd64 -v ./cmd/geth
 	@echo "Linux amd64 cross compilation done:"
 	@ls -ld $(GOBIN)/geth-linux-* | grep amd64

@@ -77,42 +78,42 @@ geth-linux-arm: geth-linux-arm-5 geth-linux-arm-6 geth-linux-arm-7 geth-linux-ar
 	@ls -ld $(GOBIN)/geth-linux-* | grep arm

 geth-linux-arm-5:
-	build/env.sh go run build/ci.go xgo -- --go=$(GO) --targets=linux/arm-5 -v ./cmd/geth
+	$(GORUN) build/ci.go xgo -- --go=$(GO) --targets=linux/arm-5 -v ./cmd/geth
 	@echo "Linux ARMv5 cross compilation done:"
 	@ls -ld $(GOBIN)/geth-linux-* | grep arm-5

 geth-linux-arm-6:
-	build/env.sh go run build/ci.go xgo -- --go=$(GO) --targets=linux/arm-6 -v ./cmd/geth
+	$(GORUN) build/ci.go xgo -- --go=$(GO) --targets=linux/arm-6 -v ./cmd/geth
 	@echo "Linux ARMv6 cross compilation done:"
 	@ls -ld $(GOBIN)/geth-linux-* | grep arm-6

 geth-linux-arm-7:
-	build/env.sh go run build/ci.go xgo -- --go=$(GO) --targets=linux/arm-7 -v ./cmd/geth
+	$(GORUN) build/ci.go xgo -- --go=$(GO) --targets=linux/arm-7 -v ./cmd/geth
 	@echo "Linux ARMv7 cross compilation done:"
 	@ls -ld $(GOBIN)/geth-linux-* | grep arm-7

 geth-linux-arm64:
-	build/env.sh go run build/ci.go xgo -- --go=$(GO) --targets=linux/arm64 -v ./cmd/geth
+	$(GORUN) build/ci.go xgo -- --go=$(GO) --targets=linux/arm64 -v ./cmd/geth
 	@echo "Linux ARM64 cross compilation done:"
 	@ls -ld $(GOBIN)/geth-linux-* | grep arm64

 geth-linux-mips:
-	build/env.sh go run build/ci.go xgo -- --go=$(GO) --targets=linux/mips --ldflags '-extldflags "-static"' -v ./cmd/geth
+	$(GORUN) build/ci.go xgo -- --go=$(GO) --targets=linux/mips --ldflags '-extldflags "-static"' -v ./cmd/geth
 	@echo "Linux MIPS cross compilation done:"
 	@ls -ld $(GOBIN)/geth-linux-* | grep mips

 geth-linux-mipsle:
-	build/env.sh go run build/ci.go xgo -- --go=$(GO) --targets=linux/mipsle --ldflags '-extldflags "-static"' -v ./cmd/geth
+	$(GORUN) build/ci.go xgo -- --go=$(GO) --targets=linux/mipsle --ldflags '-extldflags "-static"' -v ./cmd/geth
 	@echo "Linux MIPSle cross compilation done:"
 	@ls -ld $(GOBIN)/geth-linux-* | grep mipsle

 geth-linux-mips64:
-	build/env.sh go run build/ci.go xgo -- --go=$(GO) --targets=linux/mips64 --ldflags '-extldflags "-static"' -v ./cmd/geth
+	$(GORUN) build/ci.go xgo -- --go=$(GO) --targets=linux/mips64 --ldflags '-extldflags "-static"' -v ./cmd/geth
 	@echo "Linux MIPS64 cross compilation done:"
 	@ls -ld $(GOBIN)/geth-linux-* | grep mips64

 geth-linux-mips64le:
-	build/env.sh go run build/ci.go xgo -- --go=$(GO) --targets=linux/mips64le --ldflags '-extldflags "-static"' -v ./cmd/geth
+	$(GORUN) build/ci.go xgo -- --go=$(GO) --targets=linux/mips64le --ldflags '-extldflags "-static"' -v ./cmd/geth
 	@echo "Linux MIPS64le cross compilation done:"
 	@ls -ld $(GOBIN)/geth-linux-* | grep mips64le

@@ -121,12 +122,12 @@ geth-darwin: geth-darwin-386 geth-darwin-amd64
 	@ls -ld $(GOBIN)/geth-darwin-*

 geth-darwin-386:
-	build/env.sh go run build/ci.go xgo -- --go=$(GO) --targets=darwin/386 -v ./cmd/geth
+	$(GORUN) build/ci.go xgo -- --go=$(GO) --targets=darwin/386 -v ./cmd/geth
 	@echo "Darwin 386 cross compilation done:"
 	@ls -ld $(GOBIN)/geth-darwin-* | grep 386

 geth-darwin-amd64:
-	build/env.sh go run build/ci.go xgo -- --go=$(GO) --targets=darwin/amd64 -v ./cmd/geth
+	$(GORUN) build/ci.go xgo -- --go=$(GO) --targets=darwin/amd64 -v ./cmd/geth
 	@echo "Darwin amd64 cross compilation done:"
 	@ls -ld $(GOBIN)/geth-darwin-* | grep amd64

@@ -135,11 +136,11 @@ geth-windows: geth-windows-386 geth-windows-amd64
 	@ls -ld $(GOBIN)/geth-windows-*

 geth-windows-386:
-	build/env.sh go run build/ci.go xgo -- --go=$(GO) --targets=windows/386 -v ./cmd/geth
+	$(GORUN) build/ci.go xgo -- --go=$(GO) --targets=windows/386 -v ./cmd/geth
 	@echo "Windows 386 cross compilation done:"
 	@ls -ld $(GOBIN)/geth-windows-* | grep 386

 geth-windows-amd64:
-	build/env.sh go run build/ci.go xgo -- --go=$(GO) --targets=windows/amd64 -v ./cmd/geth
+	$(GORUN) build/ci.go xgo -- --go=$(GO) --targets=windows/amd64 -v ./cmd/geth
 	@echo "Windows amd64 cross compilation done:"
 	@ls -ld $(GOBIN)/geth-windows-* | grep amd64
```
README.md

````diff
@@ -39,7 +39,7 @@ directory.
 | **`geth`** | Our main Ethereum CLI client. It is the entry point into the Ethereum network (main-, test- or private net), capable of running as a full node (default), archive node (retaining all historical state) or a light node (retrieving data live). It can be used by other processes as a gateway into the Ethereum network via JSON RPC endpoints exposed on top of HTTP, WebSocket and/or IPC transports. `geth --help` and the [CLI Wiki page](https://github.com/ethereum/go-ethereum/wiki/Command-Line-Options) for command line options. |
 | `abigen` | Source code generator to convert Ethereum contract definitions into easy to use, compile-time type-safe Go packages. It operates on plain [Ethereum contract ABIs](https://github.com/ethereum/wiki/wiki/Ethereum-Contract-ABI) with expanded functionality if the contract bytecode is also available. However, it also accepts Solidity source files, making development much more streamlined. Please see our [Native DApps](https://github.com/ethereum/go-ethereum/wiki/Native-DApps:-Go-bindings-to-Ethereum-contracts) wiki page for details. |
 | `bootnode` | Stripped down version of our Ethereum client implementation that only takes part in the network node discovery protocol, but does not run any of the higher level application protocols. It can be used as a lightweight bootstrap node to aid in finding peers in private networks. |
-| `evm` | Developer utility version of the EVM (Ethereum Virtual Machine) that is capable of running bytecode snippets within a configurable environment and execution mode. Its purpose is to allow isolated, fine-grained debugging of EVM opcodes (e.g. `evm --code 60ff60ff --debug`). |
+| `evm` | Developer utility version of the EVM (Ethereum Virtual Machine) that is capable of running bytecode snippets within a configurable environment and execution mode. Its purpose is to allow isolated, fine-grained debugging of EVM opcodes (e.g. `evm --code 60ff60ff --debug run`). |
 | `gethrpctest` | Developer utility tool to support our [ethereum/rpc-test](https://github.com/ethereum/rpc-tests) test suite which validates baseline conformity to the [Ethereum JSON RPC](https://github.com/ethereum/wiki/wiki/JSON-RPC) specs. Please see the [test suite's readme](https://github.com/ethereum/rpc-tests/blob/master/README.md) for details. |
 | `rlpdump` | Developer utility tool to convert binary RLP ([Recursive Length Prefix](https://github.com/ethereum/wiki/wiki/RLP)) dumps (data encoding used by the Ethereum protocol both network as well as consensus wise) to user-friendlier hierarchical representation (e.g. `rlpdump --hex CE0183FFFFFFC4C304050583616263`). |
 | `puppeth` | a CLI wizard that aids in creating a new Ethereum network. |
@@ -294,7 +294,7 @@ also need to configure a miner to process transactions and create new blocks for
 Mining on the public Ethereum network is a complex task as it's only feasible using GPUs,
 requiring an OpenCL or CUDA enabled `ethminer` instance. For information on such a
 setup, please consult the [EtherMining subreddit](https://www.reddit.com/r/EtherMining/)
-and the [Genoil miner](https://github.com/Genoil/cpp-ethereum) repository.
+and the [ethminer](https://github.com/ethereum-mining/ethminer) repository.

 In a private network setting, however a single CPU miner instance is more than enough for
 practical purposes as it can produce a stable stream of blocks at the correct intervals
@@ -303,7 +303,7 @@ ones either). To start a `geth` instance for mining, run it with all your usual
 by:

 ```shell
-$ geth <usual-flags> --mine --minerthreads=1 --etherbase=0x0000000000000000000000000000000000000000
+$ geth <usual-flags> --mine --miner.threads=1 --etherbase=0x0000000000000000000000000000000000000000
 ```

 Which will start mining blocks and transactions on a single CPU thread, crediting all
````
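Not part of the diff: a minimal Go sketch, under assumptions, of what "a gateway into the Ethereum network via JSON RPC endpoints" looks like from another process. It assumes a local node on this release line started with the HTTP-RPC server enabled (`geth --rpc`); the endpoint URL and the use of the `ethclient` package are illustrative and not something this changeset touches.

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/ethereum/go-ethereum/ethclient"
)

func main() {
	// Attach to the node's HTTP JSON-RPC endpoint (assumed default port).
	client, err := ethclient.Dial("http://localhost:8545")
	if err != nil {
		log.Fatal(err)
	}
	// The simplest round trip through the gateway: ask for the current chain head.
	head, err := client.HeaderByNumber(context.Background(), nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("current block:", head.Number)
}
```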
accounts/abi/abi.go

```diff
@@ -75,9 +75,6 @@ func (abi ABI) Pack(name string, args ...interface{}) ([]byte, error) {

 // Unpack output in v according to the abi specification
 func (abi ABI) Unpack(v interface{}, name string, data []byte) (err error) {
-	if len(data) == 0 {
-		return fmt.Errorf("abi: unmarshalling empty output")
-	}
 	// since there can't be naming collisions with contracts and events,
 	// we need to decide whether we're calling a method or an event
 	if method, ok := abi.Methods[name]; ok {
@@ -94,9 +91,6 @@ func (abi ABI) Unpack(v interface{}, name string, data []byte) (err error) {

 // UnpackIntoMap unpacks a log into the provided map[string]interface{}
 func (abi ABI) UnpackIntoMap(v map[string]interface{}, name string, data []byte) (err error) {
-	if len(data) == 0 {
-		return fmt.Errorf("abi: unmarshalling empty output")
-	}
 	// since there can't be naming collisions with contracts and events,
 	// we need to decide whether we're calling a method or an event
 	if method, ok := abi.Methods[name]; ok {
@@ -114,12 +108,13 @@ func (abi ABI) UnpackIntoMap(v map[string]interface{}, name string, data []byte)
 // UnmarshalJSON implements json.Unmarshaler interface
 func (abi *ABI) UnmarshalJSON(data []byte) error {
 	var fields []struct {
-		Type      string
-		Name      string
-		Constant  bool
-		Anonymous bool
-		Inputs    []Argument
-		Outputs   []Argument
+		Type            string
+		Name            string
+		Constant        bool
+		StateMutability string
+		Anonymous       bool
+		Inputs          []Argument
+		Outputs         []Argument
 	}
 	if err := json.Unmarshal(data, &fields); err != nil {
 		return err
@@ -140,10 +135,11 @@ func (abi *ABI) UnmarshalJSON(data []byte) error {
 				name = fmt.Sprintf("%s%d", field.Name, idx)
 				_, ok = abi.Methods[name]
 			}
+			isConst := field.Constant || field.StateMutability == "pure" || field.StateMutability == "view"
 			abi.Methods[name] = Method{
 				Name:    name,
 				RawName: field.Name,
-				Const:   field.Constant,
+				Const:   isConst,
 				Inputs:  field.Inputs,
 				Outputs: field.Outputs,
 			}
```
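A small sketch (not taken from the patch) of what the `isConst` change above means for callers of this package: a function described only by the newer `stateMutability` field is now reported as constant, even without the legacy `constant` flag. The contract definition below is made up for illustration.

```go
package main

import (
	"fmt"
	"strings"

	"github.com/ethereum/go-ethereum/accounts/abi"
)

func main() {
	// A view function declared via "stateMutability" only, without "constant": true.
	const def = `[{"type":"function","name":"owner","stateMutability":"view","inputs":[],"outputs":[{"name":"","type":"address"}]}]`

	parsed, err := abi.JSON(strings.NewReader(def))
	if err != nil {
		panic(err)
	}
	// Const is now derived from stateMutability ("pure"/"view") as well.
	fmt.Println(parsed.Methods["owner"].Const) // true with this change
}
```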
accounts/abi/abi_test.go

```diff
@@ -57,7 +57,7 @@ const jsondata2 = `
 ]`

 func TestReader(t *testing.T) {
-	Uint256, _ := NewType("uint256", nil)
+	Uint256, _ := NewType("uint256", "", nil)
 	exp := ABI{
 		Methods: map[string]Method{
 			"balance": {
@@ -172,7 +172,7 @@ func TestTestSlice(t *testing.T) {
 }

 func TestMethodSignature(t *testing.T) {
-	String, _ := NewType("string", nil)
+	String, _ := NewType("string", "", nil)
 	m := Method{"foo", "foo", false, []Argument{{"bar", String, false}, {"baz", String, false}}, nil}
 	exp := "foo(string,string)"
 	if m.Sig() != exp {
@@ -184,7 +184,7 @@ func TestMethodSignature(t *testing.T) {
 		t.Errorf("expected ids to match %x != %x", m.ID(), idexp)
 	}

-	uintt, _ := NewType("uint256", nil)
+	uintt, _ := NewType("uint256", "", nil)
 	m = Method{"foo", "foo", false, []Argument{{"bar", uintt, false}}, nil}
 	exp = "foo(uint256)"
 	if m.Sig() != exp {
@@ -192,7 +192,7 @@ func TestMethodSignature(t *testing.T) {
 	}

 	// Method with tuple arguments
-	s, _ := NewType("tuple", []ArgumentMarshaling{
+	s, _ := NewType("tuple", "", []ArgumentMarshaling{
 		{Name: "a", Type: "int256"},
 		{Name: "b", Type: "int256[]"},
 		{Name: "c", Type: "tuple[]", Components: []ArgumentMarshaling{
@@ -602,9 +602,9 @@ func TestBareEvents(t *testing.T) {
 		{ "type" : "event", "name" : "tuple", "inputs" : [{ "indexed":false, "name":"t", "type":"tuple", "components":[{"name":"a", "type":"uint256"}] }, { "indexed":true, "name":"arg1", "type":"address" }] }
 	]`

-	arg0, _ := NewType("uint256", nil)
-	arg1, _ := NewType("address", nil)
-	tuple, _ := NewType("tuple", []ArgumentMarshaling{{Name: "a", Type: "uint256"}})
+	arg0, _ := NewType("uint256", "", nil)
+	arg1, _ := NewType("address", "", nil)
+	tuple, _ := NewType("tuple", "", []ArgumentMarshaling{{Name: "a", Type: "uint256"}})

 	expectedEvents := map[string]struct {
 		Anonymous bool
@@ -927,7 +927,7 @@ func TestABI_MethodById(t *testing.T) {
 		}
 		b := fmt.Sprintf("%v", m2)
 		if a != b {
-			t.Errorf("Method %v (id %v) not 'findable' by id in ABI", name, common.ToHex(m.ID()))
+			t.Errorf("Method %v (id %x) not 'findable' by id in ABI", name, m.ID())
 		}
 	}
 	// Also test empty
```
accounts/abi/argument.go

```diff
@@ -34,10 +34,11 @@ type Argument struct {
 type Arguments []Argument

 type ArgumentMarshaling struct {
-	Name       string
-	Type       string
-	Components []ArgumentMarshaling
-	Indexed    bool
+	Name         string
+	Type         string
+	InternalType string
+	Components   []ArgumentMarshaling
+	Indexed      bool
 }

 // UnmarshalJSON implements json.Unmarshaler interface
@@ -48,7 +49,7 @@ func (argument *Argument) UnmarshalJSON(data []byte) error {
 		return fmt.Errorf("argument json err: %v", err)
 	}

-	argument.Type, err = NewType(arg.Type, arg.Components)
+	argument.Type, err = NewType(arg.Type, arg.InternalType, arg.Components)
 	if err != nil {
 		return err
 	}
@@ -88,6 +89,13 @@ func (arguments Arguments) isTuple() bool {

 // Unpack performs the operation hexdata -> Go format
 func (arguments Arguments) Unpack(v interface{}, data []byte) error {
+	if len(data) == 0 {
+		if len(arguments) != 0 {
+			return fmt.Errorf("abi: attempting to unmarshall an empty string while arguments are expected")
+		} else {
+			return nil // Nothing to unmarshal, return
+		}
+	}
 	// make sure the passed value is arguments pointer
 	if reflect.Ptr != reflect.ValueOf(v).Kind() {
 		return fmt.Errorf("abi: Unpack(non-pointer %T)", v)
@@ -104,11 +112,17 @@ func (arguments Arguments) Unpack(v interface{}, data []byte) error {

 // UnpackIntoMap performs the operation hexdata -> mapping of argument name to argument value
 func (arguments Arguments) UnpackIntoMap(v map[string]interface{}, data []byte) error {
+	if len(data) == 0 {
+		if len(arguments) != 0 {
+			return fmt.Errorf("abi: attempting to unmarshall an empty string while arguments are expected")
+		} else {
+			return nil // Nothing to unmarshal, return
+		}
+	}
 	marshalledValues, err := arguments.UnpackValues(data)
 	if err != nil {
 		return err
 	}

 	return arguments.unpackIntoMap(v, marshalledValues)
 }

```
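Two usage sketches for the changes above, assuming this revision of the `accounts/abi` package; the variable names and values are illustrative. `NewType` now takes the Solidity `internalType` string (pass "" when it is not needed), and unpacking empty return data only fails when outputs are actually declared.

```go
package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/accounts/abi"
)

func main() {
	// NewType gained an internalType argument; an empty string is fine when unused.
	uint256T, err := abi.NewType("uint256", "", nil)
	if err != nil {
		panic(err)
	}
	args := abi.Arguments{{Name: "amount", Type: uint256T}}

	// Declared outputs with zero-length data produce an error.
	var out *big.Int
	if err := args.Unpack(&out, nil); err != nil {
		fmt.Println("declared outputs, empty data:", err)
	}
	// No declared outputs and no data is now accepted instead of erroring.
	if err := (abi.Arguments{}).Unpack(&struct{}{}, nil); err == nil {
		fmt.Println("no outputs, empty data: ok")
	}
}
```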
@@ -46,12 +46,17 @@ import (
|
||||
var _ bind.ContractBackend = (*SimulatedBackend)(nil)
|
||||
|
||||
var (
|
||||
errBlockNumberUnsupported = errors.New("simulatedBackend cannot access blocks other than the latest block")
|
||||
errGasEstimationFailed = errors.New("gas required exceeds allowance or always failing transaction")
|
||||
errBlockNumberUnsupported = errors.New("simulatedBackend cannot access blocks other than the latest block")
|
||||
errBlockDoesNotExist = errors.New("block does not exist in blockchain")
|
||||
errTransactionDoesNotExist = errors.New("transaction does not exist")
|
||||
errGasEstimationFailed = errors.New("gas required exceeds allowance or always failing transaction")
|
||||
)
|
||||
|
||||
// SimulatedBackend implements bind.ContractBackend, simulating a blockchain in
|
||||
// the background. Its main purpose is to allow easily testing contract bindings.
|
||||
// Simulated backend implements the following interfaces:
|
||||
// ChainReader, ChainStateReader, ContractBackend, ContractCaller, ContractFilterer, ContractTransactor,
|
||||
// DeployBackend, GasEstimator, GasPricer, LogFilterer, PendingContractCaller, TransactionReader, and TransactionSender
|
||||
type SimulatedBackend struct {
|
||||
database ethdb.Database // In memory database to store our testing data
|
||||
blockchain *core.BlockChain // Ethereum blockchain to handle the consensus
|
||||
@@ -76,7 +81,7 @@ func NewSimulatedBackendWithDatabase(database ethdb.Database, alloc core.Genesis
|
||||
database: database,
|
||||
blockchain: blockchain,
|
||||
config: genesis.Config,
|
||||
events: filters.NewEventSystem(new(event.TypeMux), &filterBackend{database, blockchain}, false),
|
||||
events: filters.NewEventSystem(&filterBackend{database, blockchain}, false),
|
||||
}
|
||||
backend.rollback()
|
||||
return backend
|
||||
@@ -122,15 +127,28 @@ func (b *SimulatedBackend) rollback() {
|
||||
b.pendingState, _ = state.New(b.pendingBlock.Root(), statedb.Database())
|
||||
}
|
||||
|
||||
// stateByBlockNumber retrieves a state by a given blocknumber.
|
||||
func (b *SimulatedBackend) stateByBlockNumber(ctx context.Context, blockNumber *big.Int) (*state.StateDB, error) {
|
||||
if blockNumber == nil || blockNumber.Cmp(b.blockchain.CurrentBlock().Number()) == 0 {
|
||||
return b.blockchain.State()
|
||||
}
|
||||
block, err := b.BlockByNumber(ctx, blockNumber)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return b.blockchain.StateAt(block.Hash())
|
||||
}
|
||||
|
||||
// CodeAt returns the code associated with a certain account in the blockchain.
|
||||
func (b *SimulatedBackend) CodeAt(ctx context.Context, contract common.Address, blockNumber *big.Int) ([]byte, error) {
|
||||
b.mu.Lock()
|
||||
defer b.mu.Unlock()
|
||||
|
||||
if blockNumber != nil && blockNumber.Cmp(b.blockchain.CurrentBlock().Number()) != 0 {
|
||||
return nil, errBlockNumberUnsupported
|
||||
statedb, err := b.stateByBlockNumber(ctx, blockNumber)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
statedb, _ := b.blockchain.State()
|
||||
|
||||
return statedb.GetCode(contract), nil
|
||||
}
|
||||
|
||||
@@ -139,10 +157,11 @@ func (b *SimulatedBackend) BalanceAt(ctx context.Context, contract common.Addres
|
||||
b.mu.Lock()
|
||||
defer b.mu.Unlock()
|
||||
|
||||
if blockNumber != nil && blockNumber.Cmp(b.blockchain.CurrentBlock().Number()) != 0 {
|
||||
return nil, errBlockNumberUnsupported
|
||||
statedb, err := b.stateByBlockNumber(ctx, blockNumber)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
statedb, _ := b.blockchain.State()
|
||||
|
||||
return statedb.GetBalance(contract), nil
|
||||
}
|
||||
|
||||
@@ -151,10 +170,11 @@ func (b *SimulatedBackend) NonceAt(ctx context.Context, contract common.Address,
|
||||
b.mu.Lock()
|
||||
defer b.mu.Unlock()
|
||||
|
||||
if blockNumber != nil && blockNumber.Cmp(b.blockchain.CurrentBlock().Number()) != 0 {
|
||||
return 0, errBlockNumberUnsupported
|
||||
statedb, err := b.stateByBlockNumber(ctx, blockNumber)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
statedb, _ := b.blockchain.State()
|
||||
|
||||
return statedb.GetNonce(contract), nil
|
||||
}
|
||||
|
||||
@@ -163,16 +183,20 @@ func (b *SimulatedBackend) StorageAt(ctx context.Context, contract common.Addres
|
||||
b.mu.Lock()
|
||||
defer b.mu.Unlock()
|
||||
|
||||
if blockNumber != nil && blockNumber.Cmp(b.blockchain.CurrentBlock().Number()) != 0 {
|
||||
return nil, errBlockNumberUnsupported
|
||||
statedb, err := b.stateByBlockNumber(ctx, blockNumber)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
statedb, _ := b.blockchain.State()
|
||||
|
||||
val := statedb.GetState(contract, key)
|
||||
return val[:], nil
|
||||
}
|
||||
|
||||
// TransactionReceipt returns the receipt of a transaction.
|
||||
func (b *SimulatedBackend) TransactionReceipt(ctx context.Context, txHash common.Hash) (*types.Receipt, error) {
|
||||
b.mu.Lock()
|
||||
defer b.mu.Unlock()
|
||||
|
||||
receipt, _, _, _ := rawdb.ReadReceipt(b.database, txHash, b.config)
|
||||
return receipt, nil
|
||||
}
|
||||
@@ -196,6 +220,115 @@ func (b *SimulatedBackend) TransactionByHash(ctx context.Context, txHash common.
|
||||
return nil, false, ethereum.NotFound
|
||||
}
|
||||
|
||||
// BlockByHash retrieves a block based on the block hash
|
||||
func (b *SimulatedBackend) BlockByHash(ctx context.Context, hash common.Hash) (*types.Block, error) {
|
||||
b.mu.Lock()
|
||||
defer b.mu.Unlock()
|
||||
|
||||
if hash == b.pendingBlock.Hash() {
|
||||
return b.pendingBlock, nil
|
||||
}
|
||||
|
||||
block := b.blockchain.GetBlockByHash(hash)
|
||||
if block != nil {
|
||||
return block, nil
|
||||
}
|
||||
|
||||
return nil, errBlockDoesNotExist
|
||||
}
|
||||
|
||||
// BlockByNumber retrieves a block from the database by number, caching it
|
||||
// (associated with its hash) if found.
|
||||
func (b *SimulatedBackend) BlockByNumber(ctx context.Context, number *big.Int) (*types.Block, error) {
|
||||
b.mu.Lock()
|
||||
defer b.mu.Unlock()
|
||||
|
||||
if number == nil || number.Cmp(b.pendingBlock.Number()) == 0 {
|
||||
return b.blockchain.CurrentBlock(), nil
|
||||
}
|
||||
|
||||
block := b.blockchain.GetBlockByNumber(uint64(number.Int64()))
|
||||
if block == nil {
|
||||
return nil, errBlockDoesNotExist
|
||||
}
|
||||
|
||||
return block, nil
|
||||
}
|
||||
|
||||
// HeaderByHash returns a block header from the current canonical chain.
|
||||
func (b *SimulatedBackend) HeaderByHash(ctx context.Context, hash common.Hash) (*types.Header, error) {
|
||||
b.mu.Lock()
|
||||
defer b.mu.Unlock()
|
||||
|
||||
if hash == b.pendingBlock.Hash() {
|
||||
return b.pendingBlock.Header(), nil
|
||||
}
|
||||
|
||||
header := b.blockchain.GetHeaderByHash(hash)
|
||||
if header == nil {
|
||||
return nil, errBlockDoesNotExist
|
||||
}
|
||||
|
||||
return header, nil
|
||||
}
|
||||
|
||||
// HeaderByNumber returns a block header from the current canonical chain. If number is
|
||||
// nil, the latest known header is returned.
|
||||
func (b *SimulatedBackend) HeaderByNumber(ctx context.Context, block *big.Int) (*types.Header, error) {
|
||||
b.mu.Lock()
|
||||
defer b.mu.Unlock()
|
||||
|
||||
if block == nil || block.Cmp(b.pendingBlock.Number()) == 0 {
|
||||
return b.blockchain.CurrentHeader(), nil
|
||||
}
|
||||
|
||||
return b.blockchain.GetHeaderByNumber(uint64(block.Int64())), nil
|
||||
}
|
||||
|
||||
// TransactionCount returns the number of transactions in a given block
|
||||
func (b *SimulatedBackend) TransactionCount(ctx context.Context, blockHash common.Hash) (uint, error) {
|
||||
b.mu.Lock()
|
||||
defer b.mu.Unlock()
|
||||
|
||||
if blockHash == b.pendingBlock.Hash() {
|
||||
return uint(b.pendingBlock.Transactions().Len()), nil
|
||||
}
|
||||
|
||||
block := b.blockchain.GetBlockByHash(blockHash)
|
||||
if block == nil {
|
||||
return uint(0), errBlockDoesNotExist
|
||||
}
|
||||
|
||||
return uint(block.Transactions().Len()), nil
|
||||
}
|
||||
|
||||
// TransactionInBlock returns the transaction for a specific block at a specific index
|
||||
func (b *SimulatedBackend) TransactionInBlock(ctx context.Context, blockHash common.Hash, index uint) (*types.Transaction, error) {
|
||||
b.mu.Lock()
|
||||
defer b.mu.Unlock()
|
||||
|
||||
if blockHash == b.pendingBlock.Hash() {
|
||||
transactions := b.pendingBlock.Transactions()
|
||||
if uint(len(transactions)) < index+1 {
|
||||
return nil, errTransactionDoesNotExist
|
||||
}
|
||||
|
||||
return transactions[index], nil
|
||||
}
|
||||
|
||||
block := b.blockchain.GetBlockByHash(blockHash)
|
||||
if block == nil {
|
||||
return nil, errBlockDoesNotExist
|
||||
}
|
||||
|
||||
transactions := block.Transactions()
|
||||
if uint(len(transactions)) < index+1 {
|
||||
return nil, errTransactionDoesNotExist
|
||||
}
|
||||
|
||||
return transactions[index], nil
|
||||
}
|
||||
|
||||
// PendingCodeAt returns the code associated with an account in the pending state.
|
||||
func (b *SimulatedBackend) PendingCodeAt(ctx context.Context, contract common.Address) ([]byte, error) {
|
||||
b.mu.Lock()
|
||||
@@ -419,10 +552,38 @@ func (b *SimulatedBackend) SubscribeFilterLogs(ctx context.Context, query ethere
|
||||
}), nil
|
||||
}
|
||||
|
||||
// SubscribeNewHead returns an event subscription for a new header
|
||||
func (b *SimulatedBackend) SubscribeNewHead(ctx context.Context, ch chan<- *types.Header) (ethereum.Subscription, error) {
|
||||
// subscribe to a new head
|
||||
sink := make(chan *types.Header)
|
||||
sub := b.events.SubscribeNewHeads(sink)
|
||||
|
||||
return event.NewSubscription(func(quit <-chan struct{}) error {
|
||||
defer sub.Unsubscribe()
|
||||
for {
|
||||
select {
|
||||
case head := <-sink:
|
||||
select {
|
||||
case ch <- head:
|
||||
case err := <-sub.Err():
|
||||
return err
|
||||
case <-quit:
|
||||
return nil
|
||||
}
|
||||
case err := <-sub.Err():
|
||||
return err
|
||||
case <-quit:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}), nil
|
||||
}
|
||||
|
||||
// AdjustTime adds a time shift to the simulated clock.
|
||||
func (b *SimulatedBackend) AdjustTime(adjustment time.Duration) error {
|
||||
b.mu.Lock()
|
||||
defer b.mu.Unlock()
|
||||
|
||||
blocks, _ := core.GenerateChain(b.config, b.blockchain.CurrentBlock(), ethash.NewFaker(), b.database, 1, func(number int, block *core.BlockGen) {
|
||||
for _, tx := range b.pendingBlock.Transactions() {
|
||||
block.AddTx(tx)
|
||||
@@ -502,22 +663,34 @@ func (fb *filterBackend) GetLogs(ctx context.Context, hash common.Hash) ([][]*ty
|
||||
}
|
||||
|
||||
func (fb *filterBackend) SubscribeNewTxsEvent(ch chan<- core.NewTxsEvent) event.Subscription {
|
||||
return nullSubscription()
|
||||
}
|
||||
|
||||
func (fb *filterBackend) SubscribeChainEvent(ch chan<- core.ChainEvent) event.Subscription {
|
||||
return fb.bc.SubscribeChainEvent(ch)
|
||||
}
|
||||
|
||||
func (fb *filterBackend) SubscribeRemovedLogsEvent(ch chan<- core.RemovedLogsEvent) event.Subscription {
|
||||
return fb.bc.SubscribeRemovedLogsEvent(ch)
|
||||
}
|
||||
|
||||
func (fb *filterBackend) SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscription {
|
||||
return fb.bc.SubscribeLogsEvent(ch)
|
||||
}
|
||||
|
||||
func (fb *filterBackend) SubscribePendingLogsEvent(ch chan<- []*types.Log) event.Subscription {
|
||||
return nullSubscription()
|
||||
}
|
||||
|
||||
func (fb *filterBackend) BloomStatus() (uint64, uint64) { return 4096, 0 }
|
||||
|
||||
func (fb *filterBackend) ServiceFilter(ctx context.Context, ms *bloombits.MatcherSession) {
|
||||
panic("not supported")
|
||||
}
|
||||
|
||||
func nullSubscription() event.Subscription {
|
||||
return event.NewSubscription(func(quit <-chan struct{}) error {
|
||||
<-quit
|
||||
return nil
|
||||
})
|
||||
}
|
||||
func (fb *filterBackend) SubscribeChainEvent(ch chan<- core.ChainEvent) event.Subscription {
|
||||
return fb.bc.SubscribeChainEvent(ch)
|
||||
}
|
||||
func (fb *filterBackend) SubscribeRemovedLogsEvent(ch chan<- core.RemovedLogsEvent) event.Subscription {
|
||||
return fb.bc.SubscribeRemovedLogsEvent(ch)
|
||||
}
|
||||
func (fb *filterBackend) SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscription {
|
||||
return fb.bc.SubscribeLogsEvent(ch)
|
||||
}
|
||||
|
||||
func (fb *filterBackend) BloomStatus() (uint64, uint64) { return 4096, 0 }
|
||||
func (fb *filterBackend) ServiceFilter(ctx context.Context, ms *bloombits.MatcherSession) {
|
||||
panic("not supported")
|
||||
}
|
||||
|
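The hunks above add read-only accessors to the simulated backend (BlockByHash, BlockByNumber, HeaderByHash, HeaderByNumber, TransactionCount, TransactionInBlock and a SubscribeNewHead subscription). A rough usage sketch under assumptions: the constructor and gas limit mirror the tests in this changeset, the funded balance is arbitrary, and error handling is elided.

```go
package main

import (
	"context"
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	key, _ := crypto.GenerateKey()
	addr := crypto.PubkeyToAddress(key.PublicKey)

	// In-memory chain with one funded account and a 10M gas limit per block.
	sim := backends.NewSimulatedBackend(core.GenesisAlloc{addr: {Balance: big.NewInt(10000000000)}}, 10000000)
	defer sim.Close()
	sim.Commit() // seal an (empty) block so there is something to look up

	ctx := context.Background()
	head, _ := sim.HeaderByNumber(ctx, nil)       // latest header
	block, _ := sim.BlockByHash(ctx, head.Hash()) // the same block, fetched by hash
	count, _ := sim.TransactionCount(ctx, block.Hash())
	fmt.Println("block", block.NumberU64(), "holds", count, "transactions")
}
```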
@@ -14,20 +14,24 @@
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package backends_test
|
||||
package backends
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"math/big"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
ethereum "github.com/ethereum/go-ethereum"
|
||||
"github.com/ethereum/go-ethereum"
|
||||
"github.com/ethereum/go-ethereum/accounts/abi"
|
||||
"github.com/ethereum/go-ethereum/accounts/abi/bind"
|
||||
"github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/core"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/crypto"
|
||||
"github.com/ethereum/go-ethereum/params"
|
||||
)
|
||||
|
||||
func TestSimulatedBackend(t *testing.T) {
|
||||
@@ -37,7 +41,7 @@ func TestSimulatedBackend(t *testing.T) {
|
||||
genAlloc := make(core.GenesisAlloc)
|
||||
genAlloc[auth.From] = core.GenesisAccount{Balance: big.NewInt(9223372036854775807)}
|
||||
|
||||
sim := backends.NewSimulatedBackend(genAlloc, gasLimit)
|
||||
sim := NewSimulatedBackend(genAlloc, gasLimit)
|
||||
defer sim.Close()
|
||||
|
||||
// should return an error if the tx is not found
|
||||
@@ -72,12 +76,767 @@ func TestSimulatedBackend(t *testing.T) {
|
||||
}
|
||||
|
||||
sim.Commit()
|
||||
tx, isPending, err = sim.TransactionByHash(context.Background(), txHash)
|
||||
_, isPending, err = sim.TransactionByHash(context.Background(), txHash)
|
||||
if err != nil {
|
||||
t.Fatalf("error getting transaction with hash: %v", txHash.String())
|
||||
}
|
||||
if isPending {
|
||||
t.Fatal("transaction should not have pending status")
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
var testKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
|
||||
|
||||
// the following is based on this contract:
|
||||
// contract T {
|
||||
// event received(address sender, uint amount, bytes memo);
|
||||
// event receivedAddr(address sender);
|
||||
//
|
||||
// function receive(bytes calldata memo) external payable returns (string memory res) {
|
||||
// emit received(msg.sender, msg.value, memo);
|
||||
// emit receivedAddr(msg.sender);
|
||||
// return "hello world";
|
||||
// }
|
||||
// }
|
||||
const abiJSON = `[ { "constant": false, "inputs": [ { "name": "memo", "type": "bytes" } ], "name": "receive", "outputs": [ { "name": "res", "type": "string" } ], "payable": true, "stateMutability": "payable", "type": "function" }, { "anonymous": false, "inputs": [ { "indexed": false, "name": "sender", "type": "address" }, { "indexed": false, "name": "amount", "type": "uint256" }, { "indexed": false, "name": "memo", "type": "bytes" } ], "name": "received", "type": "event" }, { "anonymous": false, "inputs": [ { "indexed": false, "name": "sender", "type": "address" } ], "name": "receivedAddr", "type": "event" } ]`
|
||||
const abiBin = `0x608060405234801561001057600080fd5b506102a0806100206000396000f3fe60806040526004361061003b576000357c010000000000000000000000000000000000000000000000000000000090048063a69b6ed014610040575b600080fd5b6100b76004803603602081101561005657600080fd5b810190808035906020019064010000000081111561007357600080fd5b82018360208201111561008557600080fd5b803590602001918460018302840111640100000000831117156100a757600080fd5b9091929391929390505050610132565b6040518080602001828103825283818151815260200191508051906020019080838360005b838110156100f75780820151818401526020810190506100dc565b50505050905090810190601f1680156101245780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b60607f75fd880d39c1daf53b6547ab6cb59451fc6452d27caa90e5b6649dd8293b9eed33348585604051808573ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001848152602001806020018281038252848482818152602001925080828437600081840152601f19601f8201169050808301925050509550505050505060405180910390a17f46923992397eac56cf13058aced2a1871933622717e27b24eabc13bf9dd329c833604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390a16040805190810160405280600b81526020017f68656c6c6f20776f726c6400000000000000000000000000000000000000000081525090509291505056fea165627a7a72305820ff0c57dad254cfeda48c9cfb47f1353a558bccb4d1bc31da1dae69315772d29e0029`
|
||||
const deployedCode = `60806040526004361061003b576000357c010000000000000000000000000000000000000000000000000000000090048063a69b6ed014610040575b600080fd5b6100b76004803603602081101561005657600080fd5b810190808035906020019064010000000081111561007357600080fd5b82018360208201111561008557600080fd5b803590602001918460018302840111640100000000831117156100a757600080fd5b9091929391929390505050610132565b6040518080602001828103825283818151815260200191508051906020019080838360005b838110156100f75780820151818401526020810190506100dc565b50505050905090810190601f1680156101245780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b60607f75fd880d39c1daf53b6547ab6cb59451fc6452d27caa90e5b6649dd8293b9eed33348585604051808573ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001848152602001806020018281038252848482818152602001925080828437600081840152601f19601f8201169050808301925050509550505050505060405180910390a17f46923992397eac56cf13058aced2a1871933622717e27b24eabc13bf9dd329c833604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390a16040805190810160405280600b81526020017f68656c6c6f20776f726c6400000000000000000000000000000000000000000081525090509291505056fea165627a7a72305820ff0c57dad254cfeda48c9cfb47f1353a558bccb4d1bc31da1dae69315772d29e0029`
|
||||
|
||||
// expected return value contains "hello world"
|
||||
var expectedReturn = []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 11, 104, 101, 108, 108, 111, 32, 119, 111, 114, 108, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
|
||||
|
||||
func TestNewSimulatedBackend(t *testing.T) {
|
||||
testAddr := crypto.PubkeyToAddress(testKey.PublicKey)
|
||||
expectedBal := big.NewInt(10000000000)
|
||||
sim := NewSimulatedBackend(
|
||||
core.GenesisAlloc{
|
||||
testAddr: {Balance: expectedBal},
|
||||
}, 10000000,
|
||||
)
|
||||
defer sim.Close()
|
||||
|
||||
if sim.config != params.AllEthashProtocolChanges {
|
||||
t.Errorf("expected sim config to equal params.AllEthashProtocolChanges, got %v", sim.config)
|
||||
}
|
||||
|
||||
if sim.blockchain.Config() != params.AllEthashProtocolChanges {
|
||||
t.Errorf("expected sim blockchain config to equal params.AllEthashProtocolChanges, got %v", sim.config)
|
||||
}
|
||||
|
||||
statedb, _ := sim.blockchain.State()
|
||||
bal := statedb.GetBalance(testAddr)
|
||||
if bal.Cmp(expectedBal) != 0 {
|
||||
t.Errorf("expected balance for test address not received. expected: %v actual: %v", expectedBal, bal)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSimulatedBackend_AdjustTime(t *testing.T) {
|
||||
sim := NewSimulatedBackend(
|
||||
core.GenesisAlloc{}, 10000000,
|
||||
)
|
||||
defer sim.Close()
|
||||
|
||||
prevTime := sim.pendingBlock.Time()
|
||||
err := sim.AdjustTime(time.Second)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
newTime := sim.pendingBlock.Time()
|
||||
|
||||
if newTime-prevTime != uint64(time.Second.Seconds()) {
|
||||
t.Errorf("adjusted time not equal to a second. prev: %v, new: %v", prevTime, newTime)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSimulatedBackend_BalanceAt(t *testing.T) {
|
||||
testAddr := crypto.PubkeyToAddress(testKey.PublicKey)
|
||||
expectedBal := big.NewInt(10000000000)
|
||||
sim := NewSimulatedBackend(
|
||||
core.GenesisAlloc{
|
||||
testAddr: {Balance: expectedBal},
|
||||
}, 10000000,
|
||||
)
|
||||
defer sim.Close()
|
||||
bgCtx := context.Background()
|
||||
|
||||
bal, err := sim.BalanceAt(bgCtx, testAddr, nil)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if bal.Cmp(expectedBal) != 0 {
|
||||
t.Errorf("expected balance for test address not received. expected: %v actual: %v", expectedBal, bal)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSimulatedBackend_BlockByHash(t *testing.T) {
|
||||
sim := NewSimulatedBackend(
|
||||
core.GenesisAlloc{}, 10000000,
|
||||
)
|
||||
defer sim.Close()
|
||||
bgCtx := context.Background()
|
||||
|
||||
block, err := sim.BlockByNumber(bgCtx, nil)
|
||||
if err != nil {
|
||||
t.Errorf("could not get recent block: %v", err)
|
||||
}
|
||||
blockByHash, err := sim.BlockByHash(bgCtx, block.Hash())
|
||||
if err != nil {
|
||||
t.Errorf("could not get recent block: %v", err)
|
||||
}
|
||||
|
||||
if block.Hash() != blockByHash.Hash() {
|
||||
t.Errorf("did not get expected block")
|
||||
}
|
||||
}
|
||||
|
||||
func TestSimulatedBackend_BlockByNumber(t *testing.T) {
|
||||
sim := NewSimulatedBackend(
|
||||
core.GenesisAlloc{}, 10000000,
|
||||
)
|
||||
defer sim.Close()
|
||||
bgCtx := context.Background()
|
||||
|
||||
block, err := sim.BlockByNumber(bgCtx, nil)
|
||||
if err != nil {
|
||||
t.Errorf("could not get recent block: %v", err)
|
||||
}
|
||||
if block.NumberU64() != 0 {
|
||||
t.Errorf("did not get most recent block, instead got block number %v", block.NumberU64())
|
||||
}
|
||||
|
||||
// create one block
|
||||
sim.Commit()
|
||||
|
||||
block, err = sim.BlockByNumber(bgCtx, nil)
|
||||
if err != nil {
|
||||
t.Errorf("could not get recent block: %v", err)
|
||||
}
|
||||
if block.NumberU64() != 1 {
|
||||
t.Errorf("did not get most recent block, instead got block number %v", block.NumberU64())
|
||||
}
|
||||
|
||||
blockByNumber, err := sim.BlockByNumber(bgCtx, big.NewInt(1))
|
||||
if err != nil {
|
||||
t.Errorf("could not get block by number: %v", err)
|
||||
}
|
||||
if blockByNumber.Hash() != block.Hash() {
|
||||
t.Errorf("did not get the same block with height of 1 as before")
|
||||
}
|
||||
}
|
||||
|
||||
func TestSimulatedBackend_NonceAt(t *testing.T) {
|
||||
testAddr := crypto.PubkeyToAddress(testKey.PublicKey)
|
||||
|
||||
sim := NewSimulatedBackend(
|
||||
core.GenesisAlloc{
|
||||
testAddr: {Balance: big.NewInt(10000000000)},
|
||||
}, 10000000,
|
||||
)
|
||||
defer sim.Close()
|
||||
bgCtx := context.Background()
|
||||
|
||||
nonce, err := sim.NonceAt(bgCtx, testAddr, big.NewInt(0))
|
||||
if err != nil {
|
||||
t.Errorf("could not get nonce for test addr: %v", err)
|
||||
}
|
||||
|
||||
if nonce != uint64(0) {
|
||||
t.Errorf("received incorrect nonce. expected 0, got %v", nonce)
|
||||
}
|
||||
|
||||
// create a signed transaction to send
|
||||
tx := types.NewTransaction(nonce, testAddr, big.NewInt(1000), params.TxGas, big.NewInt(1), nil)
|
||||
signedTx, err := types.SignTx(tx, types.HomesteadSigner{}, testKey)
|
||||
if err != nil {
|
||||
t.Errorf("could not sign tx: %v", err)
|
||||
}
|
||||
|
||||
// send tx to simulated backend
|
||||
err = sim.SendTransaction(bgCtx, signedTx)
|
||||
if err != nil {
|
||||
t.Errorf("could not add tx to pending block: %v", err)
|
||||
}
|
||||
sim.Commit()
|
||||
|
||||
newNonce, err := sim.NonceAt(bgCtx, testAddr, big.NewInt(1))
|
||||
if err != nil {
|
||||
t.Errorf("could not get nonce for test addr: %v", err)
|
||||
}
|
||||
|
||||
if newNonce != nonce+uint64(1) {
|
||||
t.Errorf("received incorrect nonce. expected 1, got %v", nonce)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSimulatedBackend_SendTransaction(t *testing.T) {
|
||||
testAddr := crypto.PubkeyToAddress(testKey.PublicKey)
|
||||
|
||||
sim := NewSimulatedBackend(
|
||||
core.GenesisAlloc{
|
||||
testAddr: {Balance: big.NewInt(10000000000)},
|
||||
}, 10000000,
|
||||
)
|
||||
defer sim.Close()
|
||||
bgCtx := context.Background()
|
||||
|
||||
// create a signed transaction to send
|
||||
tx := types.NewTransaction(uint64(0), testAddr, big.NewInt(1000), params.TxGas, big.NewInt(1), nil)
|
||||
signedTx, err := types.SignTx(tx, types.HomesteadSigner{}, testKey)
|
||||
if err != nil {
|
||||
t.Errorf("could not sign tx: %v", err)
|
||||
}
|
||||
|
||||
// send tx to simulated backend
|
||||
err = sim.SendTransaction(bgCtx, signedTx)
|
||||
if err != nil {
|
||||
t.Errorf("could not add tx to pending block: %v", err)
|
||||
}
|
||||
sim.Commit()
|
||||
|
||||
block, err := sim.BlockByNumber(bgCtx, big.NewInt(1))
|
||||
if err != nil {
|
||||
t.Errorf("could not get block at height 1: %v", err)
|
||||
}
|
||||
|
||||
if signedTx.Hash() != block.Transactions()[0].Hash() {
|
||||
t.Errorf("did not commit sent transaction. expected hash %v got hash %v", block.Transactions()[0].Hash(), signedTx.Hash())
|
||||
}
|
||||
}
|
||||
|
||||
func TestSimulatedBackend_TransactionByHash(t *testing.T) {
|
||||
testAddr := crypto.PubkeyToAddress(testKey.PublicKey)
|
||||
|
||||
sim := NewSimulatedBackend(
|
||||
core.GenesisAlloc{
|
||||
testAddr: {Balance: big.NewInt(10000000000)},
|
||||
}, 10000000,
|
||||
)
|
||||
defer sim.Close()
|
||||
bgCtx := context.Background()
|
||||
|
||||
// create a signed transaction to send
|
||||
tx := types.NewTransaction(uint64(0), testAddr, big.NewInt(1000), params.TxGas, big.NewInt(1), nil)
|
||||
signedTx, err := types.SignTx(tx, types.HomesteadSigner{}, testKey)
|
||||
if err != nil {
|
||||
t.Errorf("could not sign tx: %v", err)
|
||||
}
|
||||
|
||||
// send tx to simulated backend
|
||||
err = sim.SendTransaction(bgCtx, signedTx)
|
||||
if err != nil {
|
||||
t.Errorf("could not add tx to pending block: %v", err)
|
||||
}
|
||||
|
||||
// ensure tx is committed pending
|
||||
receivedTx, pending, err := sim.TransactionByHash(bgCtx, signedTx.Hash())
|
||||
if err != nil {
|
||||
t.Errorf("could not get transaction by hash %v: %v", signedTx.Hash(), err)
|
||||
}
|
||||
if !pending {
|
||||
t.Errorf("expected transaction to be in pending state")
|
||||
}
|
||||
if receivedTx.Hash() != signedTx.Hash() {
|
||||
t.Errorf("did not received committed transaction. expected hash %v got hash %v", signedTx.Hash(), receivedTx.Hash())
|
||||
}
|
||||
|
||||
sim.Commit()
|
||||
|
||||
// ensure tx is not and committed pending
|
||||
receivedTx, pending, err = sim.TransactionByHash(bgCtx, signedTx.Hash())
|
||||
if err != nil {
|
||||
t.Errorf("could not get transaction by hash %v: %v", signedTx.Hash(), err)
|
||||
}
|
||||
if pending {
|
||||
t.Errorf("expected transaction to not be in pending state")
|
||||
}
|
||||
if receivedTx.Hash() != signedTx.Hash() {
|
||||
t.Errorf("did not received committed transaction. expected hash %v got hash %v", signedTx.Hash(), receivedTx.Hash())
|
||||
}
|
||||
}
|
||||
|
||||
func TestSimulatedBackend_EstimateGas(t *testing.T) {
|
||||
sim := NewSimulatedBackend(
|
||||
core.GenesisAlloc{}, 10000000,
|
||||
)
|
||||
defer sim.Close()
|
||||
bgCtx := context.Background()
|
||||
testAddr := crypto.PubkeyToAddress(testKey.PublicKey)
|
||||
|
||||
gas, err := sim.EstimateGas(bgCtx, ethereum.CallMsg{
|
||||
From: testAddr,
|
||||
To: &testAddr,
|
||||
Value: big.NewInt(1000),
|
||||
Data: []byte{},
|
||||
})
|
||||
if err != nil {
|
||||
t.Errorf("could not estimate gas: %v", err)
|
||||
}
|
||||
|
||||
if gas != params.TxGas {
|
||||
t.Errorf("expected 21000 gas cost for a transaction got %v", gas)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSimulatedBackend_HeaderByHash(t *testing.T) {
|
||||
testAddr := crypto.PubkeyToAddress(testKey.PublicKey)
|
||||
|
||||
sim := NewSimulatedBackend(
|
||||
core.GenesisAlloc{
|
||||
testAddr: {Balance: big.NewInt(10000000000)},
|
||||
}, 10000000,
|
||||
)
|
||||
defer sim.Close()
|
||||
bgCtx := context.Background()
|
||||
|
||||
header, err := sim.HeaderByNumber(bgCtx, nil)
|
||||
if err != nil {
|
||||
t.Errorf("could not get recent block: %v", err)
|
||||
}
|
||||
headerByHash, err := sim.HeaderByHash(bgCtx, header.Hash())
|
||||
if err != nil {
|
||||
t.Errorf("could not get recent block: %v", err)
|
||||
}
|
||||
|
||||
if header.Hash() != headerByHash.Hash() {
|
||||
t.Errorf("did not get expected block")
|
||||
}
|
||||
}
|
||||
|
||||
func TestSimulatedBackend_HeaderByNumber(t *testing.T) {
|
||||
testAddr := crypto.PubkeyToAddress(testKey.PublicKey)
|
||||
|
||||
sim := NewSimulatedBackend(
|
||||
core.GenesisAlloc{
|
||||
testAddr: {Balance: big.NewInt(10000000000)},
|
||||
}, 10000000,
|
||||
)
|
||||
defer sim.Close()
|
||||
bgCtx := context.Background()
|
||||
|
||||
latestBlockHeader, err := sim.HeaderByNumber(bgCtx, nil)
|
||||
if err != nil {
|
||||
t.Errorf("could not get header for tip of chain: %v", err)
|
||||
}
|
||||
if latestBlockHeader == nil {
|
||||
t.Errorf("received a nil block header")
|
||||
}
|
||||
if latestBlockHeader.Number.Uint64() != uint64(0) {
|
||||
t.Errorf("expected block header number 0, instead got %v", latestBlockHeader.Number.Uint64())
|
||||
}
|
||||
|
||||
sim.Commit()
|
||||
|
||||
latestBlockHeader, err = sim.HeaderByNumber(bgCtx, nil)
|
||||
if err != nil {
|
||||
t.Errorf("could not get header for blockheight of 1: %v", err)
|
||||
}
|
||||
|
||||
blockHeader, err := sim.HeaderByNumber(bgCtx, big.NewInt(1))
|
||||
if err != nil {
|
||||
t.Errorf("could not get header for blockheight of 1: %v", err)
|
||||
}
|
||||
|
||||
if blockHeader.Hash() != latestBlockHeader.Hash() {
|
||||
t.Errorf("block header and latest block header are not the same")
|
||||
}
|
||||
if blockHeader.Number.Int64() != int64(1) {
|
||||
t.Errorf("did not get blockheader for block 1. instead got block %v", blockHeader.Number.Int64())
|
||||
}
|
||||
|
||||
block, err := sim.BlockByNumber(bgCtx, big.NewInt(1))
|
||||
if err != nil {
|
||||
t.Errorf("could not get block for blockheight of 1: %v", err)
|
||||
}
|
||||
|
||||
if block.Hash() != blockHeader.Hash() {
|
||||
t.Errorf("block hash and block header hash do not match. expected %v, got %v", block.Hash(), blockHeader.Hash())
|
||||
}
|
||||
}
|
||||
|
||||
func TestSimulatedBackend_TransactionCount(t *testing.T) {
|
||||
testAddr := crypto.PubkeyToAddress(testKey.PublicKey)
|
||||
|
||||
sim := NewSimulatedBackend(
|
||||
core.GenesisAlloc{
|
||||
testAddr: {Balance: big.NewInt(10000000000)},
|
||||
}, 10000000,
|
||||
)
|
||||
defer sim.Close()
|
||||
bgCtx := context.Background()
|
||||
currentBlock, err := sim.BlockByNumber(bgCtx, nil)
|
||||
if err != nil || currentBlock == nil {
|
||||
t.Error("could not get current block")
|
||||
}
|
||||
|
||||
count, err := sim.TransactionCount(bgCtx, currentBlock.Hash())
|
||||
if err != nil {
|
||||
t.Error("could not get current block's transaction count")
|
||||
}
|
||||
|
||||
if count != 0 {
|
||||
t.Errorf("expected transaction count of %v does not match actual count of %v", 0, count)
|
||||
}
|
||||
|
||||
// create a signed transaction to send
|
||||
tx := types.NewTransaction(uint64(0), testAddr, big.NewInt(1000), params.TxGas, big.NewInt(1), nil)
|
||||
signedTx, err := types.SignTx(tx, types.HomesteadSigner{}, testKey)
|
||||
if err != nil {
|
||||
t.Errorf("could not sign tx: %v", err)
|
||||
}
|
||||
|
||||
// send tx to simulated backend
|
||||
err = sim.SendTransaction(bgCtx, signedTx)
|
||||
if err != nil {
|
||||
t.Errorf("could not add tx to pending block: %v", err)
|
||||
}
|
||||
|
||||
sim.Commit()
|
||||
|
||||
lastBlock, err := sim.BlockByNumber(bgCtx, nil)
|
||||
if err != nil {
|
||||
t.Errorf("could not get header for tip of chain: %v", err)
|
||||
}
|
||||
|
||||
count, err = sim.TransactionCount(bgCtx, lastBlock.Hash())
|
||||
if err != nil {
|
||||
t.Error("could not get current block's transaction count")
|
||||
}
|
||||
|
||||
if count != 1 {
|
||||
t.Errorf("expected transaction count of %v does not match actual count of %v", 1, count)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSimulatedBackend_TransactionInBlock(t *testing.T) {
|
||||
testAddr := crypto.PubkeyToAddress(testKey.PublicKey)
|
||||
|
||||
sim := NewSimulatedBackend(
|
||||
core.GenesisAlloc{
|
||||
testAddr: {Balance: big.NewInt(10000000000)},
|
||||
}, 10000000,
|
||||
)
|
||||
defer sim.Close()
|
||||
bgCtx := context.Background()
|
||||
|
||||
transaction, err := sim.TransactionInBlock(bgCtx, sim.pendingBlock.Hash(), uint(0))
|
||||
if err != errTransactionDoesNotExist {
|
||||
t.Errorf("expected a transaction does not exist error to be received but received %v", err)
|
||||
}
|
||||
if transaction != nil {
|
||||
t.Errorf("expected transaction to be nil but received %v", transaction)
|
||||
}
|
||||
|
||||
// expect pending nonce to be 0 since account has not been used
|
||||
pendingNonce, err := sim.PendingNonceAt(bgCtx, testAddr)
|
||||
if err != nil {
|
||||
t.Errorf("did not get the pending nonce: %v", err)
|
||||
}
|
||||
|
||||
if pendingNonce != uint64(0) {
|
||||
t.Errorf("expected pending nonce of 0 got %v", pendingNonce)
|
||||
}
|
||||
|
||||
// create a signed transaction to send
|
||||
tx := types.NewTransaction(uint64(0), testAddr, big.NewInt(1000), params.TxGas, big.NewInt(1), nil)
|
||||
signedTx, err := types.SignTx(tx, types.HomesteadSigner{}, testKey)
|
||||
if err != nil {
|
||||
t.Errorf("could not sign tx: %v", err)
|
||||
}
|
||||
|
||||
// send tx to simulated backend
|
||||
err = sim.SendTransaction(bgCtx, signedTx)
|
||||
if err != nil {
|
||||
t.Errorf("could not add tx to pending block: %v", err)
|
||||
}
|
||||
|
||||
sim.Commit()
|
||||
|
||||
lastBlock, err := sim.BlockByNumber(bgCtx, nil)
|
||||
if err != nil {
|
||||
t.Errorf("could not get header for tip of chain: %v", err)
|
||||
}
|
||||
|
||||
transaction, err = sim.TransactionInBlock(bgCtx, lastBlock.Hash(), uint(1))
|
||||
if err != errTransactionDoesNotExist {
|
||||
t.Errorf("expected a transaction does not exist error to be received but received %v", err)
|
||||
}
|
||||
if transaction != nil {
|
||||
t.Errorf("expected transaction to be nil but received %v", transaction)
|
||||
}
|
||||
|
||||
transaction, err = sim.TransactionInBlock(bgCtx, lastBlock.Hash(), uint(0))
|
||||
if err != nil {
|
||||
t.Errorf("could not get transaction in the lastest block with hash %v: %v", lastBlock.Hash().String(), err)
|
||||
}
|
||||
|
||||
if signedTx.Hash().String() != transaction.Hash().String() {
|
||||
t.Errorf("received transaction that did not match the sent transaction. expected hash %v, got hash %v", signedTx.Hash().String(), transaction.Hash().String())
|
||||
}
|
||||
}
|
||||
|
||||
func TestSimulatedBackend_PendingNonceAt(t *testing.T) {
|
||||
testAddr := crypto.PubkeyToAddress(testKey.PublicKey)
|
||||
|
||||
sim := NewSimulatedBackend(
|
||||
core.GenesisAlloc{
|
||||
testAddr: {Balance: big.NewInt(10000000000)},
|
||||
}, 10000000,
|
||||
)
|
||||
defer sim.Close()
|
||||
bgCtx := context.Background()
|
||||
|
||||
// expect pending nonce to be 0 since account has not been used
|
||||
pendingNonce, err := sim.PendingNonceAt(bgCtx, testAddr)
|
||||
if err != nil {
|
||||
t.Errorf("did not get the pending nonce: %v", err)
|
||||
}
|
||||
|
||||
if pendingNonce != uint64(0) {
|
||||
t.Errorf("expected pending nonce of 0 got %v", pendingNonce)
|
||||
}
|
||||
|
||||
// create a signed transaction to send
|
||||
tx := types.NewTransaction(uint64(0), testAddr, big.NewInt(1000), params.TxGas, big.NewInt(1), nil)
|
||||
signedTx, err := types.SignTx(tx, types.HomesteadSigner{}, testKey)
|
||||
if err != nil {
|
||||
t.Errorf("could not sign tx: %v", err)
|
||||
}
|
||||
|
||||
// send tx to simulated backend
|
||||
err = sim.SendTransaction(bgCtx, signedTx)
|
||||
if err != nil {
|
||||
t.Errorf("could not add tx to pending block: %v", err)
|
||||
}
|
||||
|
||||
// expect pending nonce to be 1 since account has submitted one transaction
|
||||
pendingNonce, err = sim.PendingNonceAt(bgCtx, testAddr)
|
||||
if err != nil {
|
||||
t.Errorf("did not get the pending nonce: %v", err)
|
||||
}
|
||||
|
||||
if pendingNonce != uint64(1) {
|
||||
t.Errorf("expected pending nonce of 1 got %v", pendingNonce)
|
||||
}
|
||||
|
||||
// make a new transaction with a nonce of 1
|
||||
tx = types.NewTransaction(uint64(1), testAddr, big.NewInt(1000), params.TxGas, big.NewInt(1), nil)
|
||||
signedTx, err = types.SignTx(tx, types.HomesteadSigner{}, testKey)
|
||||
if err != nil {
|
||||
t.Errorf("could not sign tx: %v", err)
|
||||
}
|
||||
err = sim.SendTransaction(bgCtx, signedTx)
|
||||
if err != nil {
|
||||
t.Errorf("could not send tx: %v", err)
|
||||
}
|
||||
|
||||
// expect pending nonce to be 2 since account now has two transactions
|
||||
pendingNonce, err = sim.PendingNonceAt(bgCtx, testAddr)
|
||||
if err != nil {
|
||||
t.Errorf("did not get the pending nonce: %v", err)
|
||||
}
|
||||
|
||||
if pendingNonce != uint64(2) {
|
||||
t.Errorf("expected pending nonce of 2 got %v", pendingNonce)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSimulatedBackend_TransactionReceipt(t *testing.T) {
|
||||
testAddr := crypto.PubkeyToAddress(testKey.PublicKey)
|
||||
|
||||
sim := NewSimulatedBackend(
|
||||
core.GenesisAlloc{
|
||||
testAddr: {Balance: big.NewInt(10000000000)},
|
||||
}, 10000000,
|
||||
)
|
||||
defer sim.Close()
|
||||
bgCtx := context.Background()
|
||||
|
||||
// create a signed transaction to send
|
||||
tx := types.NewTransaction(uint64(0), testAddr, big.NewInt(1000), params.TxGas, big.NewInt(1), nil)
|
||||
signedTx, err := types.SignTx(tx, types.HomesteadSigner{}, testKey)
|
||||
if err != nil {
|
||||
t.Errorf("could not sign tx: %v", err)
|
||||
}
|
||||
|
||||
// send tx to simulated backend
|
||||
err = sim.SendTransaction(bgCtx, signedTx)
|
||||
if err != nil {
|
||||
t.Errorf("could not add tx to pending block: %v", err)
|
||||
}
|
||||
sim.Commit()
|
||||
|
||||
receipt, err := sim.TransactionReceipt(bgCtx, signedTx.Hash())
|
||||
if err != nil {
|
||||
t.Errorf("could not get transaction receipt: %v", err)
|
||||
}
|
||||
|
||||
if receipt.ContractAddress != testAddr && receipt.TxHash != signedTx.Hash() {
|
||||
t.Errorf("received receipt is not correct: %v", receipt)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSimulatedBackend_SuggestGasPrice(t *testing.T) {
|
||||
sim := NewSimulatedBackend(
|
||||
core.GenesisAlloc{},
|
||||
10000000,
|
||||
)
|
||||
defer sim.Close()
|
||||
bgCtx := context.Background()
|
||||
gasPrice, err := sim.SuggestGasPrice(bgCtx)
|
||||
if err != nil {
|
||||
t.Errorf("could not get gas price: %v", err)
|
||||
}
|
||||
if gasPrice.Uint64() != uint64(1) {
|
||||
t.Errorf("gas price was not expected value of 1. actual: %v", gasPrice.Uint64())
|
||||
}
|
||||
}
|
||||
|
||||
func TestSimulatedBackend_PendingCodeAt(t *testing.T) {
|
||||
testAddr := crypto.PubkeyToAddress(testKey.PublicKey)
|
||||
sim := NewSimulatedBackend(
|
||||
core.GenesisAlloc{
|
||||
testAddr: {Balance: big.NewInt(10000000000)},
|
||||
},
|
||||
10000000,
|
||||
)
|
||||
defer sim.Close()
|
||||
bgCtx := context.Background()
|
||||
code, err := sim.CodeAt(bgCtx, testAddr, nil)
|
||||
if err != nil {
|
||||
t.Errorf("could not get code at test addr: %v", err)
|
||||
}
|
||||
if len(code) != 0 {
|
||||
t.Errorf("got code for account that does not have contract code")
|
||||
}
|
||||
|
||||
parsed, err := abi.JSON(strings.NewReader(abiJSON))
|
||||
if err != nil {
|
||||
t.Errorf("could not get code at test addr: %v", err)
|
||||
}
|
||||
auth := bind.NewKeyedTransactor(testKey)
|
||||
contractAddr, tx, contract, err := bind.DeployContract(auth, parsed, common.FromHex(abiBin), sim)
|
||||
if err != nil {
|
||||
t.Errorf("could not deploy contract: %v tx: %v contract: %v", err, tx, contract)
|
||||
}
|
||||
|
||||
code, err = sim.PendingCodeAt(bgCtx, contractAddr)
|
||||
if err != nil {
|
||||
t.Errorf("could not get code at test addr: %v", err)
|
||||
}
|
||||
if len(code) == 0 {
|
||||
t.Errorf("did not get code for account that has contract code")
|
||||
}
|
||||
// ensure code received equals code deployed
|
||||
if !bytes.Equal(code, common.FromHex(deployedCode)) {
|
||||
t.Errorf("code received did not match expected deployed code:\n expected %v\n actual %v", common.FromHex(deployedCode), code)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSimulatedBackend_CodeAt(t *testing.T) {
|
||||
testAddr := crypto.PubkeyToAddress(testKey.PublicKey)
|
||||
sim := NewSimulatedBackend(
|
||||
core.GenesisAlloc{
|
||||
testAddr: {Balance: big.NewInt(10000000000)},
|
||||
},
|
||||
10000000,
|
||||
)
|
||||
defer sim.Close()
|
||||
bgCtx := context.Background()
|
||||
code, err := sim.CodeAt(bgCtx, testAddr, nil)
|
||||
if err != nil {
|
||||
t.Errorf("could not get code at test addr: %v", err)
|
||||
}
|
||||
if len(code) != 0 {
|
||||
t.Errorf("got code for account that does not have contract code")
|
||||
}
|
||||
|
||||
parsed, err := abi.JSON(strings.NewReader(abiJSON))
|
||||
if err != nil {
|
||||
t.Errorf("could not get code at test addr: %v", err)
|
||||
}
|
||||
auth := bind.NewKeyedTransactor(testKey)
|
||||
contractAddr, tx, contract, err := bind.DeployContract(auth, parsed, common.FromHex(abiBin), sim)
|
||||
if err != nil {
|
||||
t.Errorf("could not deploy contract: %v tx: %v contract: %v", err, tx, contract)
|
||||
}
|
||||
|
||||
sim.Commit()
|
||||
code, err = sim.CodeAt(bgCtx, contractAddr, nil)
|
||||
if err != nil {
|
||||
t.Errorf("could not get code at test addr: %v", err)
|
||||
}
|
||||
if len(code) == 0 {
|
||||
t.Errorf("did not get code for account that has contract code")
|
||||
}
|
||||
// ensure code received equals code deployed
|
||||
if !bytes.Equal(code, common.FromHex(deployedCode)) {
|
||||
t.Errorf("code received did not match expected deployed code:\n expected %v\n actual %v", common.FromHex(deployedCode), code)
|
||||
}
|
||||
}
|
||||
|
||||
// When receive("X") is called with sender 0x00... and value 1, it produces this tx receipt:
|
||||
// receipt{status=1 cgas=23949 bloom=00000000004000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000040200000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 logs=[log: b6818c8064f645cd82d99b59a1a267d6d61117ef [75fd880d39c1daf53b6547ab6cb59451fc6452d27caa90e5b6649dd8293b9eed] 000000000000000000000000376c47978271565f56deb45495afa69e59c16ab200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000000000158 9ae378b6d4409eada347a5dc0c180f186cb62dc68fcc0f043425eb917335aa28 0 95d429d309bb9d753954195fe2d69bd140b4ae731b9b5b605c34323de162cf00 0]}
|
||||
func TestSimulatedBackend_PendingAndCallContract(t *testing.T) {
|
||||
testAddr := crypto.PubkeyToAddress(testKey.PublicKey)
|
||||
sim := NewSimulatedBackend(
|
||||
core.GenesisAlloc{
|
||||
testAddr: {Balance: big.NewInt(10000000000)},
|
||||
},
|
||||
10000000,
|
||||
)
|
||||
defer sim.Close()
|
||||
bgCtx := context.Background()
|
||||
|
||||
parsed, err := abi.JSON(strings.NewReader(abiJSON))
|
||||
if err != nil {
|
||||
t.Errorf("could not get code at test addr: %v", err)
|
||||
}
|
||||
contractAuth := bind.NewKeyedTransactor(testKey)
|
||||
addr, _, _, err := bind.DeployContract(contractAuth, parsed, common.FromHex(abiBin), sim)
|
||||
if err != nil {
|
||||
t.Errorf("could not deploy contract: %v", err)
|
||||
}
|
||||
|
||||
input, err := parsed.Pack("receive", []byte("X"))
|
||||
if err != nil {
|
||||
t.Errorf("could pack receive function on contract: %v", err)
|
||||
}
|
||||
|
||||
// make sure you can call the contract in pending state
|
||||
res, err := sim.PendingCallContract(bgCtx, ethereum.CallMsg{
|
||||
From: testAddr,
|
||||
To: &addr,
|
||||
Data: input,
|
||||
})
|
||||
if err != nil {
|
||||
t.Errorf("could not call receive method on contract: %v", err)
|
||||
}
|
||||
if len(res) == 0 {
|
||||
t.Errorf("result of contract call was empty: %v", res)
|
||||
}
|
||||
|
||||
// while comparing against the byte array is more exact, also compare against the human readable string for readability
|
||||
if !bytes.Equal(res, expectedReturn) || !strings.Contains(string(res), "hello world") {
|
||||
t.Errorf("response from calling contract was expected to be 'hello world' instead received %v", string(res))
|
||||
}
|
||||
|
||||
sim.Commit()
|
||||
|
||||
// make sure you can call the contract
|
||||
res, err = sim.CallContract(bgCtx, ethereum.CallMsg{
|
||||
From: testAddr,
|
||||
To: &addr,
|
||||
Data: input,
|
||||
}, nil)
|
||||
if err != nil {
|
||||
t.Errorf("could not call receive method on contract: %v", err)
|
||||
}
|
||||
if len(res) == 0 {
|
||||
t.Errorf("result of contract call was empty: %v", res)
|
||||
}
|
||||
|
||||
if !bytes.Equal(res, expectedReturn) || !strings.Contains(string(res), "hello world") {
|
||||
t.Errorf("response from calling contract was expected to be 'hello world' instead received %v", string(res))
|
||||
}
|
||||
}
|
||||
|
@@ -218,7 +218,7 @@ func (c *BoundContract) transact(opts *TransactOpts, contract *common.Address, i
|
||||
}
|
||||
}
|
||||
// If the contract surely has code (or code is not needed), estimate the transaction
|
||||
msg := ethereum.CallMsg{From: opts.From, To: contract, Value: value, Data: input}
|
||||
msg := ethereum.CallMsg{From: opts.From, To: contract, GasPrice: gasPrice, Value: value, Data: input}
|
||||
gasLimit, err = c.transactor.EstimateGas(ensureContext(opts.Context), msg)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to estimate gas needed: %v", err)
|
||||
|
@@ -47,13 +47,17 @@ const (
|
||||
// to be used as is in client code, but rather as an intermediate struct which
|
||||
// enforces compile time type safety and naming convention opposed to having to
|
||||
// manually maintain hard coded strings that break on runtime.
|
||||
func Bind(types []string, abis []string, bytecodes []string, fsigs []map[string]string, pkg string, lang Lang, libs map[string]string) (string, error) {
|
||||
// Process each individual contract requested binding
|
||||
contracts := make(map[string]*tmplContract)
|
||||
func Bind(types []string, abis []string, bytecodes []string, fsigs []map[string]string, pkg string, lang Lang, libs map[string]string, aliases map[string]string) (string, error) {
|
||||
var (
|
||||
// contracts is the map of each individual contract requested binding
|
||||
contracts = make(map[string]*tmplContract)
|
||||
|
||||
// Map used to flag each encountered library as such
|
||||
isLib := make(map[string]struct{})
|
||||
// structs is the map of all redeclared structs shared by passed contracts.
|
||||
structs = make(map[string]*tmplStruct)
|
||||
|
||||
// isLib is the map used to flag each encountered library as such
|
||||
isLib = make(map[string]struct{})
|
||||
)
|
||||
for i := 0; i < len(types); i++ {
|
||||
// Parse the actual ABI to generate the binding for
|
||||
evmABI, err := abi.JSON(strings.NewReader(abis[i]))
|
||||
@@ -73,20 +77,36 @@ func Bind(types []string, abis []string, bytecodes []string, fsigs []map[string]
|
||||
calls = make(map[string]*tmplMethod)
|
||||
transacts = make(map[string]*tmplMethod)
|
||||
events = make(map[string]*tmplEvent)
|
||||
structs = make(map[string]*tmplStruct)
|
||||
|
||||
// identifiers are used to detect duplicated identifier of function
|
||||
// and event. For all calls, transacts and events, abigen will generate
|
||||
// corresponding bindings. However we have to ensure there is no
|
||||
// identifier collision in the bindings of these categories.
|
||||
callIdentifiers = make(map[string]bool)
|
||||
transactIdentifiers = make(map[string]bool)
|
||||
eventIdentifiers = make(map[string]bool)
|
||||
)
|
||||
for _, original := range evmABI.Methods {
|
||||
// Normalize the method for capital cases and non-anonymous inputs/outputs
|
||||
normalized := original
|
||||
normalized.Name = methodNormalizer[lang](original.Name)
|
||||
|
||||
normalizedName := methodNormalizer[lang](alias(aliases, original.Name))
|
||||
// Ensure there is no duplicated identifier
|
||||
var identifiers = callIdentifiers
|
||||
if !original.Const {
|
||||
identifiers = transactIdentifiers
|
||||
}
|
||||
if identifiers[normalizedName] {
|
||||
return "", fmt.Errorf("duplicated identifier \"%s\"(normalized \"%s\"), use --alias for renaming", original.Name, normalizedName)
|
||||
}
|
||||
identifiers[normalizedName] = true
|
||||
normalized.Name = normalizedName
|
||||
normalized.Inputs = make([]abi.Argument, len(original.Inputs))
|
||||
copy(normalized.Inputs, original.Inputs)
|
||||
for j, input := range normalized.Inputs {
|
||||
if input.Name == "" {
|
||||
normalized.Inputs[j].Name = fmt.Sprintf("arg%d", j)
|
||||
}
|
||||
if _, exist := structs[input.Type.String()]; input.Type.T == abi.TupleTy && !exist {
|
||||
if hasStruct(input.Type) {
|
||||
bindStructType[lang](input.Type, structs)
|
||||
}
|
||||
}
|
||||
@@ -96,7 +116,7 @@ func Bind(types []string, abis []string, bytecodes []string, fsigs []map[string]
|
||||
if output.Name != "" {
|
||||
normalized.Outputs[j].Name = capitalise(output.Name)
|
||||
}
|
||||
if _, exist := structs[output.Type.String()]; output.Type.T == abi.TupleTy && !exist {
|
||||
if hasStruct(output.Type) {
|
||||
bindStructType[lang](output.Type, structs)
|
||||
}
|
||||
}
|
||||
@@ -114,19 +134,23 @@ func Bind(types []string, abis []string, bytecodes []string, fsigs []map[string]
|
||||
}
|
||||
// Normalize the event for capital cases and non-anonymous outputs
|
||||
normalized := original
|
||||
normalized.Name = methodNormalizer[lang](original.Name)
|
||||
|
||||
// Ensure there is no duplicated identifier
|
||||
normalizedName := methodNormalizer[lang](alias(aliases, original.Name))
|
||||
if eventIdentifiers[normalizedName] {
|
||||
return "", fmt.Errorf("duplicated identifier \"%s\"(normalized \"%s\"), use --alias for renaming", original.Name, normalizedName)
|
||||
}
|
||||
eventIdentifiers[normalizedName] = true
|
||||
normalized.Name = normalizedName
|
||||
|
||||
normalized.Inputs = make([]abi.Argument, len(original.Inputs))
|
||||
copy(normalized.Inputs, original.Inputs)
|
||||
for j, input := range normalized.Inputs {
|
||||
// Indexed fields are input, non-indexed ones are outputs
|
||||
if input.Indexed {
|
||||
if input.Name == "" {
|
||||
normalized.Inputs[j].Name = fmt.Sprintf("arg%d", j)
|
||||
}
|
||||
if _, exist := structs[input.Type.String()]; input.Type.T == abi.TupleTy && !exist {
|
||||
bindStructType[lang](input.Type, structs)
|
||||
}
|
||||
if input.Name == "" {
|
||||
normalized.Inputs[j].Name = fmt.Sprintf("arg%d", j)
|
||||
}
|
||||
if hasStruct(input.Type) {
|
||||
bindStructType[lang](input.Type, structs)
|
||||
}
|
||||
}
|
||||
// Append the event to the accumulator list
|
||||
@@ -147,7 +171,6 @@ func Bind(types []string, abis []string, bytecodes []string, fsigs []map[string]
|
||||
Transacts: transacts,
|
||||
Events: events,
|
||||
Libraries: make(map[string]string),
|
||||
Structs: structs,
|
||||
}
|
||||
// Function 4-byte signatures are stored in the same sequence
|
||||
// as types, if available.
|
||||
@@ -179,6 +202,7 @@ func Bind(types []string, abis []string, bytecodes []string, fsigs []map[string]
|
||||
Package: pkg,
|
||||
Contracts: contracts,
|
||||
Libraries: libs,
|
||||
Structs: structs,
|
||||
}
|
||||
buffer := new(bytes.Buffer)
|
||||
|
||||
@@ -244,7 +268,7 @@ func bindBasicTypeGo(kind abi.Type) string {
|
||||
func bindTypeGo(kind abi.Type, structs map[string]*tmplStruct) string {
|
||||
switch kind.T {
|
||||
case abi.TupleTy:
|
||||
return structs[kind.String()].Name
|
||||
return structs[kind.TupleRawName+kind.String()].Name
|
||||
case abi.ArrayTy:
|
||||
return fmt.Sprintf("[%d]", kind.Size) + bindTypeGo(*kind.Elem, structs)
|
||||
case abi.SliceTy:
|
||||
@@ -321,7 +345,7 @@ func pluralizeJavaType(typ string) string {
|
||||
func bindTypeJava(kind abi.Type, structs map[string]*tmplStruct) string {
|
||||
switch kind.T {
|
||||
case abi.TupleTy:
|
||||
return structs[kind.String()].Name
|
||||
return structs[kind.TupleRawName+kind.String()].Name
|
||||
case abi.ArrayTy, abi.SliceTy:
|
||||
return pluralizeJavaType(bindTypeJava(*kind.Elem, structs))
|
||||
default:
|
||||
@@ -340,6 +364,13 @@ var bindTopicType = map[Lang]func(kind abi.Type, structs map[string]*tmplStruct)
|
||||
// functionality as for simple types, but dynamic types get converted to hashes.
|
||||
func bindTopicTypeGo(kind abi.Type, structs map[string]*tmplStruct) string {
|
||||
bound := bindTypeGo(kind, structs)
|
||||
|
||||
// todo(rjl493456442) according solidity documentation, indexed event
|
||||
// parameters that are not value types i.e. arrays and structs are not
|
||||
// stored directly but instead a keccak256-hash of an encoding is stored.
|
||||
//
|
||||
// We only convert strings and bytes to hash, still need to deal with
|
||||
// array(both fixed-size and dynamic-size) and struct.
|
||||
if bound == "string" || bound == "[]byte" {
|
||||
bound = "common.Hash"
|
||||
}
|
||||
@@ -350,6 +381,13 @@ func bindTopicTypeGo(kind abi.Type, structs map[string]*tmplStruct) string {
|
||||
// functionality as for simple types, but dynamic types get converted to hashes.
|
||||
func bindTopicTypeJava(kind abi.Type, structs map[string]*tmplStruct) string {
|
||||
bound := bindTypeJava(kind, structs)
|
||||
|
||||
// todo(rjl493456442) according solidity documentation, indexed event
|
||||
// parameters that are not value types i.e. arrays and structs are not
|
||||
// stored directly but instead a keccak256-hash of an encoding is stored.
|
||||
//
|
||||
// We only convert strings and bytes to hash, still need to deal with
|
||||
// array(both fixed-size and dynamic-size) and struct.
|
||||
if bound == "String" || bound == "byte[]" {
|
||||
bound = "Hash"
|
||||
}
|
||||
@@ -369,7 +407,14 @@ var bindStructType = map[Lang]func(kind abi.Type, structs map[string]*tmplStruct
|
||||
func bindStructTypeGo(kind abi.Type, structs map[string]*tmplStruct) string {
|
||||
switch kind.T {
|
||||
case abi.TupleTy:
|
||||
if s, exist := structs[kind.String()]; exist {
|
||||
// We compose raw struct name and canonical parameter expression
|
||||
// together here. The reason is before solidity v0.5.11, kind.TupleRawName
|
||||
// is empty, so we use canonical parameter expression to distinguish
|
||||
// different struct definition. From the consideration of backward
|
||||
// compatibility, we concat these two together so that if kind.TupleRawName
|
||||
// is not empty, it can have unique id.
|
||||
id := kind.TupleRawName + kind.String()
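// e.g. a Solidity struct Point { uint256 x; uint256 y; } (hypothetical) would get the
// id "Point(uint256,uint256)"; on pre-0.5.11 compilers TupleRawName is empty and the
// id collapses to just the canonical expression "(uint256,uint256)".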
|
||||
if s, exist := structs[id]; exist {
|
||||
return s.Name
|
||||
}
|
||||
var fields []*tmplField
|
||||
@@ -377,8 +422,11 @@ func bindStructTypeGo(kind abi.Type, structs map[string]*tmplStruct) string {
|
||||
field := bindStructTypeGo(*elem, structs)
|
||||
fields = append(fields, &tmplField{Type: field, Name: capitalise(kind.TupleRawNames[i]), SolKind: *elem})
|
||||
}
|
||||
name := fmt.Sprintf("Struct%d", len(structs))
|
||||
structs[kind.String()] = &tmplStruct{
|
||||
name := kind.TupleRawName
|
||||
if name == "" {
|
||||
name = fmt.Sprintf("Struct%d", len(structs))
|
||||
}
|
||||
structs[id] = &tmplStruct{
|
||||
Name: name,
|
||||
Fields: fields,
|
||||
}
|
||||
@@ -398,7 +446,14 @@ func bindStructTypeGo(kind abi.Type, structs map[string]*tmplStruct) string {
|
||||
func bindStructTypeJava(kind abi.Type, structs map[string]*tmplStruct) string {
|
||||
switch kind.T {
|
||||
case abi.TupleTy:
|
||||
if s, exist := structs[kind.String()]; exist {
|
||||
// We compose raw struct name and canonical parameter expression
|
||||
// together here. The reason is before solidity v0.5.11, kind.TupleRawName
|
||||
// is empty, so we use canonical parameter expression to distinguish
|
||||
// different struct definition. From the consideration of backward
|
||||
// compatibility, we concat these two together so that if kind.TupleRawName
|
||||
// is not empty, it can have unique id.
|
||||
id := kind.TupleRawName + kind.String()
|
||||
if s, exist := structs[id]; exist {
|
||||
return s.Name
|
||||
}
|
||||
var fields []*tmplField
|
||||
@@ -406,8 +461,11 @@ func bindStructTypeJava(kind abi.Type, structs map[string]*tmplStruct) string {
|
||||
field := bindStructTypeJava(*elem, structs)
|
||||
fields = append(fields, &tmplField{Type: field, Name: decapitalise(kind.TupleRawNames[i]), SolKind: *elem})
|
||||
}
|
||||
name := fmt.Sprintf("Class%d", len(structs))
|
||||
structs[kind.String()] = &tmplStruct{
|
||||
name := kind.TupleRawName
|
||||
if name == "" {
|
||||
name = fmt.Sprintf("Class%d", len(structs))
|
||||
}
|
||||
structs[id] = &tmplStruct{
|
||||
Name: name,
|
||||
Fields: fields,
|
||||
}
|
||||
@@ -452,6 +510,15 @@ func namedTypeJava(javaKind string, solKind abi.Type) string {
|
||||
}
|
||||
}
|
||||
|
||||
// alias returns an alias of the given string based on the aliasing rules
|
||||
// or returns itself if no rule is matched.
|
||||
func alias(aliases map[string]string, n string) string {
|
||||
if alias, exist := aliases[n]; exist {
|
||||
return alias
|
||||
}
|
||||
return n
|
||||
}
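// A minimal usage sketch (hypothetical contract names): the alias map is passed as the
// new final argument of Bind and feeds the renaming above, e.g.
//
//	code, err := Bind(types, abis, bytecodes, fsigs, "main", LangGo, nil,
//		map[string]string{"transfer": "TransferRaw"})
//
// which corresponds to the --alias flag mentioned in the duplicate-identifier errors.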
|
||||
|
||||
// methodNormalizer is a name transformer that modifies Solidity method names to
|
||||
// conform to target language naming conventions.
|
||||
var methodNormalizer = map[Lang]func(string) string{
|
||||
@@ -497,6 +564,21 @@ func structured(args abi.Arguments) bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// hasStruct returns an indicator whether the given type is struct, struct slice
|
||||
// or struct array.
|
||||
func hasStruct(t abi.Type) bool {
|
||||
switch t.T {
|
||||
case abi.SliceTy:
|
||||
return hasStruct(*t.Elem)
|
||||
case abi.ArrayTy:
|
||||
return hasStruct(*t.Elem)
|
||||
case abi.TupleTy:
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// resolveArgName converts a raw argument representation into a user friendly format.
|
||||
func resolveArgName(arg abi.Argument, structs map[string]*tmplStruct) string {
|
||||
var (
|
||||
@@ -512,7 +594,7 @@ loop:
|
||||
case abi.ArrayTy:
|
||||
prefix += fmt.Sprintf("[%d]", typ.Size)
|
||||
default:
|
||||
embedded = typ.String()
|
||||
embedded = typ.TupleRawName + typ.String()
|
||||
break loop
|
||||
}
|
||||
typ = typ.Elem
|
||||
|
@@ -23,6 +23,7 @@ type tmplData struct {
|
||||
Package string // Name of the package to place the generated file in
|
||||
Contracts map[string]*tmplContract // List of contracts to generate into this file
|
||||
Libraries map[string]string // Map the bytecode's link pattern to the library name
|
||||
Structs map[string]*tmplStruct // Contract struct type definitions
|
||||
}
|
||||
|
||||
// tmplContract contains the data needed to generate an individual contract binding.
|
||||
@@ -36,8 +37,7 @@ type tmplContract struct {
|
||||
Transacts map[string]*tmplMethod // Contract calls that write state data
|
||||
Events map[string]*tmplEvent // Contract events accessors
|
||||
Libraries map[string]string // Same as tmplData, but filtered to only keep what the contract needs
|
||||
Structs map[string]*tmplStruct // Contract struct type definitions
|
||||
Library bool
|
||||
Library bool // Indicator whether the contract is a library
|
||||
}
|
||||
|
||||
// tmplMethod is a wrapper around an abi.Method that contains a few preprocessed
|
||||
@@ -65,7 +65,7 @@ type tmplField struct {
|
||||
// tmplStruct is a wrapper around an abi.tuple that contains an auto-generated
|
||||
// struct name.
|
||||
type tmplStruct struct {
|
||||
Name string // Auto-generated struct name(We can't obtain the raw struct name through abi)
|
||||
Name string // Auto-generated struct name(before solidity v0.5.11) or raw name.
|
||||
Fields []*tmplField // Struct fields definition depends on the binding language.
|
||||
}
|
||||
|
||||
@@ -108,8 +108,16 @@ var (
|
||||
_ = event.NewSubscription
|
||||
)
|
||||
|
||||
{{$structs := .Structs}}
|
||||
{{range $structs}}
|
||||
// {{.Name}} is an auto generated low-level Go binding around a user-defined struct.
|
||||
type {{.Name}} struct {
|
||||
{{range $field := .Fields}}
|
||||
{{$field.Name}} {{$field.Type}}{{end}}
|
||||
}
|
||||
{{end}}
|
||||
|
||||
{{range $contract := .Contracts}}
|
||||
{{$structs := $contract.Structs}}
|
||||
// {{.Type}}ABI is the input ABI used to generate the binding from.
|
||||
const {{.Type}}ABI = "{{.InputABI}}"
|
||||
|
||||
@@ -285,14 +293,6 @@ var (
|
||||
return _{{$contract.Type}}.Contract.contract.Transact(opts, method, params...)
|
||||
}
|
||||
|
||||
{{range .Structs}}
|
||||
// {{.Name}} is an auto generated low-level Go binding around a user-defined struct.
|
||||
type {{.Name}} struct {
|
||||
{{range $field := .Fields}}
|
||||
{{$field.Name}} {{$field.Type}}{{end}}
|
||||
}
|
||||
{{end}}
|
||||
|
||||
{{range .Calls}}
|
||||
// {{.Normalized.Name}} is a free data retrieval call binding the contract method 0x{{printf "%x" .Original.ID}}.
|
||||
//
|
||||
@@ -483,7 +483,7 @@ var (
|
||||
|
||||
// Parse{{.Normalized.Name}} is a log parse operation binding the contract event 0x{{printf "%x" .Original.ID}}.
|
||||
//
|
||||
// Solidity: {{.Original.String}}
|
||||
// Solidity: {{formatevent .Original $structs}}
|
||||
func (_{{$contract.Type}} *{{$contract.Type}}Filterer) Parse{{.Normalized.Name}}(log types.Log) (*{{$contract.Type}}{{.Normalized.Name}}, error) {
|
||||
event := new({{$contract.Type}}{{.Normalized.Name}})
|
||||
if err := _{{$contract.Type}}.contract.UnpackLog(event, "{{.Original.Name}}", log); err != nil {
|
||||
@@ -507,8 +507,8 @@ package {{.Package}};
|
||||
import org.ethereum.geth.*;
|
||||
import java.util.*;
|
||||
|
||||
{{$structs := .Structs}}
|
||||
{{range $contract := .Contracts}}
|
||||
{{$structs := $contract.Structs}}
|
||||
{{if not .Library}}public {{end}}class {{.Type}} {
|
||||
// ABI is the input ABI used to generate the binding from.
|
||||
public final static String ABI = "{{.InputABI}}";
|
||||
|
@@ -80,15 +80,19 @@ func makeTopics(query ...[]interface{}) ([][]common.Hash, error) {
|
||||
copy(topic[:], hash[:])
|
||||
|
||||
default:
|
||||
// todo(rjl493456442) according solidity documentation, indexed event
|
||||
// parameters that are not value types i.e. arrays and structs are not
|
||||
// stored directly but instead a keccak256-hash of an encoding is stored.
|
||||
//
|
||||
// We only convert strings and bytes to hash, still need to deal with
|
||||
// array(both fixed-size and dynamic-size) and struct.
|
||||
|
||||
// Attempt to generate the topic from funky types
|
||||
val := reflect.ValueOf(rule)
|
||||
|
||||
switch {
|
||||
|
||||
// static byte array
|
||||
case val.Kind() == reflect.Array && reflect.TypeOf(rule).Elem().Kind() == reflect.Uint8:
|
||||
reflect.Copy(reflect.ValueOf(topic[:val.Len()]), val)
|
||||
|
||||
default:
|
||||
return nil, fmt.Errorf("unsupported indexed type: %T", rule)
|
||||
}
|
||||
@@ -162,6 +166,7 @@ func parseTopics(out interface{}, fields abi.Arguments, topics []common.Hash) er
|
||||
|
||||
default:
|
||||
// Ran out of plain primitive types, try custom types
|
||||
|
||||
switch field.Type() {
|
||||
case reflectHash: // Also covers all dynamic types
|
||||
field.Set(reflect.ValueOf(topics[0]))
|
||||
@@ -173,16 +178,21 @@ func parseTopics(out interface{}, fields abi.Arguments, topics []common.Hash) er
|
||||
|
||||
case reflectBigInt:
|
||||
num := new(big.Int).SetBytes(topics[0][:])
|
||||
if arg.Type.T == abi.IntTy {
|
||||
if num.Cmp(abi.MaxInt256) > 0 {
|
||||
num.Add(abi.MaxUint256, big.NewInt(0).Neg(num))
|
||||
num.Add(num, big.NewInt(1))
|
||||
num.Neg(num)
|
||||
}
|
||||
}
|
||||
field.Set(reflect.ValueOf(num))
|
||||
|
||||
default:
|
||||
// Ran out of custom types, try the crazies
|
||||
switch {
|
||||
|
||||
// static byte array
|
||||
case arg.Type.T == abi.FixedBytesTy:
|
||||
reflect.Copy(field, reflect.ValueOf(topics[0][:arg.Type.Size]))
|
||||
|
||||
default:
|
||||
return fmt.Errorf("unsupported indexed type: %v", arg.Type)
|
||||
}
|
||||
@@ -209,8 +219,7 @@ func parseTopicsIntoMap(out map[string]interface{}, fields abi.Arguments, topics
|
||||
case abi.BoolTy:
|
||||
out[arg.Name] = topics[0][common.HashLength-1] == 1
|
||||
case abi.IntTy, abi.UintTy:
|
||||
num := new(big.Int).SetBytes(topics[0][:])
|
||||
out[arg.Name] = num
|
||||
out[arg.Name] = abi.ReadInteger(arg.Type.T, arg.Type.Kind, topics[0].Bytes())
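// ReadInteger applies the two's-complement adjustment for signed types, so negative
// int8/int256 topic values decode correctly (exercised by the topic tests further below).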
|
||||
case abi.AddressTy:
|
||||
var addr common.Address
|
||||
copy(addr[:], topics[0][common.HashLength-common.AddressLength:])
|
||||
@@ -218,7 +227,11 @@ func parseTopicsIntoMap(out map[string]interface{}, fields abi.Arguments, topics
|
||||
case abi.HashTy:
|
||||
out[arg.Name] = topics[0]
|
||||
case abi.FixedBytesTy:
|
||||
out[arg.Name] = topics[0][:]
|
||||
array, err := abi.ReadFixedBytes(arg.Type, topics[0].Bytes())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
out[arg.Name] = array
|
||||
case abi.StringTy, abi.BytesTy, abi.SliceTy, abi.ArrayTy:
|
||||
// Array types (including strings and bytes) have their keccak256 hashes stored in the topic, not a hash
// whose bytes can be decoded to the actual value, so the best we can do is retrieve that hash
|
||||
|
@@ -17,6 +17,7 @@
|
||||
package bind
|
||||
|
||||
import (
|
||||
"math/big"
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
@@ -55,27 +56,44 @@ func TestMakeTopics(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseTopics(t *testing.T) {
|
||||
type bytesStruct struct {
|
||||
StaticBytes [5]byte
|
||||
}
|
||||
bytesType, _ := abi.NewType("bytes5", nil)
|
||||
type args struct {
|
||||
createObj func() interface{}
|
||||
resultObj func() interface{}
|
||||
fields abi.Arguments
|
||||
topics []common.Hash
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
args args
|
||||
wantErr bool
|
||||
}{
|
||||
type args struct {
|
||||
createObj func() interface{}
|
||||
resultObj func() interface{}
|
||||
resultMap func() map[string]interface{}
|
||||
fields abi.Arguments
|
||||
topics []common.Hash
|
||||
}
|
||||
|
||||
type bytesStruct struct {
|
||||
StaticBytes [5]byte
|
||||
}
|
||||
type int8Struct struct {
|
||||
Int8Value int8
|
||||
}
|
||||
type int256Struct struct {
|
||||
Int256Value *big.Int
|
||||
}
|
||||
|
||||
type topicTest struct {
|
||||
name string
|
||||
args args
|
||||
wantErr bool
|
||||
}
|
||||
|
||||
func setupTopicsTests() []topicTest {
|
||||
bytesType, _ := abi.NewType("bytes5", "", nil)
|
||||
int8Type, _ := abi.NewType("int8", "", nil)
|
||||
int256Type, _ := abi.NewType("int256", "", nil)
|
||||
|
||||
tests := []topicTest{
|
||||
{
|
||||
name: "support fixed byte types, right padded to 32 bytes",
|
||||
args: args{
|
||||
createObj: func() interface{} { return &bytesStruct{} },
|
||||
resultObj: func() interface{} { return &bytesStruct{StaticBytes: [5]byte{1, 2, 3, 4, 5}} },
|
||||
resultMap: func() map[string]interface{} {
|
||||
return map[string]interface{}{"staticBytes": [5]byte{1, 2, 3, 4, 5}}
|
||||
},
|
||||
fields: abi.Arguments{abi.Argument{
|
||||
Name: "staticBytes",
|
||||
Type: bytesType,
|
||||
@@ -87,7 +105,54 @@ func TestParseTopics(t *testing.T) {
|
||||
},
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "int8 with negative value",
|
||||
args: args{
|
||||
createObj: func() interface{} { return &int8Struct{} },
|
||||
resultObj: func() interface{} { return &int8Struct{Int8Value: -1} },
|
||||
resultMap: func() map[string]interface{} {
|
||||
return map[string]interface{}{"int8Value": int8(-1)}
|
||||
},
|
||||
fields: abi.Arguments{abi.Argument{
|
||||
Name: "int8Value",
|
||||
Type: int8Type,
|
||||
Indexed: true,
|
||||
}},
|
||||
topics: []common.Hash{
|
||||
{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
|
||||
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255},
|
||||
},
|
||||
},
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "int256 with negative value",
|
||||
args: args{
|
||||
createObj: func() interface{} { return &int256Struct{} },
|
||||
resultObj: func() interface{} { return &int256Struct{Int256Value: big.NewInt(-1)} },
|
||||
resultMap: func() map[string]interface{} {
|
||||
return map[string]interface{}{"int256Value": big.NewInt(-1)}
|
||||
},
|
||||
fields: abi.Arguments{abi.Argument{
|
||||
Name: "int256Value",
|
||||
Type: int256Type,
|
||||
Indexed: true,
|
||||
}},
|
||||
topics: []common.Hash{
|
||||
{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
|
||||
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255},
|
||||
},
|
||||
},
|
||||
wantErr: false,
|
||||
},
|
||||
}
|
||||
|
||||
return tests
|
||||
}
|
||||
|
||||
func TestParseTopics(t *testing.T) {
|
||||
tests := setupTopicsTests()
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
createObj := tt.args.createObj()
|
||||
@@ -101,3 +166,20 @@ func TestParseTopics(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseTopicsIntoMap(t *testing.T) {
|
||||
tests := setupTopicsTests()
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
outMap := make(map[string]interface{})
|
||||
if err := parseTopicsIntoMap(outMap, tt.args.fields, tt.args.topics); (err != nil) != tt.wantErr {
|
||||
t.Errorf("parseTopicsIntoMap() error = %v, wantErr %v", err, tt.wantErr)
|
||||
}
|
||||
resultMap := tt.args.resultMap()
|
||||
if !reflect.DeepEqual(outMap, resultMap) {
|
||||
t.Errorf("parseTopicsIntoMap() = %v, want %v", outMap, resultMap)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
@@ -173,7 +173,7 @@ func TestEventTupleUnpack(t *testing.T) {
|
||||
type EventTransferWithTag struct {
|
||||
// this is valid because `value` is not exportable,
|
||||
// so value is only unmarshalled into `Value1`.
|
||||
value *big.Int
|
||||
value *big.Int //lint:ignore U1000 unused field is part of test
|
||||
Value1 *big.Int `abi:"value"`
|
||||
}
|
||||
|
||||
@@ -354,40 +354,6 @@ func unpackTestEventData(dest interface{}, hexData string, jsonEvent []byte, ass
|
||||
return a.Unpack(dest, "e", data)
|
||||
}
|
||||
|
||||
/*
|
||||
Taken from
|
||||
https://github.com/ethereum/go-ethereum/pull/15568
|
||||
*/
|
||||
|
||||
type testResult struct {
|
||||
Values [2]*big.Int
|
||||
Value1 *big.Int
|
||||
Value2 *big.Int
|
||||
}
|
||||
|
||||
type testCase struct {
|
||||
definition string
|
||||
want testResult
|
||||
}
|
||||
|
||||
func (tc testCase) encoded(intType, arrayType Type) []byte {
|
||||
var b bytes.Buffer
|
||||
if tc.want.Value1 != nil {
|
||||
val, _ := intType.pack(reflect.ValueOf(tc.want.Value1))
|
||||
b.Write(val)
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(tc.want.Values, [2]*big.Int{nil, nil}) {
|
||||
val, _ := arrayType.pack(reflect.ValueOf(tc.want.Values))
|
||||
b.Write(val)
|
||||
}
|
||||
if tc.want.Value2 != nil {
|
||||
val, _ := intType.pack(reflect.ValueOf(tc.want.Value2))
|
||||
b.Write(val)
|
||||
}
|
||||
return b.Bytes()
|
||||
}
|
||||
|
||||
// TestEventUnpackIndexed verifies that indexed field will be skipped by event decoder.
|
||||
func TestEventUnpackIndexed(t *testing.T) {
|
||||
definition := `[{"name": "test", "type": "event", "inputs": [{"indexed": true, "name":"value1", "type":"uint8"},{"indexed": false, "name":"value2", "type":"uint8"}]}]`
|
||||
|
@@ -73,7 +73,7 @@ func packNum(value reflect.Value) []byte {
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
return U256(big.NewInt(value.Int()))
|
||||
case reflect.Ptr:
|
||||
return U256(value.Interface().(*big.Int))
|
||||
return U256(new(big.Int).Set(value.Interface().(*big.Int)))
|
||||
default:
|
||||
panic("abi: fatal error")
|
||||
}
|
||||
|
@@ -613,7 +613,7 @@ func TestPack(t *testing.T) {
|
||||
"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"), // tuple[1].A[1]
|
||||
},
|
||||
} {
|
||||
typ, err := NewType(test.typ, test.components)
|
||||
typ, err := NewType(test.typ, "", test.components)
|
||||
if err != nil {
|
||||
t.Fatalf("%v failed. Unexpected parse error: %v", i, err)
|
||||
}
|
||||
|
@@ -53,6 +53,7 @@ type Type struct {
|
||||
stringKind string // holds the unparsed string for deriving signatures
|
||||
|
||||
// Tuple relative fields
|
||||
TupleRawName string // Raw struct name defined in source code, may be empty.
|
||||
TupleElems []*Type // Type information of all tuple fields
|
||||
TupleRawNames []string // Raw field name of all tuple fields
|
||||
}
|
||||
@@ -63,7 +64,7 @@ var (
|
||||
)
|
||||
|
||||
// NewType creates a new reflection type of abi type given in t.
|
||||
func NewType(t string, components []ArgumentMarshaling) (typ Type, err error) {
|
||||
func NewType(t string, internalType string, components []ArgumentMarshaling) (typ Type, err error) {
|
||||
// check that array brackets are equal if they exist
|
||||
if strings.Count(t, "[") != strings.Count(t, "]") {
|
||||
return Type{}, fmt.Errorf("invalid arg type in abi")
|
||||
@@ -73,9 +74,14 @@ func NewType(t string, components []ArgumentMarshaling) (typ Type, err error) {
|
||||
// if there are brackets, get ready to go into slice/array mode and
|
||||
// recursively create the type
|
||||
if strings.Count(t, "[") != 0 {
|
||||
i := strings.LastIndex(t, "[")
|
||||
// Note internalType can be empty here.
|
||||
subInternal := internalType
|
||||
if i := strings.LastIndex(internalType, "["); i != -1 {
|
||||
subInternal = subInternal[:i]
|
||||
}
|
||||
// recursively embed the type
|
||||
embeddedType, err := NewType(t[:i], components)
|
||||
i := strings.LastIndex(t, "[")
|
||||
embeddedType, err := NewType(t[:i], subInternal, components)
|
||||
if err != nil {
|
||||
return Type{}, err
|
||||
}
|
||||
@@ -173,7 +179,7 @@ func NewType(t string, components []ArgumentMarshaling) (typ Type, err error) {
|
||||
)
|
||||
expression += "("
|
||||
for idx, c := range components {
|
||||
cType, err := NewType(c.Type, c.Components)
|
||||
cType, err := NewType(c.Type, c.InternalType, c.Components)
|
||||
if err != nil {
|
||||
return Type{}, err
|
||||
}
|
||||
@@ -199,6 +205,17 @@ func NewType(t string, components []ArgumentMarshaling) (typ Type, err error) {
|
||||
typ.TupleRawNames = names
|
||||
typ.T = TupleTy
|
||||
typ.stringKind = expression
|
||||
|
||||
const structPrefix = "struct "
|
||||
// After solidity 0.5.10, a new field of abi "internalType"
|
||||
// is introduced. From that we can obtain the struct name
|
||||
// user defined in the source code.
|
||||
if internalType != "" && strings.HasPrefix(internalType, structPrefix) {
|
||||
// Foo.Bar type definition is not allowed in golang,
|
||||
// convert the format to FooBar
|
||||
typ.TupleRawName = strings.Replace(internalType[len(structPrefix):], ".", "", -1)
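// e.g. an internalType of "struct ERC20.Approval" (hypothetical) yields the
// TupleRawName "ERC20Approval".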
|
||||
}
|
||||
|
||||
case "function":
|
||||
typ.Kind = reflect.Array
|
||||
typ.T = FunctionTy
|
||||
|
@@ -106,7 +106,7 @@ func TestTypeRegexp(t *testing.T) {
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
typ, err := NewType(tt.blob, tt.components)
|
||||
typ, err := NewType(tt.blob, "", tt.components)
|
||||
if err != nil {
|
||||
t.Errorf("type %q: failed to parse type string: %v", tt.blob, err)
|
||||
}
|
||||
@@ -281,7 +281,7 @@ func TestTypeCheck(t *testing.T) {
|
||||
B *big.Int
|
||||
}{{big.NewInt(0), big.NewInt(0)}, {big.NewInt(0), big.NewInt(0)}}, ""},
|
||||
} {
|
||||
typ, err := NewType(test.typ, test.components)
|
||||
typ, err := NewType(test.typ, "", test.components)
|
||||
if err != nil && len(test.err) == 0 {
|
||||
t.Fatal("unexpected parse error:", err)
|
||||
} else if err != nil && len(test.err) != 0 {
|
||||
|
@@ -26,16 +26,18 @@ import (
|
||||
)
|
||||
|
||||
var (
|
||||
maxUint256 = big.NewInt(0).Add(
|
||||
// MaxUint256 is the maximum value that can be represented by a uint256
|
||||
MaxUint256 = big.NewInt(0).Add(
|
||||
big.NewInt(0).Exp(big.NewInt(2), big.NewInt(256), nil),
|
||||
big.NewInt(-1))
|
||||
maxInt256 = big.NewInt(0).Add(
|
||||
// MaxInt256 is the maximum value that can be represented by a int256
|
||||
MaxInt256 = big.NewInt(0).Add(
|
||||
big.NewInt(0).Exp(big.NewInt(2), big.NewInt(255), nil),
|
||||
big.NewInt(-1))
|
||||
)
|
||||
|
||||
// reads the integer based on its kind
|
||||
func readInteger(typ byte, kind reflect.Kind, b []byte) interface{} {
|
||||
// ReadInteger reads the integer based on its kind and returns the appropriate value
|
||||
func ReadInteger(typ byte, kind reflect.Kind, b []byte) interface{} {
|
||||
switch kind {
|
||||
case reflect.Uint8:
|
||||
return b[len(b)-1]
|
||||
@@ -62,8 +64,8 @@ func readInteger(typ byte, kind reflect.Kind, b []byte) interface{} {
|
||||
return ret
|
||||
}
|
||||
|
||||
if ret.Cmp(maxInt256) > 0 {
|
||||
ret.Add(maxUint256, big.NewInt(0).Neg(ret))
|
||||
if ret.Cmp(MaxInt256) > 0 {
|
||||
ret.Add(MaxUint256, big.NewInt(0).Neg(ret))
|
||||
ret.Add(ret, big.NewInt(1))
|
||||
ret.Neg(ret)
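// e.g. a 32-byte word of all 0xff bytes now decodes to -1 (two's complement).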
|
||||
}
|
||||
@@ -102,8 +104,8 @@ func readFunctionType(t Type, word []byte) (funcTy [24]byte, err error) {
|
||||
return
|
||||
}
|
||||
|
||||
// through reflection, creates a fixed array to be read from
|
||||
func readFixedBytes(t Type, word []byte) (interface{}, error) {
|
||||
// ReadFixedBytes uses reflection to create a fixed array to be read from
|
||||
func ReadFixedBytes(t Type, word []byte) (interface{}, error) {
|
||||
if t.T != FixedBytesTy {
|
||||
return nil, fmt.Errorf("abi: invalid type in call to make fixed byte array")
|
||||
}
|
||||
@@ -230,7 +232,7 @@ func toGoType(index int, t Type, output []byte) (interface{}, error) {
|
||||
case StringTy: // variable arrays are written at the end of the return bytes
|
||||
return string(output[begin : begin+length]), nil
|
||||
case IntTy, UintTy:
|
||||
return readInteger(t.T, t.Kind, returnOutput), nil
|
||||
return ReadInteger(t.T, t.Kind, returnOutput), nil
|
||||
case BoolTy:
|
||||
return readBool(returnOutput)
|
||||
case AddressTy:
|
||||
@@ -240,7 +242,7 @@ func toGoType(index int, t Type, output []byte) (interface{}, error) {
|
||||
case BytesTy:
|
||||
return output[begin : begin+length], nil
|
||||
case FixedBytesTy:
|
||||
return readFixedBytes(t, returnOutput)
|
||||
return ReadFixedBytes(t, returnOutput)
|
||||
case FunctionTy:
|
||||
return readFunctionType(t, returnOutput)
|
||||
default:
|
||||
|
@@ -51,6 +51,7 @@ func (test unpackTest) checkError(err error) error {
|
||||
}
|
||||
|
||||
var unpackTests = []unpackTest{
|
||||
// Bools
|
||||
{
|
||||
def: `[{ "type": "bool" }]`,
|
||||
enc: "0000000000000000000000000000000000000000000000000000000000000001",
|
||||
@@ -73,6 +74,7 @@ var unpackTests = []unpackTest{
|
||||
want: false,
|
||||
err: "abi: improperly encoded boolean value",
|
||||
},
|
||||
// Integers
|
||||
{
|
||||
def: `[{"type": "uint32"}]`,
|
||||
enc: "0000000000000000000000000000000000000000000000000000000000000001",
|
||||
@@ -122,11 +124,13 @@ var unpackTests = []unpackTest{
|
||||
enc: "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
|
||||
want: big.NewInt(-1),
|
||||
},
|
||||
// Address
|
||||
{
|
||||
def: `[{"type": "address"}]`,
|
||||
enc: "0000000000000000000000000100000000000000000000000000000000000000",
|
||||
want: common.Address{1},
|
||||
},
|
||||
// Bytes
|
||||
{
|
||||
def: `[{"type": "bytes32"}]`,
|
||||
enc: "0100000000000000000000000000000000000000000000000000000000000000",
|
||||
@@ -154,23 +158,39 @@ var unpackTests = []unpackTest{
|
||||
enc: "0100000000000000000000000000000000000000000000000000000000000000",
|
||||
want: [32]byte{1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
|
||||
},
|
||||
// Functions
|
||||
{
|
||||
def: `[{"type": "function"}]`,
|
||||
enc: "0100000000000000000000000000000000000000000000000000000000000000",
|
||||
want: [24]byte{1},
|
||||
},
|
||||
// slices
|
||||
// Slice and Array
|
||||
{
|
||||
def: `[{"type": "uint8[]"}]`,
|
||||
enc: "0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
|
||||
want: []uint8{1, 2},
|
||||
},
|
||||
{
|
||||
def: `[{"type": "uint8[]"}]`,
|
||||
enc: "00000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000000",
|
||||
want: []uint8{},
|
||||
},
|
||||
{
|
||||
def: `[{"type": "uint256[]"}]`,
|
||||
enc: "00000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000000",
|
||||
want: []*big.Int{},
|
||||
},
|
||||
{
|
||||
def: `[{"type": "uint8[2]"}]`,
|
||||
enc: "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
|
||||
want: [2]uint8{1, 2},
|
||||
},
|
||||
// multi dimensional, if these pass, all types that don't require length prefix should pass
|
||||
{
|
||||
def: `[{"type": "uint8[][]"}]`,
|
||||
enc: "00000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000000",
|
||||
want: [][]uint8{},
|
||||
},
|
||||
{
|
||||
def: `[{"type": "uint8[][]"}]`,
|
||||
enc: "00000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
|
||||
@@ -186,11 +206,21 @@ var unpackTests = []unpackTest{
|
||||
enc: "0000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
|
||||
want: [2][2]uint8{{1, 2}, {1, 2}},
|
||||
},
|
||||
{
|
||||
def: `[{"type": "uint8[][2]"}]`,
|
||||
enc: "00000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
|
||||
want: [2][]uint8{{}, {}},
|
||||
},
|
||||
{
|
||||
def: `[{"type": "uint8[][2]"}]`,
|
||||
enc: "0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000001",
|
||||
want: [2][]uint8{{1}, {1}},
|
||||
},
|
||||
{
|
||||
def: `[{"type": "uint8[2][]"}]`,
|
||||
enc: "00000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000000",
|
||||
want: [][2]uint8{},
|
||||
},
|
||||
{
|
||||
def: `[{"type": "uint8[2][]"}]`,
|
||||
enc: "0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
|
||||
@@ -420,7 +450,7 @@ func TestUnpack(t *testing.T) {
|
||||
}
|
||||
encb, err := hex.DecodeString(test.enc)
|
||||
if err != nil {
|
||||
t.Fatalf("invalid hex: %s" + test.enc)
|
||||
t.Fatalf("invalid hex %s: %v", test.enc, err)
|
||||
}
|
||||
outptr := reflect.New(reflect.TypeOf(test.want))
|
||||
err = abi.Unpack(outptr.Interface(), "method", encb)
|
||||
|
@@ -123,6 +123,7 @@ func (ks keyStorePassphrase) StoreKey(filename string, key *Key, auth string) er
|
||||
"Please file a ticket at:\n\n" +
|
||||
"https://github.com/ethereum/go-ethereum/issues." +
|
||||
"The error was : %s"
|
||||
//lint:ignore ST1005 This is a message for the user
|
||||
return fmt.Errorf(msg, tmpName, err)
|
||||
}
|
||||
}
|
||||
@@ -237,7 +238,7 @@ func DecryptKey(keyjson []byte, auth string) (*Key, error) {
|
||||
|
||||
func DecryptDataV3(cryptoJson CryptoJSON, auth string) ([]byte, error) {
|
||||
if cryptoJson.Cipher != "aes-128-ctr" {
|
||||
return nil, fmt.Errorf("Cipher not supported: %v", cryptoJson.Cipher)
|
||||
return nil, fmt.Errorf("cipher not supported: %v", cryptoJson.Cipher)
|
||||
}
|
||||
mac, err := hex.DecodeString(cryptoJson.MAC)
|
||||
if err != nil {
|
||||
@@ -273,7 +274,7 @@ func DecryptDataV3(cryptoJson CryptoJSON, auth string) ([]byte, error) {
|
||||
|
||||
func decryptKeyV3(keyProtected *encryptedKeyJSONV3, auth string) (keyBytes []byte, keyId []byte, err error) {
|
||||
if keyProtected.Version != version {
|
||||
return nil, nil, fmt.Errorf("Version not supported: %v", keyProtected.Version)
|
||||
return nil, nil, fmt.Errorf("version not supported: %v", keyProtected.Version)
|
||||
}
|
||||
keyId = uuid.Parse(keyProtected.Id)
|
||||
plainText, err := DecryptDataV3(keyProtected.Crypto, auth)
|
||||
@@ -335,13 +336,13 @@ func getKDFKey(cryptoJSON CryptoJSON, auth string) ([]byte, error) {
|
||||
c := ensureInt(cryptoJSON.KDFParams["c"])
|
||||
prf := cryptoJSON.KDFParams["prf"].(string)
|
||||
if prf != "hmac-sha256" {
|
||||
return nil, fmt.Errorf("Unsupported PBKDF2 PRF: %s", prf)
|
||||
return nil, fmt.Errorf("unsupported PBKDF2 PRF: %s", prf)
|
||||
}
|
||||
key := pbkdf2.Key(authArray, salt, c, dkLen, sha256.New)
|
||||
return key, nil
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("Unsupported KDF: %s", cryptoJSON.KDF)
|
||||
return nil, fmt.Errorf("unsupported KDF: %s", cryptoJSON.KDF)
|
||||
}
|
||||
|
||||
// TODO: can we do without this when unmarshalling dynamic JSON?
|
||||
|
@@ -141,6 +141,11 @@ func (am *Manager) Wallets() []Wallet {
|
||||
am.lock.RLock()
|
||||
defer am.lock.RUnlock()
|
||||
|
||||
return am.walletsNoLock()
|
||||
}
|
||||
|
||||
// walletsNoLock returns all registered wallets. Callers must hold am.lock.
|
||||
func (am *Manager) walletsNoLock() []Wallet {
|
||||
cpy := make([]Wallet, len(am.wallets))
|
||||
copy(cpy, am.wallets)
|
||||
return cpy
|
||||
@@ -155,7 +160,7 @@ func (am *Manager) Wallet(url string) (Wallet, error) {
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for _, wallet := range am.Wallets() {
|
||||
for _, wallet := range am.walletsNoLock() {
|
||||
if wallet.URL() == parsed {
|
||||
return wallet, nil
|
||||
}
|
||||
|
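A small aside on the `walletsNoLock` change above: it is the usual Go pattern of splitting an accessor into a locking exported method and an unexported helper that assumes the caller already holds the lock, presumably because `Wallet` itself holds the read lock (not visible in this hunk) and previously re-entered it through `Wallets()`; `sync.RWMutex` is not reentrant, so a second `RLock` can deadlock once a writer is queued in between. The sketch below uses simplified stand-in names, not the real `accounts` types.

```go
package main

import (
	"fmt"
	"sync"
)

// registry is a simplified stand-in for accounts.Manager; names are illustrative.
type registry struct {
	lock  sync.RWMutex
	items []string
}

// Items is the exported accessor: it takes the read lock itself.
func (r *registry) Items() []string {
	r.lock.RLock()
	defer r.lock.RUnlock()
	return r.itemsNoLock()
}

// itemsNoLock returns a copy of the items. Callers must hold r.lock,
// mirroring the walletsNoLock contract above.
func (r *registry) itemsNoLock() []string {
	cpy := make([]string, len(r.items))
	copy(cpy, r.items)
	return cpy
}

// Find holds the read lock for the whole lookup and therefore uses the NoLock
// variant instead of re-entering Items and re-acquiring the same RWMutex.
func (r *registry) Find(want string) (string, bool) {
	r.lock.RLock()
	defer r.lock.RUnlock()
	for _, it := range r.itemsNoLock() {
		if it == want {
			return it, true
		}
	}
	return "", false
}

func main() {
	r := &registry{items: []string{"keystore:///tmp/a", "keystore:///tmp/b"}}
	fmt.Println(r.Items())
	fmt.Println(r.Find("keystore:///tmp/b"))
}
```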
@@ -71,7 +71,7 @@ func NewSecureChannelSession(card *pcsc.Card, keyData []byte) (*SecureChannelSes
|
||||
|
||||
cardPublic, ok := gen.Unmarshal(keyData)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("Could not unmarshal public key from card")
|
||||
return nil, fmt.Errorf("could not unmarshal public key from card")
|
||||
}
|
||||
|
||||
secret, err := gen.GenerateSharedSecret(private, cardPublic)
|
||||
@@ -109,7 +109,7 @@ func (s *SecureChannelSession) Pair(pairingPassword []byte) error {
|
||||
cardChallenge := response.Data[32:64]
|
||||
|
||||
if !bytes.Equal(expectedCryptogram, cardCryptogram) {
|
||||
return fmt.Errorf("Invalid card cryptogram %v != %v", expectedCryptogram, cardCryptogram)
|
||||
return fmt.Errorf("invalid card cryptogram %v != %v", expectedCryptogram, cardCryptogram)
|
||||
}
|
||||
|
||||
md.Reset()
|
||||
@@ -132,7 +132,7 @@ func (s *SecureChannelSession) Pair(pairingPassword []byte) error {
|
||||
// Unpair disestablishes an existing pairing.
|
||||
func (s *SecureChannelSession) Unpair() error {
|
||||
if s.PairingKey == nil {
|
||||
return fmt.Errorf("Cannot unpair: not paired")
|
||||
return fmt.Errorf("cannot unpair: not paired")
|
||||
}
|
||||
|
||||
_, err := s.transmitEncrypted(claSCWallet, insUnpair, s.PairingIndex, 0, []byte{})
|
||||
@@ -148,7 +148,7 @@ func (s *SecureChannelSession) Unpair() error {
|
||||
// Open initializes the secure channel.
|
||||
func (s *SecureChannelSession) Open() error {
|
||||
if s.iv != nil {
|
||||
return fmt.Errorf("Session already opened")
|
||||
return fmt.Errorf("session already opened")
|
||||
}
|
||||
|
||||
response, err := s.open()
|
||||
@@ -185,11 +185,11 @@ func (s *SecureChannelSession) mutuallyAuthenticate() error {
|
||||
return err
|
||||
}
|
||||
if response.Sw1 != 0x90 || response.Sw2 != 0x00 {
|
||||
return fmt.Errorf("Got unexpected response from MUTUALLY_AUTHENTICATE: 0x%x%x", response.Sw1, response.Sw2)
|
||||
return fmt.Errorf("got unexpected response from MUTUALLY_AUTHENTICATE: 0x%x%x", response.Sw1, response.Sw2)
|
||||
}
|
||||
|
||||
if len(response.Data) != scSecretLength {
|
||||
return fmt.Errorf("Response from MUTUALLY_AUTHENTICATE was %d bytes, expected %d", len(response.Data), scSecretLength)
|
||||
return fmt.Errorf("response from MUTUALLY_AUTHENTICATE was %d bytes, expected %d", len(response.Data), scSecretLength)
|
||||
}
|
||||
|
||||
return nil
|
||||
@@ -222,7 +222,7 @@ func (s *SecureChannelSession) pair(p1 uint8, data []byte) (*responseAPDU, error
|
||||
// transmitEncrypted sends an encrypted message, and decrypts and returns the response.
|
||||
func (s *SecureChannelSession) transmitEncrypted(cla, ins, p1, p2 byte, data []byte) (*responseAPDU, error) {
|
||||
if s.iv == nil {
|
||||
return nil, fmt.Errorf("Channel not open")
|
||||
return nil, fmt.Errorf("channel not open")
|
||||
}
|
||||
|
||||
data, err := s.encryptAPDU(data)
|
||||
@@ -261,14 +261,14 @@ func (s *SecureChannelSession) transmitEncrypted(cla, ins, p1, p2 byte, data []b
|
||||
return nil, err
|
||||
}
|
||||
if !bytes.Equal(s.iv, rmac) {
|
||||
return nil, fmt.Errorf("Invalid MAC in response")
|
||||
return nil, fmt.Errorf("invalid MAC in response")
|
||||
}
|
||||
|
||||
rapdu := &responseAPDU{}
|
||||
rapdu.deserialize(plainData)
|
||||
|
||||
if rapdu.Sw1 != sw1Ok {
|
||||
return nil, fmt.Errorf("Unexpected response status Cla=0x%x, Ins=0x%x, Sw=0x%x%x", cla, ins, rapdu.Sw1, rapdu.Sw2)
|
||||
return nil, fmt.Errorf("unexpected response status Cla=0x%x, Ins=0x%x, Sw=0x%x%x", cla, ins, rapdu.Sw1, rapdu.Sw2)
|
||||
}
|
||||
|
||||
return rapdu, nil
|
||||
@@ -277,7 +277,7 @@ func (s *SecureChannelSession) transmitEncrypted(cla, ins, p1, p2 byte, data []b
|
||||
// encryptAPDU is an internal method that serializes and encrypts an APDU.
|
||||
func (s *SecureChannelSession) encryptAPDU(data []byte) ([]byte, error) {
|
||||
if len(data) > maxPayloadSize {
|
||||
return nil, fmt.Errorf("Payload of %d bytes exceeds maximum of %d", len(data), maxPayloadSize)
|
||||
return nil, fmt.Errorf("payload of %d bytes exceeds maximum of %d", len(data), maxPayloadSize)
|
||||
}
|
||||
data = pad(data, 0x80)
|
||||
|
||||
@@ -323,10 +323,10 @@ func unpad(data []byte, terminator byte) ([]byte, error) {
|
||||
case terminator:
|
||||
return data[:len(data)-i], nil
|
||||
default:
|
||||
return nil, fmt.Errorf("Expected end of padding, got %d", data[len(data)-i])
|
||||
return nil, fmt.Errorf("expected end of padding, got %d", data[len(data)-i])
|
||||
}
|
||||
}
|
||||
return nil, fmt.Errorf("Expected end of padding, got 0")
|
||||
return nil, fmt.Errorf("expected end of padding, got 0")
|
||||
}
|
||||
|
||||
// updateIV is an internal method that updates the initialization vector after
|
||||
|
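The `unpad` loop above has a counterpart `pad` (called from `encryptAPDU` as `pad(data, 0x80)`) that is not shown in this hunk. Below is a self-contained sketch of the pair: the 16-byte (AES) block size and the exact `pad` body are assumptions for illustration, while the `unpad` logic mirrors the code above.

```go
package main

import (
	"bytes"
	"fmt"
)

// pad appends the terminator byte and zero-fills to the next 16-byte (AES)
// block boundary. Illustrative counterpart to the unpad loop in the hunk
// above, not necessarily the wallet package's exact helper.
func pad(data []byte, terminator byte) []byte {
	padded := make([]byte, (len(data)/16+1)*16)
	copy(padded, data)
	padded[len(data)] = terminator
	return padded
}

// unpad mirrors the logic shown above: scan trailing zeros from the end until
// the terminator is found, and reject anything else.
func unpad(data []byte, terminator byte) ([]byte, error) {
	for i := 1; i <= len(data); i++ {
		switch data[len(data)-i] {
		case 0:
			continue
		case terminator:
			return data[:len(data)-i], nil
		default:
			return nil, fmt.Errorf("expected end of padding, got %d", data[len(data)-i])
		}
	}
	return nil, fmt.Errorf("expected end of padding, got 0")
}

func main() {
	msg := []byte("hello apdu")
	padded := pad(msg, 0x80)
	got, err := unpad(padded, 0x80)
	fmt.Println(bytes.Equal(msg, got), err) // true <nil>
}
```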
@@ -167,7 +167,7 @@ func transmit(card *pcsc.Card, command *commandAPDU) (*responseAPDU, error) {
|
||||
}
|
||||
|
||||
if response.Sw1 != sw1Ok {
|
||||
return nil, fmt.Errorf("Unexpected insecure response status Cla=0x%x, Ins=0x%x, Sw=0x%x%x", command.Cla, command.Ins, response.Sw1, response.Sw2)
|
||||
return nil, fmt.Errorf("unexpected insecure response status Cla=0x%x, Ins=0x%x, Sw=0x%x%x", command.Cla, command.Ins, response.Sw1, response.Sw2)
|
||||
}
|
||||
|
||||
return response, nil
|
||||
@@ -252,7 +252,7 @@ func (w *Wallet) release() error {
|
||||
// with the wallet.
|
||||
func (w *Wallet) pair(puk []byte) error {
|
||||
if w.session.paired() {
|
||||
return fmt.Errorf("Wallet already paired")
|
||||
return fmt.Errorf("wallet already paired")
|
||||
}
|
||||
pairing, err := w.session.pair(puk)
|
||||
if err != nil {
|
||||
@@ -773,12 +773,12 @@ func (w *Wallet) findAccountPath(account accounts.Account) (accounts.DerivationP
|
||||
|
||||
// Look for the path in the URL
|
||||
if account.URL.Scheme != w.Hub.scheme {
|
||||
return nil, fmt.Errorf("Scheme %s does not match wallet scheme %s", account.URL.Scheme, w.Hub.scheme)
|
||||
return nil, fmt.Errorf("scheme %s does not match wallet scheme %s", account.URL.Scheme, w.Hub.scheme)
|
||||
}
|
||||
|
||||
parts := strings.SplitN(account.URL.Path, "/", 2)
|
||||
if len(parts) != 2 {
|
||||
return nil, fmt.Errorf("Invalid URL format: %s", account.URL)
|
||||
return nil, fmt.Errorf("invalid URL format: %s", account.URL)
|
||||
}
|
||||
|
||||
if parts[0] != fmt.Sprintf("%x", w.PublicKey[1:3]) {
|
||||
@@ -813,7 +813,7 @@ func (s *Session) pair(secret []byte) (smartcardPairing, error) {
|
||||
// unpair deletes an existing pairing.
|
||||
func (s *Session) unpair() error {
|
||||
if !s.verified {
|
||||
return fmt.Errorf("Unpair requires that the PIN be verified")
|
||||
return fmt.Errorf("unpair requires that the PIN be verified")
|
||||
}
|
||||
return s.Channel.Unpair()
|
||||
}
|
||||
@@ -850,7 +850,7 @@ func (s *Session) paired() bool {
|
||||
// authenticate uses an existing pairing to establish a secure channel.
|
||||
func (s *Session) authenticate(pairing smartcardPairing) error {
|
||||
if !bytes.Equal(s.Wallet.PublicKey, pairing.PublicKey) {
|
||||
return fmt.Errorf("Cannot pair using another wallet's pairing; %x != %x", s.Wallet.PublicKey, pairing.PublicKey)
|
||||
return fmt.Errorf("cannot pair using another wallet's pairing; %x != %x", s.Wallet.PublicKey, pairing.PublicKey)
|
||||
}
|
||||
s.Channel.PairingKey = pairing.PairingKey
|
||||
s.Channel.PairingIndex = pairing.PairingIndex
|
||||
@@ -879,6 +879,7 @@ func (s *Session) walletStatus() (*walletStatus, error) {
|
||||
}
|
||||
|
||||
// derivationPath fetches the wallet's current derivation path from the card.
|
||||
//lint:ignore U1000 needs to be added to the console interface
|
||||
func (s *Session) derivationPath() (accounts.DerivationPath, error) {
|
||||
response, err := s.Channel.transmitEncrypted(claSCWallet, insStatus, statusP1Path, 0, nil)
|
||||
if err != nil {
|
||||
@@ -993,12 +994,14 @@ func (s *Session) derive(path accounts.DerivationPath) (accounts.Account, error)
|
||||
}
|
||||
|
||||
// keyExport contains information on an exported keypair.
|
||||
//lint:ignore U1000 needs to be added to the console interface
|
||||
type keyExport struct {
|
||||
PublicKey []byte `asn1:"tag:0"`
|
||||
PrivateKey []byte `asn1:"tag:1,optional"`
|
||||
}
|
||||
|
||||
// publicKey returns the public key for the current derivation path.
|
||||
//lint:ignore U1000 needs to be added to the console interface
|
||||
func (s *Session) publicKey() ([]byte, error) {
|
||||
response, err := s.Channel.transmitEncrypted(claSCWallet, insExportKey, exportP1Any, exportP2Pubkey, nil)
|
||||
if err != nil {
|
||||
|
@@ -162,7 +162,8 @@ func (w *ledgerDriver) SignTx(path accounts.DerivationPath, tx *types.Transactio
|
||||
return common.Address{}, nil, accounts.ErrWalletClosed
|
||||
}
|
||||
// Ensure the wallet is capable of signing the given transaction
|
||||
if chainID != nil && w.version[0] <= 1 && w.version[1] <= 0 && w.version[2] <= 2 {
|
||||
if chainID != nil && w.version[0] <= 1 && w.version[2] <= 2 {
|
||||
//lint:ignore ST1005 brand name displayed on the console
|
||||
return common.Address{}, nil, fmt.Errorf("Ledger v%d.%d.%d doesn't support signing this transaction, please update to v1.0.3 at least", w.version[0], w.version[1], w.version[2])
|
||||
}
|
||||
// All infos gathered and metadata checks out, request signing
|
||||
|
@@ -23,8 +23,8 @@ environment:
|
||||
install:
|
||||
- git submodule update --init
|
||||
- rmdir C:\go /s /q
|
||||
- appveyor DownloadFile https://dl.google.com/go/go1.13.windows-%GETH_ARCH%.zip
|
||||
- 7z x go1.13.windows-%GETH_ARCH%.zip -y -oC:\ > NUL
|
||||
- appveyor DownloadFile https://dl.google.com/go/go1.13.8.windows-%GETH_ARCH%.zip
|
||||
- 7z x go1.13.8.windows-%GETH_ARCH%.zip -y -oC:\ > NUL
|
||||
- go version
|
||||
- gcc --version
|
||||
|
||||
|
build/checksums.txt (new file, 19 lines)
@@ -0,0 +1,19 @@
|
||||
# This file contains sha256 checksums of optional build dependencies.
|
||||
|
||||
b13bf04633d4d8cf53226ebeaace8d4d2fd07ae6fa676d0844a688339debec34 go1.13.8.src.tar.gz
|
||||
|
||||
478994633b0f5121a7a8d4f368078093e21014fdc7fb2c0ceeae63668c13c5b6 golangci-lint-1.22.2-freebsd-amd64.tar.gz
|
||||
fcf80824c21567eb0871055711bf9bdca91cf9a081122e2a45f1d11fed754600 golangci-lint-1.22.2-darwin-amd64.tar.gz
|
||||
cda85c72fc128b2ea0ae05baea7b91172c63aea34064829f65285f1dd536f1e0 golangci-lint-1.22.2-windows-386.zip
|
||||
94f04899f620aadc9c1524e5482e415efdbd993fa2b2918c4fec2798f030ac1c golangci-lint-1.22.2-linux-armv7.tar.gz
|
||||
0e72a87d71edde00b6e37e84a99841833ad55fee83e20d21130a7a622b2860bb golangci-lint-1.22.2-freebsd-386.tar.gz
|
||||
86def2f31fe8fd7c05674104ed2a4bef3e44b7132b93c6ad2f52f198b3d01801 golangci-lint-1.22.2-linux-s390x.tar.gz
|
||||
b0df4546d36be94e8107733ba290b98dd9b7e41a42d3fb202e87fc7e4ee800c3 golangci-lint-1.22.2-freebsd-armv6.tar.gz
|
||||
3d45958dcf6a8d195086d2fced1a21db42a90815dfd156d180efa62dbdda6724 golangci-lint-1.22.2-darwin-386.tar.gz
|
||||
7ee29f35c74fab017a454237990c74d984ce3855960f2c10509238992bb781f9 golangci-lint-1.22.2-linux-arm64.tar.gz
|
||||
52086ac52a502b68578e58e35d3964f127c16d7a90b9ffcb399a004d055ded51 golangci-lint-1.22.2-linux-386.tar.gz
|
||||
c2e4df1fab2ae53762f9baac6041503eeeaa968ce38ea41779f7cb526751c667 golangci-lint-1.22.2-windows-amd64.zip
|
||||
109d38cdc89f271392f5a138d6782657157f9f496fd4801956efa2d0428e0cbe golangci-lint-1.22.2-linux-amd64.tar.gz
|
||||
f08aae4868d4828c8f07deb0dcd941a1da695b97e58d15e9f3d1d07dcc7a0c84 golangci-lint-1.22.2-linux-armv6.tar.gz
|
||||
37af03d9c144d527cb15c46a07e6a22d3f62b5491e34ad6f3bfe6bb0b0b597d4 golangci-lint-1.22.2-linux-ppc64le.tar.gz
|
||||
251a1081d53944f1d5f86216d752837b23079f90605c9d1cc628da1ffcd2e749 golangci-lint-1.22.2-freebsd-armv7.tar.gz
|
@@ -22,19 +22,18 @@ variables `PPA_SIGNING_KEY` and `PPA_SSH_KEY` on Travis.
|
||||
|
||||
We want to build go-ethereum with the most recent version of Go, irrespective of the Go
|
||||
version that is available in the main Ubuntu repository. In order to make this possible,
|
||||
our PPA depends on the ~gophers/ubuntu/archive PPA. Our source package build-depends on
|
||||
golang-1.11, which is co-installable alongside the regular golang package. PPA dependencies
|
||||
can be edited at https://launchpad.net/%7Eethereum/+archive/ubuntu/ethereum/+edit-dependencies
|
||||
we bundle the entire Go sources into our own source archive and start the build job by
|
||||
compiling Go and then using that to build go-ethereum. On Trusty we have a special case
|
||||
requiring the `~gophers/ubuntu/archive` PPA since Trusty can't even build Go itself. PPA
|
||||
deps are set at https://launchpad.net/%7Eethereum/+archive/ubuntu/ethereum/+edit-dependencies
|
||||
|
||||
## Building Packages Locally (for testing)
|
||||
|
||||
You need to run Ubuntu to do test packaging.
|
||||
|
||||
Add the gophers PPA and install Go 1.11 and Debian packaging tools:
|
||||
Install any version of Go and Debian packaging tools:
|
||||
|
||||
$ sudo apt-add-repository ppa:gophers/ubuntu/archive
|
||||
$ sudo apt-get update
|
||||
$ sudo apt-get install build-essential golang-1.11 devscripts debhelper python-bzrlib python-paramiko
|
||||
$ sudo apt-get install build-essential golang-go devscripts debhelper python-bzrlib python-paramiko
|
||||
|
||||
Create the source packages:
|
||||
|
||||
@@ -42,10 +41,10 @@ Create the source packages:
|
||||
|
||||
Then go into the source package directory for your running distribution and build the package:
|
||||
|
||||
$ cd dist/ethereum-unstable-1.6.0+xenial
|
||||
$ cd dist/ethereum-unstable-1.9.6+bionic
|
||||
$ dpkg-buildpackage
|
||||
|
||||
Built packages are placed in the dist/ directory.
|
||||
|
||||
$ cd ..
|
||||
$ dpkg-deb -c geth-unstable_1.6.0+xenial_amd64.deb
|
||||
$ dpkg-deb -c geth-unstable_1.9.6+bionic_amd64.deb
|
||||
|
build/ci.go (189 lines changed)
@@ -58,6 +58,7 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/cespare/cp"
|
||||
"github.com/ethereum/go-ethereum/internal/build"
|
||||
"github.com/ethereum/go-ethereum/params"
|
||||
)
|
||||
@@ -138,7 +139,19 @@ var (
|
||||
// Note: zesty is unsupported because it was officially deprecated on Launchpad.
|
||||
// Note: artful is unsupported because it was officially deprecated on Launchpad.
|
||||
// Note: cosmic is unsupported because it was officially deprecated on Launchpad.
|
||||
debDistros = []string{"trusty", "xenial", "bionic", "disco", "eoan"}
|
||||
debDistroGoBoots = map[string]string{
|
||||
"trusty": "golang-1.11",
|
||||
"xenial": "golang-go",
|
||||
"bionic": "golang-go",
|
||||
"disco": "golang-go",
|
||||
"eoan": "golang-go",
|
||||
"focal": "golang-go",
|
||||
}
|
||||
|
||||
debGoBootPaths = map[string]string{
|
||||
"golang-1.11": "/usr/lib/go-1.11",
|
||||
"golang-go": "/usr/lib/go",
|
||||
}
|
||||
)
|
||||
|
||||
var GOBIN, _ = filepath.Abs(filepath.Join("build", "bin"))
|
||||
@@ -202,9 +215,9 @@ func doInstall(cmdline []string) {
|
||||
var minor int
|
||||
fmt.Sscanf(strings.TrimPrefix(runtime.Version(), "go1."), "%d", &minor)
|
||||
|
||||
if minor < 9 {
|
||||
if minor < 11 {
|
||||
log.Println("You have Go version", runtime.Version())
|
||||
log.Println("go-ethereum requires at least Go version 1.9 and cannot")
|
||||
log.Println("go-ethereum requires at least Go version 1.11 and cannot")
|
||||
log.Println("be compiled with an earlier version. Please upgrade your Go installation.")
|
||||
os.Exit(1)
|
||||
}
|
||||
@@ -217,18 +230,15 @@ func doInstall(cmdline []string) {
|
||||
|
||||
if *arch == "" || *arch == runtime.GOARCH {
|
||||
goinstall := goTool("install", buildFlags(env)...)
|
||||
if runtime.GOARCH == "arm64" {
|
||||
goinstall.Args = append(goinstall.Args, "-p", "1")
|
||||
}
|
||||
goinstall.Args = append(goinstall.Args, "-v")
|
||||
goinstall.Args = append(goinstall.Args, packages...)
|
||||
build.MustRun(goinstall)
|
||||
return
|
||||
}
|
||||
// If we are cross compiling to ARMv5 ARMv6 or ARMv7, clean any previous builds
|
||||
if *arch == "arm" {
|
||||
os.RemoveAll(filepath.Join(runtime.GOROOT(), "pkg", runtime.GOOS+"_arm"))
|
||||
for _, path := range filepath.SplitList(build.GOPATH()) {
|
||||
os.RemoveAll(filepath.Join(path, "pkg", runtime.GOOS+"_arm"))
|
||||
}
|
||||
}
|
||||
|
||||
// Seems we are cross compiling, work around forbidden GOBIN
|
||||
goinstall := goToolArch(*arch, *cc, "install", buildFlags(env)...)
|
||||
goinstall.Args = append(goinstall.Args, "-v")
|
||||
@@ -278,7 +288,6 @@ func goTool(subcmd string, args ...string) *exec.Cmd {
|
||||
|
||||
func goToolArch(arch string, cc string, subcmd string, args ...string) *exec.Cmd {
|
||||
cmd := build.GoTool(subcmd, args...)
|
||||
cmd.Env = []string{"GOPATH=" + build.GOPATH()}
|
||||
if arch == "" || arch == runtime.GOARCH {
|
||||
cmd.Env = append(cmd.Env, "GOBIN="+GOBIN)
|
||||
} else {
|
||||
@@ -289,7 +298,7 @@ func goToolArch(arch string, cc string, subcmd string, args ...string) *exec.Cmd
|
||||
cmd.Env = append(cmd.Env, "CC="+cc)
|
||||
}
|
||||
for _, e := range os.Environ() {
|
||||
if strings.HasPrefix(e, "GOPATH=") || strings.HasPrefix(e, "GOBIN=") {
|
||||
if strings.HasPrefix(e, "GOBIN=") {
|
||||
continue
|
||||
}
|
||||
cmd.Env = append(cmd.Env, e)
|
||||
@@ -303,6 +312,7 @@ func goToolArch(arch string, cc string, subcmd string, args ...string) *exec.Cmd
|
||||
|
||||
func doTest(cmdline []string) {
|
||||
coverage := flag.Bool("coverage", false, "Whether to record code coverage")
|
||||
verbose := flag.Bool("v", false, "Whether to log verbosely")
|
||||
flag.CommandLine.Parse(cmdline)
|
||||
env := build.Env()
|
||||
|
||||
@@ -319,44 +329,46 @@ func doTest(cmdline []string) {
|
||||
if *coverage {
|
||||
gotest.Args = append(gotest.Args, "-covermode=atomic", "-cover")
|
||||
}
|
||||
if *verbose {
|
||||
gotest.Args = append(gotest.Args, "-v")
|
||||
}
|
||||
|
||||
gotest.Args = append(gotest.Args, packages...)
|
||||
build.MustRun(gotest)
|
||||
}
|
||||
|
||||
// runs gometalinter on requested packages
|
||||
// doLint runs golangci-lint on requested packages.
|
||||
func doLint(cmdline []string) {
|
||||
var (
|
||||
cachedir = flag.String("cachedir", "./build/cache", "directory for caching golangci-lint binary.")
|
||||
)
|
||||
flag.CommandLine.Parse(cmdline)
|
||||
|
||||
packages := []string{"./..."}
|
||||
if len(flag.CommandLine.Args()) > 0 {
|
||||
packages = flag.CommandLine.Args()
|
||||
}
|
||||
// Get metalinter and install all supported linters
|
||||
build.MustRun(goTool("get", "gopkg.in/alecthomas/gometalinter.v2"))
|
||||
build.MustRunCommand(filepath.Join(GOBIN, "gometalinter.v2"), "--install")
|
||||
|
||||
// Run fast linters batched together
|
||||
configs := []string{
|
||||
"--vendor",
|
||||
"--tests",
|
||||
"--deadline=2m",
|
||||
"--disable-all",
|
||||
"--enable=goimports",
|
||||
"--enable=varcheck",
|
||||
"--enable=vet",
|
||||
"--enable=gofmt",
|
||||
"--enable=misspell",
|
||||
"--enable=goconst",
|
||||
"--min-occurrences=6", // for goconst
|
||||
}
|
||||
build.MustRunCommand(filepath.Join(GOBIN, "gometalinter.v2"), append(configs, packages...)...)
|
||||
linter := downloadLinter(*cachedir)
|
||||
lflags := []string{"run", "--config", ".golangci.yml"}
|
||||
build.MustRunCommand(linter, append(lflags, packages...)...)
|
||||
fmt.Println("You have achieved perfection.")
|
||||
}
|
||||
|
||||
// Run slow linters one by one
|
||||
for _, linter := range []string{"unconvert", "gosimple"} {
|
||||
configs = []string{"--vendor", "--tests", "--deadline=10m", "--disable-all", "--enable=" + linter}
|
||||
build.MustRunCommand(filepath.Join(GOBIN, "gometalinter.v2"), append(configs, packages...)...)
|
||||
// downloadLinter downloads and unpacks golangci-lint.
|
||||
func downloadLinter(cachedir string) string {
|
||||
const version = "1.22.2"
|
||||
|
||||
csdb := build.MustLoadChecksums("build/checksums.txt")
|
||||
base := fmt.Sprintf("golangci-lint-%s-%s-%s", version, runtime.GOOS, runtime.GOARCH)
|
||||
url := fmt.Sprintf("https://github.com/golangci/golangci-lint/releases/download/v%s/%s.tar.gz", version, base)
|
||||
archivePath := filepath.Join(cachedir, base+".tar.gz")
|
||||
if err := csdb.DownloadFile(url, archivePath); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
if err := build.ExtractTarballArchive(archivePath, cachedir); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
return filepath.Join(cachedir, base, "golangci-lint")
|
||||
}
|
||||
|
||||
// Release Packaging
|
||||
@@ -459,11 +471,13 @@ func maybeSkipArchive(env build.Environment) {
|
||||
// Debian Packaging
|
||||
func doDebianSource(cmdline []string) {
|
||||
var (
|
||||
signer = flag.String("signer", "", `Signing key name, also used as package author`)
|
||||
upload = flag.String("upload", "", `Where to upload the source package (usually "ethereum/ethereum")`)
|
||||
sshUser = flag.String("sftp-user", "", `Username for SFTP upload (usually "geth-ci")`)
|
||||
workdir = flag.String("workdir", "", `Output directory for packages (uses temp dir if unset)`)
|
||||
now = time.Now()
|
||||
goversion = flag.String("goversion", "", `Go version to build with (will be included in the source package)`)
|
||||
cachedir = flag.String("cachedir", "./build/cache", `Filesystem path to cache the downloaded Go bundles at`)
|
||||
signer = flag.String("signer", "", `Signing key name, also used as package author`)
|
||||
upload = flag.String("upload", "", `Where to upload the source package (usually "ethereum/ethereum")`)
|
||||
sshUser = flag.String("sftp-user", "", `Username for SFTP upload (usually "geth-ci")`)
|
||||
workdir = flag.String("workdir", "", `Output directory for packages (uses temp dir if unset)`)
|
||||
now = time.Now()
|
||||
)
|
||||
flag.CommandLine.Parse(cmdline)
|
||||
*workdir = makeWorkdir(*workdir)
|
||||
@@ -477,12 +491,39 @@ func doDebianSource(cmdline []string) {
|
||||
build.MustRun(gpg)
|
||||
}
|
||||
|
||||
// Create Debian packages and upload them
|
||||
// Download and verify the Go source package.
|
||||
gobundle := downloadGoSources(*goversion, *cachedir)
|
||||
|
||||
// Download all the dependencies needed to build the sources and run the ci script
|
||||
srcdepfetch := goTool("install", "-n", "./...")
|
||||
srcdepfetch.Env = append(os.Environ(), "GOPATH="+filepath.Join(*workdir, "modgopath"))
|
||||
build.MustRun(srcdepfetch)
|
||||
|
||||
cidepfetch := goTool("run", "./build/ci.go")
|
||||
cidepfetch.Env = append(os.Environ(), "GOPATH="+filepath.Join(*workdir, "modgopath"))
|
||||
cidepfetch.Run() // Command fails, don't care, we only need the deps to start it
|
||||
|
||||
// Create Debian packages and upload them.
|
||||
for _, pkg := range debPackages {
|
||||
for _, distro := range debDistros {
|
||||
meta := newDebMetadata(distro, *signer, env, now, pkg.Name, pkg.Version, pkg.Executables)
|
||||
for distro, goboot := range debDistroGoBoots {
|
||||
// Prepare the debian package with the go-ethereum sources.
|
||||
meta := newDebMetadata(distro, goboot, *signer, env, now, pkg.Name, pkg.Version, pkg.Executables)
|
||||
pkgdir := stageDebianSource(*workdir, meta)
|
||||
debuild := exec.Command("debuild", "-S", "-sa", "-us", "-uc", "-d", "-Zxz")
|
||||
|
||||
// Add Go source code
|
||||
if err := build.ExtractTarballArchive(gobundle, pkgdir); err != nil {
|
||||
log.Fatalf("Failed to extract Go sources: %v", err)
|
||||
}
|
||||
if err := os.Rename(filepath.Join(pkgdir, "go"), filepath.Join(pkgdir, ".go")); err != nil {
|
||||
log.Fatalf("Failed to rename Go source folder: %v", err)
|
||||
}
|
||||
// Add all dependency modules in compressed form
|
||||
os.MkdirAll(filepath.Join(pkgdir, ".mod", "cache"), 0755)
|
||||
if err := cp.CopyAll(filepath.Join(pkgdir, ".mod", "cache", "download"), filepath.Join(*workdir, "modgopath", "pkg", "mod", "cache", "download")); err != nil {
|
||||
log.Fatalf("Failed to copy Go module dependencies: %v", err)
|
||||
}
|
||||
// Run the packaging and upload to the PPA
|
||||
debuild := exec.Command("debuild", "-S", "-sa", "-us", "-uc", "-d", "-Zxz", "-nc")
|
||||
debuild.Dir = pkgdir
|
||||
build.MustRun(debuild)
|
||||
|
||||
@@ -502,6 +543,17 @@ func doDebianSource(cmdline []string) {
|
||||
}
|
||||
}
|
||||
|
||||
func downloadGoSources(version string, cachedir string) string {
|
||||
csdb := build.MustLoadChecksums("build/checksums.txt")
|
||||
file := fmt.Sprintf("go%s.src.tar.gz", version)
|
||||
url := "https://dl.google.com/go/" + file
|
||||
dst := filepath.Join(cachedir, file)
|
||||
if err := csdb.DownloadFile(url, dst); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
func ppaUpload(workdir, ppa, sshUser string, files []string) {
|
||||
p := strings.Split(ppa, "/")
|
||||
if len(p) != 2 {
|
||||
@@ -561,7 +613,9 @@ type debPackage struct {
|
||||
}
|
||||
|
||||
type debMetadata struct {
|
||||
Env build.Environment
|
||||
Env build.Environment
|
||||
GoBootPackage string
|
||||
GoBootPath string
|
||||
|
||||
PackageName string
|
||||
|
||||
@@ -590,19 +644,21 @@ func (d debExecutable) Package() string {
|
||||
return d.BinaryName
|
||||
}
|
||||
|
||||
func newDebMetadata(distro, author string, env build.Environment, t time.Time, name string, version string, exes []debExecutable) debMetadata {
|
||||
func newDebMetadata(distro, goboot, author string, env build.Environment, t time.Time, name string, version string, exes []debExecutable) debMetadata {
|
||||
if author == "" {
|
||||
// No signing key, use default author.
|
||||
author = "Ethereum Builds <fjl@ethereum.org>"
|
||||
}
|
||||
return debMetadata{
|
||||
PackageName: name,
|
||||
Env: env,
|
||||
Author: author,
|
||||
Distro: distro,
|
||||
Version: version,
|
||||
Time: t.Format(time.RFC1123Z),
|
||||
Executables: exes,
|
||||
GoBootPackage: goboot,
|
||||
GoBootPath: debGoBootPaths[goboot],
|
||||
PackageName: name,
|
||||
Env: env,
|
||||
Author: author,
|
||||
Distro: distro,
|
||||
Version: version,
|
||||
Time: t.Format(time.RFC1123Z),
|
||||
Executables: exes,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -667,7 +723,6 @@ func stageDebianSource(tmpdir string, meta debMetadata) (pkgdir string) {
|
||||
if err := os.Mkdir(pkgdir, 0755); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
// Copy the source code.
|
||||
build.MustRunCommand("git", "checkout-index", "-a", "--prefix", pkgdir+string(filepath.Separator))
|
||||
|
||||
@@ -685,7 +740,6 @@ func stageDebianSource(tmpdir string, meta debMetadata) (pkgdir string) {
|
||||
build.Render("build/deb/"+meta.PackageName+"/deb.install", install, 0644, exe)
|
||||
build.Render("build/deb/"+meta.PackageName+"/deb.docs", docs, 0644, exe)
|
||||
}
|
||||
|
||||
return pkgdir
|
||||
}
|
||||
|
||||
@@ -733,9 +787,12 @@ func doWindowsInstaller(cmdline []string) {
|
||||
build.Render("build/nsis.uninstall.nsh", filepath.Join(*workdir, "uninstall.nsh"), 0644, allTools)
|
||||
build.Render("build/nsis.pathupdate.nsh", filepath.Join(*workdir, "PathUpdate.nsh"), 0644, nil)
|
||||
build.Render("build/nsis.envvarupdate.nsh", filepath.Join(*workdir, "EnvVarUpdate.nsh"), 0644, nil)
|
||||
build.CopyFile(filepath.Join(*workdir, "SimpleFC.dll"), "build/nsis.simplefc.dll", 0755)
|
||||
build.CopyFile(filepath.Join(*workdir, "COPYING"), "COPYING", 0755)
|
||||
|
||||
if err := cp.CopyFile(filepath.Join(*workdir, "SimpleFC.dll"), "build/nsis.simplefc.dll"); err != nil {
|
||||
log.Fatal("Failed to copy SimpleFC.dll: %v", err)
|
||||
}
|
||||
if err := cp.CopyFile(filepath.Join(*workdir, "COPYING"), "COPYING"); err != nil {
|
||||
log.Fatal("Failed to copy copyright note: %v", err)
|
||||
}
|
||||
// Build the installer. This assumes that all the needed files have been previously
|
||||
// built (don't mix building and packaging to keep cross compilation complexity to a
|
||||
// minimum).
|
||||
@@ -752,7 +809,6 @@ func doWindowsInstaller(cmdline []string) {
|
||||
"/DARCH="+*arch,
|
||||
filepath.Join(*workdir, "geth.nsi"),
|
||||
)
|
||||
|
||||
// Sign and publish installer.
|
||||
if err := archiveUpload(installer, *upload, *signer); err != nil {
|
||||
log.Fatal(err)
|
||||
@@ -825,7 +881,6 @@ func gomobileTool(subcmd string, args ...string) *exec.Cmd {
|
||||
cmd := exec.Command(filepath.Join(GOBIN, "gomobile"), subcmd)
|
||||
cmd.Args = append(cmd.Args, args...)
|
||||
cmd.Env = []string{
|
||||
"GOPATH=" + build.GOPATH(),
|
||||
"PATH=" + GOBIN + string(os.PathListSeparator) + os.Getenv("PATH"),
|
||||
}
|
||||
for _, e := range os.Environ() {
|
||||
@@ -1013,16 +1068,10 @@ func doXgo(cmdline []string) {
|
||||
|
||||
func xgoTool(args []string) *exec.Cmd {
|
||||
cmd := exec.Command(filepath.Join(GOBIN, "xgo"), args...)
|
||||
cmd.Env = []string{
|
||||
"GOPATH=" + build.GOPATH(),
|
||||
cmd.Env = os.Environ()
|
||||
cmd.Env = append(cmd.Env, []string{
|
||||
"GOBIN=" + GOBIN,
|
||||
}
|
||||
for _, e := range os.Environ() {
|
||||
if strings.HasPrefix(e, "GOPATH=") || strings.HasPrefix(e, "GOBIN=") {
|
||||
continue
|
||||
}
|
||||
cmd.Env = append(cmd.Env, e)
|
||||
}
|
||||
}...)
|
||||
return cmd
|
||||
}
|
||||
|
||||
|
@@ -1,19 +0,0 @@
|
||||
#!/bin/sh
|
||||
|
||||
# Cleaning the Go cache only makes sense if we actually have Go installed... or
|
||||
# if Go is actually callable. This does not hold true during deb packaging, so
|
||||
# we need an explicit check to avoid build failures.
|
||||
if ! command -v go > /dev/null; then
|
||||
exit
|
||||
fi
|
||||
|
||||
version_gt() {
|
||||
test "$(printf '%s\n' "$@" | sort -V | head -n 1)" != "$1"
|
||||
}
|
||||
|
||||
golang_version=$(go version |cut -d' ' -f3 |sed 's/go//')
|
||||
|
||||
# Clean go build cache when go version is greater than or equal to 1.10
|
||||
if !(version_gt 1.10 $golang_version); then
|
||||
go clean -cache
|
||||
fi
|
@@ -2,7 +2,7 @@ Source: {{.Name}}
|
||||
Section: science
|
||||
Priority: extra
|
||||
Maintainer: {{.Author}}
|
||||
Build-Depends: debhelper (>= 8.0.0), golang-1.11
|
||||
Build-Depends: debhelper (>= 8.0.0), {{.GoBootPackage}}
|
||||
Standards-Version: 3.9.5
|
||||
Homepage: https://ethereum.org
|
||||
Vcs-Git: git://github.com/ethereum/go-ethereum.git
|
||||
|
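To make the `{{.GoBootPackage}}` substitution above concrete: the value comes from the `debDistroGoBoots` map added to ci.go earlier in this diff and ends up in the rendered control file per distro. The snippet below is a minimal text/template sketch with a cut-down stand-in for `debMetadata`; the package names simply echo the map entries.

```go
package main

import (
	"os"
	"text/template"
)

func main() {
	// Cut-down stand-in for debMetadata: only the field the control line needs.
	// "golang-go" matches e.g. bionic in debDistroGoBoots; trusty would use "golang-1.11".
	meta := struct{ GoBootPackage string }{GoBootPackage: "golang-go"}

	tpl := template.Must(template.New("control").Parse(
		"Build-Depends: debhelper (>= 8.0.0), {{.GoBootPackage}}\n"))
	if err := tpl.Execute(os.Stdout, meta); err != nil {
		panic(err)
	}
	// Output: Build-Depends: debhelper (>= 8.0.0), golang-go
}
```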
@@ -4,11 +4,27 @@
|
||||
# Uncomment this to turn on verbose mode.
|
||||
#export DH_VERBOSE=1
|
||||
|
||||
# Launchpad rejects Go's access to $HOME/.cache, use custom folder
|
||||
# Launchpad rejects Go's access to $HOME, use custom folders
|
||||
export GOCACHE=/tmp/go-build
|
||||
export GOPATH=/tmp/gopath
|
||||
export GOROOT_BOOTSTRAP={{.GoBootPath}}
|
||||
|
||||
override_dh_auto_clean:
|
||||
# Don't try to be smart Launchpad, we know our build rules better than you
|
||||
|
||||
override_dh_auto_build:
|
||||
build/env.sh /usr/lib/go-1.11/bin/go run build/ci.go install -git-commit={{.Env.Commit}} -git-branch={{.Env.Branch}} -git-tag={{.Env.Tag}} -buildnum={{.Env.Buildnum}} -pull-request={{.Env.IsPullRequest}}
|
||||
# We can't download a fresh Go within Launchpad, so we're shipping and building
|
||||
# one on the fly. However, we can't build it inside the go-ethereum folder as
|
||||
# bootstrapping clashes with go modules, so build in a sibling folder.
|
||||
(mv .go ../ && cd ../.go/src && ./make.bash)
|
||||
|
||||
# We can't download external go modules within Launchpad, so we're shipping the
|
||||
# entire dependency source cache with go-ethereum.
|
||||
mkdir -p $(GOPATH)/pkg
|
||||
mv .mod $(GOPATH)/pkg/mod
|
||||
|
||||
# A fresh Go was built, all dependency downloads faked, hope build works now
|
||||
../.go/bin/go run build/ci.go install -git-commit={{.Env.Commit}} -git-branch={{.Env.Branch}} -git-tag={{.Env.Tag}} -buildnum={{.Env.Buildnum}} -pull-request={{.Env.IsPullRequest}}
|
||||
|
||||
override_dh_auto_test:
|
||||
|
||||
|
build/env.sh (deleted file, 30 lines)
@@ -1,30 +0,0 @@
|
||||
#!/bin/sh
|
||||
|
||||
set -e
|
||||
|
||||
if [ ! -f "build/env.sh" ]; then
|
||||
echo "$0 must be run from the root of the repository."
|
||||
exit 2
|
||||
fi
|
||||
|
||||
# Create fake Go workspace if it doesn't exist yet.
|
||||
workspace="$PWD/build/_workspace"
|
||||
root="$PWD"
|
||||
ethdir="$workspace/src/github.com/ethereum"
|
||||
if [ ! -L "$ethdir/go-ethereum" ]; then
|
||||
mkdir -p "$ethdir"
|
||||
cd "$ethdir"
|
||||
ln -s ../../../../../. go-ethereum
|
||||
cd "$root"
|
||||
fi
|
||||
|
||||
# Set up the environment to use the workspace.
|
||||
GOPATH="$workspace"
|
||||
export GOPATH
|
||||
|
||||
# Run the command inside the workspace.
|
||||
cd "$ethdir/go-ethereum"
|
||||
PWD="$ethdir/go-ethereum"
|
||||
|
||||
# Launch the arguments with the configured environment.
|
||||
exec "$@"
|
cmd/abidump/main.go (new file, 74 lines)
@@ -0,0 +1,74 @@
|
||||
// Copyright 2019 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"encoding/hex"
|
||||
"flag"
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/ethereum/go-ethereum/signer/core"
|
||||
"github.com/ethereum/go-ethereum/signer/fourbyte"
|
||||
)
|
||||
|
||||
func init() {
|
||||
flag.Usage = func() {
|
||||
fmt.Fprintln(os.Stderr, "Usage:", os.Args[0], "<hexdata>")
|
||||
flag.PrintDefaults()
|
||||
fmt.Fprintln(os.Stderr, `
|
||||
Parses the given ABI data and tries to interpret it from the fourbyte database.`)
|
||||
}
|
||||
}
|
||||
|
||||
func parse(data []byte) {
|
||||
db, err := fourbyte.New()
|
||||
if err != nil {
|
||||
die(err)
|
||||
}
|
||||
messages := core.ValidationMessages{}
|
||||
db.ValidateCallData(nil, data, &messages)
|
||||
for _, m := range messages.Messages {
|
||||
fmt.Printf("%v: %v\n", m.Typ, m.Message)
|
||||
}
|
||||
}
|
||||
|
||||
// Example
|
||||
// ./abidump a9059cbb000000000000000000000000ea0e2dc7d65a50e77fc7e84bff3fd2a9e781ff5c0000000000000000000000000000000000000000000000015af1d78b58c40000
|
||||
func main() {
|
||||
flag.Parse()
|
||||
|
||||
switch {
|
||||
case flag.NArg() == 1:
|
||||
hexdata := flag.Arg(0)
|
||||
data, err := hex.DecodeString(strings.TrimPrefix(hexdata, "0x"))
|
||||
if err != nil {
|
||||
die(err)
|
||||
}
|
||||
parse(data)
|
||||
default:
|
||||
fmt.Fprintln(os.Stderr, "Error: one argument needed")
|
||||
flag.Usage()
|
||||
os.Exit(2)
|
||||
}
|
||||
}
|
||||
|
||||
func die(args ...interface{}) {
|
||||
fmt.Fprintln(os.Stderr, args...)
|
||||
os.Exit(1)
|
||||
}
|
@@ -21,8 +21,11 @@ import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"strings"
|
||||
|
||||
"github.com/ethereum/go-ethereum/accounts/abi"
|
||||
"github.com/ethereum/go-ethereum/accounts/abi/bind"
|
||||
"github.com/ethereum/go-ethereum/cmd/utils"
|
||||
"github.com/ethereum/go-ethereum/common/compiler"
|
||||
@@ -31,19 +34,6 @@ import (
|
||||
"gopkg.in/urfave/cli.v1"
|
||||
)
|
||||
|
||||
const (
|
||||
commandHelperTemplate = `{{.Name}}{{if .Subcommands}} command{{end}}{{if .Flags}} [command options]{{end}} [arguments...]
|
||||
{{if .Description}}{{.Description}}
|
||||
{{end}}{{if .Subcommands}}
|
||||
SUBCOMMANDS:
|
||||
{{range .Subcommands}}{{.Name}}{{with .ShortName}}, {{.}}{{end}}{{ "\t" }}{{.Usage}}
|
||||
{{end}}{{end}}{{if .Flags}}
|
||||
OPTIONS:
|
||||
{{range $.Flags}}{{"\t"}}{{.}}
|
||||
{{end}}
|
||||
{{end}}`
|
||||
)
|
||||
|
||||
var (
|
||||
// Git SHA1 commit hash of the release (set via linker flags)
|
||||
gitCommit = ""
|
||||
@@ -103,6 +93,10 @@ var (
|
||||
Usage: "Destination language for the bindings (go, java, objc)",
|
||||
Value: "go",
|
||||
}
|
||||
aliasFlag = cli.StringFlag{
|
||||
Name: "alias",
|
||||
Usage: "Comma separated aliases for function and event renaming, e.g. foo=bar",
|
||||
}
|
||||
)
|
||||
|
||||
func init() {
|
||||
@@ -120,9 +114,10 @@ func init() {
|
||||
pkgFlag,
|
||||
outFlag,
|
||||
langFlag,
|
||||
aliasFlag,
|
||||
}
|
||||
app.Action = utils.MigrateFlags(abigen)
|
||||
cli.CommandHelpTemplate = commandHelperTemplate
|
||||
cli.CommandHelpTemplate = utils.OriginCommandHelpTemplate
|
||||
}
|
||||
|
||||
func abigen(c *cli.Context) error {
|
||||
@@ -144,11 +139,12 @@ func abigen(c *cli.Context) error {
|
||||
}
|
||||
// If the entire solidity code was specified, build and bind based on that
|
||||
var (
|
||||
abis []string
|
||||
bins []string
|
||||
types []string
|
||||
sigs []map[string]string
|
||||
libs = make(map[string]string)
|
||||
abis []string
|
||||
bins []string
|
||||
types []string
|
||||
sigs []map[string]string
|
||||
libs = make(map[string]string)
|
||||
aliases = make(map[string]string)
|
||||
)
|
||||
if c.GlobalString(abiFlag.Name) != "" {
|
||||
// Load up the ABI, optional bytecode and type name from the parameters
|
||||
@@ -199,10 +195,22 @@ func abigen(c *cli.Context) error {
|
||||
utils.Fatalf("Failed to build Solidity contract: %v", err)
|
||||
}
|
||||
case c.GlobalIsSet(vyFlag.Name):
|
||||
contracts, err = compiler.CompileVyper(c.GlobalString(vyperFlag.Name), c.GlobalString(vyFlag.Name))
|
||||
output, err := compiler.CompileVyper(c.GlobalString(vyperFlag.Name), c.GlobalString(vyFlag.Name))
|
||||
if err != nil {
|
||||
utils.Fatalf("Failed to build Vyper contract: %v", err)
|
||||
}
|
||||
contracts = make(map[string]*compiler.Contract)
|
||||
for n, contract := range output {
|
||||
name := n
|
||||
// Sanitize the combined json names to match the
|
||||
// format expected by solidity.
|
||||
if !strings.Contains(n, ":") {
|
||||
// Remove extra path components
|
||||
name = abi.ToCamelCase(strings.TrimSuffix(filepath.Base(name), ".vy"))
|
||||
}
|
||||
contracts[name] = contract
|
||||
}
|
||||
|
||||
case c.GlobalIsSet(jsonFlag.Name):
|
||||
jsonOutput, err := ioutil.ReadFile(c.GlobalString(jsonFlag.Name))
|
||||
if err != nil {
|
||||
@@ -232,8 +240,20 @@ func abigen(c *cli.Context) error {
|
||||
libs[libPattern] = nameParts[len(nameParts)-1]
|
||||
}
|
||||
}
|
||||
// Extract all aliases from the flags
|
||||
if c.GlobalIsSet(aliasFlag.Name) {
|
||||
// We support multi-versions for aliasing
|
||||
// e.g.
|
||||
// foo=bar,foo2=bar2
|
||||
// foo:bar,foo2:bar2
|
||||
re := regexp.MustCompile(`(?:(\w+)[:=](\w+))`)
|
||||
submatches := re.FindAllStringSubmatch(c.GlobalString(aliasFlag.Name), -1)
|
||||
for _, match := range submatches {
|
||||
aliases[match[1]] = match[2]
|
||||
}
|
||||
}
|
||||
// Generate the contract binding
|
||||
code, err := bind.Bind(types, abis, bins, sigs, c.GlobalString(pkgFlag.Name), lang, libs)
|
||||
code, err := bind.Bind(types, abis, bins, sigs, c.GlobalString(pkgFlag.Name), lang, libs, aliases)
|
||||
if err != nil {
|
||||
utils.Fatalf("Failed to generate ABI binding: %v", err)
|
||||
}
|
||||
|
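A side note on the `--alias` handling added above: the regular expression accepts both `name=alias` and `name:alias` pairs in a comma-separated list. The snippet below exercises that exact expression on a made-up flag value.

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Same expression as in abigen above; both foo=bar and foo:bar forms match.
	re := regexp.MustCompile(`(?:(\w+)[:=](\w+))`)

	aliases := make(map[string]string)
	flagValue := "transfer=send,balanceOf:getBalance" // hypothetical --alias value
	for _, match := range re.FindAllStringSubmatch(flagValue, -1) {
		aliases[match[1]] = match[2]
	}
	fmt.Println(aliases) // map[balanceOf:getBalance transfer:send]
}
```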
@@ -70,7 +70,9 @@ func main() {
|
||||
if err = crypto.SaveECDSA(*genKey, nodeKey); err != nil {
|
||||
utils.Fatalf("%v", err)
|
||||
}
|
||||
return
|
||||
if !*writeAddr {
|
||||
return
|
||||
}
|
||||
case *nodeKeyFile == "" && *nodeKeyHex == "":
|
||||
utils.Fatalf("Use -nodekey or -nodekeyhex to specify a private key")
|
||||
case *nodeKeyFile != "" && *nodeKeyHex != "":
|
||||
|
@@ -28,19 +28,6 @@ import (
|
||||
"gopkg.in/urfave/cli.v1"
|
||||
)
|
||||
|
||||
const (
|
||||
commandHelperTemplate = `{{.Name}}{{if .Subcommands}} command{{end}}{{if .Flags}} [command options]{{end}} [arguments...]
|
||||
{{if .Description}}{{.Description}}
|
||||
{{end}}{{if .Subcommands}}
|
||||
SUBCOMMANDS:
|
||||
{{range .Subcommands}}{{.Name}}{{with .ShortName}}, {{.}}{{end}}{{ "\t" }}{{.Usage}}
|
||||
{{end}}{{end}}{{if .Flags}}
|
||||
OPTIONS:
|
||||
{{range $.Flags}}{{"\t"}}{{.}}
|
||||
{{end}}
|
||||
{{end}}`
|
||||
)
|
||||
|
||||
var (
|
||||
// Git SHA1 commit hash of the release (set via linker flags)
|
||||
gitCommit = ""
|
||||
@@ -61,7 +48,7 @@ func init() {
|
||||
oracleFlag,
|
||||
nodeURLFlag,
|
||||
}
|
||||
cli.CommandHelpTemplate = commandHelperTemplate
|
||||
cli.CommandHelpTemplate = utils.OriginCommandHelpTemplate
|
||||
}
|
||||
|
||||
// Commonly used command line flags.
|
||||
|
@@ -223,6 +223,7 @@ func init() {
|
||||
}
|
||||
app.Action = signer
|
||||
app.Commands = []cli.Command{initCommand, attestCommand, setCredentialCommand, delCredentialCommand, gendocCommand}
|
||||
cli.CommandHelpTemplate = utils.OriginCommandHelpTemplate
|
||||
}
|
||||
|
||||
func main() {
|
||||
@@ -387,7 +388,7 @@ func initialize(c *cli.Context) error {
|
||||
if c.GlobalBool(stdiouiFlag.Name) {
|
||||
logOutput = os.Stderr
|
||||
// If using the stdioui, we can't do the 'confirm'-flow
|
||||
fmt.Fprintf(logOutput, legalWarning)
|
||||
fmt.Fprint(logOutput, legalWarning)
|
||||
} else {
|
||||
if !confirm(legalWarning) {
|
||||
return fmt.Errorf("aborted by user")
|
||||
@@ -404,6 +405,27 @@ func initialize(c *cli.Context) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// ipcEndpoint resolves an IPC endpoint based on a configured value, taking into
|
||||
// account the set data folders as well as the designated platform we're currently
|
||||
// running on.
|
||||
func ipcEndpoint(ipcPath, datadir string) string {
|
||||
// On windows we can only use plain top-level pipes
|
||||
if runtime.GOOS == "windows" {
|
||||
if strings.HasPrefix(ipcPath, `\\.\pipe\`) {
|
||||
return ipcPath
|
||||
}
|
||||
return `\\.\pipe\` + ipcPath
|
||||
}
|
||||
// Resolve names into the data directory full paths otherwise
|
||||
if filepath.Base(ipcPath) == ipcPath {
|
||||
if datadir == "" {
|
||||
return filepath.Join(os.TempDir(), ipcPath)
|
||||
}
|
||||
return filepath.Join(datadir, ipcPath)
|
||||
}
|
||||
return ipcPath
|
||||
}
|
||||
|
||||
func signer(c *cli.Context) error {
|
||||
// If we have some unrecognized command, bail out
|
||||
if args := c.Args(); len(args) > 0 {
|
||||
@@ -532,12 +554,8 @@ func signer(c *cli.Context) error {
|
||||
}()
|
||||
}
|
||||
if !c.GlobalBool(utils.IPCDisabledFlag.Name) {
|
||||
if c.IsSet(utils.IPCPathFlag.Name) {
|
||||
ipcapiURL = c.GlobalString(utils.IPCPathFlag.Name)
|
||||
} else {
|
||||
ipcapiURL = filepath.Join(configDir, "clef.ipc")
|
||||
}
|
||||
|
||||
givenPath := c.GlobalString(utils.IPCPathFlag.Name)
|
||||
ipcapiURL = ipcEndpoint(filepath.Join(givenPath, "clef.ipc"), configDir)
|
||||
listener, _, err := rpc.StartIPCEndpoint(ipcapiURL, rpcAPI)
|
||||
if err != nil {
|
||||
utils.Fatalf("Could not start IPC api: %v", err)
|
||||
@@ -547,7 +565,6 @@ func signer(c *cli.Context) error {
|
||||
listener.Close()
|
||||
log.Info("IPC endpoint closed", "url", ipcapiURL)
|
||||
}()
|
||||
|
||||
}
|
||||
|
||||
if c.GlobalBool(testFlag.Name) {
|
||||
@@ -563,7 +580,7 @@ func signer(c *cli.Context) error {
|
||||
},
|
||||
})
|
||||
|
||||
abortChan := make(chan os.Signal)
|
||||
abortChan := make(chan os.Signal, 1)
|
||||
signal.Notify(abortChan, os.Interrupt)
|
||||
|
||||
sig := <-abortChan
|
||||
@@ -678,7 +695,7 @@ func checkFile(filename string) error {
|
||||
|
||||
// confirm displays a text and asks for user confirmation
|
||||
func confirm(text string) bool {
|
||||
fmt.Printf(text)
|
||||
fmt.Print(text)
|
||||
fmt.Printf("\nEnter 'ok' to proceed:\n> ")
|
||||
|
||||
text, err := bufio.NewReader(os.Stdin).ReadString('\n')
|
||||
@@ -744,21 +761,19 @@ func testExternalUI(api *core.SignerAPI) {
|
||||
api.UI.ShowInfo("Please approve the next request for signing a clique header")
|
||||
time.Sleep(delay)
|
||||
cliqueHeader := types.Header{
|
||||
common.HexToHash("0000H45H"),
|
||||
common.HexToHash("0000H45H"),
|
||||
common.HexToAddress("0000H45H"),
|
||||
common.HexToHash("0000H00H"),
|
||||
common.HexToHash("0000H45H"),
|
||||
common.HexToHash("0000H45H"),
|
||||
types.Bloom{},
|
||||
big.NewInt(1337),
|
||||
big.NewInt(1337),
|
||||
1338,
|
||||
1338,
|
||||
1338,
|
||||
[]byte("Extra data Extra data Extra data Extra data Extra data Extra data Extra data Extra data"),
|
||||
common.HexToHash("0x0000H45H"),
|
||||
types.BlockNonce{},
|
||||
ParentHash: common.HexToHash("0000H45H"),
|
||||
UncleHash: common.HexToHash("0000H45H"),
|
||||
Coinbase: common.HexToAddress("0000H45H"),
|
||||
Root: common.HexToHash("0000H00H"),
|
||||
TxHash: common.HexToHash("0000H45H"),
|
||||
ReceiptHash: common.HexToHash("0000H45H"),
|
||||
Difficulty: big.NewInt(1337),
|
||||
Number: big.NewInt(1337),
|
||||
GasLimit: 1338,
|
||||
GasUsed: 1338,
|
||||
Time: 1338,
|
||||
Extra: []byte("Extra data Extra data Extra data Extra data Extra data Extra data Extra data Extra data"),
|
||||
MixDigest: common.HexToHash("0x0000H45H"),
|
||||
}
|
||||
cliqueRlp, err := rlp.EncodeToBytes(cliqueHeader)
|
||||
if err != nil {
|
||||
@@ -922,7 +937,7 @@ func GenDoc(ctx *cli.Context) {
|
||||
"of the work in canonicalizing and making sense of the data, and it's up to the UI to present" +
|
||||
"the user with the contents of the `message`"
|
||||
sighash, msg := accounts.TextAndHash([]byte("hello world"))
|
||||
messages := []*core.NameValueType{{"message", msg, accounts.MimetypeTextPlain}}
|
||||
messages := []*core.NameValueType{{Name: "message", Value: msg, Typ: accounts.MimetypeTextPlain}}
|
||||
|
||||
add("SignDataRequest", desc, &core.SignDataRequest{
|
||||
Address: common.NewMixedcaseAddress(a),
|
||||
@@ -953,8 +968,8 @@ func GenDoc(ctx *cli.Context) {
|
||||
add("SignTxRequest", desc, &core.SignTxRequest{
|
||||
Meta: meta,
|
||||
Callinfo: []core.ValidationInfo{
|
||||
{"Warning", "Something looks odd, show this message as a warning"},
|
||||
{"Info", "User should see this aswell"},
|
||||
{Typ: "Warning", Message: "Something looks odd, show this message as a warning"},
|
||||
{Typ: "Info", Message: "User should see this as well"},
|
||||
},
|
||||
Transaction: core.SendTxArgs{
|
||||
Data: &data,
|
||||
@@ -1020,16 +1035,21 @@ func GenDoc(ctx *cli.Context) {
|
||||
&core.ListRequest{
|
||||
Meta: meta,
|
||||
Accounts: []accounts.Account{
|
||||
{a, accounts.URL{Scheme: "keystore", Path: "/path/to/keyfile/a"}},
|
||||
{b, accounts.URL{Scheme: "keystore", Path: "/path/to/keyfile/b"}}},
|
||||
{Address: a, URL: accounts.URL{Scheme: "keystore", Path: "/path/to/keyfile/a"}},
|
||||
{Address: b, URL: accounts.URL{Scheme: "keystore", Path: "/path/to/keyfile/b"}}},
|
||||
})
|
||||
|
||||
add("ListResponse", "Response to list request. The response contains a list of all addresses to show to the caller. "+
|
||||
"Note: the UI is free to respond with any address the caller, regardless of whether it exists or not",
|
||||
&core.ListResponse{
|
||||
Accounts: []accounts.Account{
|
||||
{common.HexToAddress("0xcowbeef000000cowbeef00000000000000000c0w"), accounts.URL{Path: ".. ignored .."}},
|
||||
{common.HexToAddress("0xffffffffffffffffffffffffffffffffffffffff"), accounts.URL{}},
|
||||
{
|
||||
Address: common.HexToAddress("0xcowbeef000000cowbeef00000000000000000c0w"),
|
||||
URL: accounts.URL{Path: ".. ignored .."},
|
||||
},
|
||||
{
|
||||
Address: common.HexToAddress("0xffffffffffffffffffffffffffffffffffffffff"),
|
||||
},
|
||||
}})
|
||||
}
|
||||
|
||||
|
@@ -1,6 +1,6 @@
|
||||
## Initializing Clef
|
||||
|
||||
First thing's first, Clef needs to store some data itself. Since that data might be sensitive (passwords, signing rules, accounts), Clef's entire storage is encrypted. To support encrypting data, the first step is to initialize Clef with a random master seed, itself too encrypted with your chosen password:
|
||||
First things first, Clef needs to store some data itself. Since that data might be sensitive (passwords, signing rules, accounts), Clef's entire storage is encrypted. To support encrypting data, the first step is to initialize Clef with a random master seed, itself too encrypted with your chosen password:
|
||||
|
||||
```text
|
||||
$ clef init
|
||||
|
cmd/devp2p/crawl.go (new file, 152 lines)
@@ -0,0 +1,152 @@
|
||||
// Copyright 2019 The go-ethereum Authors
|
||||
// This file is part of go-ethereum.
|
||||
//
|
||||
// go-ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// go-ethereum is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"github.com/ethereum/go-ethereum/p2p/discover"
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
)
|
||||
|
||||
type crawler struct {
|
||||
input nodeSet
|
||||
output nodeSet
|
||||
disc *discover.UDPv4
|
||||
iters []enode.Iterator
|
||||
inputIter enode.Iterator
|
||||
ch chan *enode.Node
|
||||
closed chan struct{}
|
||||
|
||||
// settings
|
||||
revalidateInterval time.Duration
|
||||
}
|
||||
|
||||
func newCrawler(input nodeSet, disc *discover.UDPv4, iters ...enode.Iterator) *crawler {
|
||||
c := &crawler{
|
||||
input: input,
|
||||
output: make(nodeSet, len(input)),
|
||||
disc: disc,
|
||||
iters: iters,
|
||||
inputIter: enode.IterNodes(input.nodes()),
|
||||
ch: make(chan *enode.Node),
|
||||
closed: make(chan struct{}),
|
||||
}
|
||||
c.iters = append(c.iters, c.inputIter)
|
||||
// Copy input to output initially. Any nodes that fail validation
|
||||
// will be dropped from output during the run.
|
||||
for id, n := range input {
|
||||
c.output[id] = n
|
||||
}
|
||||
return c
|
||||
}
|
||||
|
||||
func (c *crawler) run(timeout time.Duration) nodeSet {
|
||||
var (
|
||||
timeoutTimer = time.NewTimer(timeout)
|
||||
timeoutCh <-chan time.Time
|
||||
doneCh = make(chan enode.Iterator, len(c.iters))
|
||||
liveIters = len(c.iters)
|
||||
)
|
||||
for _, it := range c.iters {
|
||||
go c.runIterator(doneCh, it)
|
||||
}
|
||||
|
||||
loop:
|
||||
for {
|
||||
select {
|
||||
case n := <-c.ch:
|
||||
c.updateNode(n)
|
||||
case it := <-doneCh:
|
||||
if it == c.inputIter {
|
||||
// Enable timeout when we're done revalidating the input nodes.
|
||||
log.Info("Revalidation of input set is done", "len", len(c.input))
|
||||
if timeout > 0 {
|
||||
timeoutCh = timeoutTimer.C
|
||||
}
|
||||
}
|
||||
if liveIters--; liveIters == 0 {
|
||||
break loop
|
||||
}
|
||||
case <-timeoutCh:
|
||||
break loop
|
||||
}
|
||||
}
|
||||
|
||||
close(c.closed)
|
||||
for _, it := range c.iters {
|
||||
it.Close()
|
||||
}
|
||||
for ; liveIters > 0; liveIters-- {
|
||||
<-doneCh
|
||||
}
|
||||
return c.output
|
||||
}
|
||||
|
||||
func (c *crawler) runIterator(done chan<- enode.Iterator, it enode.Iterator) {
|
||||
defer func() { done <- it }()
|
||||
for it.Next() {
|
||||
select {
|
||||
case c.ch <- it.Node():
|
||||
case <-c.closed:
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (c *crawler) updateNode(n *enode.Node) {
|
||||
node, ok := c.output[n.ID()]
|
||||
|
||||
// Skip validation of recently-seen nodes.
|
||||
if ok && time.Since(node.LastCheck) < c.revalidateInterval {
|
||||
return
|
||||
}
|
||||
|
||||
// Request the node record.
|
||||
nn, err := c.disc.RequestENR(n)
|
||||
node.LastCheck = truncNow()
|
||||
if err != nil {
|
||||
if node.Score == 0 {
|
||||
// Node doesn't implement EIP-868.
|
||||
log.Debug("Skipping node", "id", n.ID())
|
||||
return
|
||||
}
|
||||
node.Score /= 2
|
||||
} else {
|
||||
node.N = nn
|
||||
node.Seq = nn.Seq()
|
||||
node.Score++
|
||||
if node.FirstResponse.IsZero() {
|
||||
node.FirstResponse = node.LastCheck
|
||||
}
|
||||
node.LastResponse = node.LastCheck
|
||||
}
|
||||
|
||||
// Store/update node in output set.
|
||||
if node.Score <= 0 {
|
||||
log.Info("Removing node", "id", n.ID())
|
||||
delete(c.output, n.ID())
|
||||
} else {
|
||||
log.Info("Updating node", "id", n.ID(), "seq", n.Seq(), "score", node.Score)
|
||||
c.output[n.ID()] = node
|
||||
}
|
||||
}
|
||||
|
||||
func truncNow() time.Time {
|
||||
return time.Now().UTC().Truncate(1 * time.Second)
|
||||
}
|
@@ -19,10 +19,10 @@ package main
|
||||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/crypto"
|
||||
"github.com/ethereum/go-ethereum/p2p/discover"
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
@@ -38,36 +38,59 @@ var (
|
||||
discv4PingCommand,
|
||||
discv4RequestRecordCommand,
|
||||
discv4ResolveCommand,
|
||||
discv4ResolveJSONCommand,
|
||||
discv4CrawlCommand,
|
||||
},
|
||||
}
|
||||
discv4PingCommand = cli.Command{
|
||||
Name: "ping",
|
||||
Usage: "Sends ping to a node",
|
||||
Action: discv4Ping,
|
||||
Name: "ping",
|
||||
Usage: "Sends ping to a node",
|
||||
Action: discv4Ping,
|
||||
ArgsUsage: "<node>",
|
||||
}
|
||||
discv4RequestRecordCommand = cli.Command{
|
||||
Name: "requestenr",
|
||||
Usage: "Requests a node record using EIP-868 enrRequest",
|
||||
Action: discv4RequestRecord,
|
||||
Name: "requestenr",
|
||||
Usage: "Requests a node record using EIP-868 enrRequest",
|
||||
Action: discv4RequestRecord,
|
||||
ArgsUsage: "<node>",
|
||||
}
|
||||
discv4ResolveCommand = cli.Command{
|
||||
Name: "resolve",
|
||||
Usage: "Finds a node in the DHT",
|
||||
Action: discv4Resolve,
|
||||
Flags: []cli.Flag{bootnodesFlag},
|
||||
Name: "resolve",
|
||||
Usage: "Finds a node in the DHT",
|
||||
Action: discv4Resolve,
|
||||
ArgsUsage: "<node>",
|
||||
Flags: []cli.Flag{bootnodesFlag},
|
||||
}
|
||||
discv4ResolveJSONCommand = cli.Command{
|
||||
Name: "resolve-json",
|
||||
Usage: "Re-resolves nodes in a nodes.json file",
|
||||
Action: discv4ResolveJSON,
|
||||
Flags: []cli.Flag{bootnodesFlag},
|
||||
ArgsUsage: "<nodes.json file>",
|
||||
}
|
||||
discv4CrawlCommand = cli.Command{
|
||||
Name: "crawl",
|
||||
Usage: "Updates a nodes.json file with random nodes found in the DHT",
|
||||
Action: discv4Crawl,
|
||||
Flags: []cli.Flag{bootnodesFlag, crawlTimeoutFlag},
|
||||
}
|
||||
)
|
||||
|
||||
var bootnodesFlag = cli.StringFlag{
|
||||
Name: "bootnodes",
|
||||
Usage: "Comma separated nodes used for bootstrapping",
|
||||
}
|
||||
var (
|
||||
bootnodesFlag = cli.StringFlag{
|
||||
Name: "bootnodes",
|
||||
Usage: "Comma separated nodes used for bootstrapping",
|
||||
}
|
||||
crawlTimeoutFlag = cli.DurationFlag{
|
||||
Name: "timeout",
|
||||
Usage: "Time limit for the crawl.",
|
||||
Value: 30 * time.Minute,
|
||||
}
|
||||
)
|
||||
|
||||
func discv4Ping(ctx *cli.Context) error {
|
||||
n, disc, err := getNodeArgAndStartV4(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
n := getNodeArg(ctx)
|
||||
disc := startV4(ctx)
|
||||
defer disc.Close()
|
||||
|
||||
start := time.Now()
|
||||
@@ -79,10 +102,8 @@ func discv4Ping(ctx *cli.Context) error {
|
||||
}
|
||||
|
||||
func discv4RequestRecord(ctx *cli.Context) error {
|
||||
n, disc, err := getNodeArgAndStartV4(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
n := getNodeArg(ctx)
|
||||
disc := startV4(ctx)
|
||||
defer disc.Close()
|
||||
|
||||
respN, err := disc.RequestENR(n)
|
||||
@@ -94,33 +115,61 @@ func discv4RequestRecord(ctx *cli.Context) error {
|
||||
}
|
||||
|
||||
func discv4Resolve(ctx *cli.Context) error {
|
||||
n, disc, err := getNodeArgAndStartV4(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
n := getNodeArg(ctx)
|
||||
disc := startV4(ctx)
|
||||
defer disc.Close()
|
||||
|
||||
fmt.Println(disc.Resolve(n).String())
|
||||
return nil
|
||||
}
|
||||
|
||||
func getNodeArgAndStartV4(ctx *cli.Context) (*enode.Node, *discover.UDPv4, error) {
|
||||
if ctx.NArg() != 1 {
|
||||
return nil, nil, fmt.Errorf("missing node as command-line argument")
|
||||
func discv4ResolveJSON(ctx *cli.Context) error {
|
||||
if ctx.NArg() < 1 {
|
||||
return fmt.Errorf("need nodes file as argument")
|
||||
}
|
||||
n, err := parseNode(ctx.Args()[0])
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
nodesFile := ctx.Args().Get(0)
|
||||
inputSet := make(nodeSet)
|
||||
if common.FileExist(nodesFile) {
|
||||
inputSet = loadNodesJSON(nodesFile)
|
||||
}
|
||||
var bootnodes []*enode.Node
|
||||
if commandHasFlag(ctx, bootnodesFlag) {
|
||||
bootnodes, err = parseBootnodes(ctx)
|
||||
|
||||
// Add extra nodes from command line arguments.
|
||||
var nodeargs []*enode.Node
|
||||
for i := 1; i < ctx.NArg(); i++ {
|
||||
n, err := parseNode(ctx.Args().Get(i))
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
exit(err)
|
||||
}
|
||||
nodeargs = append(nodeargs, n)
|
||||
}
|
||||
disc, err := startV4(bootnodes)
|
||||
return n, disc, err
|
||||
|
||||
// Run the crawler.
|
||||
disc := startV4(ctx)
|
||||
defer disc.Close()
|
||||
c := newCrawler(inputSet, disc, enode.IterNodes(nodeargs))
|
||||
c.revalidateInterval = 0
|
||||
output := c.run(0)
|
||||
writeNodesJSON(nodesFile, output)
|
||||
return nil
|
||||
}
|
||||
|
||||
func discv4Crawl(ctx *cli.Context) error {
|
||||
if ctx.NArg() < 1 {
|
||||
return fmt.Errorf("need nodes file as argument")
|
||||
}
|
||||
nodesFile := ctx.Args().First()
|
||||
var inputSet nodeSet
|
||||
if common.FileExist(nodesFile) {
|
||||
inputSet = loadNodesJSON(nodesFile)
|
||||
}
|
||||
|
||||
disc := startV4(ctx)
|
||||
defer disc.Close()
|
||||
c := newCrawler(inputSet, disc, disc.RandomNodes())
|
||||
c.revalidateInterval = 10 * time.Minute
|
||||
output := c.run(ctx.Duration(crawlTimeoutFlag.Name))
|
||||
writeNodesJSON(nodesFile, output)
|
||||
return nil
|
||||
}
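As a usage sketch (the enode URL and file name below are placeholders), a crawl that seeds the DHT walk from one bootstrap node and relies on the default 30-minute timeout flag looks like this:

	devp2p discv4 crawl --bootnodes "enode://<hex-pubkey>@203.0.113.5:30303" nodes.json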
|
||||
|
||||
func parseBootnodes(ctx *cli.Context) ([]*enode.Node, error) {
|
||||
@@ -139,28 +188,39 @@ func parseBootnodes(ctx *cli.Context) ([]*enode.Node, error) {
|
||||
return nodes, nil
|
||||
}
|
||||
|
||||
// commandHasFlag returns true if the current command supports the given flag.
|
||||
func commandHasFlag(ctx *cli.Context, flag cli.Flag) bool {
|
||||
flags := ctx.FlagNames()
|
||||
sort.Strings(flags)
|
||||
i := sort.SearchStrings(flags, flag.GetName())
|
||||
return i != len(flags) && flags[i] == flag.GetName()
|
||||
// startV4 starts an ephemeral discovery V4 node.
|
||||
func startV4(ctx *cli.Context) *discover.UDPv4 {
|
||||
socket, ln, cfg, err := listen()
|
||||
if err != nil {
|
||||
exit(err)
|
||||
}
|
||||
if commandHasFlag(ctx, bootnodesFlag) {
|
||||
bn, err := parseBootnodes(ctx)
|
||||
if err != nil {
|
||||
exit(err)
|
||||
}
|
||||
cfg.Bootnodes = bn
|
||||
}
|
||||
disc, err := discover.ListenV4(socket, ln, cfg)
|
||||
if err != nil {
|
||||
exit(err)
|
||||
}
|
||||
return disc
|
||||
}
|
||||
|
||||
// startV4 starts an ephemeral discovery V4 node.
|
||||
func startV4(bootnodes []*enode.Node) (*discover.UDPv4, error) {
|
||||
func listen() (*net.UDPConn, *enode.LocalNode, discover.Config, error) {
|
||||
var cfg discover.Config
|
||||
cfg.Bootnodes = bootnodes
|
||||
cfg.PrivateKey, _ = crypto.GenerateKey()
|
||||
db, _ := enode.OpenDB("")
|
||||
ln := enode.NewLocalNode(db, cfg.PrivateKey)
|
||||
|
||||
socket, err := net.ListenUDP("udp4", &net.UDPAddr{IP: net.IP{0, 0, 0, 0}})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
db.Close()
|
||||
return nil, nil, cfg, err
|
||||
}
|
||||
addr := socket.LocalAddr().(*net.UDPAddr)
|
||||
ln.SetFallbackIP(net.IP{127, 0, 0, 1})
|
||||
ln.SetFallbackUDP(addr.Port)
|
||||
return discover.ListenUDP(socket, ln, cfg)
|
||||
return socket, ln, cfg, nil
|
||||
}
|
||||
|
cmd/devp2p/dns_cloudflare.go (new file, 163 lines)
@@ -0,0 +1,163 @@
|
||||
// Copyright 2019 The go-ethereum Authors
|
||||
// This file is part of go-ethereum.
|
||||
//
|
||||
// go-ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// go-ethereum is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/cloudflare/cloudflare-go"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"github.com/ethereum/go-ethereum/p2p/dnsdisc"
|
||||
"gopkg.in/urfave/cli.v1"
|
||||
)
|
||||
|
||||
var (
|
||||
cloudflareTokenFlag = cli.StringFlag{
|
||||
Name: "token",
|
||||
Usage: "CloudFlare API token",
|
||||
EnvVar: "CLOUDFLARE_API_TOKEN",
|
||||
}
|
||||
cloudflareZoneIDFlag = cli.StringFlag{
|
||||
Name: "zoneid",
|
||||
Usage: "CloudFlare Zone ID (optional)",
|
||||
}
|
||||
)
|
||||
|
||||
type cloudflareClient struct {
|
||||
*cloudflare.API
|
||||
zoneID string
|
||||
}
|
||||
|
||||
// newCloudflareClient sets up a CloudFlare API client from command line flags.
|
||||
func newCloudflareClient(ctx *cli.Context) *cloudflareClient {
|
||||
token := ctx.String(cloudflareTokenFlag.Name)
|
||||
if token == "" {
|
||||
exit(fmt.Errorf("need cloudflare API token to proceed"))
|
||||
}
|
||||
api, err := cloudflare.NewWithAPIToken(token)
|
||||
if err != nil {
|
||||
exit(fmt.Errorf("can't create Cloudflare client: %v", err))
|
||||
}
|
||||
return &cloudflareClient{
|
||||
API: api,
|
||||
zoneID: ctx.String(cloudflareZoneIDFlag.Name),
|
||||
}
|
||||
}
|
||||
|
||||
// deploy uploads the given tree to CloudFlare DNS.
|
||||
func (c *cloudflareClient) deploy(name string, t *dnsdisc.Tree) error {
|
||||
if err := c.checkZone(name); err != nil {
|
||||
return err
|
||||
}
|
||||
records := t.ToTXT(name)
|
||||
return c.uploadRecords(name, records)
|
||||
}
|
||||
|
||||
// checkZone verifies permissions on the CloudFlare DNS Zone for name.
|
||||
func (c *cloudflareClient) checkZone(name string) error {
|
||||
if c.zoneID == "" {
|
||||
log.Info(fmt.Sprintf("Finding CloudFlare zone ID for %s", name))
|
||||
id, err := c.ZoneIDByName(name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
c.zoneID = id
|
||||
}
|
||||
log.Info(fmt.Sprintf("Checking Permissions on zone %s", c.zoneID))
|
||||
zone, err := c.ZoneDetails(c.zoneID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !strings.HasSuffix(name, "."+zone.Name) {
|
||||
return fmt.Errorf("CloudFlare zone name %q does not match name %q to be deployed", zone.Name, name)
|
||||
}
|
||||
needPerms := map[string]bool{"#zone:edit": false, "#zone:read": false}
|
||||
for _, perm := range zone.Permissions {
|
||||
if _, ok := needPerms[perm]; ok {
|
||||
needPerms[perm] = true
|
||||
}
|
||||
}
|
||||
for _, ok := range needPerms {
|
||||
if !ok {
|
||||
return fmt.Errorf("wrong permissions on zone %s: %v", c.zoneID, needPerms)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// uploadRecords updates the TXT records at a particular subdomain. All non-root records
|
||||
// will have a TTL of "infinity" and all existing records not in the new map will be
|
||||
// nuked!
|
||||
func (c *cloudflareClient) uploadRecords(name string, records map[string]string) error {
|
||||
// Convert all names to lowercase.
|
||||
lrecords := make(map[string]string, len(records))
|
||||
for name, r := range records {
|
||||
lrecords[strings.ToLower(name)] = r
|
||||
}
|
||||
records = lrecords
|
||||
|
||||
log.Info(fmt.Sprintf("Retrieving existing TXT records on %s", name))
|
||||
entries, err := c.DNSRecords(c.zoneID, cloudflare.DNSRecord{Type: "TXT"})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
existing := make(map[string]cloudflare.DNSRecord)
|
||||
for _, entry := range entries {
|
||||
if !strings.HasSuffix(entry.Name, name) {
|
||||
continue
|
||||
}
|
||||
existing[strings.ToLower(entry.Name)] = entry
|
||||
}
|
||||
|
||||
// Iterate over the new records and inject anything missing.
|
||||
for path, val := range records {
|
||||
old, exists := existing[path]
|
||||
if !exists {
|
||||
// Entry is unknown, push a new one to Cloudflare.
|
||||
log.Info(fmt.Sprintf("Creating %s = %q", path, val))
|
||||
ttl := rootTTL
|
||||
if path != name {
|
||||
ttl = treeNodeTTL // Max TTL permitted by Cloudflare
|
||||
}
|
||||
_, err = c.CreateDNSRecord(c.zoneID, cloudflare.DNSRecord{Type: "TXT", Name: path, Content: val, TTL: ttl})
|
||||
} else if old.Content != val {
|
||||
// Entry already exists, only change its content.
|
||||
log.Info(fmt.Sprintf("Updating %s from %q to %q", path, old.Content, val))
|
||||
old.Content = val
|
||||
err = c.UpdateDNSRecord(c.zoneID, old.ID, old)
|
||||
} else {
|
||||
log.Info(fmt.Sprintf("Skipping %s = %q", path, val))
|
||||
}
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to publish %s: %v", path, err)
|
||||
}
|
||||
}
|
||||
|
||||
// Iterate over the old records and delete anything stale.
|
||||
for path, entry := range existing {
|
||||
if _, ok := records[path]; ok {
|
||||
continue
|
||||
}
|
||||
// Stale entry, nuke it.
|
||||
log.Info(fmt.Sprintf("Deleting %s = %q", path, entry.Content))
|
||||
if err := c.DeleteDNSRecord(c.zoneID, entry.ID); err != nil {
|
||||
return fmt.Errorf("failed to delete %s: %v", path, err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
cmd/devp2p/dns_route53.go (new file, 303 lines)
@@ -0,0 +1,303 @@
|
||||
// Copyright 2019 The go-ethereum Authors
|
||||
// This file is part of go-ethereum.
|
||||
//
|
||||
// go-ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// go-ethereum is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/aws/credentials"
|
||||
"github.com/aws/aws-sdk-go/aws/session"
|
||||
"github.com/aws/aws-sdk-go/service/route53"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"github.com/ethereum/go-ethereum/p2p/dnsdisc"
|
||||
"gopkg.in/urfave/cli.v1"
|
||||
)
|
||||
|
||||
// Route53 limits change sets to this size. DNS changes need to be split
|
||||
// up into multiple batches to work around the limit.
|
||||
const route53ChangeLimit = 30000
|
||||
|
||||
var (
|
||||
route53AccessKeyFlag = cli.StringFlag{
|
||||
Name: "access-key-id",
|
||||
Usage: "AWS Access Key ID",
|
||||
EnvVar: "AWS_ACCESS_KEY_ID",
|
||||
}
|
||||
route53AccessSecretFlag = cli.StringFlag{
|
||||
Name: "access-key-secret",
|
||||
Usage: "AWS Access Key Secret",
|
||||
EnvVar: "AWS_SECRET_ACCESS_KEY",
|
||||
}
|
||||
route53ZoneIDFlag = cli.StringFlag{
|
||||
Name: "zone-id",
|
||||
Usage: "Route53 Zone ID",
|
||||
}
|
||||
)
|
||||
|
||||
type route53Client struct {
|
||||
api *route53.Route53
|
||||
zoneID string
|
||||
}
|
||||
|
||||
type recordSet struct {
|
||||
values []string
|
||||
ttl int64
|
||||
}
|
||||
|
||||
// newRoute53Client sets up a Route53 API client from command line flags.
|
||||
func newRoute53Client(ctx *cli.Context) *route53Client {
|
||||
akey := ctx.String(route53AccessKeyFlag.Name)
|
||||
asec := ctx.String(route53AccessSecretFlag.Name)
|
||||
if akey == "" || asec == "" {
|
||||
exit(fmt.Errorf("need Route53 Access Key ID and secret proceed"))
|
||||
}
|
||||
config := &aws.Config{Credentials: credentials.NewStaticCredentials(akey, asec, "")}
|
||||
session, err := session.NewSession(config)
|
||||
if err != nil {
|
||||
exit(fmt.Errorf("can't create AWS session: %v", err))
|
||||
}
|
||||
return &route53Client{
|
||||
api: route53.New(session),
|
||||
zoneID: ctx.String(route53ZoneIDFlag.Name),
|
||||
}
|
||||
}
|
||||
|
||||
// deploy uploads the given tree to Route53.
|
||||
func (c *route53Client) deploy(name string, t *dnsdisc.Tree) error {
|
||||
if err := c.checkZone(name); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Compute DNS changes.
|
||||
existing, err := c.collectRecords(name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
log.Info(fmt.Sprintf("Found %d TXT records", len(existing)))
|
||||
|
||||
records := t.ToTXT(name)
|
||||
changes := c.computeChanges(name, records, existing)
|
||||
if len(changes) == 0 {
|
||||
log.Info("No DNS changes needed")
|
||||
return nil
|
||||
}
|
||||
|
||||
// Submit change batches.
|
||||
batches := splitChanges(changes, route53ChangeLimit)
|
||||
for i, changes := range batches {
|
||||
log.Info(fmt.Sprintf("Submitting %d changes to Route53", len(changes)))
|
||||
batch := new(route53.ChangeBatch)
|
||||
batch.SetChanges(changes)
|
||||
batch.SetComment(fmt.Sprintf("enrtree update %d/%d of %s at seq %d", i+1, len(batches), name, t.Seq()))
|
||||
req := &route53.ChangeResourceRecordSetsInput{HostedZoneId: &c.zoneID, ChangeBatch: batch}
|
||||
resp, err := c.api.ChangeResourceRecordSets(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
log.Info(fmt.Sprintf("Waiting for change request %s", *resp.ChangeInfo.Id))
|
||||
wreq := &route53.GetChangeInput{Id: resp.ChangeInfo.Id}
|
||||
if err := c.api.WaitUntilResourceRecordSetsChanged(wreq); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// checkZone verifies zone information for the given domain.
|
||||
func (c *route53Client) checkZone(name string) (err error) {
|
||||
if c.zoneID == "" {
|
||||
c.zoneID, err = c.findZoneID(name)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// findZoneID searches for the Zone ID containing the given domain.
|
||||
func (c *route53Client) findZoneID(name string) (string, error) {
|
||||
log.Info(fmt.Sprintf("Finding Route53 Zone ID for %s", name))
|
||||
var req route53.ListHostedZonesByNameInput
|
||||
for {
|
||||
resp, err := c.api.ListHostedZonesByName(&req)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
for _, zone := range resp.HostedZones {
|
||||
if isSubdomain(name, *zone.Name) {
|
||||
return *zone.Id, nil
|
||||
}
|
||||
}
|
||||
if !*resp.IsTruncated {
|
||||
break
|
||||
}
|
||||
req.DNSName = resp.NextDNSName
|
||||
req.HostedZoneId = resp.NextHostedZoneId
|
||||
}
|
||||
return "", errors.New("can't find zone ID for " + name)
|
||||
}
|
||||
|
||||
// computeChanges creates DNS changes for the given record.
|
||||
func (c *route53Client) computeChanges(name string, records map[string]string, existing map[string]recordSet) []*route53.Change {
|
||||
// Convert all names to lowercase.
|
||||
lrecords := make(map[string]string, len(records))
|
||||
for name, r := range records {
|
||||
lrecords[strings.ToLower(name)] = r
|
||||
}
|
||||
records = lrecords
|
||||
|
||||
var changes []*route53.Change
|
||||
for path, val := range records {
|
||||
ttl := int64(rootTTL)
|
||||
if path != name {
|
||||
ttl = int64(treeNodeTTL)
|
||||
}
|
||||
|
||||
prevRecords, exists := existing[path]
|
||||
prevValue := strings.Join(prevRecords.values, "")
|
||||
if !exists {
|
||||
// Entry is unknown, push a new one
|
||||
log.Info(fmt.Sprintf("Creating %s = %q", path, val))
|
||||
changes = append(changes, newTXTChange("CREATE", path, ttl, splitTXT(val)))
|
||||
} else if prevValue != val {
|
||||
// Entry already exists, only change its content.
|
||||
log.Info(fmt.Sprintf("Updating %s from %q to %q", path, prevValue, val))
|
||||
changes = append(changes, newTXTChange("UPSERT", path, ttl, splitTXT(val)))
|
||||
} else {
|
||||
log.Info(fmt.Sprintf("Skipping %s = %q", path, val))
|
||||
}
|
||||
}
|
||||
|
||||
// Iterate over the old records and delete anything stale.
|
||||
for path, set := range existing {
|
||||
if _, ok := records[path]; ok {
|
||||
continue
|
||||
}
|
||||
// Stale entry, nuke it.
|
||||
log.Info(fmt.Sprintf("Deleting %s = %q", path, strings.Join(set.values, "")))
|
||||
changes = append(changes, newTXTChange("DELETE", path, set.ttl, set.values...))
|
||||
}
|
||||
|
||||
sortChanges(changes)
|
||||
return changes
|
||||
}
|
||||
|
||||
// sortChanges ensures DNS changes are in leaf-added -> root-changed -> leaf-deleted order.
|
||||
func sortChanges(changes []*route53.Change) {
|
||||
score := map[string]int{"CREATE": 1, "UPSERT": 2, "DELETE": 3}
|
||||
sort.Slice(changes, func(i, j int) bool {
|
||||
if *changes[i].Action == *changes[j].Action {
|
||||
return *changes[i].ResourceRecordSet.Name < *changes[j].ResourceRecordSet.Name
|
||||
}
|
||||
return score[*changes[i].Action] < score[*changes[j].Action]
|
||||
})
|
||||
}
|
||||
|
||||
// splitChanges splits up DNS changes such that each change batch
|
||||
// is smaller than the given RDATA limit.
|
||||
func splitChanges(changes []*route53.Change, limit int) [][]*route53.Change {
|
||||
var batches [][]*route53.Change
|
||||
var batchSize int
|
||||
for _, ch := range changes {
|
||||
// Start new batch if this change pushes the current one over the limit.
|
||||
size := changeSize(ch)
|
||||
if len(batches) == 0 || batchSize+size > limit {
|
||||
batches = append(batches, nil)
|
||||
batchSize = 0
|
||||
}
|
||||
batches[len(batches)-1] = append(batches[len(batches)-1], ch)
|
||||
batchSize += size
|
||||
}
|
||||
return batches
|
||||
}
|
||||
|
||||
// changeSize returns the RDATA size of a DNS change.
|
||||
func changeSize(ch *route53.Change) int {
|
||||
size := 0
|
||||
for _, rr := range ch.ResourceRecordSet.ResourceRecords {
|
||||
if rr.Value != nil {
|
||||
size += len(*rr.Value)
|
||||
}
|
||||
}
|
||||
return size
|
||||
}
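A small sketch of the batching rule (same package; record names and sizes are made up): a new batch is started whenever adding the next change's RDATA size would push the running total over the limit.

func exampleSplitChanges() {
	changes := []*route53.Change{
		newTXTChange("CREATE", "a.n", 1, splitTXT(strings.Repeat("x", 60))),
		newTXTChange("CREATE", "b.n", 1, splitTXT(strings.Repeat("x", 60))),
		newTXTChange("CREATE", "c.n", 1, splitTXT(strings.Repeat("x", 30))),
	}
	// The quoted values are roughly 62, 62 and 32 bytes, so with a 100-byte limit
	// the second change no longer fits into the first batch: [a.n] and [b.n, c.n].
	fmt.Println(len(splitChanges(changes, 100))) // 2
}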
|
||||
|
||||
// collectRecords collects all TXT records below the given name.
|
||||
func (c *route53Client) collectRecords(name string) (map[string]recordSet, error) {
|
||||
log.Info(fmt.Sprintf("Retrieving existing TXT records on %s (%s)", name, c.zoneID))
|
||||
var req route53.ListResourceRecordSetsInput
|
||||
req.SetHostedZoneId(c.zoneID)
|
||||
existing := make(map[string]recordSet)
|
||||
err := c.api.ListResourceRecordSetsPages(&req, func(resp *route53.ListResourceRecordSetsOutput, last bool) bool {
|
||||
for _, set := range resp.ResourceRecordSets {
|
||||
if !isSubdomain(*set.Name, name) || *set.Type != "TXT" {
|
||||
continue
|
||||
}
|
||||
s := recordSet{ttl: *set.TTL}
|
||||
for _, rec := range set.ResourceRecords {
|
||||
s.values = append(s.values, *rec.Value)
|
||||
}
|
||||
name := strings.TrimSuffix(*set.Name, ".")
|
||||
existing[name] = s
|
||||
}
|
||||
return true
|
||||
})
|
||||
return existing, err
|
||||
}
|
||||
|
||||
// newTXTChange creates a change to a TXT record.
|
||||
func newTXTChange(action, name string, ttl int64, values ...string) *route53.Change {
|
||||
var c route53.Change
|
||||
var r route53.ResourceRecordSet
|
||||
var rrs []*route53.ResourceRecord
|
||||
for _, val := range values {
|
||||
rr := new(route53.ResourceRecord)
|
||||
rr.SetValue(val)
|
||||
rrs = append(rrs, rr)
|
||||
}
|
||||
r.SetType("TXT")
|
||||
r.SetName(name)
|
||||
r.SetTTL(ttl)
|
||||
r.SetResourceRecords(rrs)
|
||||
c.SetAction(action)
|
||||
c.SetResourceRecordSet(&r)
|
||||
return &c
|
||||
}
|
||||
|
||||
// isSubdomain returns true if name is a subdomain of domain.
|
||||
func isSubdomain(name, domain string) bool {
|
||||
domain = strings.TrimSuffix(domain, ".")
|
||||
name = strings.TrimSuffix(name, ".")
|
||||
return strings.HasSuffix("."+name, "."+domain)
|
||||
}
|
||||
|
||||
// splitTXT splits value into a list of quoted strings of at most 253 characters each.
|
||||
func splitTXT(value string) string {
|
||||
var result strings.Builder
|
||||
for len(value) > 0 {
|
||||
rlen := len(value)
|
||||
if rlen > 253 {
|
||||
rlen = 253
|
||||
}
|
||||
result.WriteString(strconv.Quote(value[:rlen]))
|
||||
value = value[rlen:]
|
||||
}
|
||||
return result.String()
|
||||
}
|
cmd/devp2p/dns_route53_test.go (new file, 154 lines)
@@ -0,0 +1,154 @@
|
||||
// Copyright 2020 The go-ethereum Authors
|
||||
// This file is part of go-ethereum.
|
||||
//
|
||||
// go-ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// go-ethereum is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/aws/aws-sdk-go/service/route53"
|
||||
)
|
||||
|
||||
// This test checks that computeChanges/splitChanges create DNS changes in
|
||||
// leaf-added -> root-changed -> leaf-deleted order.
|
||||
func TestRoute53ChangeSort(t *testing.T) {
|
||||
testTree0 := map[string]recordSet{
|
||||
"2kfjogvxdqtxxugbh7gs7naaai.n": {ttl: 3333, values: []string{
|
||||
`"enr:-HW4QO1ml1DdXLeZLsUxewnthhUy8eROqkDyoMTyavfks9JlYQIlMFEUoM78PovJDPQrAkrb3LRJ-""vtrymDguKCOIAWAgmlkgnY0iXNlY3AyNTZrMaEDffaGfJzgGhUif1JqFruZlYmA31HzathLSWxfbq_QoQ4"`,
|
||||
}},
|
||||
"fdxn3sn67na5dka4j2gok7bvqi.n": {ttl: treeNodeTTL, values: []string{`"enrtree-branch:"`}},
|
||||
"n": {ttl: rootTTL, values: []string{`"enrtree-root:v1 e=2KFJOGVXDQTXXUGBH7GS7NAAAI l=FDXN3SN67NA5DKA4J2GOK7BVQI seq=0 sig=v_-J_q_9ICQg5ztExFvLQhDBGMb0lZPJLhe3ts9LAcgqhOhtT3YFJsl8BWNDSwGtamUdR-9xl88_w-X42SVpjwE"`}},
|
||||
}
|
||||
|
||||
testTree1 := map[string]string{
|
||||
"n": "enrtree-root:v1 e=JWXYDBPXYWG6FX3GMDIBFA6CJ4 l=C7HRFPF3BLGF3YR4DY5KX3SMBE seq=1 sig=o908WmNp7LibOfPsr4btQwatZJ5URBr2ZAuxvK4UWHlsB9sUOTJQaGAlLPVAhM__XJesCHxLISo94z5Z2a463gA",
|
||||
"C7HRFPF3BLGF3YR4DY5KX3SMBE.n": "enrtree://AM5FCQLWIZX2QFPNJAP7VUERCCRNGRHWZG3YYHIUV7BVDQ5FDPRT2@morenodes.example.org",
|
||||
"JWXYDBPXYWG6FX3GMDIBFA6CJ4.n": "enrtree-branch:2XS2367YHAXJFGLZHVAWLQD4ZY,H4FHT4B454P6UXFD7JCYQ5PWDY,MHTDO6TMUBRIA2XWG5LUDACK24",
|
||||
"2XS2367YHAXJFGLZHVAWLQD4ZY.n": "enr:-HW4QOFzoVLaFJnNhbgMoDXPnOvcdVuj7pDpqRvh6BRDO68aVi5ZcjB3vzQRZH2IcLBGHzo8uUN3snqmgTiE56CH3AMBgmlkgnY0iXNlY3AyNTZrMaECC2_24YYkYHEgdzxlSNKQEnHhuNAbNlMlWJxrJxbAFvA",
|
||||
"H4FHT4B454P6UXFD7JCYQ5PWDY.n": "enr:-HW4QAggRauloj2SDLtIHN1XBkvhFZ1vtf1raYQp9TBW2RD5EEawDzbtSmlXUfnaHcvwOizhVYLtr7e6vw7NAf6mTuoCgmlkgnY0iXNlY3AyNTZrMaECjrXI8TLNXU0f8cthpAMxEshUyQlK-AM0PW2wfrnacNI",
|
||||
"MHTDO6TMUBRIA2XWG5LUDACK24.n": "enr:-HW4QLAYqmrwllBEnzWWs7I5Ev2IAs7x_dZlbYdRdMUx5EyKHDXp7AV5CkuPGUPdvbv1_Ms1CPfhcGCvSElSosZmyoqAgmlkgnY0iXNlY3AyNTZrMaECriawHKWdDRk2xeZkrOXBQ0dfMFLHY4eENZwdufn1S1o",
|
||||
}
|
||||
|
||||
wantChanges := []*route53.Change{
|
||||
{
|
||||
Action: sp("CREATE"),
|
||||
ResourceRecordSet: &route53.ResourceRecordSet{
|
||||
Name: sp("2xs2367yhaxjfglzhvawlqd4zy.n"),
|
||||
ResourceRecords: []*route53.ResourceRecord{{
|
||||
Value: sp(`"enr:-HW4QOFzoVLaFJnNhbgMoDXPnOvcdVuj7pDpqRvh6BRDO68aVi5ZcjB3vzQRZH2IcLBGHzo8uUN3snqmgTiE56CH3AMBgmlkgnY0iXNlY3AyNTZrMaECC2_24YYkYHEgdzxlSNKQEnHhuNAbNlMlWJxrJxbAFvA"`),
|
||||
}},
|
||||
TTL: ip(treeNodeTTL),
|
||||
Type: sp("TXT"),
|
||||
},
|
||||
},
|
||||
{
|
||||
Action: sp("CREATE"),
|
||||
ResourceRecordSet: &route53.ResourceRecordSet{
|
||||
Name: sp("c7hrfpf3blgf3yr4dy5kx3smbe.n"),
|
||||
ResourceRecords: []*route53.ResourceRecord{{
|
||||
Value: sp(`"enrtree://AM5FCQLWIZX2QFPNJAP7VUERCCRNGRHWZG3YYHIUV7BVDQ5FDPRT2@morenodes.example.org"`),
|
||||
}},
|
||||
TTL: ip(treeNodeTTL),
|
||||
Type: sp("TXT"),
|
||||
},
|
||||
},
|
||||
{
|
||||
Action: sp("CREATE"),
|
||||
ResourceRecordSet: &route53.ResourceRecordSet{
|
||||
Name: sp("h4fht4b454p6uxfd7jcyq5pwdy.n"),
|
||||
ResourceRecords: []*route53.ResourceRecord{{
|
||||
Value: sp(`"enr:-HW4QAggRauloj2SDLtIHN1XBkvhFZ1vtf1raYQp9TBW2RD5EEawDzbtSmlXUfnaHcvwOizhVYLtr7e6vw7NAf6mTuoCgmlkgnY0iXNlY3AyNTZrMaECjrXI8TLNXU0f8cthpAMxEshUyQlK-AM0PW2wfrnacNI"`),
|
||||
}},
|
||||
TTL: ip(treeNodeTTL),
|
||||
Type: sp("TXT"),
|
||||
},
|
||||
},
|
||||
{
|
||||
Action: sp("CREATE"),
|
||||
ResourceRecordSet: &route53.ResourceRecordSet{
|
||||
Name: sp("jwxydbpxywg6fx3gmdibfa6cj4.n"),
|
||||
ResourceRecords: []*route53.ResourceRecord{{
|
||||
Value: sp(`"enrtree-branch:2XS2367YHAXJFGLZHVAWLQD4ZY,H4FHT4B454P6UXFD7JCYQ5PWDY,MHTDO6TMUBRIA2XWG5LUDACK24"`),
|
||||
}},
|
||||
TTL: ip(treeNodeTTL),
|
||||
Type: sp("TXT"),
|
||||
},
|
||||
},
|
||||
{
|
||||
Action: sp("CREATE"),
|
||||
ResourceRecordSet: &route53.ResourceRecordSet{
|
||||
Name: sp("mhtdo6tmubria2xwg5ludack24.n"),
|
||||
ResourceRecords: []*route53.ResourceRecord{{
|
||||
Value: sp(`"enr:-HW4QLAYqmrwllBEnzWWs7I5Ev2IAs7x_dZlbYdRdMUx5EyKHDXp7AV5CkuPGUPdvbv1_Ms1CPfhcGCvSElSosZmyoqAgmlkgnY0iXNlY3AyNTZrMaECriawHKWdDRk2xeZkrOXBQ0dfMFLHY4eENZwdufn1S1o"`),
|
||||
}},
|
||||
TTL: ip(treeNodeTTL),
|
||||
Type: sp("TXT"),
|
||||
},
|
||||
},
|
||||
{
|
||||
Action: sp("UPSERT"),
|
||||
ResourceRecordSet: &route53.ResourceRecordSet{
|
||||
Name: sp("n"),
|
||||
ResourceRecords: []*route53.ResourceRecord{{
|
||||
Value: sp(`"enrtree-root:v1 e=JWXYDBPXYWG6FX3GMDIBFA6CJ4 l=C7HRFPF3BLGF3YR4DY5KX3SMBE seq=1 sig=o908WmNp7LibOfPsr4btQwatZJ5URBr2ZAuxvK4UWHlsB9sUOTJQaGAlLPVAhM__XJesCHxLISo94z5Z2a463gA"`),
|
||||
}},
|
||||
TTL: ip(rootTTL),
|
||||
Type: sp("TXT"),
|
||||
},
|
||||
},
|
||||
{
|
||||
Action: sp("DELETE"),
|
||||
ResourceRecordSet: &route53.ResourceRecordSet{
|
||||
Name: sp("2kfjogvxdqtxxugbh7gs7naaai.n"),
|
||||
ResourceRecords: []*route53.ResourceRecord{
|
||||
{Value: sp(`"enr:-HW4QO1ml1DdXLeZLsUxewnthhUy8eROqkDyoMTyavfks9JlYQIlMFEUoM78PovJDPQrAkrb3LRJ-""vtrymDguKCOIAWAgmlkgnY0iXNlY3AyNTZrMaEDffaGfJzgGhUif1JqFruZlYmA31HzathLSWxfbq_QoQ4"`)},
|
||||
},
|
||||
TTL: ip(3333),
|
||||
Type: sp("TXT"),
|
||||
},
|
||||
},
|
||||
{
|
||||
Action: sp("DELETE"),
|
||||
ResourceRecordSet: &route53.ResourceRecordSet{
|
||||
Name: sp("fdxn3sn67na5dka4j2gok7bvqi.n"),
|
||||
ResourceRecords: []*route53.ResourceRecord{{
|
||||
Value: sp(`"enrtree-branch:"`),
|
||||
}},
|
||||
TTL: ip(treeNodeTTL),
|
||||
Type: sp("TXT"),
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
var client route53Client
|
||||
changes := client.computeChanges("n", testTree1, testTree0)
|
||||
if !reflect.DeepEqual(changes, wantChanges) {
|
||||
t.Fatalf("wrong changes (got %d, want %d)", len(changes), len(wantChanges))
|
||||
}
|
||||
|
||||
wantSplit := [][]*route53.Change{
|
||||
wantChanges[:4],
|
||||
wantChanges[4:8],
|
||||
}
|
||||
split := splitChanges(changes, 600)
|
||||
if !reflect.DeepEqual(split, wantSplit) {
|
||||
t.Fatalf("wrong split batches: got %d, want %d", len(split), len(wantSplit))
|
||||
}
|
||||
}
|
||||
|
||||
func sp(s string) *string { return &s }
|
||||
func ip(i int64) *int64 { return &i }
|
cmd/devp2p/dnscmd.go (new file, 386 lines)
@@ -0,0 +1,386 @@
|
||||
// Copyright 2018 The go-ethereum Authors
|
||||
// This file is part of go-ethereum.
|
||||
//
|
||||
// go-ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// go-ethereum is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"crypto/ecdsa"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/accounts/keystore"
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/console"
|
||||
"github.com/ethereum/go-ethereum/p2p/dnsdisc"
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
cli "gopkg.in/urfave/cli.v1"
|
||||
)
|
||||
|
||||
var (
|
||||
dnsCommand = cli.Command{
|
||||
Name: "dns",
|
||||
Usage: "DNS Discovery Commands",
|
||||
Subcommands: []cli.Command{
|
||||
dnsSyncCommand,
|
||||
dnsSignCommand,
|
||||
dnsTXTCommand,
|
||||
dnsCloudflareCommand,
|
||||
dnsRoute53Command,
|
||||
},
|
||||
}
|
||||
dnsSyncCommand = cli.Command{
|
||||
Name: "sync",
|
||||
Usage: "Download a DNS discovery tree",
|
||||
ArgsUsage: "<url> [ <directory> ]",
|
||||
Action: dnsSync,
|
||||
Flags: []cli.Flag{dnsTimeoutFlag},
|
||||
}
|
||||
dnsSignCommand = cli.Command{
|
||||
Name: "sign",
|
||||
Usage: "Sign a DNS discovery tree",
|
||||
ArgsUsage: "<tree-directory> <key-file>",
|
||||
Action: dnsSign,
|
||||
Flags: []cli.Flag{dnsDomainFlag, dnsSeqFlag},
|
||||
}
|
||||
dnsTXTCommand = cli.Command{
|
||||
Name: "to-txt",
|
||||
Usage: "Create a DNS TXT records for a discovery tree",
|
||||
ArgsUsage: "<tree-directory> <output-file>",
|
||||
Action: dnsToTXT,
|
||||
}
|
||||
dnsCloudflareCommand = cli.Command{
|
||||
Name: "to-cloudflare",
|
||||
Usage: "Deploy DNS TXT records to CloudFlare",
|
||||
ArgsUsage: "<tree-directory>",
|
||||
Action: dnsToCloudflare,
|
||||
Flags: []cli.Flag{cloudflareTokenFlag, cloudflareZoneIDFlag},
|
||||
}
|
||||
dnsRoute53Command = cli.Command{
|
||||
Name: "to-route53",
|
||||
Usage: "Deploy DNS TXT records to Amazon Route53",
|
||||
ArgsUsage: "<tree-directory>",
|
||||
Action: dnsToRoute53,
|
||||
Flags: []cli.Flag{route53AccessKeyFlag, route53AccessSecretFlag, route53ZoneIDFlag},
|
||||
}
|
||||
)
|
||||
|
||||
var (
|
||||
dnsTimeoutFlag = cli.DurationFlag{
|
||||
Name: "timeout",
|
||||
Usage: "Timeout for DNS lookups",
|
||||
}
|
||||
dnsDomainFlag = cli.StringFlag{
|
||||
Name: "domain",
|
||||
Usage: "Domain name of the tree",
|
||||
}
|
||||
dnsSeqFlag = cli.UintFlag{
|
||||
Name: "seq",
|
||||
Usage: "New sequence number of the tree",
|
||||
}
|
||||
)
|
||||
|
||||
const (
|
||||
rootTTL = 1
|
||||
treeNodeTTL = 2147483647
|
||||
)
|
||||
|
||||
// dnsSync performs dnsSyncCommand.
|
||||
func dnsSync(ctx *cli.Context) error {
|
||||
var (
|
||||
c = dnsClient(ctx)
|
||||
url = ctx.Args().Get(0)
|
||||
outdir = ctx.Args().Get(1)
|
||||
)
|
||||
domain, _, err := dnsdisc.ParseURL(url)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if outdir == "" {
|
||||
outdir = domain
|
||||
}
|
||||
|
||||
t, err := c.SyncTree(url)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
def := treeToDefinition(url, t)
|
||||
def.Meta.LastModified = time.Now()
|
||||
writeTreeMetadata(outdir, def)
|
||||
writeTreeNodes(outdir, def)
|
||||
return nil
|
||||
}
|
||||
|
||||
func dnsSign(ctx *cli.Context) error {
|
||||
if ctx.NArg() < 2 {
|
||||
return fmt.Errorf("need tree definition directory and key file as arguments")
|
||||
}
|
||||
var (
|
||||
defdir = ctx.Args().Get(0)
|
||||
keyfile = ctx.Args().Get(1)
|
||||
def = loadTreeDefinition(defdir)
|
||||
domain = directoryName(defdir)
|
||||
)
|
||||
if def.Meta.URL != "" {
|
||||
d, _, err := dnsdisc.ParseURL(def.Meta.URL)
|
||||
if err != nil {
|
||||
return fmt.Errorf("invalid 'url' field: %v", err)
|
||||
}
|
||||
domain = d
|
||||
}
|
||||
if ctx.IsSet(dnsDomainFlag.Name) {
|
||||
domain = ctx.String(dnsDomainFlag.Name)
|
||||
}
|
||||
if ctx.IsSet(dnsSeqFlag.Name) {
|
||||
def.Meta.Seq = ctx.Uint(dnsSeqFlag.Name)
|
||||
} else {
|
||||
def.Meta.Seq++ // Auto-bump sequence number if not supplied via flag.
|
||||
}
|
||||
t, err := dnsdisc.MakeTree(def.Meta.Seq, def.Nodes, def.Meta.Links)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
key := loadSigningKey(keyfile)
|
||||
url, err := t.Sign(key, domain)
|
||||
if err != nil {
|
||||
return fmt.Errorf("can't sign: %v", err)
|
||||
}
|
||||
|
||||
def = treeToDefinition(url, t)
|
||||
def.Meta.LastModified = time.Now()
|
||||
writeTreeMetadata(defdir, def)
|
||||
return nil
|
||||
}
|
||||
|
||||
func directoryName(dir string) string {
|
||||
abs, err := filepath.Abs(dir)
|
||||
if err != nil {
|
||||
exit(err)
|
||||
}
|
||||
return filepath.Base(abs)
|
||||
}
|
||||
|
||||
// dnsToTXT performs dnsTXTCommand.
|
||||
func dnsToTXT(ctx *cli.Context) error {
|
||||
if ctx.NArg() < 1 {
|
||||
return fmt.Errorf("need tree definition directory as argument")
|
||||
}
|
||||
output := ctx.Args().Get(1)
|
||||
if output == "" {
|
||||
output = "-" // default to stdout
|
||||
}
|
||||
domain, t, err := loadTreeDefinitionForExport(ctx.Args().Get(0))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
writeTXTJSON(output, t.ToTXT(domain))
|
||||
return nil
|
||||
}
|
||||
|
||||
// dnsToCloudflare performs dnsCloudflareCommand.
|
||||
func dnsToCloudflare(ctx *cli.Context) error {
|
||||
if ctx.NArg() < 1 {
|
||||
return fmt.Errorf("need tree definition directory as argument")
|
||||
}
|
||||
domain, t, err := loadTreeDefinitionForExport(ctx.Args().Get(0))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
client := newCloudflareClient(ctx)
|
||||
return client.deploy(domain, t)
|
||||
}
|
||||
|
||||
// dnsToRoute53 performs dnsRoute53Command.
|
||||
func dnsToRoute53(ctx *cli.Context) error {
|
||||
if ctx.NArg() < 1 {
|
||||
return fmt.Errorf("need tree definition directory as argument")
|
||||
}
|
||||
domain, t, err := loadTreeDefinitionForExport(ctx.Args().Get(0))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
client := newRoute53Client(ctx)
|
||||
return client.deploy(domain, t)
|
||||
}
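Putting the commands above together, a typical deployment flow looks roughly like this (a sketch; the domain, key file, directories and zone ID are placeholders, and the Route53/Cloudflare credentials can also come from the environment variables declared for their flags):

	devp2p discv4 crawl ./mainnet/nodes.json
	devp2p nodeset filter ./mainnet/nodes.json -eth-network mainnet > ./tree/nodes.json
	devp2p dns sign --domain nodes.example.org ./tree ./keyfile.json
	devp2p dns to-route53 --zone-id <zone-id> ./tree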
|
||||
|
||||
// loadSigningKey loads a private key in Ethereum keystore format.
|
||||
func loadSigningKey(keyfile string) *ecdsa.PrivateKey {
|
||||
keyjson, err := ioutil.ReadFile(keyfile)
|
||||
if err != nil {
|
||||
exit(fmt.Errorf("failed to read the keyfile at '%s': %v", keyfile, err))
|
||||
}
|
||||
password, _ := console.Stdin.PromptPassword("Please enter the password for '" + keyfile + "': ")
|
||||
key, err := keystore.DecryptKey(keyjson, password)
|
||||
if err != nil {
|
||||
exit(fmt.Errorf("error decrypting key: %v", err))
|
||||
}
|
||||
return key.PrivateKey
|
||||
}
|
||||
|
||||
// dnsClient configures the DNS discovery client from command line flags.
|
||||
func dnsClient(ctx *cli.Context) *dnsdisc.Client {
|
||||
var cfg dnsdisc.Config
|
||||
if commandHasFlag(ctx, dnsTimeoutFlag) {
|
||||
cfg.Timeout = ctx.Duration(dnsTimeoutFlag.Name)
|
||||
}
|
||||
return dnsdisc.NewClient(cfg)
|
||||
}
|
||||
|
||||
// There are two file formats for DNS node trees on disk:
|
||||
//
|
||||
// The 'TXT' format is a single JSON file containing DNS TXT records
|
||||
// as a JSON object where the keys are names and the values are the TXT record
// content.
|
||||
//
|
||||
// The 'definition' format is a directory containing two files:
|
||||
//
|
||||
// enrtree-info.json -- contains sequence number & links to other trees
|
||||
// nodes.json -- contains the nodes as a JSON array.
|
||||
//
|
||||
// This format exists because it's convenient to edit. nodes.json can be generated
|
||||
// in multiple ways: it may be written by a DHT crawler or compiled by a human.
|
||||
|
||||
type dnsDefinition struct {
|
||||
Meta dnsMetaJSON
|
||||
Nodes []*enode.Node
|
||||
}
|
||||
|
||||
type dnsMetaJSON struct {
|
||||
URL string `json:"url,omitempty"`
|
||||
Seq uint `json:"seq"`
|
||||
Sig string `json:"signature,omitempty"`
|
||||
Links []string `json:"links"`
|
||||
LastModified time.Time `json:"lastModified"`
|
||||
}
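For concreteness, a sketch (same package, illustrative values only) of the enrtree-info.json content that writeTreeMetadata produces from this struct:

func exampleMeta() {
	meta := dnsMetaJSON{
		URL:   "enrtree://<base32-pubkey>@nodes.example.org", // placeholder URL
		Seq:   3,
		Links: []string{},
	}
	out, _ := json.MarshalIndent(meta, "", jsonIndent)
	fmt.Println(string(out))
	// {
	//   "url": "enrtree://<base32-pubkey>@nodes.example.org",
	//   "seq": 3,
	//   "links": [],
	//   "lastModified": "0001-01-01T00:00:00Z"
	// }
}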
|
||||
|
||||
func treeToDefinition(url string, t *dnsdisc.Tree) *dnsDefinition {
|
||||
meta := dnsMetaJSON{
|
||||
URL: url,
|
||||
Seq: t.Seq(),
|
||||
Sig: t.Signature(),
|
||||
Links: t.Links(),
|
||||
}
|
||||
if meta.Links == nil {
|
||||
meta.Links = []string{}
|
||||
}
|
||||
return &dnsDefinition{Meta: meta, Nodes: t.Nodes()}
|
||||
}
|
||||
|
||||
// loadTreeDefinition loads a directory in 'definition' format.
|
||||
func loadTreeDefinition(directory string) *dnsDefinition {
|
||||
metaFile, nodesFile := treeDefinitionFiles(directory)
|
||||
var def dnsDefinition
|
||||
err := common.LoadJSON(metaFile, &def.Meta)
|
||||
if err != nil && !os.IsNotExist(err) {
|
||||
exit(err)
|
||||
}
|
||||
if def.Meta.Links == nil {
|
||||
def.Meta.Links = []string{}
|
||||
}
|
||||
// Check link syntax.
|
||||
for _, link := range def.Meta.Links {
|
||||
if _, _, err := dnsdisc.ParseURL(link); err != nil {
|
||||
exit(fmt.Errorf("invalid link %q: %v", link, err))
|
||||
}
|
||||
}
|
||||
// Check/convert nodes.
|
||||
nodes := loadNodesJSON(nodesFile)
|
||||
if err := nodes.verify(); err != nil {
|
||||
exit(err)
|
||||
}
|
||||
def.Nodes = nodes.nodes()
|
||||
return &def
|
||||
}
|
||||
|
||||
// loadTreeDefinitionForExport loads a DNS tree and ensures it is signed.
|
||||
func loadTreeDefinitionForExport(dir string) (domain string, t *dnsdisc.Tree, err error) {
|
||||
metaFile, _ := treeDefinitionFiles(dir)
|
||||
def := loadTreeDefinition(dir)
|
||||
if def.Meta.URL == "" {
|
||||
return "", nil, fmt.Errorf("missing 'url' field in %v", metaFile)
|
||||
}
|
||||
domain, pubkey, err := dnsdisc.ParseURL(def.Meta.URL)
|
||||
if err != nil {
|
||||
return "", nil, fmt.Errorf("invalid 'url' field in %v: %v", metaFile, err)
|
||||
}
|
||||
if t, err = dnsdisc.MakeTree(def.Meta.Seq, def.Nodes, def.Meta.Links); err != nil {
|
||||
return "", nil, err
|
||||
}
|
||||
if err := ensureValidTreeSignature(t, pubkey, def.Meta.Sig); err != nil {
|
||||
return "", nil, err
|
||||
}
|
||||
return domain, t, nil
|
||||
}
|
||||
|
||||
// ensureValidTreeSignature checks that sig is valid for tree and assigns it as the
|
||||
// tree's signature if valid.
|
||||
func ensureValidTreeSignature(t *dnsdisc.Tree, pubkey *ecdsa.PublicKey, sig string) error {
|
||||
if sig == "" {
|
||||
return fmt.Errorf("missing signature, run 'devp2p dns sign' first")
|
||||
}
|
||||
if err := t.SetSignature(pubkey, sig); err != nil {
|
||||
return fmt.Errorf("invalid signature on tree, run 'devp2p dns sign' to update it")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// writeTreeMetadata writes a DNS node tree metadata file to the given directory.
|
||||
func writeTreeMetadata(directory string, def *dnsDefinition) {
|
||||
metaJSON, err := json.MarshalIndent(&def.Meta, "", jsonIndent)
|
||||
if err != nil {
|
||||
exit(err)
|
||||
}
|
||||
if err := os.Mkdir(directory, 0744); err != nil && !os.IsExist(err) {
|
||||
exit(err)
|
||||
}
|
||||
metaFile, _ := treeDefinitionFiles(directory)
|
||||
if err := ioutil.WriteFile(metaFile, metaJSON, 0644); err != nil {
|
||||
exit(err)
|
||||
}
|
||||
}
|
||||
|
||||
func writeTreeNodes(directory string, def *dnsDefinition) {
|
||||
ns := make(nodeSet, len(def.Nodes))
|
||||
ns.add(def.Nodes...)
|
||||
_, nodesFile := treeDefinitionFiles(directory)
|
||||
writeNodesJSON(nodesFile, ns)
|
||||
}
|
||||
|
||||
func treeDefinitionFiles(directory string) (string, string) {
|
||||
meta := filepath.Join(directory, "enrtree-info.json")
|
||||
nodes := filepath.Join(directory, "nodes.json")
|
||||
return meta, nodes
|
||||
}
|
||||
|
||||
// writeTXTJSON writes TXT records in JSON format.
|
||||
func writeTXTJSON(file string, txt map[string]string) {
|
||||
txtJSON, err := json.MarshalIndent(txt, "", jsonIndent)
|
||||
if err != nil {
|
||||
exit(err)
|
||||
}
|
||||
if file == "-" {
|
||||
os.Stdout.Write(txtJSON)
|
||||
fmt.Println()
|
||||
return
|
||||
}
|
||||
if err := ioutil.WriteFile(file, txtJSON, 0644); err != nil {
|
||||
exit(err)
|
||||
}
|
||||
}
|
@@ -20,8 +20,10 @@ import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
|
||||
"github.com/ethereum/go-ethereum/internal/debug"
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
"github.com/ethereum/go-ethereum/params"
|
||||
"gopkg.in/urfave/cli.v1"
|
||||
)
|
||||
@@ -43,7 +45,7 @@ func init() {
|
||||
// Set up the CLI app.
|
||||
app.Flags = append(app.Flags, debug.Flags...)
|
||||
app.Before = func(ctx *cli.Context) error {
|
||||
return debug.Setup(ctx, "")
|
||||
return debug.Setup(ctx)
|
||||
}
|
||||
app.After = func(ctx *cli.Context) error {
|
||||
debug.Exit()
|
||||
@@ -57,12 +59,39 @@ func init() {
|
||||
app.Commands = []cli.Command{
|
||||
enrdumpCommand,
|
||||
discv4Command,
|
||||
dnsCommand,
|
||||
nodesetCommand,
|
||||
}
|
||||
}
|
||||
|
||||
func main() {
|
||||
if err := app.Run(os.Args); err != nil {
|
||||
fmt.Fprintln(os.Stderr, err)
|
||||
os.Exit(1)
|
||||
}
|
||||
exit(app.Run(os.Args))
|
||||
}
|
||||
|
||||
// commandHasFlag returns true if the current command supports the given flag.
|
||||
func commandHasFlag(ctx *cli.Context, flag cli.Flag) bool {
|
||||
flags := ctx.FlagNames()
|
||||
sort.Strings(flags)
|
||||
i := sort.SearchStrings(flags, flag.GetName())
|
||||
return i != len(flags) && flags[i] == flag.GetName()
|
||||
}
|
||||
|
||||
// getNodeArg handles the common case of a single node descriptor argument.
|
||||
func getNodeArg(ctx *cli.Context) *enode.Node {
|
||||
if ctx.NArg() != 1 {
|
||||
exit("missing node as command-line argument")
|
||||
}
|
||||
n, err := parseNode(ctx.Args()[0])
|
||||
if err != nil {
|
||||
exit(err)
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func exit(err interface{}) {
|
||||
if err == nil {
|
||||
os.Exit(0)
|
||||
}
|
||||
fmt.Fprintln(os.Stderr, err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
cmd/devp2p/nodeset.go (new file, 102 lines)
@@ -0,0 +1,102 @@
|
||||
// Copyright 2019 The go-ethereum Authors
|
||||
// This file is part of go-ethereum.
|
||||
//
|
||||
// go-ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// go-ethereum is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"sort"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
)
|
||||
|
||||
const jsonIndent = " "
|
||||
|
||||
// nodeSet is the nodes.json file format. It holds a set of node records
|
||||
// as a JSON object.
|
||||
type nodeSet map[enode.ID]nodeJSON
|
||||
|
||||
type nodeJSON struct {
|
||||
Seq uint64 `json:"seq"`
|
||||
N *enode.Node `json:"record"`
|
||||
|
||||
// The score tracks how many liveness checks were performed. It is incremented by one
|
||||
// every time the node passes a check, and halved every time it doesn't.
|
||||
Score int `json:"score,omitempty"`
|
||||
// These two track the time of last successful contact.
|
||||
FirstResponse time.Time `json:"firstResponse,omitempty"`
|
||||
LastResponse time.Time `json:"lastResponse,omitempty"`
|
||||
// This one tracks the time of our last attempt to contact the node.
|
||||
LastCheck time.Time `json:"lastCheck,omitempty"`
|
||||
}
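For reference, a sketch of what a single nodes.json entry looks like on disk; the node ID, record and timestamps below are placeholders:

// {
//   "<node-id-hex>": {
//     "seq": 4,
//     "record": "enr:<base64-encoded-record>",
//     "score": 12,
//     "firstResponse": "2019-12-18T10:41:32Z",
//     "lastResponse": "2019-12-19T09:15:02Z",
//     "lastCheck": "2019-12-19T09:15:02Z"
//   }
// }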
|
||||
|
||||
func loadNodesJSON(file string) nodeSet {
|
||||
var nodes nodeSet
|
||||
if err := common.LoadJSON(file, &nodes); err != nil {
|
||||
exit(err)
|
||||
}
|
||||
return nodes
|
||||
}
|
||||
|
||||
func writeNodesJSON(file string, nodes nodeSet) {
|
||||
nodesJSON, err := json.MarshalIndent(nodes, "", jsonIndent)
|
||||
if err != nil {
|
||||
exit(err)
|
||||
}
|
||||
if file == "-" {
|
||||
os.Stdout.Write(nodesJSON)
|
||||
return
|
||||
}
|
||||
if err := ioutil.WriteFile(file, nodesJSON, 0644); err != nil {
|
||||
exit(err)
|
||||
}
|
||||
}
|
||||
|
||||
func (ns nodeSet) nodes() []*enode.Node {
|
||||
result := make([]*enode.Node, 0, len(ns))
|
||||
for _, n := range ns {
|
||||
result = append(result, n.N)
|
||||
}
|
||||
// Sort by ID.
|
||||
sort.Slice(result, func(i, j int) bool {
|
||||
return bytes.Compare(result[i].ID().Bytes(), result[j].ID().Bytes()) < 0
|
||||
})
|
||||
return result
|
||||
}
|
||||
|
||||
func (ns nodeSet) add(nodes ...*enode.Node) {
|
||||
for _, n := range nodes {
|
||||
ns[n.ID()] = nodeJSON{Seq: n.Seq(), N: n}
|
||||
}
|
||||
}
|
||||
|
||||
func (ns nodeSet) verify() error {
|
||||
for id, n := range ns {
|
||||
if n.N.ID() != id {
|
||||
return fmt.Errorf("invalid node %v: ID does not match ID %v in record", id, n.N.ID())
|
||||
}
|
||||
if n.N.Seq() != n.Seq {
|
||||
return fmt.Errorf("invalid node %v: 'seq' does not match seq %d from record", id, n.N.Seq())
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
cmd/devp2p/nodesetcmd.go (new file, 193 lines)
@@ -0,0 +1,193 @@
|
||||
// Copyright 2019 The go-ethereum Authors
|
||||
// This file is part of go-ethereum.
|
||||
//
|
||||
// go-ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// go-ethereum is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/core/forkid"
|
||||
"github.com/ethereum/go-ethereum/p2p/enr"
|
||||
"github.com/ethereum/go-ethereum/params"
|
||||
"github.com/ethereum/go-ethereum/rlp"
|
||||
"gopkg.in/urfave/cli.v1"
|
||||
)
|
||||
|
||||
var (
|
||||
nodesetCommand = cli.Command{
|
||||
Name: "nodeset",
|
||||
Usage: "Node set tools",
|
||||
Subcommands: []cli.Command{
|
||||
nodesetInfoCommand,
|
||||
nodesetFilterCommand,
|
||||
},
|
||||
}
|
||||
nodesetInfoCommand = cli.Command{
|
||||
Name: "info",
|
||||
Usage: "Shows statistics about a node set",
|
||||
Action: nodesetInfo,
|
||||
ArgsUsage: "<nodes.json>",
|
||||
}
|
||||
nodesetFilterCommand = cli.Command{
|
||||
Name: "filter",
|
||||
Usage: "Filters a node set",
|
||||
Action: nodesetFilter,
|
||||
ArgsUsage: "<nodes.json> filters..",
|
||||
|
||||
SkipFlagParsing: true,
|
||||
}
|
||||
)
|
||||
|
||||
func nodesetInfo(ctx *cli.Context) error {
|
||||
if ctx.NArg() < 1 {
|
||||
return fmt.Errorf("need nodes file as argument")
|
||||
}
|
||||
|
||||
ns := loadNodesJSON(ctx.Args().First())
|
||||
fmt.Printf("Set contains %d nodes.\n", len(ns))
|
||||
return nil
|
||||
}
|
||||
|
||||
func nodesetFilter(ctx *cli.Context) error {
|
||||
if ctx.NArg() < 1 {
|
||||
return fmt.Errorf("need nodes file as argument")
|
||||
}
|
||||
ns := loadNodesJSON(ctx.Args().First())
|
||||
filter, err := andFilter(ctx.Args().Tail())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
result := make(nodeSet)
|
||||
for id, n := range ns {
|
||||
if filter(n) {
|
||||
result[id] = n
|
||||
}
|
||||
}
|
||||
writeNodesJSON("-", result)
|
||||
return nil
|
||||
}
|
||||
|
||||
type nodeFilter func(nodeJSON) bool
|
||||
|
||||
type nodeFilterC struct {
|
||||
narg int
|
||||
fn func([]string) (nodeFilter, error)
|
||||
}
|
||||
|
||||
var filterFlags = map[string]nodeFilterC{
|
||||
"-ip": {1, ipFilter},
|
||||
"-min-age": {1, minAgeFilter},
|
||||
"-eth-network": {1, ethFilter},
|
||||
"-les-server": {0, lesFilter},
|
||||
}
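A usage sketch (the file name is a placeholder) combining several of these filters; nodesetFilter ANDs them together and writes the surviving entries to stdout:

	devp2p nodeset filter nodes.json -eth-network mainnet -min-age 24h -ip 10.0.0.0/8 > filtered.json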
|
||||
|
||||
func parseFilters(args []string) ([]nodeFilter, error) {
|
||||
var filters []nodeFilter
|
||||
for len(args) > 0 {
|
||||
fc, ok := filterFlags[args[0]]
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("invalid filter %q", args[0])
|
||||
}
|
||||
if len(args) < fc.narg {
|
||||
return nil, fmt.Errorf("filter %q wants %d arguments, have %d", args[0], fc.narg, len(args))
|
||||
}
|
||||
filter, err := fc.fn(args[1:])
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("%s: %v", args[0], err)
|
||||
}
|
||||
filters = append(filters, filter)
|
||||
args = args[fc.narg+1:]
|
||||
}
|
||||
return filters, nil
|
||||
}
|
||||
|
||||
func andFilter(args []string) (nodeFilter, error) {
|
||||
checks, err := parseFilters(args)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
f := func(n nodeJSON) bool {
|
||||
for _, filter := range checks {
|
||||
if !filter(n) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
return f, nil
|
||||
}
|
||||
|
||||
func ipFilter(args []string) (nodeFilter, error) {
|
||||
_, cidr, err := net.ParseCIDR(args[0])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
f := func(n nodeJSON) bool { return cidr.Contains(n.N.IP()) }
|
||||
return f, nil
|
||||
}
|
||||
|
||||
func minAgeFilter(args []string) (nodeFilter, error) {
|
||||
minage, err := time.ParseDuration(args[0])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
f := func(n nodeJSON) bool {
|
||||
age := n.LastResponse.Sub(n.FirstResponse)
|
||||
return age >= minage
|
||||
}
|
||||
return f, nil
|
||||
}
|
||||
|
||||
func ethFilter(args []string) (nodeFilter, error) {
|
||||
var filter forkid.Filter
|
||||
switch args[0] {
|
||||
case "mainnet":
|
||||
filter = forkid.NewStaticFilter(params.MainnetChainConfig, params.MainnetGenesisHash)
|
||||
case "rinkeby":
|
||||
filter = forkid.NewStaticFilter(params.RinkebyChainConfig, params.RinkebyGenesisHash)
|
||||
case "goerli":
|
||||
filter = forkid.NewStaticFilter(params.GoerliChainConfig, params.GoerliGenesisHash)
|
||||
case "ropsten":
|
||||
filter = forkid.NewStaticFilter(params.TestnetChainConfig, params.TestnetGenesisHash)
|
||||
default:
|
||||
return nil, fmt.Errorf("unknown network %q", args[0])
|
||||
}
|
||||
|
||||
f := func(n nodeJSON) bool {
|
||||
var eth struct {
|
||||
ForkID forkid.ID
|
||||
_ []rlp.RawValue `rlp:"tail"`
|
||||
}
|
||||
if n.N.Load(enr.WithEntry("eth", ð)) != nil {
|
||||
return false
|
||||
}
|
||||
return filter(eth.ForkID) == nil
|
||||
}
|
||||
return f, nil
|
||||
}
|
||||
|
||||
func lesFilter(args []string) (nodeFilter, error) {
|
||||
f := func(n nodeJSON) bool {
|
||||
var les struct {
|
||||
_ []rlp.RawValue `rlp:"tail"`
|
||||
}
|
||||
return n.N.Load(enr.WithEntry("les", &les)) == nil
|
||||
}
|
||||
return f, nil
|
||||
}
|
@@ -77,7 +77,7 @@ Change the password of a keyfile.`,
|
||||
}
|
||||
|
||||
// Then write the new keyfile in place of the old one.
|
||||
if err := ioutil.WriteFile(keyfilepath, newJson, 600); err != nil {
|
||||
if err := ioutil.WriteFile(keyfilepath, newJson, 0600); err != nil {
|
||||
utils.Fatalf("Error writing new keyfile to disk: %v", err)
|
||||
}
|
||||
|
||||
|
@@ -52,6 +52,10 @@ If you want to encrypt an existing private key, it can be specified by setting
|
||||
Name: "privatekey",
|
||||
Usage: "file containing a raw private key to encrypt",
|
||||
},
|
||||
cli.BoolFlag{
|
||||
Name: "lightkdf",
|
||||
Usage: "use less secure scrypt parameters",
|
||||
},
|
||||
},
|
||||
Action: func(ctx *cli.Context) error {
|
||||
// Check if keyfile path given and make sure it doesn't already exist.
|
||||
@@ -91,7 +95,11 @@ If you want to encrypt an existing private key, it can be specified by setting
|
||||
|
||||
// Encrypt key with passphrase.
|
||||
passphrase := promptPassphrase(true)
|
||||
keyjson, err := keystore.EncryptKey(key, passphrase, keystore.StandardScryptN, keystore.StandardScryptP)
|
||||
scryptN, scryptP := keystore.StandardScryptN, keystore.StandardScryptP
|
||||
if ctx.Bool("lightkdf") {
|
||||
scryptN, scryptP = keystore.LightScryptN, keystore.LightScryptP
|
||||
}
|
||||
keyjson, err := keystore.EncryptKey(key, passphrase, scryptN, scryptP)
|
||||
if err != nil {
|
||||
utils.Fatalf("Error encrypting key: %v", err)
|
||||
}
|
||||
|
@@ -43,6 +43,7 @@ func init() {
|
||||
commandSignMessage,
|
||||
commandVerifyMessage,
|
||||
}
|
||||
cli.CommandHelpTemplate = utils.OriginCommandHelpTemplate
|
||||
}
|
||||
|
||||
// Commonly used command line flags.
|
||||
|
@@ -34,7 +34,7 @@ func TestMessageSignVerify(t *testing.T) {
|
||||
message := "test message"
|
||||
|
||||
// Create the key.
|
||||
generate := runEthkey(t, "generate", keyfile)
|
||||
generate := runEthkey(t, "generate", "--lightkdf", keyfile)
|
||||
generate.Expect(`
|
||||
!! Unsupported terminal, password will be echoed.
|
||||
Password: {{.InputLine "foobar"}}
|
||||
|
@@ -34,17 +34,22 @@ var disasmCommand = cli.Command{
|
||||
}
|
||||
|
||||
func disasmCmd(ctx *cli.Context) error {
|
||||
if len(ctx.Args().First()) == 0 {
|
||||
return errors.New("filename required")
|
||||
var in string
|
||||
switch {
|
||||
case len(ctx.Args().First()) > 0:
|
||||
fn := ctx.Args().First()
|
||||
input, err := ioutil.ReadFile(fn)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
in = string(input)
|
||||
case ctx.GlobalIsSet(InputFlag.Name):
|
||||
in = ctx.GlobalString(InputFlag.Name)
|
||||
default:
|
||||
return errors.New("Missing filename or --input value")
|
||||
}
|
||||
|
||||
fn := ctx.Args().First()
|
||||
in, err := ioutil.ReadFile(fn)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
code := strings.TrimSpace(string(in))
|
||||
code := strings.TrimSpace(in)
|
||||
fmt.Printf("%v\n", code)
|
||||
return asm.PrintDisassembled(code)
|
||||
}
|
||||
|
@@ -79,10 +79,18 @@ var (
Name: "input",
Usage: "input for the EVM",
}
InputFileFlag = cli.StringFlag{
Name: "inputfile",
Usage: "file containing input for the EVM",
}
VerbosityFlag = cli.IntFlag{
Name: "verbosity",
Usage: "sets the verbosity level",
}
BenchFlag = cli.BoolFlag{
Name: "bench",
Usage: "benchmark the execution",
}
CreateFlag = cli.BoolFlag{
Name: "create",
Usage: "indicates the action should be create rather than call",

@@ -120,6 +128,7 @@ var (

func init() {
app.Flags = []cli.Flag{
BenchFlag,
CreateFlag,
DebugFlag,
VerbosityFlag,

@@ -130,6 +139,7 @@ func init() {
ValueFlag,
DumpFlag,
InputFlag,
InputFileFlag,
MemProfileFlag,
CPUProfileFlag,
StatDumpFlag,

@@ -147,6 +157,7 @@ func init() {
runCommand,
stateTestCommand,
}
cli.CommandHelpTemplate = utils.OriginCommandHelpTemplate
}

func main() {

@@ -17,6 +17,7 @@
package main

import (
"bytes"
"encoding/json"
"fmt"
"io/ioutil"

@@ -24,6 +25,7 @@ import (
"os"
goruntime "runtime"
"runtime/pprof"
"testing"
"time"

"github.com/ethereum/go-ethereum/cmd/evm/internal/compiler"
@@ -68,6 +70,33 @@ func readGenesis(genesisPath string) *core.Genesis {
return genesis
}

func timedExec(bench bool, execFunc func() ([]byte, uint64, error)) ([]byte, uint64, time.Duration, error) {
var (
output []byte
gasLeft uint64
execTime time.Duration
err error
)

if bench {
result := testing.Benchmark(func(b *testing.B) {
for i := 0; i < b.N; i++ {
output, gasLeft, err = execFunc()
}
})

// Get the average execution time from the benchmarking result.
// There are other useful stats here that could be reported.
execTime = time.Duration(result.NsPerOp())
} else {
startTime := time.Now()
output, gasLeft, err = execFunc()
execTime = time.Since(startTime)
}

return output, gasLeft, execTime, err
}

func runCmd(ctx *cli.Context) error {
glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, log.TerminalFormat(false)))
glogger.Verbosity(log.Lvl(ctx.GlobalInt(VerbosityFlag.Name)))
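Editor's note, not part of the commits above: the timedExec helper just wraps a closure in either a single wall-clock measurement or testing.Benchmark. A self-contained sketch of that pattern, with a made-up workload:

// Illustrative only: time a function once, or average it with testing.Benchmark.
package main

import (
	"fmt"
	"testing"
	"time"
)

func timed(bench bool, fn func()) time.Duration {
	if bench {
		// testing.Benchmark runs fn until the timing is stable and
		// reports the average nanoseconds per iteration.
		res := testing.Benchmark(func(b *testing.B) {
			for i := 0; i < b.N; i++ {
				fn()
			}
		})
		return time.Duration(res.NsPerOp())
	}
	start := time.Now()
	fn()
	return time.Since(start)
}

func main() {
	d := timed(true, func() { time.Sleep(time.Millisecond) })
	fmt.Println("average execution time:", d)
}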
@@ -115,11 +144,7 @@ func runCmd(ctx *cli.Context) error {
receiver = common.HexToAddress(ctx.GlobalString(ReceiverFlag.Name))
}

var (
code []byte
ret []byte
err error
)
var code []byte
codeFileFlag := ctx.GlobalString(CodeFileFlag.Name)
codeFlag := ctx.GlobalString(CodeFlag.Name)

@@ -145,6 +170,7 @@ func runCmd(ctx *cli.Context) error {
} else {
hexcode = []byte(codeFlag)
}
hexcode = bytes.TrimSpace(hexcode)
if len(hexcode)%2 != 0 {
fmt.Printf("Invalid input length for hex data (%d)\n", len(hexcode))
os.Exit(1)

@@ -201,18 +227,36 @@ func runCmd(ctx *cli.Context) error {
} else {
runtimeConfig.ChainConfig = params.AllEthashProtocolChanges
}
tstart := time.Now()
var leftOverGas uint64

var hexInput []byte
if inputFileFlag := ctx.GlobalString(InputFileFlag.Name); inputFileFlag != "" {
var err error
if hexInput, err = ioutil.ReadFile(inputFileFlag); err != nil {
fmt.Printf("could not load input from file: %v\n", err)
os.Exit(1)
}
} else {
hexInput = []byte(ctx.GlobalString(InputFlag.Name))
}
input := common.FromHex(string(bytes.TrimSpace(hexInput)))

var execFunc func() ([]byte, uint64, error)
if ctx.GlobalBool(CreateFlag.Name) {
input := append(code, common.Hex2Bytes(ctx.GlobalString(InputFlag.Name))...)
ret, _, leftOverGas, err = runtime.Create(input, &runtimeConfig)
input = append(code, input...)
execFunc = func() ([]byte, uint64, error) {
output, _, gasLeft, err := runtime.Create(input, &runtimeConfig)
return output, gasLeft, err
}
} else {
if len(code) > 0 {
statedb.SetCode(receiver, code)
}
ret, leftOverGas, err = runtime.Call(receiver, common.Hex2Bytes(ctx.GlobalString(InputFlag.Name)), &runtimeConfig)
execFunc = func() ([]byte, uint64, error) {
return runtime.Call(receiver, input, &runtimeConfig)
}
}
execTime := time.Since(tstart)

output, leftOverGas, execTime, err := timedExec(ctx.GlobalBool(BenchFlag.Name), execFunc)

if ctx.GlobalBool(DumpFlag.Name) {
statedb.Commit(true)

@@ -255,7 +299,7 @@ Gas used: %d
`, execTime, mem.HeapObjects, mem.Alloc, mem.TotalAlloc, mem.NumGC, initialGas-leftOverGas)
}
if tracer == nil {
fmt.Printf("0x%x\n", ret)
fmt.Printf("0x%x\n", output)
if err != nil {
fmt.Printf(" error: %v\n", err)
}
@@ -58,7 +58,7 @@ import (
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/p2p/nat"
"github.com/ethereum/go-ethereum/params"
"golang.org/x/net/websocket"
"github.com/gorilla/websocket"
)

var (

@@ -296,8 +296,7 @@ func (f *faucet) listenAndServe(port int) error {
go f.loop()

http.HandleFunc("/", f.webHandler)
http.Handle("/api", websocket.Handler(f.apiHandler))

http.HandleFunc("/api", f.apiHandler)
return http.ListenAndServe(fmt.Sprintf(":%d", port), nil)
}

@@ -308,7 +307,13 @@ func (f *faucet) webHandler(w http.ResponseWriter, r *http.Request) {
}

// apiHandler handles requests for Ether grants and transaction statuses.
func (f *faucet) apiHandler(conn *websocket.Conn) {
func (f *faucet) apiHandler(w http.ResponseWriter, r *http.Request) {
upgrader := websocket.Upgrader{}
conn, err := upgrader.Upgrade(w, r, nil)
if err != nil {
return
}

// Start tracking the connection and drop at the end
defer conn.Close()

@@ -331,7 +336,6 @@ func (f *faucet) apiHandler(conn *websocket.Conn) {
head *types.Header
balance *big.Int
nonce uint64
err error
)
for head == nil || balance == nil {
// Retrieve the current stats cached by the faucet

@@ -347,6 +351,7 @@ func (f *faucet) apiHandler(conn *websocket.Conn) {

if head == nil || balance == nil {
// Report the faucet offline until initial stats are ready
//lint:ignore ST1005 This error is to be displayed in the browser
if err = sendError(conn, errors.New("Faucet offline")); err != nil {
log.Warn("Failed to send faucet error to client", "err", err)
return

@@ -355,11 +360,14 @@ func (f *faucet) apiHandler(conn *websocket.Conn) {
}
}
// Send over the initial stats and the latest header
f.lock.RLock()
reqs := f.reqs
f.lock.RUnlock()
if err = send(conn, map[string]interface{}{
"funds": new(big.Int).Div(balance, ether),
"funded": nonce,
"peers": f.stack.Server().PeerCount(),
"requests": f.reqs,
"requests": reqs,
}, 3*time.Second); err != nil {
log.Warn("Failed to send initial stats to client", "err", err)
return

@@ -376,7 +384,7 @@ func (f *faucet) apiHandler(conn *websocket.Conn) {
Tier uint `json:"tier"`
Captcha string `json:"captcha"`
}
if err = websocket.JSON.Receive(conn, &msg); err != nil {
if err = conn.ReadJSON(&msg); err != nil {
return
}
if !*noauthFlag && !strings.HasPrefix(msg.URL, "https://gist.github.com/") && !strings.HasPrefix(msg.URL, "https://twitter.com/") &&

@@ -388,6 +396,7 @@ func (f *faucet) apiHandler(conn *websocket.Conn) {
continue
}
if msg.Tier >= uint(*tiersFlag) {
//lint:ignore ST1005 This error is to be displayed in the browser
if err = sendError(conn, errors.New("Invalid funding tier requested")); err != nil {
log.Warn("Failed to send tier error to client", "err", err)
return

@@ -425,6 +434,7 @@ func (f *faucet) apiHandler(conn *websocket.Conn) {
}
if !result.Success {
log.Warn("Captcha verification failed", "err", string(result.Errors))
//lint:ignore ST1005 it's funny and the robot won't mind
if err = sendError(conn, errors.New("Beep-bop, you're a robot!")); err != nil {
log.Warn("Failed to send captcha failure to client", "err", err)
return

@@ -446,6 +456,7 @@ func (f *faucet) apiHandler(conn *websocket.Conn) {
}
continue
case strings.HasPrefix(msg.URL, "https://plus.google.com/"):
//lint:ignore ST1005 Google is a company name and should be capitalized.
if err = sendError(conn, errors.New("Google+ authentication discontinued as the service was sunset")); err != nil {
log.Warn("Failed to send Google+ deprecation to client", "err", err)
return

@@ -458,6 +469,7 @@ func (f *faucet) apiHandler(conn *websocket.Conn) {
case *noauthFlag:
username, avatar, address, err = authNoAuth(msg.URL)
default:
//lint:ignore ST1005 This error is to be displayed in the browser
err = errors.New("Something funky happened, please open an issue at https://github.com/ethereum/go-ethereum/issues")
}
if err != nil {

@@ -516,7 +528,7 @@ func (f *faucet) apiHandler(conn *websocket.Conn) {

// Send an error if too frequent funding, othewise a success
if !fund {
if err = sendError(conn, fmt.Errorf("%s left until next allowance", common.PrettyDuration(timeout.Sub(time.Now())))); err != nil { // nolint: gosimple
if err = sendError(conn, fmt.Errorf("%s left until next allowance", common.PrettyDuration(time.Until(timeout)))); err != nil { // nolint: gosimple
log.Warn("Failed to send funding error to client", "err", err)
return
}

@@ -657,7 +669,7 @@ func send(conn *websocket.Conn, value interface{}, timeout time.Duration) error
timeout = 60 * time.Second
}
conn.SetWriteDeadline(time.Now().Add(timeout))
return websocket.JSON.Send(conn, value)
return conn.WriteJSON(value)
}

// sendError transmits an error to the remote end of the websocket, also setting

@@ -678,6 +690,7 @@ func authTwitter(url string) (string, string, common.Address, error) {
// Ensure the user specified a meaningful URL, no fancy nonsense
parts := strings.Split(url, "/")
if len(parts) < 4 || parts[len(parts)-2] != "status" {
//lint:ignore ST1005 This error is to be displayed in the browser
return "", "", common.Address{}, errors.New("Invalid Twitter status URL")
}
// Twitter's API isn't really friendly with direct links. Still, we don't

@@ -692,6 +705,7 @@ func authTwitter(url string) (string, string, common.Address, error) {
// Resolve the username from the final redirect, no intermediate junk
parts = strings.Split(res.Request.URL.String(), "/")
if len(parts) < 4 || parts[len(parts)-2] != "status" {
//lint:ignore ST1005 This error is to be displayed in the browser
return "", "", common.Address{}, errors.New("Invalid Twitter status URL")
}
username := parts[len(parts)-3]

@@ -702,6 +716,7 @@ func authTwitter(url string) (string, string, common.Address, error) {
}
address := common.HexToAddress(string(regexp.MustCompile("0x[0-9a-fA-F]{40}").Find(body)))
if address == (common.Address{}) {
//lint:ignore ST1005 This error is to be displayed in the browser
return "", "", common.Address{}, errors.New("No Ethereum address found to fund")
}
var avatar string

@@ -717,6 +732,7 @@ func authFacebook(url string) (string, string, common.Address, error) {
// Ensure the user specified a meaningful URL, no fancy nonsense
parts := strings.Split(url, "/")
if len(parts) < 4 || parts[len(parts)-2] != "posts" {
//lint:ignore ST1005 This error is to be displayed in the browser
return "", "", common.Address{}, errors.New("Invalid Facebook post URL")
}
username := parts[len(parts)-3]

@@ -736,6 +752,7 @@ func authFacebook(url string) (string, string, common.Address, error) {
}
address := common.HexToAddress(string(regexp.MustCompile("0x[0-9a-fA-F]{40}").Find(body)))
if address == (common.Address{}) {
//lint:ignore ST1005 This error is to be displayed in the browser
return "", "", common.Address{}, errors.New("No Ethereum address found to fund")
}
var avatar string

@@ -751,6 +768,7 @@ func authFacebook(url string) (string, string, common.Address, error) {
func authNoAuth(url string) (string, string, common.Address, error) {
address := common.HexToAddress(regexp.MustCompile("0x[0-9a-fA-F]{40}").FindString(url))
if address == (common.Address{}) {
//lint:ignore ST1005 This error is to be displayed in the browser
return "", "", common.Address{}, errors.New("No Ethereum address found to fund")
}
return address.Hex() + "@noauth", "", address, nil
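Editor's note, not part of the commits above: the faucet hunks replace golang.org/x/net/websocket with gorilla/websocket, so the API handler becomes a plain http.HandlerFunc that upgrades the request itself. A minimal sketch of that handler shape (the route and echo behaviour are made up for illustration):

// Illustrative gorilla/websocket handler: upgrade, then ReadJSON/WriteJSON in a loop.
package main

import (
	"log"
	"net/http"

	"github.com/gorilla/websocket"
)

var upgrader = websocket.Upgrader{}

func echoHandler(w http.ResponseWriter, r *http.Request) {
	conn, err := upgrader.Upgrade(w, r, nil)
	if err != nil {
		return // the upgrader has already replied with an HTTP error
	}
	defer conn.Close()

	for {
		var msg map[string]interface{}
		if err := conn.ReadJSON(&msg); err != nil {
			return
		}
		if err := conn.WriteJSON(msg); err != nil {
			return
		}
	}
}

func main() {
	http.HandleFunc("/api", echoHandler)
	log.Fatal(http.ListenAndServe(":8080", nil))
}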
@@ -56,6 +56,18 @@ This is a destructive action and changes the network in which you will be
participating.

It expects the genesis file as argument.`,
}
dumpGenesisCommand = cli.Command{
Action: utils.MigrateFlags(dumpGenesis),
Name: "dumpgenesis",
Usage: "Dumps genesis block JSON configuration to stdout",
ArgsUsage: "",
Flags: []cli.Flag{
utils.DataDirFlag,
},
Category: "BLOCKCHAIN COMMANDS",
Description: `
The dumpgenesis command dumps the genesis block configuration in JSON format to stdout.`,
}
importCommand = cli.Command{
Action: utils.MigrateFlags(importChain),

@@ -227,6 +239,17 @@ func initGenesis(ctx *cli.Context) error {
return nil
}

func dumpGenesis(ctx *cli.Context) error {
genesis := utils.MakeGenesis(ctx)
if genesis == nil {
genesis = core.DefaultGenesisBlock()
}
if err := json.NewEncoder(os.Stdout).Encode(genesis); err != nil {
utils.Fatalf("could not encode genesis")
}
return nil
}

func importChain(ctx *cli.Context) error {
if len(ctx.Args()) < 1 {
utils.Fatalf("This command requires an argument.")
@@ -28,7 +28,6 @@ import (
cli "gopkg.in/urfave/cli.v1"

"github.com/ethereum/go-ethereum/cmd/utils"
"github.com/ethereum/go-ethereum/dashboard"
"github.com/ethereum/go-ethereum/eth"
"github.com/ethereum/go-ethereum/node"
"github.com/ethereum/go-ethereum/params"

@@ -75,11 +74,10 @@ type ethstatsConfig struct {
}

type gethConfig struct {
Eth eth.Config
Shh whisper.Config
Node node.Config
Ethstats ethstatsConfig
Dashboard dashboard.Config
Eth eth.Config
Shh whisper.Config
Node node.Config
Ethstats ethstatsConfig
}

func loadConfig(file string, cfg *gethConfig) error {

@@ -101,8 +99,8 @@ func defaultNodeConfig() node.Config {
cfg := node.DefaultConfig
cfg.Name = clientIdentifier
cfg.Version = params.VersionWithCommit(gitCommit, gitDate)
cfg.HTTPModules = append(cfg.HTTPModules, "eth", "shh")
cfg.WSModules = append(cfg.WSModules, "eth", "shh")
cfg.HTTPModules = append(cfg.HTTPModules, "eth")
cfg.WSModules = append(cfg.WSModules, "eth")
cfg.IPCPath = "geth.ipc"
return cfg
}

@@ -110,10 +108,9 @@ func defaultNodeConfig() node.Config {
func makeConfigNode(ctx *cli.Context) (*node.Node, gethConfig) {
// Load defaults.
cfg := gethConfig{
Eth: eth.DefaultConfig,
Shh: whisper.DefaultConfig,
Node: defaultNodeConfig(),
Dashboard: dashboard.DefaultConfig,
Eth: eth.DefaultConfig,
Shh: whisper.DefaultConfig,
Node: defaultNodeConfig(),
}

// Load config file.

@@ -134,7 +131,6 @@ func makeConfigNode(ctx *cli.Context) (*node.Node, gethConfig) {
cfg.Ethstats.URL = ctx.GlobalString(utils.EthStatsURLFlag.Name)
}
utils.SetShhConfig(ctx, stack, &cfg.Shh)
utils.SetDashboardConfig(ctx, &cfg.Dashboard)

return stack, cfg
}

@@ -154,11 +150,11 @@ func makeFullNode(ctx *cli.Context) *node.Node {
if ctx.GlobalIsSet(utils.OverrideIstanbulFlag.Name) {
cfg.Eth.OverrideIstanbul = new(big.Int).SetUint64(ctx.GlobalUint64(utils.OverrideIstanbulFlag.Name))
}
if ctx.GlobalIsSet(utils.OverrideMuirGlacierFlag.Name) {
cfg.Eth.OverrideMuirGlacier = new(big.Int).SetUint64(ctx.GlobalUint64(utils.OverrideMuirGlacierFlag.Name))
}
utils.RegisterEthService(stack, &cfg.Eth)

if ctx.GlobalBool(utils.DashboardEnabledFlag.Name) {
utils.RegisterDashboardService(stack, &cfg.Dashboard, gitCommit)
}
// Whisper must be explicitly enabled by specifying at least 1 whisper flag or in dev mode
shhEnabled := enableWhisper(ctx)
shhAutoEnabled := !ctx.GlobalIsSet(utils.WhisperEnabledFlag.Name) && ctx.GlobalIsSet(utils.DeveloperFlag.Name)
@@ -51,7 +51,9 @@ func TestConsoleWelcome(t *testing.T) {
geth.SetTemplateFunc("goarch", func() string { return runtime.GOARCH })
geth.SetTemplateFunc("gover", runtime.Version)
geth.SetTemplateFunc("gethver", func() string { return params.VersionWithCommit("", "") })
geth.SetTemplateFunc("niltime", func() string { return time.Unix(0, 0).Format(time.RFC1123) })
geth.SetTemplateFunc("niltime", func() string {
return time.Unix(0, 0).Format("Mon Jan 02 2006 15:04:05 GMT-0700 (MST)")
})
geth.SetTemplateFunc("apis", func() string { return ipcAPIs })

// Verify the actual welcome message to the required template

@@ -87,11 +89,14 @@ func TestIPCAttachWelcome(t *testing.T) {
"--port", "0", "--maxpeers", "0", "--nodiscover", "--nat", "none",
"--etherbase", coinbase, "--shh", "--ipcpath", ipc)

defer func() {
geth.Interrupt()
geth.ExpectExit()
}()

waitForEndpoint(t, ipc, 3*time.Second)
testAttachWelcome(t, geth, "ipc:"+ipc, ipcAPIs)

geth.Interrupt()
geth.ExpectExit()
}

func TestHTTPAttachWelcome(t *testing.T) {

@@ -100,13 +105,14 @@ func TestHTTPAttachWelcome(t *testing.T) {
geth := runGeth(t,
"--port", "0", "--maxpeers", "0", "--nodiscover", "--nat", "none",
"--etherbase", coinbase, "--rpc", "--rpcport", port)
defer func() {
geth.Interrupt()
geth.ExpectExit()
}()

endpoint := "http://127.0.0.1:" + port
waitForEndpoint(t, endpoint, 3*time.Second)
testAttachWelcome(t, geth, endpoint, httpAPIs)

geth.Interrupt()
geth.ExpectExit()
}

func TestWSAttachWelcome(t *testing.T) {

@@ -116,13 +122,14 @@ func TestWSAttachWelcome(t *testing.T) {
geth := runGeth(t,
"--port", "0", "--maxpeers", "0", "--nodiscover", "--nat", "none",
"--etherbase", coinbase, "--ws", "--wsport", port)
defer func() {
geth.Interrupt()
geth.ExpectExit()
}()

endpoint := "ws://127.0.0.1:" + port
waitForEndpoint(t, endpoint, 3*time.Second)
testAttachWelcome(t, geth, endpoint, httpAPIs)

geth.Interrupt()
geth.ExpectExit()
}

func testAttachWelcome(t *testing.T, geth *testgeth, endpoint, apis string) {

@@ -137,7 +144,9 @@ func testAttachWelcome(t *testing.T, geth *testgeth, endpoint, apis string) {
attach.SetTemplateFunc("gover", runtime.Version)
attach.SetTemplateFunc("gethver", func() string { return params.VersionWithCommit("", "") })
attach.SetTemplateFunc("etherbase", func() string { return geth.Etherbase })
attach.SetTemplateFunc("niltime", func() string { return time.Unix(0, 0).Format(time.RFC1123) })
attach.SetTemplateFunc("niltime", func() string {
return time.Unix(0, 0).Format("Mon Jan 02 2006 15:04:05 GMT-0700 (MST)")
})
attach.SetTemplateFunc("ipc", func() bool { return strings.HasPrefix(endpoint, "ipc") })
attach.SetTemplateFunc("datadir", func() string { return geth.Datadir })
attach.SetTemplateFunc("apis", func() string { return apis })
@@ -70,10 +70,7 @@ var (
utils.NoUSBFlag,
utils.SmartCardDaemonPathFlag,
utils.OverrideIstanbulFlag,
utils.DashboardEnabledFlag,
utils.DashboardAddrFlag,
utils.DashboardPortFlag,
utils.DashboardRefreshFlag,
utils.OverrideMuirGlacierFlag,
utils.EthashCacheDirFlag,
utils.EthashCachesInMemoryFlag,
utils.EthashCachesOnDiskFlag,

@@ -134,6 +131,7 @@ var (
utils.NetrestrictFlag,
utils.NodeKeyFileFlag,
utils.NodeKeyHexFlag,
utils.DNSDiscoveryFlag,
utils.DeveloperFlag,
utils.DeveloperPeriodFlag,
utils.TestnetFlag,

@@ -197,7 +195,7 @@ func init() {
// Initialize the CLI app and start Geth
app.Action = geth
app.HideVersion = true // we have a command to print the version
app.Copyright = "Copyright 2013-2019 The go-ethereum Authors"
app.Copyright = "Copyright 2013-2020 The go-ethereum Authors"
app.Commands = []cli.Command{
// See chaincmd.go:
initCommand,

@@ -208,6 +206,7 @@ func init() {
copydbCommand,
removedbCommand,
dumpCommand,
dumpGenesisCommand,
inspectCommand,
// See accountcmd.go:
accountCommand,

@@ -236,16 +235,8 @@ func init() {
app.Flags = append(app.Flags, metricsFlags...)

app.Before = func(ctx *cli.Context) error {
logdir := ""
if ctx.GlobalBool(utils.DashboardEnabledFlag.Name) {
logdir = (&node.Config{DataDir: utils.MakeDataDir(ctx)}).ResolvePath("logs")
}
if err := debug.Setup(ctx, logdir); err != nil {
return err
}
return nil
return debug.Setup(ctx)
}

app.After = func(ctx *cli.Context) error {
debug.Exit()
console.Stdin.Close() // Resets terminal mode.
@@ -117,7 +117,6 @@ func version(ctx *cli.Context) error {
}
fmt.Println("Architecture:", runtime.GOARCH)
fmt.Println("Protocol Versions:", eth.ProtocolVersions)
fmt.Println("Network Id:", eth.DefaultConfig.NetworkId)
fmt.Println("Go Version:", runtime.Version())
fmt.Println("Operating System:", runtime.GOOS)
fmt.Printf("GOPATH=%s\n", os.Getenv("GOPATH"))
@@ -80,6 +80,7 @@ type RetestethEthAPI interface {
SendRawTransaction(ctx context.Context, rawTx hexutil.Bytes) (common.Hash, error)
BlockNumber(ctx context.Context) (uint64, error)
GetBlockByNumber(ctx context.Context, blockNr math.HexOrDecimal64, fullTx bool) (map[string]interface{}, error)
GetBlockByHash(ctx context.Context, blockHash common.Hash, fullTx bool) (map[string]interface{}, error)
GetBalance(ctx context.Context, address common.Address, blockNr math.HexOrDecimal64) (*math.HexOrDecimal256, error)
GetCode(ctx context.Context, address common.Address, blockNr math.HexOrDecimal64) (hexutil.Bytes, error)
GetTransactionCount(ctx context.Context, address common.Address, blockNr math.HexOrDecimal64) (uint64, error)

@@ -110,7 +111,6 @@ type RetestethAPI struct {
genesisHash common.Hash
engine *NoRewardEngine
blockchain *core.BlockChain
blockNumber uint64
txMap map[common.Address]map[uint64]*types.Transaction // Sender -> Nonce -> Transaction
txSenders map[common.Address]struct{} // Set of transaction senders
blockInterval uint64

@@ -356,7 +356,7 @@ func (api *RetestethAPI) SetChainParams(ctx context.Context, chainParams ChainPa
ChainID: chainId,
HomesteadBlock: homesteadBlock,
DAOForkBlock: daoForkBlock,
DAOForkSupport: false,
DAOForkSupport: true,
EIP150Block: eip150Block,
EIP155Block: eip155Block,
EIP158Block: eip158Block,

@@ -411,7 +411,6 @@ func (api *RetestethAPI) SetChainParams(ctx context.Context, chainParams ChainPa
api.engine = engine
api.blockchain = blockchain
api.db = state.NewDatabase(api.ethDb)
api.blockNumber = 0
api.txMap = make(map[common.Address]map[uint64]*types.Transaction)
api.txSenders = make(map[common.Address]struct{})
api.blockInterval = 0

@@ -424,7 +423,7 @@ func (api *RetestethAPI) SendRawTransaction(ctx context.Context, rawTx hexutil.B
// Return nil is not by mistake - some tests include sending transaction where gasLimit overflows uint64
return common.Hash{}, nil
}
signer := types.MakeSigner(api.chainConfig, big.NewInt(int64(api.blockNumber)))
signer := types.MakeSigner(api.chainConfig, big.NewInt(int64(api.currentNumber())))
sender, err := types.Sender(signer, tx)
if err != nil {
return common.Hash{}, err

@@ -450,9 +449,17 @@ func (api *RetestethAPI) MineBlocks(ctx context.Context, number uint64) (bool, e
return true, nil
}

func (api *RetestethAPI) currentNumber() uint64 {
if current := api.blockchain.CurrentBlock(); current != nil {
return current.NumberU64()
}
return 0
}

func (api *RetestethAPI) mineBlock() error {
parentHash := rawdb.ReadCanonicalHash(api.ethDb, api.blockNumber)
parent := rawdb.ReadBlock(api.ethDb, parentHash, api.blockNumber)
number := api.currentNumber()
parentHash := rawdb.ReadCanonicalHash(api.ethDb, number)
parent := rawdb.ReadBlock(api.ethDb, parentHash, number)
var timestamp uint64
if api.blockInterval == 0 {
timestamp = uint64(time.Now().Unix())

@@ -462,7 +469,7 @@ func (api *RetestethAPI) mineBlock() error {
gasLimit := core.CalcGasLimit(parent, 9223372036854775807, 9223372036854775807)
header := &types.Header{
ParentHash: parent.Hash(),
Number: big.NewInt(int64(api.blockNumber + 1)),
Number: big.NewInt(int64(number + 1)),
GasLimit: gasLimit,
Extra: api.extraData,
Time: timestamp,

@@ -495,7 +502,6 @@ func (api *RetestethAPI) mineBlock() error {
txCount := 0
var txs []*types.Transaction
var receipts []*types.Receipt
var coalescedLogs []*types.Log
var blockFull = gasPool.Gas() < params.TxGas
for address := range api.txSenders {
if blockFull {

@@ -522,7 +528,6 @@ func (api *RetestethAPI) mineBlock() error {
}
txs = append(txs, tx)
receipts = append(receipts, receipt)
coalescedLogs = append(coalescedLogs, receipt.Logs...)
delete(m, nonce)
if len(m) == 0 {
// Last tx for the sender

@@ -550,8 +555,7 @@ func (api *RetestethAPI) importBlock(block *types.Block) error {
if _, err := api.blockchain.InsertChain([]*types.Block{block}); err != nil {
return err
}
api.blockNumber = block.NumberU64()
fmt.Printf("Imported block %d\n", block.NumberU64())
fmt.Printf("Imported block %d, head is %d\n", block.NumberU64(), api.currentNumber())
return nil
}

@@ -576,7 +580,9 @@ func (api *RetestethAPI) RewindToBlock(ctx context.Context, newHead uint64) (boo
if err := api.blockchain.SetHead(newHead); err != nil {
return false, err
}
api.blockNumber = newHead
// When we rewind, the transaction pool should be cleaned out.
api.txMap = make(map[common.Address]map[uint64]*types.Transaction)
api.txSenders = make(map[common.Address]struct{})
return true, nil
}

@@ -596,8 +602,7 @@ func (api *RetestethAPI) GetLogHash(ctx context.Context, txHash common.Hash) (co
}

func (api *RetestethAPI) BlockNumber(ctx context.Context) (uint64, error) {
//fmt.Printf("BlockNumber, response: %d\n", api.blockNumber)
return api.blockNumber, nil
return api.currentNumber(), nil
}

func (api *RetestethAPI) GetBlockByNumber(ctx context.Context, blockNr math.HexOrDecimal64, fullTx bool) (map[string]interface{}, error) {

@@ -614,6 +619,20 @@ func (api *RetestethAPI) GetBlockByNumber(ctx context.Context, blockNr math.HexO
return nil, fmt.Errorf("block %d not found", blockNr)
}

func (api *RetestethAPI) GetBlockByHash(ctx context.Context, blockHash common.Hash, fullTx bool) (map[string]interface{}, error) {
block := api.blockchain.GetBlockByHash(blockHash)
if block != nil {
response, err := RPCMarshalBlock(block, true, fullTx)
if err != nil {
return nil, err
}
response["author"] = response["miner"]
response["totalDifficulty"] = (*hexutil.Big)(api.blockchain.GetTd(block.Hash(), block.Number().Uint64()))
return response, err
}
return nil, fmt.Errorf("block 0x%x not found", blockHash)
}

func (api *RetestethAPI) AccountRange(ctx context.Context,
blockHashOrNumber *math.HexOrDecimal256, txIndex uint64,
addressHash *math.HexOrDecimal256, maxResults uint64,

@@ -682,9 +701,6 @@ func (api *RetestethAPI) AccountRange(ctx context.Context,
for i := 0; i < int(maxResults) && it.Next(); i++ {
if preimage := accountTrie.GetKey(it.Key); preimage != nil {
result.AddressMap[common.BytesToHash(it.Key)] = common.BytesToAddress(preimage)
//fmt.Printf("%x: %x\n", it.Key, preimage)
} else {
//fmt.Printf("could not find preimage for %x\n", it.Key)
}
}
//fmt.Printf("Number of entries returned: %d\n", len(result.AddressMap))

@@ -808,9 +824,6 @@ func (api *RetestethAPI) StorageRangeAt(ctx context.Context,
Key: string(ks),
Value: string(vs),
}
//fmt.Printf("Key: %s, Value: %s\n", ks, vs)
} else {
//fmt.Printf("Did not find preimage for %x\n", it.Key)
}
}
if it.Next() {

@@ -889,7 +902,7 @@ func retesteth(ctx *cli.Context) error {
log.Info("HTTP endpoint closed", "url", httpEndpoint)
}()

abortChan := make(chan os.Signal)
abortChan := make(chan os.Signal, 11)
signal.Notify(abortChan, os.Interrupt)

sig := <-abortChan
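Editor's note, not part of the commits above: the retesteth hunk gives the abort channel a buffer because signal.Notify does not block when delivering signals, so an unbuffered channel can drop them. A tiny sketch of the recommended pattern (capacity 1 here, purely illustrative):

// Illustrative only: buffered signal channel so an interrupt is never missed.
package main

import (
	"fmt"
	"os"
	"os/signal"
)

func main() {
	abort := make(chan os.Signal, 1) // buffered, as signal.Notify requires
	signal.Notify(abort, os.Interrupt)

	fmt.Println("waiting for Ctrl-C...")
	<-abort
	fmt.Println("interrupted, shutting down")
}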
@@ -22,8 +22,6 @@ import (
"io"
"sort"

"strings"

"github.com/ethereum/go-ethereum/cmd/utils"
"github.com/ethereum/go-ethereum/internal/debug"
cli "gopkg.in/urfave/cli.v1"

@@ -116,16 +114,6 @@ var AppHelpFlagGroups = []flagGroup{
utils.EthashDatasetsOnDiskFlag,
},
},
//{
// Name: "DASHBOARD",
// Flags: []cli.Flag{
// utils.DashboardEnabledFlag,
// utils.DashboardAddrFlag,
// utils.DashboardPortFlag,
// utils.DashboardRefreshFlag,
// utils.DashboardAssetsFlag,
// },
//},
{
Name: "TRANSACTION POOL",
Flags: []cli.Flag{

@@ -194,6 +182,7 @@ var AppHelpFlagGroups = []flagGroup{
utils.BootnodesFlag,
utils.BootnodesV4Flag,
utils.BootnodesV5Flag,
utils.DNSDiscoveryFlag,
utils.ListenPortFlag,
utils.MaxPeersFlag,
utils.MaxPendingPeersFlag,

@@ -324,9 +313,6 @@ func init() {
var uncategorized []cli.Flag
for _, flag := range data.(*cli.App).Flags {
if _, ok := categorized[flag.String()]; !ok {
if strings.HasPrefix(flag.GetName(), "dashboard") {
continue
}
uncategorized = append(uncategorized, flag)
}
}
@@ -17,7 +17,6 @@
package main

import (
"encoding/binary"
"errors"
"math"
"math/big"

@@ -28,6 +27,7 @@ import (
math2 "github.com/ethereum/go-ethereum/common/math"
"github.com/ethereum/go-ethereum/consensus/ethash"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/params"
)

@@ -36,36 +36,38 @@ import (
type alethGenesisSpec struct {
SealEngine string `json:"sealEngine"`
Params struct {
AccountStartNonce math2.HexOrDecimal64 `json:"accountStartNonce"`
MaximumExtraDataSize hexutil.Uint64 `json:"maximumExtraDataSize"`
HomesteadForkBlock hexutil.Uint64 `json:"homesteadForkBlock"`
DaoHardforkBlock math2.HexOrDecimal64 `json:"daoHardforkBlock"`
EIP150ForkBlock hexutil.Uint64 `json:"EIP150ForkBlock"`
EIP158ForkBlock hexutil.Uint64 `json:"EIP158ForkBlock"`
ByzantiumForkBlock hexutil.Uint64 `json:"byzantiumForkBlock"`
ConstantinopleForkBlock hexutil.Uint64 `json:"constantinopleForkBlock"`
MinGasLimit hexutil.Uint64 `json:"minGasLimit"`
MaxGasLimit hexutil.Uint64 `json:"maxGasLimit"`
TieBreakingGas bool `json:"tieBreakingGas"`
GasLimitBoundDivisor math2.HexOrDecimal64 `json:"gasLimitBoundDivisor"`
MinimumDifficulty *hexutil.Big `json:"minimumDifficulty"`
DifficultyBoundDivisor *math2.HexOrDecimal256 `json:"difficultyBoundDivisor"`
DurationLimit *math2.HexOrDecimal256 `json:"durationLimit"`
BlockReward *hexutil.Big `json:"blockReward"`
NetworkID hexutil.Uint64 `json:"networkID"`
ChainID hexutil.Uint64 `json:"chainID"`
AllowFutureBlocks bool `json:"allowFutureBlocks"`
AccountStartNonce math2.HexOrDecimal64 `json:"accountStartNonce"`
MaximumExtraDataSize hexutil.Uint64 `json:"maximumExtraDataSize"`
HomesteadForkBlock *hexutil.Big `json:"homesteadForkBlock,omitempty"`
DaoHardforkBlock math2.HexOrDecimal64 `json:"daoHardforkBlock"`
EIP150ForkBlock *hexutil.Big `json:"EIP150ForkBlock,omitempty"`
EIP158ForkBlock *hexutil.Big `json:"EIP158ForkBlock,omitempty"`
ByzantiumForkBlock *hexutil.Big `json:"byzantiumForkBlock,omitempty"`
ConstantinopleForkBlock *hexutil.Big `json:"constantinopleForkBlock,omitempty"`
ConstantinopleFixForkBlock *hexutil.Big `json:"constantinopleFixForkBlock,omitempty"`
IstanbulForkBlock *hexutil.Big `json:"istanbulForkBlock,omitempty"`
MinGasLimit hexutil.Uint64 `json:"minGasLimit"`
MaxGasLimit hexutil.Uint64 `json:"maxGasLimit"`
TieBreakingGas bool `json:"tieBreakingGas"`
GasLimitBoundDivisor math2.HexOrDecimal64 `json:"gasLimitBoundDivisor"`
MinimumDifficulty *hexutil.Big `json:"minimumDifficulty"`
DifficultyBoundDivisor *math2.HexOrDecimal256 `json:"difficultyBoundDivisor"`
DurationLimit *math2.HexOrDecimal256 `json:"durationLimit"`
BlockReward *hexutil.Big `json:"blockReward"`
NetworkID hexutil.Uint64 `json:"networkID"`
ChainID hexutil.Uint64 `json:"chainID"`
AllowFutureBlocks bool `json:"allowFutureBlocks"`
} `json:"params"`

Genesis struct {
Nonce hexutil.Bytes `json:"nonce"`
Difficulty *hexutil.Big `json:"difficulty"`
MixHash common.Hash `json:"mixHash"`
Author common.Address `json:"author"`
Timestamp hexutil.Uint64 `json:"timestamp"`
ParentHash common.Hash `json:"parentHash"`
ExtraData hexutil.Bytes `json:"extraData"`
GasLimit hexutil.Uint64 `json:"gasLimit"`
Nonce types.BlockNonce `json:"nonce"`
Difficulty *hexutil.Big `json:"difficulty"`
MixHash common.Hash `json:"mixHash"`
Author common.Address `json:"author"`
Timestamp hexutil.Uint64 `json:"timestamp"`
ParentHash common.Hash `json:"parentHash"`
ExtraData hexutil.Bytes `json:"extraData"`
GasLimit hexutil.Uint64 `json:"gasLimit"`
} `json:"genesis"`

Accounts map[common.UnprefixedAddress]*alethGenesisSpecAccount `json:"accounts"`

@@ -74,7 +76,7 @@ type alethGenesisSpec struct {
// alethGenesisSpecAccount is the prefunded genesis account and/or precompiled
// contract definition.
type alethGenesisSpecAccount struct {
Balance *math2.HexOrDecimal256 `json:"balance"`
Balance *math2.HexOrDecimal256 `json:"balance,omitempty"`
Nonce uint64 `json:"nonce,omitempty"`
Precompiled *alethGenesisSpecBuiltin `json:"precompiled,omitempty"`
}

@@ -82,7 +84,7 @@ type alethGenesisSpecAccount struct {
// alethGenesisSpecBuiltin is the precompiled contract definition.
type alethGenesisSpecBuiltin struct {
Name string `json:"name,omitempty"`
StartingBlock hexutil.Uint64 `json:"startingBlock,omitempty"`
StartingBlock *hexutil.Big `json:"startingBlock,omitempty"`
Linear *alethGenesisSpecLinearPricing `json:"linear,omitempty"`
}
@@ -106,21 +108,33 @@ func newAlethGenesisSpec(network string, genesis *core.Genesis) (*alethGenesisSp
spec.Params.AccountStartNonce = 0
spec.Params.TieBreakingGas = false
spec.Params.AllowFutureBlocks = false

// Dao hardfork block is a special one. The fork block is listed as 0 in the
// config but aleth will sync with ETC clients up until the actual dao hard
// fork block.
spec.Params.DaoHardforkBlock = 0

spec.Params.HomesteadForkBlock = (hexutil.Uint64)(genesis.Config.HomesteadBlock.Uint64())
spec.Params.EIP150ForkBlock = (hexutil.Uint64)(genesis.Config.EIP150Block.Uint64())
spec.Params.EIP158ForkBlock = (hexutil.Uint64)(genesis.Config.EIP158Block.Uint64())

// Byzantium
if num := genesis.Config.HomesteadBlock; num != nil {
spec.Params.HomesteadForkBlock = (*hexutil.Big)(num)
}
if num := genesis.Config.EIP150Block; num != nil {
spec.Params.EIP150ForkBlock = (*hexutil.Big)(num)
}
if num := genesis.Config.EIP158Block; num != nil {
spec.Params.EIP158ForkBlock = (*hexutil.Big)(num)
}
if num := genesis.Config.ByzantiumBlock; num != nil {
spec.setByzantium(num)
spec.Params.ByzantiumForkBlock = (*hexutil.Big)(num)
}
// Constantinople
if num := genesis.Config.ConstantinopleBlock; num != nil {
spec.setConstantinople(num)
spec.Params.ConstantinopleForkBlock = (*hexutil.Big)(num)
}
if num := genesis.Config.PetersburgBlock; num != nil {
spec.Params.ConstantinopleFixForkBlock = (*hexutil.Big)(num)
}
if num := genesis.Config.IstanbulBlock; num != nil {
spec.Params.IstanbulForkBlock = (*hexutil.Big)(num)
}

spec.Params.NetworkID = (hexutil.Uint64)(genesis.Config.ChainID.Uint64())
spec.Params.ChainID = (hexutil.Uint64)(genesis.Config.ChainID.Uint64())
spec.Params.MaximumExtraDataSize = (hexutil.Uint64)(params.MaximumExtraDataSize)

@@ -132,9 +146,7 @@ func newAlethGenesisSpec(network string, genesis *core.Genesis) (*alethGenesisSp
spec.Params.DurationLimit = (*math2.HexOrDecimal256)(params.DurationLimit)
spec.Params.BlockReward = (*hexutil.Big)(ethash.FrontierBlockReward)

spec.Genesis.Nonce = (hexutil.Bytes)(make([]byte, 8))
binary.LittleEndian.PutUint64(spec.Genesis.Nonce[:], genesis.Nonce)

spec.Genesis.Nonce = types.EncodeNonce(genesis.Nonce)
spec.Genesis.MixHash = genesis.Mixhash
spec.Genesis.Difficulty = (*hexutil.Big)(genesis.Difficulty)
spec.Genesis.Author = genesis.Coinbase

@@ -157,15 +169,32 @@ func newAlethGenesisSpec(network string, genesis *core.Genesis) (*alethGenesisSp
Linear: &alethGenesisSpecLinearPricing{Base: 15, Word: 3}})
if genesis.Config.ByzantiumBlock != nil {
spec.setPrecompile(5, &alethGenesisSpecBuiltin{Name: "modexp",
StartingBlock: (hexutil.Uint64)(genesis.Config.ByzantiumBlock.Uint64())})
StartingBlock: (*hexutil.Big)(genesis.Config.ByzantiumBlock)})
spec.setPrecompile(6, &alethGenesisSpecBuiltin{Name: "alt_bn128_G1_add",
StartingBlock: (hexutil.Uint64)(genesis.Config.ByzantiumBlock.Uint64()),
StartingBlock: (*hexutil.Big)(genesis.Config.ByzantiumBlock),
Linear: &alethGenesisSpecLinearPricing{Base: 500}})
spec.setPrecompile(7, &alethGenesisSpecBuiltin{Name: "alt_bn128_G1_mul",
StartingBlock: (hexutil.Uint64)(genesis.Config.ByzantiumBlock.Uint64()),
StartingBlock: (*hexutil.Big)(genesis.Config.ByzantiumBlock),
Linear: &alethGenesisSpecLinearPricing{Base: 40000}})
spec.setPrecompile(8, &alethGenesisSpecBuiltin{Name: "alt_bn128_pairing_product",
StartingBlock: (hexutil.Uint64)(genesis.Config.ByzantiumBlock.Uint64())})
StartingBlock: (*hexutil.Big)(genesis.Config.ByzantiumBlock)})
}
if genesis.Config.IstanbulBlock != nil {
if genesis.Config.ByzantiumBlock == nil {
return nil, errors.New("invalid genesis, istanbul fork is enabled while byzantium is not")
}
spec.setPrecompile(6, &alethGenesisSpecBuiltin{
Name: "alt_bn128_G1_add",
StartingBlock: (*hexutil.Big)(genesis.Config.ByzantiumBlock),
}) // Aleth hardcoded the gas policy
spec.setPrecompile(7, &alethGenesisSpecBuiltin{
Name: "alt_bn128_G1_mul",
StartingBlock: (*hexutil.Big)(genesis.Config.ByzantiumBlock),
}) // Aleth hardcoded the gas policy
spec.setPrecompile(9, &alethGenesisSpecBuiltin{
Name: "blake2_compression",
StartingBlock: (*hexutil.Big)(genesis.Config.IstanbulBlock),
})
}
return spec, nil
}

@@ -196,14 +225,6 @@ func (spec *alethGenesisSpec) setAccount(address common.Address, account core.Ge

}

func (spec *alethGenesisSpec) setByzantium(num *big.Int) {
spec.Params.ByzantiumForkBlock = hexutil.Uint64(num.Uint64())
}

func (spec *alethGenesisSpec) setConstantinople(num *big.Int) {
spec.Params.ConstantinopleForkBlock = hexutil.Uint64(num.Uint64())
}

// parityChainSpec is the chain specification format used by Parity.
type parityChainSpec struct {
Name string `json:"name"`
@@ -223,36 +244,40 @@ type parityChainSpec struct {
} `json:"engine"`

Params struct {
AccountStartNonce hexutil.Uint64 `json:"accountStartNonce"`
MaximumExtraDataSize hexutil.Uint64 `json:"maximumExtraDataSize"`
MinGasLimit hexutil.Uint64 `json:"minGasLimit"`
GasLimitBoundDivisor math2.HexOrDecimal64 `json:"gasLimitBoundDivisor"`
NetworkID hexutil.Uint64 `json:"networkID"`
ChainID hexutil.Uint64 `json:"chainID"`
MaxCodeSize hexutil.Uint64 `json:"maxCodeSize"`
MaxCodeSizeTransition hexutil.Uint64 `json:"maxCodeSizeTransition"`
EIP98Transition hexutil.Uint64 `json:"eip98Transition"`
EIP150Transition hexutil.Uint64 `json:"eip150Transition"`
EIP160Transition hexutil.Uint64 `json:"eip160Transition"`
EIP161abcTransition hexutil.Uint64 `json:"eip161abcTransition"`
EIP161dTransition hexutil.Uint64 `json:"eip161dTransition"`
EIP155Transition hexutil.Uint64 `json:"eip155Transition"`
EIP140Transition hexutil.Uint64 `json:"eip140Transition"`
EIP211Transition hexutil.Uint64 `json:"eip211Transition"`
EIP214Transition hexutil.Uint64 `json:"eip214Transition"`
EIP658Transition hexutil.Uint64 `json:"eip658Transition"`
EIP145Transition hexutil.Uint64 `json:"eip145Transition"`
EIP1014Transition hexutil.Uint64 `json:"eip1014Transition"`
EIP1052Transition hexutil.Uint64 `json:"eip1052Transition"`
EIP1283Transition hexutil.Uint64 `json:"eip1283Transition"`
EIP1283DisableTransition hexutil.Uint64 `json:"eip1283DisableTransition"`
AccountStartNonce hexutil.Uint64 `json:"accountStartNonce"`
MaximumExtraDataSize hexutil.Uint64 `json:"maximumExtraDataSize"`
MinGasLimit hexutil.Uint64 `json:"minGasLimit"`
GasLimitBoundDivisor math2.HexOrDecimal64 `json:"gasLimitBoundDivisor"`
NetworkID hexutil.Uint64 `json:"networkID"`
ChainID hexutil.Uint64 `json:"chainID"`
MaxCodeSize hexutil.Uint64 `json:"maxCodeSize"`
MaxCodeSizeTransition hexutil.Uint64 `json:"maxCodeSizeTransition"`
EIP98Transition hexutil.Uint64 `json:"eip98Transition"`
EIP150Transition hexutil.Uint64 `json:"eip150Transition"`
EIP160Transition hexutil.Uint64 `json:"eip160Transition"`
EIP161abcTransition hexutil.Uint64 `json:"eip161abcTransition"`
EIP161dTransition hexutil.Uint64 `json:"eip161dTransition"`
EIP155Transition hexutil.Uint64 `json:"eip155Transition"`
EIP140Transition hexutil.Uint64 `json:"eip140Transition"`
EIP211Transition hexutil.Uint64 `json:"eip211Transition"`
EIP214Transition hexutil.Uint64 `json:"eip214Transition"`
EIP658Transition hexutil.Uint64 `json:"eip658Transition"`
EIP145Transition hexutil.Uint64 `json:"eip145Transition"`
EIP1014Transition hexutil.Uint64 `json:"eip1014Transition"`
EIP1052Transition hexutil.Uint64 `json:"eip1052Transition"`
EIP1283Transition hexutil.Uint64 `json:"eip1283Transition"`
EIP1283DisableTransition hexutil.Uint64 `json:"eip1283DisableTransition"`
EIP1283ReenableTransition hexutil.Uint64 `json:"eip1283ReenableTransition"`
EIP1344Transition hexutil.Uint64 `json:"eip1344Transition"`
EIP1884Transition hexutil.Uint64 `json:"eip1884Transition"`
EIP2028Transition hexutil.Uint64 `json:"eip2028Transition"`
} `json:"params"`

Genesis struct {
Seal struct {
Ethereum struct {
Nonce hexutil.Bytes `json:"nonce"`
MixHash hexutil.Bytes `json:"mixHash"`
Nonce types.BlockNonce `json:"nonce"`
MixHash hexutil.Bytes `json:"mixHash"`
} `json:"ethereum"`
} `json:"seal"`

@@ -278,17 +303,23 @@ type parityChainSpecAccount struct {

// parityChainSpecBuiltin is the precompiled contract definition.
type parityChainSpecBuiltin struct {
Name string `json:"name,omitempty"`
ActivateAt math2.HexOrDecimal64 `json:"activate_at,omitempty"`
Pricing *parityChainSpecPricing `json:"pricing,omitempty"`
Name string `json:"name"` // Each builtin should has it own name
Pricing interface{} `json:"pricing"` // Each builtin should has it own price strategy
ActivateAt *hexutil.Big `json:"activate_at,omitempty"` // ActivateAt can't be omitted if empty, default means no fork
}

// parityChainSpecPricing represents the different pricing models that builtin
// contracts might advertise using.
type parityChainSpecPricing struct {
Linear *parityChainSpecLinearPricing `json:"linear,omitempty"`
ModExp *parityChainSpecModExpPricing `json:"modexp,omitempty"`
AltBnPairing *parityChainSpecAltBnPairingPricing `json:"alt_bn128_pairing,omitempty"`
Linear *parityChainSpecLinearPricing `json:"linear,omitempty"`
ModExp *parityChainSpecModExpPricing `json:"modexp,omitempty"`

// Before the https://github.com/paritytech/parity-ethereum/pull/11039,
// Parity uses this format to config bn pairing price policy.
AltBnPairing *parityChainSepcAltBnPairingPricing `json:"alt_bn128_pairing,omitempty"`

// Blake2F is the price per round of Blake2 compression
Blake2F *parityChainSpecBlakePricing `json:"blake2_f,omitempty"`
}

type parityChainSpecLinearPricing struct {

@@ -300,11 +331,36 @@ type parityChainSpecModExpPricing struct {
Divisor uint64 `json:"divisor"`
}

type parityChainSpecAltBnPairingPricing struct {
// parityChainSpecAltBnConstOperationPricing defines the price
// policy for bn const operation(used after istanbul)
type parityChainSpecAltBnConstOperationPricing struct {
Price uint64 `json:"price"`
}

// parityChainSepcAltBnPairingPricing defines the price policy
// for bn pairing.
type parityChainSepcAltBnPairingPricing struct {
Base uint64 `json:"base"`
Pair uint64 `json:"pair"`
}

// parityChainSpecBlakePricing defines the price policy for blake2 f
// compression.
type parityChainSpecBlakePricing struct {
GasPerRound uint64 `json:"gas_per_round"`
}

type parityChainSpecAlternativePrice struct {
AltBnConstOperationPrice *parityChainSpecAltBnConstOperationPricing `json:"alt_bn128_const_operations,omitempty"`
AltBnPairingPrice *parityChainSepcAltBnPairingPricing `json:"alt_bn128_pairing,omitempty"`
}

// parityChainSpecVersionedPricing represents a single version price policy.
type parityChainSpecVersionedPricing struct {
Price *parityChainSpecAlternativePrice `json:"price,omitempty"`
Info string `json:"info,omitempty"`
}

// newParityChainSpec converts a go-ethereum genesis block into a Parity specific
// chain specification format.
func newParityChainSpec(network string, genesis *core.Genesis, bootnodes []string) (*parityChainSpec, error) {
@@ -352,7 +408,10 @@ func newParityChainSpec(network string, genesis *core.Genesis, bootnodes []strin
|
||||
if num := genesis.Config.PetersburgBlock; num != nil {
|
||||
spec.setConstantinopleFix(num)
|
||||
}
|
||||
|
||||
// Istanbul
|
||||
if num := genesis.Config.IstanbulBlock; num != nil {
|
||||
spec.setIstanbul(num)
|
||||
}
|
||||
spec.Params.MaximumExtraDataSize = (hexutil.Uint64)(params.MaximumExtraDataSize)
|
||||
spec.Params.MinGasLimit = (hexutil.Uint64)(params.MinGasLimit)
|
||||
spec.Params.GasLimitBoundDivisor = (math2.HexOrDecimal64)(params.GasLimitBoundDivisor)
|
||||
@@ -365,10 +424,8 @@ func newParityChainSpec(network string, genesis *core.Genesis, bootnodes []strin
|
||||
// Disable this one
|
||||
spec.Params.EIP98Transition = math.MaxInt64
|
||||
|
||||
spec.Genesis.Seal.Ethereum.Nonce = (hexutil.Bytes)(make([]byte, 8))
|
||||
binary.LittleEndian.PutUint64(spec.Genesis.Seal.Ethereum.Nonce[:], genesis.Nonce)
|
||||
|
||||
spec.Genesis.Seal.Ethereum.MixHash = (hexutil.Bytes)(genesis.Mixhash[:])
|
||||
spec.Genesis.Seal.Ethereum.Nonce = types.EncodeNonce(genesis.Nonce)
|
||||
spec.Genesis.Seal.Ethereum.MixHash = (genesis.Mixhash[:])
|
||||
spec.Genesis.Difficulty = (*hexutil.Big)(genesis.Difficulty)
|
||||
spec.Genesis.Author = genesis.Coinbase
|
||||
spec.Genesis.Timestamp = (hexutil.Uint64)(genesis.Timestamp)
|
||||
@@ -398,18 +455,93 @@ func newParityChainSpec(network string, genesis *core.Genesis, bootnodes []strin
|
||||
Name: "identity", Pricing: &parityChainSpecPricing{Linear: &parityChainSpecLinearPricing{Base: 15, Word: 3}},
|
||||
})
|
||||
if genesis.Config.ByzantiumBlock != nil {
|
||||
blnum := math2.HexOrDecimal64(genesis.Config.ByzantiumBlock.Uint64())
|
||||
spec.setPrecompile(5, &parityChainSpecBuiltin{
|
||||
Name: "modexp", ActivateAt: blnum, Pricing: &parityChainSpecPricing{ModExp: &parityChainSpecModExpPricing{Divisor: 20}},
|
||||
Name: "modexp",
|
||||
ActivateAt: (*hexutil.Big)(genesis.Config.ByzantiumBlock),
|
||||
Pricing: &parityChainSpecPricing{
|
||||
ModExp: &parityChainSpecModExpPricing{Divisor: 20},
|
||||
},
|
||||
})
|
||||
spec.setPrecompile(6, &parityChainSpecBuiltin{
|
||||
Name: "alt_bn128_add", ActivateAt: blnum, Pricing: &parityChainSpecPricing{Linear: &parityChainSpecLinearPricing{Base: 500}},
|
||||
Name: "alt_bn128_add",
|
||||
ActivateAt: (*hexutil.Big)(genesis.Config.ByzantiumBlock),
|
||||
Pricing: &parityChainSpecPricing{
|
||||
Linear: &parityChainSpecLinearPricing{Base: 500, Word: 0},
|
||||
},
|
||||
})
|
||||
spec.setPrecompile(7, &parityChainSpecBuiltin{
|
||||
Name: "alt_bn128_mul", ActivateAt: blnum, Pricing: &parityChainSpecPricing{Linear: &parityChainSpecLinearPricing{Base: 40000}},
|
||||
Name: "alt_bn128_mul",
|
||||
ActivateAt: (*hexutil.Big)(genesis.Config.ByzantiumBlock),
|
||||
Pricing: &parityChainSpecPricing{
|
||||
Linear: &parityChainSpecLinearPricing{Base: 40000, Word: 0},
|
||||
},
|
||||
})
|
||||
spec.setPrecompile(8, &parityChainSpecBuiltin{
|
||||
Name: "alt_bn128_pairing", ActivateAt: blnum, Pricing: &parityChainSpecPricing{AltBnPairing: &parityChainSpecAltBnPairingPricing{Base: 100000, Pair: 80000}},
|
||||
Name: "alt_bn128_pairing",
|
||||
ActivateAt: (*hexutil.Big)(genesis.Config.ByzantiumBlock),
|
||||
Pricing: &parityChainSpecPricing{
|
||||
AltBnPairing: &parityChainSepcAltBnPairingPricing{Base: 100000, Pair: 80000},
|
||||
},
|
||||
})
|
||||
}
|
||||
if genesis.Config.IstanbulBlock != nil {
|
||||
if genesis.Config.ByzantiumBlock == nil {
|
||||
return nil, errors.New("invalid genesis, istanbul fork is enabled while byzantium is not")
|
||||
}
|
||||
spec.setPrecompile(6, &parityChainSpecBuiltin{
|
||||
Name: "alt_bn128_add",
|
||||
ActivateAt: (*hexutil.Big)(genesis.Config.ByzantiumBlock),
|
||||
Pricing: map[*hexutil.Big]*parityChainSpecVersionedPricing{
|
||||
(*hexutil.Big)(big.NewInt(0)): {
|
||||
Price: &parityChainSpecAlternativePrice{
|
||||
AltBnConstOperationPrice: &parityChainSpecAltBnConstOperationPricing{Price: 500},
|
||||
},
|
||||
},
|
||||
(*hexutil.Big)(genesis.Config.IstanbulBlock): {
|
||||
Price: &parityChainSpecAlternativePrice{
|
||||
AltBnConstOperationPrice: &parityChainSpecAltBnConstOperationPricing{Price: 150},
|
||||
},
|
||||
},
|
||||
},
|
||||
})
|
||||
spec.setPrecompile(7, &parityChainSpecBuiltin{
|
||||
Name: "alt_bn128_mul",
|
||||
ActivateAt: (*hexutil.Big)(genesis.Config.ByzantiumBlock),
|
||||
Pricing: map[*hexutil.Big]*parityChainSpecVersionedPricing{
|
||||
(*hexutil.Big)(big.NewInt(0)): {
|
||||
Price: &parityChainSpecAlternativePrice{
|
||||
AltBnConstOperationPrice: &parityChainSpecAltBnConstOperationPricing{Price: 40000},
|
||||
},
|
||||
},
|
||||
(*hexutil.Big)(genesis.Config.IstanbulBlock): {
|
||||
Price: &parityChainSpecAlternativePrice{
|
||||
AltBnConstOperationPrice: &parityChainSpecAltBnConstOperationPricing{Price: 6000},
|
||||
},
|
||||
},
|
||||
},
|
||||
})
|
||||
spec.setPrecompile(8, &parityChainSpecBuiltin{
|
||||
Name: "alt_bn128_pairing",
|
||||
ActivateAt: (*hexutil.Big)(genesis.Config.ByzantiumBlock),
|
||||
Pricing: map[*hexutil.Big]*parityChainSpecVersionedPricing{
|
||||
(*hexutil.Big)(big.NewInt(0)): {
|
||||
Price: &parityChainSpecAlternativePrice{
|
||||
AltBnPairingPrice: &parityChainSepcAltBnPairingPricing{Base: 100000, Pair: 80000},
|
||||
},
|
||||
},
|
||||
(*hexutil.Big)(genesis.Config.IstanbulBlock): {
|
||||
Price: &parityChainSpecAlternativePrice{
|
||||
AltBnPairingPrice: &parityChainSepcAltBnPairingPricing{Base: 45000, Pair: 34000},
|
||||
},
|
||||
},
|
||||
},
|
||||
})
|
||||
spec.setPrecompile(9, &parityChainSpecBuiltin{
|
||||
Name: "blake2_f",
|
||||
ActivateAt: (*hexutil.Big)(genesis.Config.IstanbulBlock),
|
||||
Pricing: &parityChainSpecPricing{
|
||||
Blake2F: &parityChainSpecBlakePricing{GasPerRound: 1},
|
||||
},
|
||||
})
|
||||
}
|
||||
return spec, nil
|
||||
@@ -451,10 +583,17 @@ func (spec *parityChainSpec) setConstantinopleFix(num *big.Int) {
|
||||
spec.Params.EIP1283DisableTransition = hexutil.Uint64(num.Uint64())
|
||||
}
|
||||
|
||||
func (spec *parityChainSpec) setIstanbul(num *big.Int) {
|
||||
spec.Params.EIP1344Transition = hexutil.Uint64(num.Uint64())
|
||||
spec.Params.EIP1884Transition = hexutil.Uint64(num.Uint64())
|
||||
spec.Params.EIP2028Transition = hexutil.Uint64(num.Uint64())
|
||||
spec.Params.EIP1283ReenableTransition = hexutil.Uint64(num.Uint64())
|
||||
}
|
||||
|
||||
// pyEthereumGenesisSpec represents the genesis specification format used by the
|
||||
// Python Ethereum implementation.
|
||||
type pyEthereumGenesisSpec struct {
|
||||
Nonce hexutil.Bytes `json:"nonce"`
|
||||
Nonce types.BlockNonce `json:"nonce"`
|
||||
Timestamp hexutil.Uint64 `json:"timestamp"`
|
||||
ExtraData hexutil.Bytes `json:"extraData"`
|
||||
GasLimit hexutil.Uint64 `json:"gasLimit"`
|
||||
@@ -473,6 +612,7 @@ func newPyEthereumGenesisSpec(network string, genesis *core.Genesis) (*pyEthereu
|
||||
return nil, errors.New("unsupported consensus engine")
|
||||
}
|
||||
spec := &pyEthereumGenesisSpec{
|
||||
		Nonce: types.EncodeNonce(genesis.Nonce),
		Timestamp: (hexutil.Uint64)(genesis.Timestamp),
		ExtraData: genesis.ExtraData,
		GasLimit: (hexutil.Uint64)(genesis.GasLimit),
@@ -482,8 +622,5 @@ func newPyEthereumGenesisSpec(network string, genesis *core.Genesis) (*pyEthereu
		Alloc: genesis.Alloc,
		ParentHash: genesis.ParentHash,
	}
	spec.Nonce = (hexutil.Bytes)(make([]byte, 8))
	binary.LittleEndian.PutUint64(spec.Nonce[:], genesis.Nonce)

	return spec, nil
}
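The nonce handling above changes from a hand-built little-endian hexutil.Bytes to types.EncodeNonce. Assuming EncodeNonce writes the value big-endian, matching the block-header nonce encoding, the difference in byte order looks like this (illustration only):

```go
package main

import (
	"encoding/binary"
	"fmt"

	"github.com/ethereum/go-ethereum/core/types"
)

func main() {
	// Old puppeth behaviour: little-endian bytes written by hand.
	old := make([]byte, 8)
	binary.LittleEndian.PutUint64(old, 0x0102030405060708)
	fmt.Printf("little-endian bytes: %x\n", old) // 0807060504030201

	// New behaviour: types.EncodeNonce, assumed big-endian like the header nonce.
	n := types.EncodeNonce(0x0102030405060708)
	fmt.Printf("EncodeNonce bytes:   %x\n", n[:]) // 0102030405060708 (assumed)
}
```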
@@ -17,6 +17,7 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"io/ioutil"
|
||||
"reflect"
|
||||
@@ -76,33 +77,19 @@ func TestParitySturebyConverter(t *testing.T) {
|
||||
if err := json.Unmarshal(blob, &genesis); err != nil {
|
||||
t.Fatalf("failed parsing genesis: %v", err)
|
||||
}
|
||||
spec, err := newParityChainSpec("Stureby", &genesis, []string{})
|
||||
spec, err := newParityChainSpec("stureby", &genesis, []string{})
|
||||
if err != nil {
|
||||
t.Fatalf("failed creating chainspec: %v", err)
|
||||
}
|
||||
|
||||
enc, err := json.MarshalIndent(spec, "", " ")
|
||||
if err != nil {
|
||||
t.Fatalf("failed encoding chainspec: %v", err)
|
||||
}
|
||||
expBlob, err := ioutil.ReadFile("testdata/stureby_parity.json")
|
||||
if err != nil {
|
||||
t.Fatalf("could not read file: %v", err)
|
||||
}
|
||||
expspec := &parityChainSpec{}
|
||||
if err := json.Unmarshal(expBlob, expspec); err != nil {
|
||||
t.Fatalf("failed parsing genesis: %v", err)
|
||||
}
|
||||
expspec.Nodes = []string{}
|
||||
|
||||
if !reflect.DeepEqual(expspec, spec) {
|
||||
t.Errorf("chainspec mismatch")
|
||||
c := spew.ConfigState{
|
||||
DisablePointerAddresses: true,
|
||||
SortKeys: true,
|
||||
}
|
||||
exp := strings.Split(c.Sdump(expspec), "\n")
|
||||
got := strings.Split(c.Sdump(spec), "\n")
|
||||
for i := 0; i < len(exp) && i < len(got); i++ {
|
||||
if exp[i] != got[i] {
|
||||
t.Logf("got: %v\nexp: %v\n", exp[i], got[i])
|
||||
}
|
||||
}
|
||||
if !bytes.Equal(expBlob, enc) {
|
||||
t.Fatalf("chainspec mismatch")
|
||||
}
|
||||
}
|
||||
|
@@ -41,7 +41,7 @@ var dashboardContent = `
|
||||
<meta http-equiv="X-UA-Compatible" content="IE=edge">
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1">
|
||||
|
||||
<title>{{.NetworkTitle}}: Ethereum Testnet</title>
|
||||
<title>{{.NetworkTitle}}: Network Dashboard</title>
|
||||
|
||||
<link href="https://cdnjs.cloudflare.com/ajax/libs/twitter-bootstrap/3.3.7/css/bootstrap.min.css" rel="stylesheet">
|
||||
<link href="https://cdnjs.cloudflare.com/ajax/libs/font-awesome/4.7.0/css/font-awesome.min.css" rel="stylesheet">
|
||||
|
@@ -129,15 +129,20 @@ func dial(server string, pubkey []byte) (*sshClient, error) {
|
||||
fmt.Printf("SSH key fingerprint is %s [MD5]\n", ssh.FingerprintLegacyMD5(key))
|
||||
fmt.Printf("Are you sure you want to continue connecting (yes/no)? ")
|
||||
|
||||
text, err := bufio.NewReader(os.Stdin).ReadString('\n')
|
||||
switch {
|
||||
case err != nil:
|
||||
return err
|
||||
case strings.TrimSpace(text) == "yes":
|
||||
pubkey = key.Marshal()
|
||||
return nil
|
||||
default:
|
||||
return fmt.Errorf("unknown auth choice: %v", text)
|
||||
for {
|
||||
text, err := bufio.NewReader(os.Stdin).ReadString('\n')
|
||||
switch {
|
||||
case err != nil:
|
||||
return err
|
||||
case strings.TrimSpace(text) == "yes":
|
||||
pubkey = key.Marshal()
|
||||
return nil
|
||||
case strings.TrimSpace(text) == "no":
|
||||
return errors.New("users says no")
|
||||
default:
|
||||
fmt.Println("Please answer 'yes' or 'no'")
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
// If a public key exists for this SSH server, check that it matches
|
||||
|
177
cmd/puppeth/testdata/stureby_aleth.json
vendored
@@ -1,112 +1,113 @@
|
||||
{
|
||||
"sealEngine":"Ethash",
|
||||
"params":{
|
||||
"accountStartNonce":"0x00",
|
||||
"maximumExtraDataSize":"0x20",
|
||||
"homesteadForkBlock":"0x2710",
|
||||
"daoHardforkBlock":"0x00",
|
||||
"EIP150ForkBlock":"0x3a98",
|
||||
"EIP158ForkBlock":"0x59d8",
|
||||
"byzantiumForkBlock":"0x7530",
|
||||
"constantinopleForkBlock":"0x9c40",
|
||||
"minGasLimit":"0x1388",
|
||||
"maxGasLimit":"0x7fffffffffffffff",
|
||||
"tieBreakingGas":false,
|
||||
"gasLimitBoundDivisor":"0x0400",
|
||||
"minimumDifficulty":"0x20000",
|
||||
"difficultyBoundDivisor":"0x0800",
|
||||
"durationLimit":"0x0d",
|
||||
"blockReward":"0x4563918244F40000",
|
||||
"networkID":"0x4cb2e",
|
||||
"chainID":"0x4cb2e",
|
||||
"allowFutureBlocks":false
|
||||
"sealEngine": "Ethash",
|
||||
"params": {
|
||||
"accountStartNonce": "0x0",
|
||||
"maximumExtraDataSize": "0x20",
|
||||
"homesteadForkBlock": "0x2710",
|
||||
"daoHardforkBlock": "0x0",
|
||||
"EIP150ForkBlock": "0x3a98",
|
||||
"EIP158ForkBlock": "0x59d8",
|
||||
"byzantiumForkBlock": "0x7530",
|
||||
"constantinopleForkBlock": "0x9c40",
|
||||
"constantinopleFixForkBlock": "0x9c40",
|
||||
"istanbulForkBlock": "0xc350",
|
||||
"minGasLimit": "0x1388",
|
||||
"maxGasLimit": "0x7fffffffffffffff",
|
||||
"tieBreakingGas": false,
|
||||
"gasLimitBoundDivisor": "0x400",
|
||||
"minimumDifficulty": "0x20000",
|
||||
"difficultyBoundDivisor": "0x800",
|
||||
"durationLimit": "0xd",
|
||||
"blockReward": "0x4563918244f40000",
|
||||
"networkID": "0x4cb2e",
|
||||
"chainID": "0x4cb2e",
|
||||
"allowFutureBlocks": false
|
||||
},
|
||||
"genesis":{
|
||||
"nonce":"0x0000000000000000",
|
||||
"difficulty":"0x20000",
|
||||
"mixHash":"0x0000000000000000000000000000000000000000000000000000000000000000",
|
||||
"author":"0x0000000000000000000000000000000000000000",
|
||||
"timestamp":"0x59a4e76d",
|
||||
"parentHash":"0x0000000000000000000000000000000000000000000000000000000000000000",
|
||||
"extraData":"0x0000000000000000000000000000000000000000000000000000000b4dc0ffee",
|
||||
"gasLimit":"0x47b760"
|
||||
"genesis": {
|
||||
"nonce": "0x0000000000000000",
|
||||
"difficulty": "0x20000",
|
||||
"mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
|
||||
"author": "0x0000000000000000000000000000000000000000",
|
||||
"timestamp": "0x59a4e76d",
|
||||
"parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
|
||||
"extraData": "0x0000000000000000000000000000000000000000000000000000000b4dc0ffee",
|
||||
"gasLimit": "0x47b760"
|
||||
},
|
||||
"accounts":{
|
||||
"0000000000000000000000000000000000000001":{
|
||||
"balance":"1",
|
||||
"precompiled":{
|
||||
"name":"ecrecover",
|
||||
"linear":{
|
||||
"base":3000,
|
||||
"word":0
|
||||
"accounts": {
|
||||
"0000000000000000000000000000000000000001": {
|
||||
"balance": "0x1",
|
||||
"precompiled": {
|
||||
"name": "ecrecover",
|
||||
"linear": {
|
||||
"base": 3000,
|
||||
"word": 0
|
||||
}
|
||||
}
|
||||
},
|
||||
"0000000000000000000000000000000000000002":{
|
||||
"balance":"1",
|
||||
"precompiled":{
|
||||
"name":"sha256",
|
||||
"linear":{
|
||||
"base":60,
|
||||
"word":12
|
||||
"0000000000000000000000000000000000000002": {
|
||||
"balance": "0x1",
|
||||
"precompiled": {
|
||||
"name": "sha256",
|
||||
"linear": {
|
||||
"base": 60,
|
||||
"word": 12
|
||||
}
|
||||
}
|
||||
},
|
||||
"0000000000000000000000000000000000000003":{
|
||||
"balance":"1",
|
||||
"precompiled":{
|
||||
"name":"ripemd160",
|
||||
"linear":{
|
||||
"base":600,
|
||||
"word":120
|
||||
"0000000000000000000000000000000000000003": {
|
||||
"balance": "0x1",
|
||||
"precompiled": {
|
||||
"name": "ripemd160",
|
||||
"linear": {
|
||||
"base": 600,
|
||||
"word": 120
|
||||
}
|
||||
}
|
||||
},
|
||||
"0000000000000000000000000000000000000004":{
|
||||
"balance":"1",
|
||||
"precompiled":{
|
||||
"name":"identity",
|
||||
"linear":{
|
||||
"base":15,
|
||||
"word":3
|
||||
"0000000000000000000000000000000000000004": {
|
||||
"balance": "0x1",
|
||||
"precompiled": {
|
||||
"name": "identity",
|
||||
"linear": {
|
||||
"base": 15,
|
||||
"word": 3
|
||||
}
|
||||
}
|
||||
},
|
||||
"0000000000000000000000000000000000000005":{
|
||||
"balance":"1",
|
||||
"precompiled":{
|
||||
"name":"modexp",
|
||||
"startingBlock":"0x7530"
|
||||
"0000000000000000000000000000000000000005": {
|
||||
"balance": "0x1",
|
||||
"precompiled": {
|
||||
"name": "modexp",
|
||||
"startingBlock": "0x7530"
|
||||
}
|
||||
},
|
||||
"0000000000000000000000000000000000000006":{
|
||||
"balance":"1",
|
||||
"precompiled":{
|
||||
"name":"alt_bn128_G1_add",
|
||||
"startingBlock":"0x7530",
|
||||
"linear":{
|
||||
"base":500,
|
||||
"word":0
|
||||
}
|
||||
"0000000000000000000000000000000000000006": {
|
||||
"balance": "0x1",
|
||||
"precompiled": {
|
||||
"name": "alt_bn128_G1_add",
|
||||
"startingBlock": "0x7530"
|
||||
}
|
||||
},
|
||||
"0000000000000000000000000000000000000007":{
|
||||
"balance":"1",
|
||||
"precompiled":{
|
||||
"name":"alt_bn128_G1_mul",
|
||||
"startingBlock":"0x7530",
|
||||
"linear":{
|
||||
"base":40000,
|
||||
"word":0
|
||||
}
|
||||
"0000000000000000000000000000000000000007": {
|
||||
"balance": "0x1",
|
||||
"precompiled": {
|
||||
"name": "alt_bn128_G1_mul",
|
||||
"startingBlock": "0x7530"
|
||||
}
|
||||
},
|
||||
"0000000000000000000000000000000000000008":{
|
||||
"balance":"1",
|
||||
"precompiled":{
|
||||
"name":"alt_bn128_pairing_product",
|
||||
"startingBlock":"0x7530"
|
||||
"0000000000000000000000000000000000000008": {
|
||||
"balance": "0x1",
|
||||
"precompiled": {
|
||||
"name": "alt_bn128_pairing_product",
|
||||
"startingBlock": "0x7530"
|
||||
}
|
||||
},
|
||||
"0000000000000000000000000000000000000009": {
|
||||
"balance": "0x1",
|
||||
"precompiled": {
|
||||
"name": "blake2_compression",
|
||||
"startingBlock": "0xc350"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
33
cmd/puppeth/testdata/stureby_geth.json
vendored
@@ -1,6 +1,5 @@
|
||||
{
|
||||
"config": {
|
||||
"ethash":{},
|
||||
"chainId": 314158,
|
||||
"homesteadBlock": 10000,
|
||||
"eip150Block": 15000,
|
||||
@@ -8,11 +7,13 @@
|
||||
"eip155Block": 23000,
|
||||
"eip158Block": 23000,
|
||||
"byzantiumBlock": 30000,
|
||||
"constantinopleBlock": 40000
|
||||
"constantinopleBlock": 40000,
|
||||
"petersburgBlock": 40000,
|
||||
"istanbulBlock": 50000,
|
||||
"ethash": {}
|
||||
},
|
||||
"nonce": "0x0",
|
||||
"timestamp": "0x59a4e76d",
|
||||
"parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
|
||||
"extraData": "0x0000000000000000000000000000000000000000000000000000000b4dc0ffee",
|
||||
"gasLimit": "0x47b760",
|
||||
"difficulty": "0x20000",
|
||||
@@ -20,28 +21,34 @@
|
||||
"coinbase": "0x0000000000000000000000000000000000000000",
|
||||
"alloc": {
|
||||
"0000000000000000000000000000000000000001": {
|
||||
"balance": "0x01"
|
||||
"balance": "0x1"
|
||||
},
|
||||
"0000000000000000000000000000000000000002": {
|
||||
"balance": "0x01"
|
||||
"balance": "0x1"
|
||||
},
|
||||
"0000000000000000000000000000000000000003": {
|
||||
"balance": "0x01"
|
||||
"balance": "0x1"
|
||||
},
|
||||
"0000000000000000000000000000000000000004": {
|
||||
"balance": "0x01"
|
||||
"balance": "0x1"
|
||||
},
|
||||
"0000000000000000000000000000000000000005": {
|
||||
"balance": "0x01"
|
||||
"balance": "0x1"
|
||||
},
|
||||
"0000000000000000000000000000000000000006": {
|
||||
"balance": "0x01"
|
||||
"balance": "0x1"
|
||||
},
|
||||
"0000000000000000000000000000000000000007": {
|
||||
"balance": "0x01"
|
||||
"balance": "0x1"
|
||||
},
|
||||
"0000000000000000000000000000000000000008": {
|
||||
"balance": "0x01"
|
||||
"balance": "0x1"
|
||||
},
|
||||
"0000000000000000000000000000000000000009": {
|
||||
"balance": "0x1"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"number": "0x0",
|
||||
"gasUsed": "0x0",
|
||||
"parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000"
|
||||
}
|
316
cmd/puppeth/testdata/stureby_parity.json
vendored
@@ -1,181 +1,213 @@
|
||||
{
|
||||
"name":"Stureby",
|
||||
"dataDir":"stureby",
|
||||
"engine":{
|
||||
"Ethash":{
|
||||
"params":{
|
||||
"minimumDifficulty":"0x20000",
|
||||
"difficultyBoundDivisor":"0x800",
|
||||
"durationLimit":"0xd",
|
||||
"blockReward":{
|
||||
"0x0":"0x4563918244f40000",
|
||||
"0x7530":"0x29a2241af62c0000",
|
||||
"0x9c40":"0x1bc16d674ec80000"
|
||||
"name": "stureby",
|
||||
"dataDir": "stureby",
|
||||
"engine": {
|
||||
"Ethash": {
|
||||
"params": {
|
||||
"minimumDifficulty": "0x20000",
|
||||
"difficultyBoundDivisor": "0x800",
|
||||
"durationLimit": "0xd",
|
||||
"blockReward": {
|
||||
"0x0": "0x4563918244f40000",
|
||||
"0x7530": "0x29a2241af62c0000",
|
||||
"0x9c40": "0x1bc16d674ec80000"
|
||||
},
|
||||
"homesteadTransition":"0x2710",
|
||||
"eip100bTransition":"0x7530",
|
||||
"difficultyBombDelays":{
|
||||
"0x7530":"0x2dc6c0",
|
||||
"0x9c40":"0x1e8480"
|
||||
}
|
||||
"difficultyBombDelays": {
|
||||
"0x7530": "0x2dc6c0",
|
||||
"0x9c40": "0x1e8480"
|
||||
},
|
||||
"homesteadTransition": "0x2710",
|
||||
"eip100bTransition": "0x7530"
|
||||
}
|
||||
}
|
||||
},
|
||||
"params":{
|
||||
"accountStartNonce":"0x0",
|
||||
"maximumExtraDataSize":"0x20",
|
||||
"gasLimitBoundDivisor":"0x400",
|
||||
"minGasLimit":"0x1388",
|
||||
"networkID":"0x4cb2e",
|
||||
"chainID":"0x4cb2e",
|
||||
"maxCodeSize":"0x6000",
|
||||
"maxCodeSizeTransition":"0x0",
|
||||
"params": {
|
||||
"accountStartNonce": "0x0",
|
||||
"maximumExtraDataSize": "0x20",
|
||||
"minGasLimit": "0x1388",
|
||||
"gasLimitBoundDivisor": "0x400",
|
||||
"networkID": "0x4cb2e",
|
||||
"chainID": "0x4cb2e",
|
||||
"maxCodeSize": "0x6000",
|
||||
"maxCodeSizeTransition": "0x0",
|
||||
"eip98Transition": "0x7fffffffffffffff",
|
||||
"eip150Transition":"0x3a98",
|
||||
"eip160Transition":"0x59d8",
|
||||
"eip161abcTransition":"0x59d8",
|
||||
"eip161dTransition":"0x59d8",
|
||||
"eip155Transition":"0x59d8",
|
||||
"eip140Transition":"0x7530",
|
||||
"eip211Transition":"0x7530",
|
||||
"eip214Transition":"0x7530",
|
||||
"eip658Transition":"0x7530",
|
||||
"eip145Transition":"0x9c40",
|
||||
"eip1014Transition":"0x9c40",
|
||||
"eip1052Transition":"0x9c40",
|
||||
"eip1283Transition":"0x9c40"
|
||||
"eip150Transition": "0x3a98",
|
||||
"eip160Transition": "0x59d8",
|
||||
"eip161abcTransition": "0x59d8",
|
||||
"eip161dTransition": "0x59d8",
|
||||
"eip155Transition": "0x59d8",
|
||||
"eip140Transition": "0x7530",
|
||||
"eip211Transition": "0x7530",
|
||||
"eip214Transition": "0x7530",
|
||||
"eip658Transition": "0x7530",
|
||||
"eip145Transition": "0x9c40",
|
||||
"eip1014Transition": "0x9c40",
|
||||
"eip1052Transition": "0x9c40",
|
||||
"eip1283Transition": "0x9c40",
|
||||
"eip1283DisableTransition": "0x9c40",
|
||||
"eip1283ReenableTransition": "0xc350",
|
||||
"eip1344Transition": "0xc350",
|
||||
"eip1884Transition": "0xc350",
|
||||
"eip2028Transition": "0xc350"
|
||||
},
|
||||
"genesis":{
|
||||
"seal":{
|
||||
"ethereum":{
|
||||
"nonce":"0x0000000000000000",
|
||||
"mixHash":"0x0000000000000000000000000000000000000000000000000000000000000000"
|
||||
"genesis": {
|
||||
"seal": {
|
||||
"ethereum": {
|
||||
"nonce": "0x0000000000000000",
|
||||
"mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000"
|
||||
}
|
||||
},
|
||||
"difficulty":"0x20000",
|
||||
"author":"0x0000000000000000000000000000000000000000",
|
||||
"timestamp":"0x59a4e76d",
|
||||
"parentHash":"0x0000000000000000000000000000000000000000000000000000000000000000",
|
||||
"extraData":"0x0000000000000000000000000000000000000000000000000000000b4dc0ffee",
|
||||
"gasLimit":"0x47b760"
|
||||
"difficulty": "0x20000",
|
||||
"author": "0x0000000000000000000000000000000000000000",
|
||||
"timestamp": "0x59a4e76d",
|
||||
"parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
|
||||
"extraData": "0x0000000000000000000000000000000000000000000000000000000b4dc0ffee",
|
||||
"gasLimit": "0x47b760"
|
||||
},
|
||||
"nodes":[
|
||||
"enode://dfa7aca3f5b635fbfe7d0b20575f25e40d9e27b4bfbb3cf74364a42023ad9f25c1a4383bcc8cced86ee511a7d03415345a4df05be37f1dff040e4c780699f1c0@168.61.153.255:31303",
|
||||
"enode://ef441b20dd70aeabf0eac35c3b8a2854e5ce04db0e30be9152ea9fd129359dcbb3f803993303ff5781c755dfd7223f3fe43505f583cccb740949407677412ba9@40.74.91.252:31303",
|
||||
"enode://953b5ea1c8987cf46008232a0160324fd00d41320ecf00e23af86ec8f5396b19eb57ddab37c78141be56f62e9077de4f4dfa0747fa768ed8c8531bbfb1046237@40.70.214.166:31303",
|
||||
"enode://276e613dd4b277a66591e565711e6c8bb107f0905248a9f8f8228c1a87992e156e5114bb9937c02824a9d9d25f76340442cf86e2028bf5293cae19904fb2b98e@35.178.251.52:30303",
|
||||
"enode://064c820d41e52ed7d426ac64b60506c2998235bedc7e67cb497c6faf7bb4fc54fe56fc82d0add3180b747c0c4f40a1108a6f84d7d0629ed606d504528e61cc57@3.8.5.3:30303",
|
||||
"enode://90069fdabcc5e684fa5d59430bebbb12755d9362dfe5006a1485b13d71a78a3812d36e74dd7d88e50b51add01e097ea80f16263aeaa4f0230db6c79e2a97e7ca@217.29.191.142:30303",
|
||||
"enode://0aac74b7fd28726275e466acb5e03bc88a95927e9951eb66b5efb239b2f798ada0690853b2f2823fe4efa408f0f3d4dd258430bc952a5ff70677b8625b3e3b14@40.115.33.57:40404",
|
||||
"enode://0b96415a10f835106d83e090a0528eed5e7887e5c802a6d084e9f1993a9d0fc713781e6e4101f6365e9b91259712f291acc0a9e6e667e22023050d602c36fbe2@40.115.33.57:40414"
|
||||
],
|
||||
"accounts":{
|
||||
"0000000000000000000000000000000000000001":{
|
||||
"balance":"1",
|
||||
"nonce":"0",
|
||||
"builtin":{
|
||||
"name":"ecrecover",
|
||||
"pricing":{
|
||||
"linear":{
|
||||
"base":3000,
|
||||
"word":0
|
||||
"nodes": [],
|
||||
"accounts": {
|
||||
"0000000000000000000000000000000000000001": {
|
||||
"balance": "0x1",
|
||||
"builtin": {
|
||||
"name": "ecrecover",
|
||||
"pricing": {
|
||||
"linear": {
|
||||
"base": 3000,
|
||||
"word": 0
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"0000000000000000000000000000000000000002":{
|
||||
"balance":"1",
|
||||
"nonce":"0",
|
||||
"builtin":{
|
||||
"name":"sha256",
|
||||
"pricing":{
|
||||
"linear":{
|
||||
"base":60,
|
||||
"word":12
|
||||
"0000000000000000000000000000000000000002": {
|
||||
"balance": "0x1",
|
||||
"builtin": {
|
||||
"name": "sha256",
|
||||
"pricing": {
|
||||
"linear": {
|
||||
"base": 60,
|
||||
"word": 12
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"0000000000000000000000000000000000000003":{
|
||||
"balance":"1",
|
||||
"nonce":"0",
|
||||
"builtin":{
|
||||
"name":"ripemd160",
|
||||
"pricing":{
|
||||
"linear":{
|
||||
"base":600,
|
||||
"word":120
|
||||
"0000000000000000000000000000000000000003": {
|
||||
"balance": "0x1",
|
||||
"builtin": {
|
||||
"name": "ripemd160",
|
||||
"pricing": {
|
||||
"linear": {
|
||||
"base": 600,
|
||||
"word": 120
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"0000000000000000000000000000000000000004":{
|
||||
"balance":"1",
|
||||
"nonce":"0",
|
||||
"builtin":{
|
||||
"name":"identity",
|
||||
"pricing":{
|
||||
"linear":{
|
||||
"base":15,
|
||||
"word":3
|
||||
"0000000000000000000000000000000000000004": {
|
||||
"balance": "0x1",
|
||||
"builtin": {
|
||||
"name": "identity",
|
||||
"pricing": {
|
||||
"linear": {
|
||||
"base": 15,
|
||||
"word": 3
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"0000000000000000000000000000000000000005":{
|
||||
"balance":"1",
|
||||
"nonce":"0",
|
||||
"builtin":{
|
||||
"name":"modexp",
|
||||
"activate_at":"0x7530",
|
||||
"pricing":{
|
||||
"modexp":{
|
||||
"divisor":20
|
||||
"0000000000000000000000000000000000000005": {
|
||||
"balance": "0x1",
|
||||
"builtin": {
|
||||
"name": "modexp",
|
||||
"pricing": {
|
||||
"modexp": {
|
||||
"divisor": 20
|
||||
}
|
||||
}
|
||||
},
|
||||
"activate_at": "0x7530"
|
||||
}
|
||||
},
|
||||
"0000000000000000000000000000000000000006":{
|
||||
"balance":"1",
|
||||
"nonce":"0",
|
||||
"builtin":{
|
||||
"name":"alt_bn128_add",
|
||||
"activate_at":"0x7530",
|
||||
"pricing":{
|
||||
"linear":{
|
||||
"base":500,
|
||||
"word":0
|
||||
"0000000000000000000000000000000000000006": {
|
||||
"balance": "0x1",
|
||||
"builtin": {
|
||||
"name": "alt_bn128_add",
|
||||
"pricing": {
|
||||
"0x0": {
|
||||
"price": {
|
||||
"alt_bn128_const_operations": {
|
||||
"price": 500
|
||||
}
|
||||
}
|
||||
},
|
||||
"0xc350": {
|
||||
"price": {
|
||||
"alt_bn128_const_operations": {
|
||||
"price": 150
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"activate_at": "0x7530"
|
||||
}
|
||||
},
|
||||
"0000000000000000000000000000000000000007":{
|
||||
"balance":"1",
|
||||
"nonce":"0",
|
||||
"builtin":{
|
||||
"name":"alt_bn128_mul",
|
||||
"activate_at":"0x7530",
|
||||
"pricing":{
|
||||
"linear":{
|
||||
"base":40000,
|
||||
"word":0
|
||||
"0000000000000000000000000000000000000007": {
|
||||
"balance": "0x1",
|
||||
"builtin": {
|
||||
"name": "alt_bn128_mul",
|
||||
"pricing": {
|
||||
"0x0": {
|
||||
"price": {
|
||||
"alt_bn128_const_operations": {
|
||||
"price": 40000
|
||||
}
|
||||
}
|
||||
},
|
||||
"0xc350": {
|
||||
"price": {
|
||||
"alt_bn128_const_operations": {
|
||||
"price": 6000
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"activate_at": "0x7530"
|
||||
}
|
||||
},
|
||||
"0000000000000000000000000000000000000008":{
|
||||
"balance":"1",
|
||||
"nonce":"0",
|
||||
"builtin":{
|
||||
"name":"alt_bn128_pairing",
|
||||
"activate_at":"0x7530",
|
||||
"pricing":{
|
||||
"alt_bn128_pairing":{
|
||||
"base":100000,
|
||||
"pair":80000
|
||||
"0000000000000000000000000000000000000008": {
|
||||
"balance": "0x1",
|
||||
"builtin": {
|
||||
"name": "alt_bn128_pairing",
|
||||
"pricing": {
|
||||
"0x0": {
|
||||
"price": {
|
||||
"alt_bn128_pairing": {
|
||||
"base": 100000,
|
||||
"pair": 80000
|
||||
}
|
||||
}
|
||||
},
|
||||
"0xc350": {
|
||||
"price": {
|
||||
"alt_bn128_pairing": {
|
||||
"base": 45000,
|
||||
"pair": 34000
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"activate_at": "0x7530"
|
||||
}
|
||||
},
|
||||
"0000000000000000000000000000000000000009": {
|
||||
"balance": "0x1",
|
||||
"builtin": {
|
||||
"name": "blake2_f",
|
||||
"pricing": {
|
||||
"blake2_f": {
|
||||
"gas_per_round": 1
|
||||
}
|
||||
},
|
||||
"activate_at": "0xc350"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
@@ -51,6 +51,7 @@ func (w *wizard) makeGenesis() {
|
||||
ByzantiumBlock: big.NewInt(0),
|
||||
ConstantinopleBlock: big.NewInt(0),
|
||||
PetersburgBlock: big.NewInt(0),
|
||||
IstanbulBlock: big.NewInt(0),
|
||||
},
|
||||
}
|
||||
// Figure out which consensus engine to choose
|
||||
@@ -230,6 +231,10 @@ func (w *wizard) manageGenesis() {
|
||||
fmt.Printf("Which block should Petersburg come into effect? (default = %v)\n", w.conf.Genesis.Config.PetersburgBlock)
|
||||
w.conf.Genesis.Config.PetersburgBlock = w.readDefaultBigInt(w.conf.Genesis.Config.PetersburgBlock)
|
||||
|
||||
fmt.Println()
|
||||
fmt.Printf("Which block should Istanbul come into effect? (default = %v)\n", w.conf.Genesis.Config.IstanbulBlock)
|
||||
w.conf.Genesis.Config.IstanbulBlock = w.readDefaultBigInt(w.conf.Genesis.Config.IstanbulBlock)
|
||||
|
||||
out, _ := json.MarshalIndent(w.conf.Genesis.Config, "", " ")
|
||||
fmt.Printf("Chain configuration updated:\n\n%s\n", out)
|
||||
|
||||
@@ -268,7 +273,7 @@ func (w *wizard) manageGenesis() {
|
||||
} else {
|
||||
saveGenesis(folder, w.network, "parity", spec)
|
||||
}
|
||||
// Export the genesis spec used by Harmony (formerly EthereumJ
|
||||
// Export the genesis spec used by Harmony (formerly EthereumJ)
|
||||
saveGenesis(folder, w.network, "harmony", w.conf.Genesis)
|
||||
|
||||
case "3":
|
||||
@@ -291,7 +296,7 @@ func (w *wizard) manageGenesis() {
|
||||
func saveGenesis(folder, network, client string, spec interface{}) {
|
||||
path := filepath.Join(folder, fmt.Sprintf("%s-%s.json", network, client))
|
||||
|
||||
out, _ := json.Marshal(spec)
|
||||
out, _ := json.MarshalIndent(spec, "", " ")
|
||||
if err := ioutil.WriteFile(path, out, 0644); err != nil {
|
||||
log.Error("Failed to save genesis file", "client", client, "err", err)
|
||||
return
|
||||
|
@@ -185,28 +185,6 @@ func GlobalBig(ctx *cli.Context, name string) *big.Int {
|
||||
return (*big.Int)(val.(*bigValue))
|
||||
}
|
||||
|
||||
func prefixFor(name string) (prefix string) {
|
||||
if len(name) == 1 {
|
||||
prefix = "-"
|
||||
} else {
|
||||
prefix = "--"
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func prefixedNames(fullName string) (prefixed string) {
|
||||
parts := strings.Split(fullName, ",")
|
||||
for i, name := range parts {
|
||||
name = strings.Trim(name, " ")
|
||||
prefixed += prefixFor(name) + name
|
||||
if i < len(parts)-1 {
|
||||
prefixed += ", "
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Expands a file path
|
||||
// 1. replace tilde with users home dir
|
||||
// 2. expands embedded environment variables
|
||||
|
@@ -42,7 +42,6 @@ import (
|
||||
"github.com/ethereum/go-ethereum/core"
|
||||
"github.com/ethereum/go-ethereum/core/vm"
|
||||
"github.com/ethereum/go-ethereum/crypto"
|
||||
"github.com/ethereum/go-ethereum/dashboard"
|
||||
"github.com/ethereum/go-ethereum/eth"
|
||||
"github.com/ethereum/go-ethereum/eth/downloader"
|
||||
"github.com/ethereum/go-ethereum/eth/gasprice"
|
||||
@@ -78,6 +77,17 @@ SUBCOMMANDS:
|
||||
{{range $categorized.Flags}}{{"\t"}}{{.}}
|
||||
{{end}}
|
||||
{{end}}{{end}}`
|
||||
|
||||
OriginCommandHelpTemplate = `{{.Name}}{{if .Subcommands}} command{{end}}{{if .Flags}} [command options]{{end}} [arguments...]
|
||||
{{if .Description}}{{.Description}}
|
||||
{{end}}{{if .Subcommands}}
|
||||
SUBCOMMANDS:
|
||||
{{range .Subcommands}}{{.Name}}{{with .ShortName}}, {{.}}{{end}}{{ "\t" }}{{.Usage}}
|
||||
{{end}}{{end}}{{if .Flags}}
|
||||
OPTIONS:
|
||||
{{range $.Flags}}{{"\t"}}{{.}}
|
||||
{{end}}
|
||||
{{end}}`
|
||||
)
|
||||
|
||||
func init() {
|
||||
@@ -227,6 +237,10 @@ var (
|
||||
Name: "override.istanbul",
|
||||
Usage: "Manually specify Istanbul fork-block, overriding the bundled setting",
|
||||
}
|
||||
OverrideMuirGlacierFlag = cli.Uint64Flag{
|
||||
Name: "override.muirglacier",
|
||||
Usage: "Manually specify Muir Glacier fork-block, overriding the bundled setting",
|
||||
}
|
||||
// Light server and client settings
|
||||
LightLegacyServFlag = cli.IntFlag{ // Deprecated in favor of light.serve, remove in 2021
|
||||
Name: "lightserv",
|
||||
@@ -272,26 +286,6 @@ var (
|
||||
Name: "ulc.onlyannounce",
|
||||
Usage: "Ultra light server sends announcements only",
|
||||
}
|
||||
// Dashboard settings
|
||||
DashboardEnabledFlag = cli.BoolFlag{
|
||||
Name: "dashboard",
|
||||
Usage: "Enable the dashboard",
|
||||
}
|
||||
DashboardAddrFlag = cli.StringFlag{
|
||||
Name: "dashboard.addr",
|
||||
Usage: "Dashboard listening interface",
|
||||
Value: dashboard.DefaultConfig.Host,
|
||||
}
|
||||
DashboardPortFlag = cli.IntFlag{
|
||||
Name: "dashboard.host",
|
||||
Usage: "Dashboard listening port",
|
||||
Value: dashboard.DefaultConfig.Port,
|
||||
}
|
||||
DashboardRefreshFlag = cli.DurationFlag{
|
||||
Name: "dashboard.refresh",
|
||||
Usage: "Dashboard metrics collection refresh rate",
|
||||
Value: dashboard.DefaultConfig.Refresh,
|
||||
}
|
||||
// Ethash settings
|
||||
EthashCacheDirFlag = DirectoryFlag{
|
||||
Name: "ethash.cachedir",
|
||||
@@ -664,6 +658,10 @@ var (
|
||||
Name: "netrestrict",
|
||||
Usage: "Restricts network communication to the given IP networks (CIDR masks)",
|
||||
}
|
||||
DNSDiscoveryFlag = cli.StringFlag{
|
||||
Name: "discovery.dns",
|
||||
Usage: "Sets DNS discovery entry points (use \"\" to disable DNS)",
|
||||
}
|
||||
|
||||
// ATM the url is left to the user and deployment to
|
||||
JSpathFlag = cli.StringFlag{
|
||||
@@ -817,9 +815,9 @@ func setBootstrapNodes(ctx *cli.Context, cfg *p2p.Config) {
	switch {
	case ctx.GlobalIsSet(BootnodesFlag.Name) || ctx.GlobalIsSet(BootnodesV4Flag.Name):
		if ctx.GlobalIsSet(BootnodesV4Flag.Name) {
			urls = strings.Split(ctx.GlobalString(BootnodesV4Flag.Name), ",")
			urls = splitAndTrim(ctx.GlobalString(BootnodesV4Flag.Name))
		} else {
			urls = strings.Split(ctx.GlobalString(BootnodesFlag.Name), ",")
			urls = splitAndTrim(ctx.GlobalString(BootnodesFlag.Name))
		}
	case ctx.GlobalBool(TestnetFlag.Name):
		urls = params.TestnetBootnodes
@@ -851,9 +849,9 @@ func setBootstrapNodesV5(ctx *cli.Context, cfg *p2p.Config) {
	switch {
	case ctx.GlobalIsSet(BootnodesFlag.Name) || ctx.GlobalIsSet(BootnodesV5Flag.Name):
		if ctx.GlobalIsSet(BootnodesV5Flag.Name) {
			urls = strings.Split(ctx.GlobalString(BootnodesV5Flag.Name), ",")
			urls = splitAndTrim(ctx.GlobalString(BootnodesV5Flag.Name))
		} else {
			urls = strings.Split(ctx.GlobalString(BootnodesFlag.Name), ",")
			urls = splitAndTrim(ctx.GlobalString(BootnodesFlag.Name))
		}
	case ctx.GlobalBool(RinkebyFlag.Name):
		urls = params.RinkebyBootnodes
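Both hunks above replace strings.Split with a splitAndTrim helper so that whitespace around the commas in --bootnodes values is ignored. The helper itself is not shown in this diff; a plausible sketch (hypothetical, the actual implementation may differ) is:

```go
// splitAndTrim splits a comma-separated flag value and trims surrounding
// whitespace from each entry. Requires the standard "strings" package.
func splitAndTrim(input string) []string {
	result := strings.Split(input, ",")
	for i, r := range result {
		result[i] = strings.TrimSpace(r)
	}
	return result
}
```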
@@ -1453,9 +1451,12 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *eth.Config) {
|
||||
if gcmode := ctx.GlobalString(GCModeFlag.Name); gcmode != "full" && gcmode != "archive" {
|
||||
Fatalf("--%s must be either 'full' or 'archive'", GCModeFlag.Name)
|
||||
}
|
||||
cfg.NoPruning = ctx.GlobalString(GCModeFlag.Name) == "archive"
|
||||
cfg.NoPrefetch = ctx.GlobalBool(CacheNoPrefetchFlag.Name)
|
||||
|
||||
if ctx.GlobalIsSet(GCModeFlag.Name) {
|
||||
cfg.NoPruning = ctx.GlobalString(GCModeFlag.Name) == "archive"
|
||||
}
|
||||
if ctx.GlobalIsSet(CacheNoPrefetchFlag.Name) {
|
||||
cfg.NoPrefetch = ctx.GlobalBool(CacheNoPrefetchFlag.Name)
|
||||
}
|
||||
if ctx.GlobalIsSet(CacheFlag.Name) || ctx.GlobalIsSet(CacheTrieFlag.Name) {
|
||||
cfg.TrieCleanCache = ctx.GlobalInt(CacheFlag.Name) * ctx.GlobalInt(CacheTrieFlag.Name) / 100
|
||||
}
|
||||
@@ -1480,6 +1481,14 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *eth.Config) {
|
||||
if ctx.GlobalIsSet(RPCGlobalGasCap.Name) {
|
||||
cfg.RPCGasCap = new(big.Int).SetUint64(ctx.GlobalUint64(RPCGlobalGasCap.Name))
|
||||
}
|
||||
if ctx.GlobalIsSet(DNSDiscoveryFlag.Name) {
|
||||
urls := ctx.GlobalString(DNSDiscoveryFlag.Name)
|
||||
if urls == "" {
|
||||
cfg.DiscoveryURLs = []string{}
|
||||
} else {
|
||||
cfg.DiscoveryURLs = splitAndTrim(urls)
|
||||
}
|
||||
}
|
||||
|
||||
// Override any default configs for hard coded networks.
|
||||
switch {
|
||||
@@ -1488,16 +1497,19 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *eth.Config) {
|
||||
cfg.NetworkId = 3
|
||||
}
|
||||
cfg.Genesis = core.DefaultTestnetGenesisBlock()
|
||||
setDNSDiscoveryDefaults(cfg, params.KnownDNSNetworks[params.TestnetGenesisHash])
|
||||
case ctx.GlobalBool(RinkebyFlag.Name):
|
||||
if !ctx.GlobalIsSet(NetworkIdFlag.Name) {
|
||||
cfg.NetworkId = 4
|
||||
}
|
||||
cfg.Genesis = core.DefaultRinkebyGenesisBlock()
|
||||
setDNSDiscoveryDefaults(cfg, params.KnownDNSNetworks[params.RinkebyGenesisHash])
|
||||
case ctx.GlobalBool(GoerliFlag.Name):
|
||||
if !ctx.GlobalIsSet(NetworkIdFlag.Name) {
|
||||
cfg.NetworkId = 5
|
||||
}
|
||||
cfg.Genesis = core.DefaultGoerliGenesisBlock()
|
||||
setDNSDiscoveryDefaults(cfg, params.KnownDNSNetworks[params.GoerliGenesisHash])
|
||||
case ctx.GlobalBool(DeveloperFlag.Name):
|
||||
if !ctx.GlobalIsSet(NetworkIdFlag.Name) {
|
||||
cfg.NetworkId = 1337
|
||||
@@ -1524,14 +1536,20 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *eth.Config) {
|
||||
if !ctx.GlobalIsSet(MinerGasPriceFlag.Name) && !ctx.GlobalIsSet(MinerLegacyGasPriceFlag.Name) {
|
||||
cfg.Miner.GasPrice = big.NewInt(1)
|
||||
}
|
||||
default:
|
||||
if cfg.NetworkId == 1 {
|
||||
setDNSDiscoveryDefaults(cfg, params.KnownDNSNetworks[params.MainnetGenesisHash])
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// SetDashboardConfig applies dashboard related command line flags to the config.
func SetDashboardConfig(ctx *cli.Context, cfg *dashboard.Config) {
	cfg.Host = ctx.GlobalString(DashboardAddrFlag.Name)
	cfg.Port = ctx.GlobalInt(DashboardPortFlag.Name)
	cfg.Refresh = ctx.GlobalDuration(DashboardRefreshFlag.Name)
// setDNSDiscoveryDefaults configures DNS discovery with the given URL if
// no URLs are set.
func setDNSDiscoveryDefaults(cfg *eth.Config, url string) {
	if cfg.DiscoveryURLs != nil {
		return
	}
	cfg.DiscoveryURLs = []string{url}
}
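Together with the DNSDiscoveryFlag handling earlier in SetEthConfig, the intent appears to be: an explicit --discovery.dns value always wins (an empty string stores a non-nil empty slice and thereby disables DNS discovery), and setDNSDiscoveryDefaults only fills in a known network's entry point when nothing was configured. A small self-contained sketch of that precedence, using local stand-in types and a placeholder URL:

```go
package main

import "fmt"

// Local stand-ins for illustration; the real type is eth.Config and the real
// helper is unexported in cmd/utils.
type config struct{ DiscoveryURLs []string }

func setDNSDiscoveryDefaults(cfg *config, url string) {
	if cfg.DiscoveryURLs != nil {
		return
	}
	cfg.DiscoveryURLs = []string{url}
}

func main() {
	// --discovery.dns "" stores a non-nil empty slice, so the known-network
	// default below is skipped and DNS discovery stays disabled.
	disabled := &config{DiscoveryURLs: []string{}}
	setDNSDiscoveryDefaults(disabled, "enrtree://PLACEHOLDER@nodes.example.org")
	fmt.Println(len(disabled.DiscoveryURLs)) // 0

	// With no flag at all, DiscoveryURLs is nil and the default is applied.
	unset := &config{}
	setDNSDiscoveryDefaults(unset, "enrtree://PLACEHOLDER@nodes.example.org")
	fmt.Println(unset.DiscoveryURLs) // [enrtree://PLACEHOLDER@nodes.example.org]
}
```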
// RegisterEthService adds an Ethereum client to the stack.
|
||||
@@ -1556,13 +1574,6 @@ func RegisterEthService(stack *node.Node, cfg *eth.Config) {
|
||||
}
|
||||
}
|
||||
|
||||
// RegisterDashboardService adds a dashboard to the stack.
|
||||
func RegisterDashboardService(stack *node.Node, cfg *dashboard.Config, commit string) {
|
||||
stack.Register(func(ctx *node.ServiceContext) (node.Service, error) {
|
||||
return dashboard.New(cfg, commit, ctx.ResolvePath("logs")), nil
|
||||
})
|
||||
}
|
||||
|
||||
// RegisterShhService configures Whisper and adds it to the given node.
|
||||
func RegisterShhService(stack *node.Node, cfg *whisper.Config) {
|
||||
if err := stack.Register(func(n *node.ServiceContext) (node.Service, error) {
|
||||
|
@@ -37,6 +37,7 @@ import (
|
||||
|
||||
"github.com/ethereum/go-ethereum/cmd/utils"
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/ethereum/go-ethereum/console"
|
||||
"github.com/ethereum/go-ethereum/crypto"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
@@ -165,7 +166,7 @@ func echo() {
|
||||
fmt.Printf("pow = %f \n", *argPoW)
|
||||
fmt.Printf("mspow = %f \n", *argServerPoW)
|
||||
fmt.Printf("ip = %s \n", *argIP)
|
||||
fmt.Printf("pub = %s \n", common.ToHex(crypto.FromECDSAPub(pub)))
|
||||
fmt.Printf("pub = %s \n", hexutil.Encode(crypto.FromECDSAPub(pub)))
|
||||
fmt.Printf("idfile = %s \n", *argIDFile)
|
||||
fmt.Printf("dbpath = %s \n", *argDBPath)
|
||||
fmt.Printf("boot = %s \n", *argEnode)
|
||||
@@ -298,7 +299,7 @@ func startServer() error {
|
||||
return err
|
||||
}
|
||||
|
||||
fmt.Printf("my public key: %s \n", common.ToHex(crypto.FromECDSAPub(&asymKey.PublicKey)))
|
||||
fmt.Printf("my public key: %s \n", hexutil.Encode(crypto.FromECDSAPub(&asymKey.PublicKey)))
|
||||
fmt.Println(server.NodeInfo().Enode)
|
||||
|
||||
if *bootstrapMode {
|
||||
|
@@ -19,41 +19,43 @@ package common
|
||||
import (
|
||||
"bytes"
|
||||
"testing"
|
||||
|
||||
checker "gopkg.in/check.v1"
|
||||
)
|
||||
|
||||
type BytesSuite struct{}
|
||||
func TestCopyBytes(t *testing.T) {
|
||||
input := []byte{1, 2, 3, 4}
|
||||
|
||||
var _ = checker.Suite(&BytesSuite{})
|
||||
|
||||
func (s *BytesSuite) TestCopyBytes(c *checker.C) {
|
||||
data1 := []byte{1, 2, 3, 4}
|
||||
exp1 := []byte{1, 2, 3, 4}
|
||||
res1 := CopyBytes(data1)
|
||||
c.Assert(res1, checker.DeepEquals, exp1)
|
||||
v := CopyBytes(input)
|
||||
if !bytes.Equal(v, []byte{1, 2, 3, 4}) {
|
||||
t.Fatal("not equal after copy")
|
||||
}
|
||||
v[0] = 99
|
||||
if bytes.Equal(v, input) {
|
||||
t.Fatal("result is not a copy")
|
||||
}
|
||||
}
|
||||
|
||||
func (s *BytesSuite) TestLeftPadBytes(c *checker.C) {
|
||||
val1 := []byte{1, 2, 3, 4}
|
||||
exp1 := []byte{0, 0, 0, 0, 1, 2, 3, 4}
|
||||
|
||||
res1 := LeftPadBytes(val1, 8)
|
||||
res2 := LeftPadBytes(val1, 2)
|
||||
|
||||
c.Assert(res1, checker.DeepEquals, exp1)
|
||||
c.Assert(res2, checker.DeepEquals, val1)
|
||||
}
|
||||
|
||||
func (s *BytesSuite) TestRightPadBytes(c *checker.C) {
|
||||
func TestLeftPadBytes(t *testing.T) {
|
||||
val := []byte{1, 2, 3, 4}
|
||||
exp := []byte{1, 2, 3, 4, 0, 0, 0, 0}
|
||||
padded := []byte{0, 0, 0, 0, 1, 2, 3, 4}
|
||||
|
||||
resstd := RightPadBytes(val, 8)
|
||||
resshrt := RightPadBytes(val, 2)
|
||||
if r := LeftPadBytes(val, 8); !bytes.Equal(r, padded) {
|
||||
t.Fatalf("LeftPadBytes(%v, 8) == %v", val, r)
|
||||
}
|
||||
if r := LeftPadBytes(val, 2); !bytes.Equal(r, val) {
|
||||
t.Fatalf("LeftPadBytes(%v, 2) == %v", val, r)
|
||||
}
|
||||
}
|
||||
|
||||
c.Assert(resstd, checker.DeepEquals, exp)
|
||||
c.Assert(resshrt, checker.DeepEquals, val)
|
||||
func TestRightPadBytes(t *testing.T) {
|
||||
val := []byte{1, 2, 3, 4}
|
||||
padded := []byte{1, 2, 3, 4, 0, 0, 0, 0}
|
||||
|
||||
if r := RightPadBytes(val, 8); !bytes.Equal(r, padded) {
|
||||
t.Fatalf("RightPadBytes(%v, 8) == %v", val, r)
|
||||
}
|
||||
if r := RightPadBytes(val, 2); !bytes.Equal(r, val) {
|
||||
t.Fatalf("RightPadBytes(%v, 2) == %v", val, r)
|
||||
}
|
||||
}
|
||||
|
||||
func TestFromHex(t *testing.T) {
|
||||
|
@@ -86,7 +86,7 @@ func (b *Bytes) UnmarshalGraphQL(input interface{}) error {
|
||||
}
|
||||
*b = data
|
||||
default:
|
||||
err = fmt.Errorf("Unexpected type for Bytes: %v", input)
|
||||
err = fmt.Errorf("unexpected type %T for Bytes", input)
|
||||
}
|
||||
return err
|
||||
}
|
||||
@@ -220,7 +220,7 @@ func (b *Big) UnmarshalGraphQL(input interface{}) error {
|
||||
num.SetInt64(int64(input))
|
||||
*b = Big(num)
|
||||
default:
|
||||
err = fmt.Errorf("Unexpected type for BigInt: %v", input)
|
||||
err = fmt.Errorf("unexpected type %T for BigInt", input)
|
||||
}
|
||||
return err
|
||||
}
|
||||
@@ -284,7 +284,7 @@ func (b *Uint64) UnmarshalGraphQL(input interface{}) error {
|
||||
case int32:
|
||||
*b = Uint64(input)
|
||||
default:
|
||||
err = fmt.Errorf("Unexpected type for Long: %v", input)
|
||||
err = fmt.Errorf("unexpected type %T for Long", input)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
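The error messages above switch from %v to %T and drop the leading capital, in line with Go error-string conventions; %T reports the dynamic type of the offending value, which is the useful detail when UnmarshalGraphQL receives something unexpected. A quick illustration:

```go
package main

import "fmt"

func main() {
	var input interface{} = 42
	// Old style: prints the value, not its type.
	fmt.Println(fmt.Errorf("Unexpected type for Bytes: %v", input)) // Unexpected type for Bytes: 42
	// New style: prints the dynamic type of the value.
	fmt.Println(fmt.Errorf("unexpected type %T for Bytes", input)) // unexpected type int for Bytes
}
```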
@@ -31,44 +31,93 @@ func Now() AbsTime {
|
||||
return AbsTime(monotime.Now())
|
||||
}
|
||||
|
||||
// Add returns t + d.
|
||||
// Add returns t + d as absolute time.
|
||||
func (t AbsTime) Add(d time.Duration) AbsTime {
|
||||
return t + AbsTime(d)
|
||||
}
|
||||
|
||||
// Sub returns t - t2 as a duration.
|
||||
func (t AbsTime) Sub(t2 AbsTime) time.Duration {
|
||||
return time.Duration(t - t2)
|
||||
}
|
||||
|
||||
// The Clock interface makes it possible to replace the monotonic system clock with
|
||||
// a simulated clock.
|
||||
type Clock interface {
|
||||
Now() AbsTime
|
||||
Sleep(time.Duration)
|
||||
After(time.Duration) <-chan time.Time
|
||||
NewTimer(time.Duration) ChanTimer
|
||||
After(time.Duration) <-chan AbsTime
|
||||
AfterFunc(d time.Duration, f func()) Timer
|
||||
}
|
||||
|
||||
// Timer represents a cancellable event returned by AfterFunc
|
||||
// Timer is a cancellable event created by AfterFunc.
|
||||
type Timer interface {
|
||||
// Stop cancels the timer. It returns false if the timer has already
|
||||
// expired or been stopped.
|
||||
Stop() bool
|
||||
}
|
||||
|
||||
// ChanTimer is a cancellable event created by NewTimer.
|
||||
type ChanTimer interface {
|
||||
Timer
|
||||
|
||||
// The channel returned by C receives a value when the timer expires.
|
||||
C() <-chan AbsTime
|
||||
// Reset reschedules the timer with a new timeout.
|
||||
// It should be invoked only on stopped or expired timers with drained channels.
|
||||
Reset(time.Duration)
|
||||
}
|
||||
|
||||
// System implements Clock using the system clock.
|
||||
type System struct{}
|
||||
|
||||
// Now returns the current monotonic time.
|
||||
func (System) Now() AbsTime {
|
||||
func (c System) Now() AbsTime {
|
||||
return AbsTime(monotime.Now())
|
||||
}
|
||||
|
||||
// Sleep blocks for the given duration.
|
||||
func (System) Sleep(d time.Duration) {
|
||||
func (c System) Sleep(d time.Duration) {
|
||||
time.Sleep(d)
|
||||
}
|
||||
|
||||
// NewTimer creates a timer which can be rescheduled.
|
||||
func (c System) NewTimer(d time.Duration) ChanTimer {
|
||||
ch := make(chan AbsTime, 1)
|
||||
t := time.AfterFunc(d, func() {
|
||||
// This send is non-blocking because that's how time.Timer
|
||||
// behaves. It doesn't matter in the happy case, but does
|
||||
// when Reset is misused.
|
||||
select {
|
||||
case ch <- c.Now():
|
||||
default:
|
||||
}
|
||||
})
|
||||
return &systemTimer{t, ch}
|
||||
}
|
||||
|
||||
// After returns a channel which receives the current time after d has elapsed.
|
||||
func (System) After(d time.Duration) <-chan time.Time {
|
||||
return time.After(d)
|
||||
func (c System) After(d time.Duration) <-chan AbsTime {
|
||||
ch := make(chan AbsTime, 1)
|
||||
time.AfterFunc(d, func() { ch <- c.Now() })
|
||||
return ch
|
||||
}
|
||||
|
||||
// AfterFunc runs f on a new goroutine after the duration has elapsed.
|
||||
func (System) AfterFunc(d time.Duration, f func()) Timer {
|
||||
func (c System) AfterFunc(d time.Duration, f func()) Timer {
|
||||
return time.AfterFunc(d, f)
|
||||
}
|
||||
|
||||
type systemTimer struct {
	*time.Timer
	ch <-chan AbsTime
}

func (st *systemTimer) Reset(d time.Duration) {
	st.Timer.Reset(d)
}

func (st *systemTimer) C() <-chan AbsTime {
	return st.ch
}
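A short usage sketch of the reworked Clock API: NewTimer returns a ChanTimer whose C channel delivers the firing time as an AbsTime, and Reset may be called once the timer has expired and its channel has been drained. The same code works against either mclock.System or a Simulated clock in tests (sketch, assuming the go-ethereum module is available):

```go
package main

import (
	"fmt"
	"time"

	"github.com/ethereum/go-ethereum/common/mclock"
)

func main() {
	var clock mclock.Clock = mclock.System{}

	t := clock.NewTimer(50 * time.Millisecond)
	first := <-t.C() // AbsTime at which the timer fired

	// Reset is only valid on stopped or expired timers whose channel has been
	// drained, which is the case here.
	t.Reset(50 * time.Millisecond)
	second := <-t.C()

	fmt.Println("between firings:", second.Sub(first))
}
```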
@@ -17,6 +17,7 @@
|
||||
package mclock
|
||||
|
||||
import (
|
||||
"container/heap"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
@@ -32,18 +33,24 @@ import (
|
||||
// the timeout using a channel or semaphore.
|
||||
type Simulated struct {
|
||||
now AbsTime
|
||||
scheduled []*simTimer
|
||||
scheduled simTimerHeap
|
||||
mu sync.RWMutex
|
||||
cond *sync.Cond
|
||||
lastId uint64
|
||||
}
|
||||
|
||||
// simTimer implements Timer on the virtual clock.
|
||||
// simTimer implements ChanTimer on the virtual clock.
|
||||
type simTimer struct {
|
||||
do func()
|
||||
at AbsTime
|
||||
id uint64
|
||||
s *Simulated
|
||||
at AbsTime
|
||||
index int // position in s.scheduled
|
||||
s *Simulated
|
||||
do func()
|
||||
ch <-chan AbsTime
|
||||
}
|
||||
|
||||
func (s *Simulated) init() {
|
||||
if s.cond == nil {
|
||||
s.cond = sync.NewCond(&s.mu)
|
||||
}
|
||||
}
|
||||
|
||||
// Run moves the clock by the given duration, executing all timers before that duration.
|
||||
@@ -53,14 +60,9 @@ func (s *Simulated) Run(d time.Duration) {
|
||||
|
||||
end := s.now + AbsTime(d)
|
||||
var do []func()
|
||||
for len(s.scheduled) > 0 {
|
||||
ev := s.scheduled[0]
|
||||
if ev.at > end {
|
||||
break
|
||||
}
|
||||
s.now = ev.at
|
||||
for len(s.scheduled) > 0 && s.scheduled[0].at <= end {
|
||||
ev := heap.Pop(&s.scheduled).(*simTimer)
|
||||
do = append(do, ev.do)
|
||||
s.scheduled = s.scheduled[1:]
|
||||
}
|
||||
s.now = end
|
||||
s.mu.Unlock()
|
||||
@@ -102,14 +104,22 @@ func (s *Simulated) Sleep(d time.Duration) {
|
||||
<-s.After(d)
|
||||
}
|
||||
|
||||
// NewTimer creates a timer which fires when the clock has advanced by d.
|
||||
func (s *Simulated) NewTimer(d time.Duration) ChanTimer {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
|
||||
ch := make(chan AbsTime, 1)
|
||||
var timer *simTimer
|
||||
timer = s.schedule(d, func() { ch <- timer.at })
|
||||
timer.ch = ch
|
||||
return timer
|
||||
}
|
||||
|
||||
// After returns a channel which receives the current time after the clock
|
||||
// has advanced by d.
|
||||
func (s *Simulated) After(d time.Duration) <-chan time.Time {
|
||||
after := make(chan time.Time, 1)
|
||||
s.AfterFunc(d, func() {
|
||||
after <- (time.Time{}).Add(time.Duration(s.now))
|
||||
})
|
||||
return after
|
||||
func (s *Simulated) After(d time.Duration) <-chan AbsTime {
|
||||
return s.NewTimer(d).C()
|
||||
}
|
||||
|
||||
// AfterFunc runs fn after the clock has advanced by d. Unlike with the system
|
||||
@@ -117,46 +127,83 @@ func (s *Simulated) After(d time.Duration) <-chan time.Time {
|
||||
func (s *Simulated) AfterFunc(d time.Duration, fn func()) Timer {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
|
||||
return s.schedule(d, fn)
|
||||
}
|
||||
|
||||
func (s *Simulated) schedule(d time.Duration, fn func()) *simTimer {
|
||||
s.init()
|
||||
|
||||
at := s.now + AbsTime(d)
|
||||
s.lastId++
|
||||
id := s.lastId
|
||||
l, h := 0, len(s.scheduled)
|
||||
ll := h
|
||||
for l != h {
|
||||
m := (l + h) / 2
|
||||
if (at < s.scheduled[m].at) || ((at == s.scheduled[m].at) && (id < s.scheduled[m].id)) {
|
||||
h = m
|
||||
} else {
|
||||
l = m + 1
|
||||
}
|
||||
}
|
||||
ev := &simTimer{do: fn, at: at, s: s}
|
||||
s.scheduled = append(s.scheduled, nil)
|
||||
copy(s.scheduled[l+1:], s.scheduled[l:ll])
|
||||
s.scheduled[l] = ev
|
||||
heap.Push(&s.scheduled, ev)
|
||||
s.cond.Broadcast()
|
||||
return ev
|
||||
}
|
||||
|
||||
func (ev *simTimer) Stop() bool {
|
||||
s := ev.s
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
ev.s.mu.Lock()
|
||||
defer ev.s.mu.Unlock()
|
||||
|
||||
for i := 0; i < len(s.scheduled); i++ {
|
||||
if s.scheduled[i] == ev {
|
||||
s.scheduled = append(s.scheduled[:i], s.scheduled[i+1:]...)
|
||||
s.cond.Broadcast()
|
||||
return true
|
||||
}
|
||||
if ev.index < 0 {
|
||||
return false
|
||||
}
|
||||
return false
|
||||
heap.Remove(&ev.s.scheduled, ev.index)
|
||||
ev.s.cond.Broadcast()
|
||||
ev.index = -1
|
||||
return true
|
||||
}
|
||||
|
||||
func (s *Simulated) init() {
|
||||
if s.cond == nil {
|
||||
s.cond = sync.NewCond(&s.mu)
|
||||
func (ev *simTimer) Reset(d time.Duration) {
|
||||
if ev.ch == nil {
|
||||
panic("mclock: Reset() on timer created by AfterFunc")
|
||||
}
|
||||
|
||||
ev.s.mu.Lock()
|
||||
defer ev.s.mu.Unlock()
|
||||
ev.at = ev.s.now.Add(d)
|
||||
if ev.index < 0 {
|
||||
heap.Push(&ev.s.scheduled, ev) // already expired
|
||||
} else {
|
||||
heap.Fix(&ev.s.scheduled, ev.index) // hasn't fired yet, reschedule
|
||||
}
|
||||
ev.s.cond.Broadcast()
|
||||
}
|
||||
|
||||
func (ev *simTimer) C() <-chan AbsTime {
|
||||
if ev.ch == nil {
|
||||
panic("mclock: C() on timer created by AfterFunc")
|
||||
}
|
||||
return ev.ch
|
||||
}
|
||||
|
||||
type simTimerHeap []*simTimer

func (h *simTimerHeap) Len() int {
	return len(*h)
}

func (h *simTimerHeap) Less(i, j int) bool {
	return (*h)[i].at < (*h)[j].at
}

func (h *simTimerHeap) Swap(i, j int) {
	(*h)[i], (*h)[j] = (*h)[j], (*h)[i]
	(*h)[i].index = i
	(*h)[j].index = j
}

func (h *simTimerHeap) Push(x interface{}) {
	t := x.(*simTimer)
	t.index = len(*h)
	*h = append(*h, t)
}

func (h *simTimerHeap) Pop() interface{} {
	end := len(*h) - 1
	t := (*h)[end]
	t.index = -1
	(*h)[end] = nil
	*h = (*h)[:end]
	return t
}
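simTimerHeap keeps each timer's index field up to date in Swap, Push and Pop, which is what lets Stop and Reset cancel or reschedule an arbitrary timer via heap.Remove and heap.Fix in O(log n), instead of the linear scan the old slice-based code needed. A standalone sketch of the same pattern (names are illustrative, independent of mclock):

```go
package main

import (
	"container/heap"
	"fmt"
)

type item struct {
	at    int
	index int // kept in sync by the heap methods below
}

type itemHeap []*item

func (h itemHeap) Len() int            { return len(h) }
func (h itemHeap) Less(i, j int) bool  { return h[i].at < h[j].at }
func (h itemHeap) Swap(i, j int)       { h[i], h[j] = h[j], h[i]; h[i].index = i; h[j].index = j }
func (h *itemHeap) Push(x interface{}) { it := x.(*item); it.index = len(*h); *h = append(*h, it) }
func (h *itemHeap) Pop() interface{} {
	old := *h
	n := len(old) - 1
	it := old[n]
	it.index = -1
	*h = old[:n]
	return it
}

func main() {
	var h itemHeap
	a, b, c := &item{at: 3}, &item{at: 1}, &item{at: 2}
	heap.Push(&h, a)
	heap.Push(&h, b)
	heap.Push(&h, c)

	// Cancel an arbitrary element by its tracked index, as Stop/Reset do.
	heap.Remove(&h, c.index)
	fmt.Println(heap.Pop(&h).(*item).at, heap.Pop(&h).(*item).at) // 1 3
}
```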
@@ -25,14 +25,16 @@ var _ Clock = System{}
|
||||
var _ Clock = new(Simulated)
|
||||
|
||||
func TestSimulatedAfter(t *testing.T) {
|
||||
const timeout = 30 * time.Minute
|
||||
const adv = time.Minute
|
||||
|
||||
var (
|
||||
c Simulated
|
||||
end = c.Now().Add(timeout)
|
||||
ch = c.After(timeout)
|
||||
timeout = 30 * time.Minute
|
||||
offset = 99 * time.Hour
|
||||
adv = 11 * time.Minute
|
||||
c Simulated
|
||||
)
|
||||
c.Run(offset)
|
||||
|
||||
end := c.Now().Add(timeout)
|
||||
ch := c.After(timeout)
|
||||
for c.Now() < end.Add(-adv) {
|
||||
c.Run(adv)
|
||||
select {
|
||||
@@ -45,8 +47,8 @@ func TestSimulatedAfter(t *testing.T) {
|
||||
c.Run(adv)
|
||||
select {
|
||||
case stamp := <-ch:
|
||||
want := time.Time{}.Add(timeout)
|
||||
if !stamp.Equal(want) {
|
||||
want := AbsTime(0).Add(offset).Add(timeout)
|
||||
if stamp != want {
|
||||
t.Errorf("Wrong time sent on timer channel: got %v, want %v", stamp, want)
|
||||
}
|
||||
default:
|
||||
@@ -94,7 +96,7 @@ func TestSimulatedSleep(t *testing.T) {
|
||||
var (
|
||||
c Simulated
|
||||
timeout = 1 * time.Hour
|
||||
done = make(chan AbsTime)
|
||||
done = make(chan AbsTime, 1)
|
||||
)
|
||||
go func() {
|
||||
c.Sleep(timeout)
|
||||
@@ -113,3 +115,48 @@ func TestSimulatedSleep(t *testing.T) {
|
||||
t.Fatal("Sleep didn't return in time")
|
||||
}
|
||||
}
|
||||
|
||||
func TestSimulatedTimerReset(t *testing.T) {
|
||||
var (
|
||||
c Simulated
|
||||
timeout = 1 * time.Hour
|
||||
)
|
||||
timer := c.NewTimer(timeout)
|
||||
c.Run(2 * timeout)
|
||||
select {
|
||||
case ftime := <-timer.C():
|
||||
if ftime != AbsTime(timeout) {
|
||||
t.Fatalf("wrong time %v sent on timer channel, want %v", ftime, AbsTime(timeout))
|
||||
}
|
||||
default:
|
||||
t.Fatal("timer didn't fire")
|
||||
}
|
||||
|
||||
timer.Reset(timeout)
|
||||
c.Run(2 * timeout)
|
||||
select {
|
||||
case ftime := <-timer.C():
|
||||
if ftime != AbsTime(3*timeout) {
|
||||
t.Fatalf("wrong time %v sent on timer channel, want %v", ftime, AbsTime(3*timeout))
|
||||
}
|
||||
default:
|
||||
t.Fatal("timer didn't fire again")
|
||||
}
|
||||
}
|
||||
|
||||
func TestSimulatedTimerStop(t *testing.T) {
|
||||
var (
|
||||
c Simulated
|
||||
timeout = 1 * time.Hour
|
||||
)
|
||||
timer := c.NewTimer(timeout)
|
||||
c.Run(2 * timeout)
|
||||
if timer.Stop() {
|
||||
t.Errorf("Stop returned true for fired timer")
|
||||
}
|
||||
select {
|
||||
case <-timer.C():
|
||||
default:
|
||||
t.Fatal("timer didn't fire")
|
||||
}
|
||||
}
|
||||
|
@@ -20,6 +20,7 @@ import (
|
||||
"database/sql/driver"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"math/big"
|
||||
"math/rand"
|
||||
@@ -142,7 +143,7 @@ func (h Hash) Value() (driver.Value, error) {
|
||||
}
|
||||
|
||||
// ImplementsGraphQLType returns true if Hash implements the specified GraphQL type.
|
||||
func (_ Hash) ImplementsGraphQLType(name string) bool { return name == "Bytes32" }
|
||||
func (Hash) ImplementsGraphQLType(name string) bool { return name == "Bytes32" }
|
||||
|
||||
// UnmarshalGraphQL unmarshals the provided GraphQL query data.
|
||||
func (h *Hash) UnmarshalGraphQL(input interface{}) error {
|
||||
@@ -151,7 +152,7 @@ func (h *Hash) UnmarshalGraphQL(input interface{}) error {
|
||||
case string:
|
||||
err = h.UnmarshalText([]byte(input))
|
||||
default:
|
||||
err = fmt.Errorf("Unexpected type for Bytes32: %v", input)
|
||||
err = fmt.Errorf("unexpected type %T for Hash", input)
|
||||
}
|
||||
return err
|
||||
}
|
||||
@@ -290,7 +291,7 @@ func (a *Address) UnmarshalGraphQL(input interface{}) error {
|
||||
case string:
|
||||
err = a.UnmarshalText([]byte(input))
|
||||
default:
|
||||
err = fmt.Errorf("Unexpected type for Address: %v", input)
|
||||
err = fmt.Errorf("unexpected type %T for Address", input)
|
||||
}
|
||||
return err
|
||||
}
|
||||
@@ -323,7 +324,7 @@ func NewMixedcaseAddress(addr Address) MixedcaseAddress {
|
||||
// NewMixedcaseAddressFromString is mainly meant for unit-testing
|
||||
func NewMixedcaseAddressFromString(hexaddr string) (*MixedcaseAddress, error) {
|
||||
if !IsHexAddress(hexaddr) {
|
||||
return nil, fmt.Errorf("Invalid address")
|
||||
return nil, errors.New("invalid address")
|
||||
}
|
||||
a := FromHex(hexaddr)
|
||||
return &MixedcaseAddress{addr: BytesToAddress(a), original: hexaddr}, nil
|
||||
|
@@ -17,6 +17,8 @@
|
||||
package clique
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/consensus"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
@@ -117,3 +119,59 @@ func (api *API) Discard(address common.Address) {
|
||||
|
||||
delete(api.clique.proposals, address)
|
||||
}
|
||||
|
||||
type status struct {
|
||||
InturnPercent float64 `json:"inturnPercent"`
|
||||
SigningStatus map[common.Address]int `json:"sealerActivity"`
|
||||
NumBlocks uint64 `json:"numBlocks"`
|
||||
}
|
||||
|
||||
// Status returns the status of the last N blocks,
|
||||
// - the number of active signers,
|
||||
// - the number of signers,
|
||||
// - the percentage of in-turn blocks
|
||||
func (api *API) Status() (*status, error) {
|
||||
var (
|
||||
numBlocks = uint64(64)
|
||||
header = api.chain.CurrentHeader()
|
||||
diff = uint64(0)
|
||||
optimals = 0
|
||||
)
|
||||
snap, err := api.clique.snapshot(api.chain, header.Number.Uint64(), header.Hash(), nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var (
|
||||
signers = snap.signers()
|
||||
end = header.Number.Uint64()
|
||||
start = end - numBlocks
|
||||
)
|
||||
if numBlocks > end {
|
||||
start = 1
|
||||
numBlocks = end - start
|
||||
}
|
||||
signStatus := make(map[common.Address]int)
|
||||
for _, s := range signers {
|
||||
signStatus[s] = 0
|
||||
}
|
||||
for n := start; n < end; n++ {
|
||||
h := api.chain.GetHeaderByNumber(n)
|
||||
if h == nil {
|
||||
return nil, fmt.Errorf("missing block %d", n)
|
||||
}
|
||||
if h.Difficulty.Cmp(diffInTurn) == 0 {
|
||||
optimals++
|
||||
}
|
||||
diff += h.Difficulty.Uint64()
|
||||
sealer, err := api.clique.Author(h)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
signStatus[sealer]++
|
||||
}
|
||||
return &status{
|
||||
InturnPercent: float64((100 * optimals)) / float64(numBlocks),
|
||||
SigningStatus: signStatus,
|
||||
NumBlocks: numBlocks,
|
||||
}, nil
|
||||
}
|
||||
|
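For the numbers reported by Status: InturnPercent is 100*optimals/numBlocks over the inspected window (at most the last 64 blocks), and SigningStatus counts how many of those blocks each signer sealed. Illustrative arithmetic only:

```go
package main

import "fmt"

func main() {
	// Illustrative numbers only: 48 of the 64 inspected blocks were sealed in-turn.
	optimals, numBlocks := 48, uint64(64)
	fmt.Println(float64(100*optimals) / float64(numBlocks)) // 75
}
```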
@@ -311,7 +311,7 @@ func (c *Clique) verifyCascadingFields(chain consensus.ChainReader, header *type
|
||||
if number == 0 {
|
||||
return nil
|
||||
}
|
||||
// Ensure that the block's timestamp isn't too close to it's parent
|
||||
// Ensure that the block's timestamp isn't too close to its parent
|
||||
var parent *types.Header
|
||||
if len(parents) > 0 {
|
||||
parent = parents[len(parents)-1]
|
||||
@@ -522,7 +522,7 @@ func (c *Clique) Prepare(chain consensus.ChainReader, header *types.Header) erro
|
||||
// Set the correct difficulty
|
||||
header.Difficulty = CalcDifficulty(snap, c.signer)
|
||||
|
||||
// Ensure the extra data has all it's components
|
||||
// Ensure the extra data has all its components
|
||||
if len(header.Extra) < extraVanity {
|
||||
header.Extra = append(header.Extra, bytes.Repeat([]byte{0x00}, extraVanity-len(header.Extra))...)
|
||||
}
|
||||
|
@@ -31,7 +31,7 @@ var (
|
||||
// to the current node.
|
||||
ErrFutureBlock = errors.New("block in the future")
|
||||
|
||||
// ErrInvalidNumber is returned if a block's number doesn't equal it's parent's
|
||||
// ErrInvalidNumber is returned if a block's number doesn't equal its parent's
|
||||
// plus one.
|
||||
ErrInvalidNumber = errors.New("invalid block number")
|
||||
)
|
||||
|
@@ -729,7 +729,7 @@ func TestConcurrentDiskCacheGeneration(t *testing.T) {
|
||||
|
||||
go func(idx int) {
|
||||
defer pend.Done()
|
||||
ethash := New(Config{cachedir, 0, 1, "", 0, 0, ModeNormal}, nil, false)
|
||||
ethash := New(Config{cachedir, 0, 1, "", 0, 0, ModeNormal, nil}, nil, false)
|
||||
defer ethash.Close()
|
||||
if err := ethash.VerifySeal(nil, block.Header()); err != nil {
|
||||
t.Errorf("proc %d: block verification failed: %v", idx, err)
|
||||
|
@@ -28,7 +28,7 @@ var errEthashStopped = errors.New("ethash stopped")
|
||||
|
||||
// API exposes ethash related methods for the RPC interface.
|
||||
type API struct {
|
||||
ethash *Ethash // Make sure the mode of ethash is normal.
|
||||
ethash *Ethash
|
||||
}
|
||||
|
||||
// GetWork returns a work package for external miner.
|
||||
@@ -39,7 +39,7 @@ type API struct {
|
||||
// result[2] - 32 bytes hex encoded boundary condition ("target"), 2^256/difficulty
|
||||
// result[3] - hex encoded block number
|
||||
func (api *API) GetWork() ([4]string, error) {
|
||||
if api.ethash.config.PowMode != ModeNormal && api.ethash.config.PowMode != ModeTest {
|
||||
if api.ethash.remote == nil {
|
||||
return [4]string{}, errors.New("not supported")
|
||||
}
|
||||
|
||||
@@ -47,13 +47,11 @@ func (api *API) GetWork() ([4]string, error) {
|
||||
workCh = make(chan [4]string, 1)
|
||||
errc = make(chan error, 1)
|
||||
)
|
||||
|
||||
select {
|
||||
case api.ethash.fetchWorkCh <- &sealWork{errc: errc, res: workCh}:
|
||||
case <-api.ethash.exitCh:
|
||||
case api.ethash.remote.fetchWorkCh <- &sealWork{errc: errc, res: workCh}:
|
||||
case <-api.ethash.remote.exitCh:
|
||||
return [4]string{}, errEthashStopped
|
||||
}
|
||||
|
||||
select {
|
||||
case work := <-workCh:
|
||||
return work, nil
|
||||
@@ -66,23 +64,21 @@ func (api *API) GetWork() ([4]string, error) {
|
||||
// It returns an indication if the work was accepted.
|
||||
// Note either an invalid solution, a stale work a non-existent work will return false.
|
||||
func (api *API) SubmitWork(nonce types.BlockNonce, hash, digest common.Hash) bool {
|
||||
if api.ethash.config.PowMode != ModeNormal && api.ethash.config.PowMode != ModeTest {
|
||||
if api.ethash.remote == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
var errc = make(chan error, 1)
|
||||
|
||||
select {
|
||||
case api.ethash.submitWorkCh <- &mineResult{
|
||||
case api.ethash.remote.submitWorkCh <- &mineResult{
|
||||
nonce: nonce,
|
||||
mixDigest: digest,
|
||||
hash: hash,
|
||||
errc: errc,
|
||||
}:
|
||||
case <-api.ethash.exitCh:
|
||||
case <-api.ethash.remote.exitCh:
|
||||
return false
|
||||
}
|
||||
|
||||
err := <-errc
|
||||
return err == nil
|
||||
}
|
||||
@@ -94,21 +90,19 @@ func (api *API) SubmitWork(nonce types.BlockNonce, hash, digest common.Hash) boo
|
||||
// It accepts the miner hash rate and an identifier which must be unique
|
||||
// between nodes.
|
||||
func (api *API) SubmitHashRate(rate hexutil.Uint64, id common.Hash) bool {
|
||||
if api.ethash.config.PowMode != ModeNormal && api.ethash.config.PowMode != ModeTest {
|
||||
if api.ethash.remote == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
var done = make(chan struct{}, 1)
|
||||
|
||||
select {
|
||||
case api.ethash.submitRateCh <- &hashrate{done: done, rate: uint64(rate), id: id}:
|
||||
case <-api.ethash.exitCh:
|
||||
case api.ethash.remote.submitRateCh <- &hashrate{done: done, rate: uint64(rate), id: id}:
|
||||
case <-api.ethash.remote.exitCh:
|
||||
return false
|
||||
}
|
||||
|
||||
// Block until hash rate submitted successfully.
|
||||
<-done
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
|
@@ -44,6 +44,11 @@ var (
|
||||
maxUncles = 2 // Maximum number of uncles allowed in a single block
|
||||
allowedFutureBlockTime = 15 * time.Second // Max time from current time allowed for blocks, before they're considered future blocks
|
||||
|
||||
// calcDifficultyEip2384 is the difficulty adjustment algorithm as specified by EIP 2384.
|
||||
// It offsets the bomb 4M blocks from Constantinople, so in total 9M blocks.
|
||||
// Specification EIP-2384: https://eips.ethereum.org/EIPS/eip-2384
|
||||
calcDifficultyEip2384 = makeDifficultyCalculator(big.NewInt(9000000))
|
||||
|
||||
// calcDifficultyConstantinople is the difficulty adjustment algorithm for Constantinople.
|
||||
// It returns the difficulty that a new block should have when created at time given the
|
||||
// parent block's time and difficulty. The calculation uses the Byzantium rules, but with
|
||||
@@ -63,7 +68,7 @@ var (
|
||||
// codebase, inherently breaking if the engine is swapped out. Please put common
|
||||
// error types into the consensus package.
|
||||
var (
|
||||
errZeroBlockTime = errors.New("timestamp equals parent's")
|
||||
errOlderBlockTime = errors.New("timestamp older than parent")
|
||||
errTooManyUncles = errors.New("too many uncles")
|
||||
errDuplicateUncle = errors.New("duplicate uncle")
|
||||
errUncleIsAncestor = errors.New("uncle is ancestor")
|
||||
@@ -86,7 +91,7 @@ func (ethash *Ethash) VerifyHeader(chain consensus.ChainReader, header *types.He
|
||||
if ethash.config.PowMode == ModeFullFake {
|
||||
return nil
|
||||
}
|
||||
// Short circuit if the header is known, or it's parent not
|
||||
// Short circuit if the header is known, or its parent not
|
||||
number := header.Number.Uint64()
|
||||
if chain.GetHeader(header.Hash(), number) != nil {
|
||||
return nil
|
||||
@@ -250,9 +255,9 @@ func (ethash *Ethash) verifyHeader(chain consensus.ChainReader, header, parent *
|
||||
}
|
||||
}
|
||||
if header.Time <= parent.Time {
|
||||
return errZeroBlockTime
|
||||
return errOlderBlockTime
|
||||
}
|
||||
// Verify the block's difficulty based in it's timestamp and parent's difficulty
|
||||
// Verify the block's difficulty based on its timestamp and parent's difficulty
|
||||
expected := ethash.CalcDifficulty(chain, header.Time, parent)
|
||||
|
||||
if expected.Cmp(header.Difficulty) != 0 {
|
||||
@@ -311,6 +316,8 @@ func (ethash *Ethash) CalcDifficulty(chain consensus.ChainReader, time uint64, p
|
||||
func CalcDifficulty(config *params.ChainConfig, time uint64, parent *types.Header) *big.Int {
|
||||
next := new(big.Int).Add(parent.Number, big1)
|
||||
switch {
|
||||
case config.IsMuirGlacier(next):
|
||||
return calcDifficultyEip2384(time, parent)
|
||||
case config.IsConstantinople(next):
|
||||
return calcDifficultyConstantinople(time, parent)
|
||||
case config.IsByzantium(next):
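The EIP-2384 comment above only states the offset; the sketch below illustrates, under assumed constants (a 100,000-block ice-age period and a 2^(period-2) growth term), how such a bomb delay feeds into the difficulty. It is a simplified stand-in, not the codebase's makeDifficultyCalculator.

```go
// Illustrative only: shows how a bomb delay such as the 9,000,000-block
// offset above pushes the exponential "ice age" term into the future.
package main

import (
	"fmt"
	"math/big"
)

func bombComponent(parentNumber, bombDelay *big.Int) *big.Int {
	expDiffPeriod := big.NewInt(100000) // assumed ice-age period length

	// Pretend the chain is bombDelay blocks younger than it really is.
	fake := new(big.Int)
	if parentNumber.Cmp(bombDelay) >= 0 {
		fake.Sub(parentNumber, bombDelay)
	}
	period := new(big.Int).Div(fake, expDiffPeriod)
	if period.Cmp(big.NewInt(1)) <= 0 {
		return big.NewInt(0) // bomb not active yet
	}
	// Extra difficulty of 2^(period-2) once the bomb is active.
	return new(big.Int).Exp(big.NewInt(2), new(big.Int).Sub(period, big.NewInt(2)), nil)
}

func main() {
	parent := big.NewInt(9_200_000)
	fmt.Println(bombComponent(parent, big.NewInt(9_000_000))) // tiny: bomb just defused
	fmt.Println(bombComponent(parent, big.NewInt(5_000_000))) // enormous without the extra delay
}
```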
|
||||
|
@@ -34,9 +34,7 @@ import (
|
||||
"unsafe"
|
||||
|
||||
mmap "github.com/edsrzf/mmap-go"
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/consensus"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"github.com/ethereum/go-ethereum/metrics"
|
||||
"github.com/ethereum/go-ethereum/rpc"
|
||||
@@ -50,7 +48,7 @@ var (
|
||||
two256 = new(big.Int).Exp(big.NewInt(2), big.NewInt(256), big.NewInt(0))
|
||||
|
||||
// sharedEthash is a full instance that can be shared between multiple users.
|
||||
sharedEthash = New(Config{"", 3, 0, "", 1, 0, ModeNormal}, nil, false)
|
||||
sharedEthash = New(Config{"", 3, 0, "", 1, 0, ModeNormal, nil}, nil, false)
|
||||
|
||||
// algorithmRevision is the data structure version used for file naming.
|
||||
algorithmRevision = 23
|
||||
@@ -403,36 +401,8 @@ type Config struct {
|
||||
DatasetsInMem int
|
||||
DatasetsOnDisk int
|
||||
PowMode Mode
|
||||
}
|
||||
|
||||
// sealTask wraps a seal block with relative result channel for remote sealer thread.
|
||||
type sealTask struct {
|
||||
block *types.Block
|
||||
results chan<- *types.Block
|
||||
}
|
||||
|
||||
// mineResult wraps the pow solution parameters for the specified block.
|
||||
type mineResult struct {
|
||||
nonce types.BlockNonce
|
||||
mixDigest common.Hash
|
||||
hash common.Hash
|
||||
|
||||
errc chan error
|
||||
}
|
||||
|
||||
// hashrate wraps the hash rate submitted by the remote sealer.
|
||||
type hashrate struct {
|
||||
id common.Hash
|
||||
ping time.Time
|
||||
rate uint64
|
||||
|
||||
done chan struct{}
|
||||
}
|
||||
|
||||
// sealWork wraps a seal work package for remote sealer.
|
||||
type sealWork struct {
|
||||
errc chan error
|
||||
res chan [4]string
|
||||
Log log.Logger `toml:"-"`
|
||||
}
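The new Log field makes the engine's logger injectable. A minimal sketch of constructing an engine with it, using the field names visible in this diff; directory paths and cache counts are placeholders.

```go
package main

import (
	"github.com/ethereum/go-ethereum/consensus/ethash"
	"github.com/ethereum/go-ethereum/log"
)

func newEngine() *ethash.Ethash {
	cfg := ethash.Config{
		CacheDir:       "ethash", // placeholder directories and counts
		CachesInMem:    2,
		CachesOnDisk:   3,
		DatasetDir:     ".ethash",
		DatasetsInMem:  1,
		DatasetsOnDisk: 2,
		PowMode:        ethash.ModeNormal,
		Log:            log.Root(), // explicit logger; nil also works, see below
	}
	// No remote notification URLs, seal verification enabled.
	return ethash.New(cfg, nil, false)
}

func main() {
	engine := newEngine()
	defer engine.Close()
}
```

Leaving Log nil is also fine: as the New() hunk below shows, it falls back to log.Root().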
|
||||
|
||||
// Ethash is a consensus engine based on proof-of-work implementing the ethash
|
||||
@@ -448,52 +418,42 @@ type Ethash struct {
|
||||
threads int // Number of threads to mine on if mining
|
||||
update chan struct{} // Notification channel to update mining parameters
|
||||
hashrate metrics.Meter // Meter tracking the average hashrate
|
||||
|
||||
// Remote sealer related fields
|
||||
workCh chan *sealTask // Notification channel to push new work and relative result channel to remote sealer
|
||||
fetchWorkCh chan *sealWork // Channel used for remote sealer to fetch mining work
|
||||
submitWorkCh chan *mineResult // Channel used for remote sealer to submit their mining result
|
||||
fetchRateCh chan chan uint64 // Channel used to gather submitted hash rate for local or remote sealer.
|
||||
submitRateCh chan *hashrate // Channel used for remote sealer to submit their mining hashrate
|
||||
remote *remoteSealer
|
||||
|
||||
// The fields below are hooks for testing
|
||||
shared *Ethash // Shared PoW verifier to avoid cache regeneration
|
||||
fakeFail uint64 // Block number which fails PoW check even in fake mode
|
||||
fakeDelay time.Duration // Time delay to sleep for before returning from verify
|
||||
|
||||
lock sync.Mutex // Ensures thread safety for the in-memory caches and mining fields
|
||||
closeOnce sync.Once // Ensures exit channel will not be closed twice.
|
||||
exitCh chan chan error // Notification channel to exiting backend threads
|
||||
lock sync.Mutex // Ensures thread safety for the in-memory caches and mining fields
|
||||
closeOnce sync.Once // Ensures exit channel will not be closed twice.
|
||||
}
|
||||
|
||||
// New creates a full sized ethash PoW scheme and starts a background thread for
|
||||
// remote mining, also optionally notifying a batch of remote services of new work
|
||||
// packages.
|
||||
func New(config Config, notify []string, noverify bool) *Ethash {
|
||||
if config.Log == nil {
|
||||
config.Log = log.Root()
|
||||
}
|
||||
if config.CachesInMem <= 0 {
|
||||
log.Warn("One ethash cache must always be in memory", "requested", config.CachesInMem)
|
||||
config.Log.Warn("One ethash cache must always be in memory", "requested", config.CachesInMem)
|
||||
config.CachesInMem = 1
|
||||
}
|
||||
if config.CacheDir != "" && config.CachesOnDisk > 0 {
|
||||
log.Info("Disk storage enabled for ethash caches", "dir", config.CacheDir, "count", config.CachesOnDisk)
|
||||
config.Log.Info("Disk storage enabled for ethash caches", "dir", config.CacheDir, "count", config.CachesOnDisk)
|
||||
}
|
||||
if config.DatasetDir != "" && config.DatasetsOnDisk > 0 {
|
||||
log.Info("Disk storage enabled for ethash DAGs", "dir", config.DatasetDir, "count", config.DatasetsOnDisk)
|
||||
config.Log.Info("Disk storage enabled for ethash DAGs", "dir", config.DatasetDir, "count", config.DatasetsOnDisk)
|
||||
}
|
||||
ethash := &Ethash{
|
||||
config: config,
|
||||
caches: newlru("cache", config.CachesInMem, newCache),
|
||||
datasets: newlru("dataset", config.DatasetsInMem, newDataset),
|
||||
update: make(chan struct{}),
|
||||
hashrate: metrics.NewMeterForced(),
|
||||
workCh: make(chan *sealTask),
|
||||
fetchWorkCh: make(chan *sealWork),
|
||||
submitWorkCh: make(chan *mineResult),
|
||||
fetchRateCh: make(chan chan uint64),
|
||||
submitRateCh: make(chan *hashrate),
|
||||
exitCh: make(chan chan error),
|
||||
config: config,
|
||||
caches: newlru("cache", config.CachesInMem, newCache),
|
||||
datasets: newlru("dataset", config.DatasetsInMem, newDataset),
|
||||
update: make(chan struct{}),
|
||||
hashrate: metrics.NewMeterForced(),
|
||||
}
|
||||
go ethash.remote(notify, noverify)
|
||||
ethash.remote = startRemoteSealer(ethash, notify, noverify)
|
||||
return ethash
|
||||
}
|
||||
|
||||
@@ -501,19 +461,13 @@ func New(config Config, notify []string, noverify bool) *Ethash {
|
||||
// purposes.
|
||||
func NewTester(notify []string, noverify bool) *Ethash {
|
||||
ethash := &Ethash{
|
||||
config: Config{PowMode: ModeTest},
|
||||
caches: newlru("cache", 1, newCache),
|
||||
datasets: newlru("dataset", 1, newDataset),
|
||||
update: make(chan struct{}),
|
||||
hashrate: metrics.NewMeterForced(),
|
||||
workCh: make(chan *sealTask),
|
||||
fetchWorkCh: make(chan *sealWork),
|
||||
submitWorkCh: make(chan *mineResult),
|
||||
fetchRateCh: make(chan chan uint64),
|
||||
submitRateCh: make(chan *hashrate),
|
||||
exitCh: make(chan chan error),
|
||||
config: Config{PowMode: ModeTest, Log: log.Root()},
|
||||
caches: newlru("cache", 1, newCache),
|
||||
datasets: newlru("dataset", 1, newDataset),
|
||||
update: make(chan struct{}),
|
||||
hashrate: metrics.NewMeterForced(),
|
||||
}
|
||||
go ethash.remote(notify, noverify)
|
||||
ethash.remote = startRemoteSealer(ethash, notify, noverify)
|
||||
return ethash
|
||||
}
|
||||
|
||||
@@ -524,6 +478,7 @@ func NewFaker() *Ethash {
|
||||
return &Ethash{
|
||||
config: Config{
|
||||
PowMode: ModeFake,
|
||||
Log: log.Root(),
|
||||
},
|
||||
}
|
||||
}
|
||||
@@ -535,6 +490,7 @@ func NewFakeFailer(fail uint64) *Ethash {
|
||||
return &Ethash{
|
||||
config: Config{
|
||||
PowMode: ModeFake,
|
||||
Log: log.Root(),
|
||||
},
|
||||
fakeFail: fail,
|
||||
}
|
||||
@@ -547,6 +503,7 @@ func NewFakeDelayer(delay time.Duration) *Ethash {
|
||||
return &Ethash{
|
||||
config: Config{
|
||||
PowMode: ModeFake,
|
||||
Log: log.Root(),
|
||||
},
|
||||
fakeDelay: delay,
|
||||
}
|
||||
@@ -558,6 +515,7 @@ func NewFullFaker() *Ethash {
|
||||
return &Ethash{
|
||||
config: Config{
|
||||
PowMode: ModeFullFake,
|
||||
Log: log.Root(),
|
||||
},
|
||||
}
|
||||
}
|
||||
@@ -573,13 +531,11 @@ func (ethash *Ethash) Close() error {
|
||||
var err error
|
||||
ethash.closeOnce.Do(func() {
|
||||
// Short circuit if the exit channel is not allocated.
|
||||
if ethash.exitCh == nil {
|
||||
if ethash.remote == nil {
|
||||
return
|
||||
}
|
||||
errc := make(chan error)
|
||||
ethash.exitCh <- errc
|
||||
err = <-errc
|
||||
close(ethash.exitCh)
|
||||
close(ethash.remote.requestExit)
|
||||
<-ethash.remote.exitCh
|
||||
})
|
||||
return err
|
||||
}
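Close() now coordinates with the sealer through a requestExit/exitCh pair instead of an error channel. A generic, standalone sketch of that request/acknowledge shutdown idiom; the names are illustrative, not the engine's actual types.

```go
package main

import "fmt"

// worker mimics the shutdown wiring the new Close() relies on: the owner
// closes requestExit to ask the loop to stop, and the loop closes exitCh to
// acknowledge that it has finished cleaning up.
type worker struct {
	requestExit chan struct{}
	exitCh      chan struct{}
	work        chan int
}

func (w *worker) loop() {
	defer close(w.exitCh) // acknowledge shutdown once cleanup is done
	for {
		select {
		case <-w.requestExit:
			return
		case n := <-w.work:
			fmt.Println("processed", n)
		}
	}
}

func (w *worker) close() {
	close(w.requestExit) // ask the loop to stop...
	<-w.exitCh           // ...and wait until it actually has
}

func main() {
	w := &worker{
		requestExit: make(chan struct{}),
		exitCh:      make(chan struct{}),
		work:        make(chan int),
	}
	go w.loop()
	w.work <- 1
	w.close()
}
```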
|
||||
@@ -680,8 +636,8 @@ func (ethash *Ethash) Hashrate() float64 {
|
||||
var res = make(chan uint64, 1)
|
||||
|
||||
select {
|
||||
case ethash.fetchRateCh <- res:
|
||||
case <-ethash.exitCh:
|
||||
case ethash.remote.fetchRateCh <- res:
|
||||
case <-ethash.remote.exitCh:
|
||||
// Return local hashrate only if ethash is stopped.
|
||||
return ethash.hashrate.Rate1()
|
||||
}
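Hashrate() gathers remote rates with a request/response channel: it hands the sealer loop a fresh reply channel and waits for the aggregated answer on it. A small standalone sketch of the same idiom, with illustrative wiring.

```go
package main

import "fmt"

// rateLoop plays the role of the sealer loop: it answers each request with
// the current total of submitted rates.
func rateLoop(fetchRateCh chan chan uint64, rates map[string]uint64) {
	for req := range fetchRateCh {
		var total uint64
		for _, r := range rates {
			total += r // as noted in the diff, this could overflow
		}
		req <- total
	}
}

// totalRate plays the role of Hashrate(): hand over a reply channel, wait.
func totalRate(fetchRateCh chan chan uint64) uint64 {
	res := make(chan uint64, 1)
	fetchRateCh <- res
	return <-res
}

func main() {
	fetchRateCh := make(chan chan uint64)
	go rateLoop(fetchRateCh, map[string]uint64{"a": 10, "b": 32})
	fmt.Println(totalRate(fetchRateCh)) // 42
}
```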
|
||||
|
@@ -49,7 +49,7 @@ func TestTestMode(t *testing.T) {
|
||||
if err := ethash.VerifySeal(nil, header); err != nil {
|
||||
t.Fatalf("unexpected verification error: %v", err)
|
||||
}
|
||||
case <-time.NewTimer(time.Second).C:
|
||||
case <-time.NewTimer(2 * time.Second).C:
|
||||
t.Error("sealing result timeout")
|
||||
}
|
||||
}
|
||||
|
@@ -18,6 +18,7 @@ package ethash
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
crand "crypto/rand"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
@@ -33,7 +34,6 @@ import (
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/ethereum/go-ethereum/consensus"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -56,7 +56,7 @@ func (ethash *Ethash) Seal(chain consensus.ChainReader, block *types.Block, resu
|
||||
select {
|
||||
case results <- block.WithSeal(header):
|
||||
default:
|
||||
log.Warn("Sealing result is not read by miner", "mode", "fake", "sealhash", ethash.SealHash(block.Header()))
|
||||
ethash.config.Log.Warn("Sealing result is not read by miner", "mode", "fake", "sealhash", ethash.SealHash(block.Header()))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -85,8 +85,8 @@ func (ethash *Ethash) Seal(chain consensus.ChainReader, block *types.Block, resu
|
||||
threads = 0 // Allows disabling local mining without extra logic around local/remote
|
||||
}
|
||||
// Push new work to remote sealer
|
||||
if ethash.workCh != nil {
|
||||
ethash.workCh <- &sealTask{block: block, results: results}
|
||||
if ethash.remote != nil {
|
||||
ethash.remote.workCh <- &sealTask{block: block, results: results}
|
||||
}
|
||||
var (
|
||||
pend sync.WaitGroup
|
||||
@@ -111,14 +111,14 @@ func (ethash *Ethash) Seal(chain consensus.ChainReader, block *types.Block, resu
|
||||
select {
|
||||
case results <- result:
|
||||
default:
|
||||
log.Warn("Sealing result is not read by miner", "mode", "local", "sealhash", ethash.SealHash(block.Header()))
|
||||
ethash.config.Log.Warn("Sealing result is not read by miner", "mode", "local", "sealhash", ethash.SealHash(block.Header()))
|
||||
}
|
||||
close(abort)
|
||||
case <-ethash.update:
|
||||
// Thread count was changed on user request, restart
|
||||
close(abort)
|
||||
if err := ethash.Seal(chain, block, results, stop); err != nil {
|
||||
log.Error("Failed to restart sealing after update", "err", err)
|
||||
ethash.config.Log.Error("Failed to restart sealing after update", "err", err)
|
||||
}
|
||||
}
|
||||
// Wait for all miners to terminate and return the block
|
||||
@@ -143,7 +143,7 @@ func (ethash *Ethash) mine(block *types.Block, id int, seed uint64, abort chan s
|
||||
attempts = int64(0)
|
||||
nonce = seed
|
||||
)
|
||||
logger := log.New("miner", id)
|
||||
logger := ethash.config.Log.New("miner", id)
|
||||
logger.Trace("Started ethash search for new nonces", "seed", seed)
|
||||
search:
|
||||
for {
|
||||
@@ -186,160 +186,128 @@ search:
|
||||
runtime.KeepAlive(dataset)
|
||||
}
|
||||
|
||||
// remote is a standalone goroutine to handle remote mining related stuff.
|
||||
func (ethash *Ethash) remote(notify []string, noverify bool) {
|
||||
var (
|
||||
works = make(map[common.Hash]*types.Block)
|
||||
rates = make(map[common.Hash]hashrate)
|
||||
// This is the timeout for HTTP requests to notify external miners.
|
||||
const remoteSealerTimeout = 1 * time.Second
|
||||
|
||||
results chan<- *types.Block
|
||||
currentBlock *types.Block
|
||||
currentWork [4]string
|
||||
type remoteSealer struct {
|
||||
works map[common.Hash]*types.Block
|
||||
rates map[common.Hash]hashrate
|
||||
currentBlock *types.Block
|
||||
currentWork [4]string
|
||||
notifyCtx context.Context
|
||||
cancelNotify context.CancelFunc // cancels all notification requests
|
||||
reqWG sync.WaitGroup // tracks notification request goroutines
|
||||
|
||||
notifyTransport = &http.Transport{}
|
||||
notifyClient = &http.Client{
|
||||
Transport: notifyTransport,
|
||||
Timeout: time.Second,
|
||||
}
|
||||
notifyReqs = make([]*http.Request, len(notify))
|
||||
)
|
||||
// notifyWork notifies all the specified mining endpoints of the availability of
|
||||
// new work to be processed.
|
||||
notifyWork := func() {
|
||||
work := currentWork
|
||||
blob, _ := json.Marshal(work)
|
||||
ethash *Ethash
|
||||
noverify bool
|
||||
notifyURLs []string
|
||||
results chan<- *types.Block
|
||||
workCh chan *sealTask // Notification channel to push new work and relative result channel to remote sealer
|
||||
fetchWorkCh chan *sealWork // Channel used for remote sealer to fetch mining work
|
||||
submitWorkCh chan *mineResult // Channel used for remote sealer to submit their mining result
|
||||
fetchRateCh chan chan uint64 // Channel used to gather submitted hash rate for local or remote sealer.
|
||||
submitRateCh chan *hashrate // Channel used for remote sealer to submit their mining hashrate
|
||||
requestExit chan struct{}
|
||||
exitCh chan struct{}
|
||||
}
|
||||
|
||||
for i, url := range notify {
|
||||
// Terminate any previously pending request and create the new work
|
||||
if notifyReqs[i] != nil {
|
||||
notifyTransport.CancelRequest(notifyReqs[i])
|
||||
}
|
||||
notifyReqs[i], _ = http.NewRequest("POST", url, bytes.NewReader(blob))
|
||||
notifyReqs[i].Header.Set("Content-Type", "application/json")
|
||||
// sealTask wraps a seal block with relative result channel for remote sealer thread.
|
||||
type sealTask struct {
|
||||
block *types.Block
|
||||
results chan<- *types.Block
|
||||
}
|
||||
|
||||
// Push the new work concurrently to all the remote nodes
|
||||
go func(req *http.Request, url string) {
|
||||
res, err := notifyClient.Do(req)
|
||||
if err != nil {
|
||||
log.Warn("Failed to notify remote miner", "err", err)
|
||||
} else {
|
||||
log.Trace("Notified remote miner", "miner", url, "hash", log.Lazy{Fn: func() common.Hash { return common.HexToHash(work[0]) }}, "target", work[2])
|
||||
res.Body.Close()
|
||||
}
|
||||
}(notifyReqs[i], url)
|
||||
}
|
||||
// mineResult wraps the pow solution parameters for the specified block.
|
||||
type mineResult struct {
|
||||
nonce types.BlockNonce
|
||||
mixDigest common.Hash
|
||||
hash common.Hash
|
||||
|
||||
errc chan error
|
||||
}
|
||||
|
||||
// hashrate wraps the hash rate submitted by the remote sealer.
|
||||
type hashrate struct {
|
||||
id common.Hash
|
||||
ping time.Time
|
||||
rate uint64
|
||||
|
||||
done chan struct{}
|
||||
}
|
||||
|
||||
// sealWork wraps a seal work package for remote sealer.
|
||||
type sealWork struct {
|
||||
errc chan error
|
||||
res chan [4]string
|
||||
}
|
||||
|
||||
func startRemoteSealer(ethash *Ethash, urls []string, noverify bool) *remoteSealer {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
s := &remoteSealer{
|
||||
ethash: ethash,
|
||||
noverify: noverify,
|
||||
notifyURLs: urls,
|
||||
notifyCtx: ctx,
|
||||
cancelNotify: cancel,
|
||||
works: make(map[common.Hash]*types.Block),
|
||||
rates: make(map[common.Hash]hashrate),
|
||||
workCh: make(chan *sealTask),
|
||||
fetchWorkCh: make(chan *sealWork),
|
||||
submitWorkCh: make(chan *mineResult),
|
||||
fetchRateCh: make(chan chan uint64),
|
||||
submitRateCh: make(chan *hashrate),
|
||||
requestExit: make(chan struct{}),
|
||||
exitCh: make(chan struct{}),
|
||||
}
|
||||
// makeWork creates a work package for external miner.
|
||||
//
|
||||
// The work package consists of 3 strings:
|
||||
// result[0], 32 bytes hex encoded current block header pow-hash
|
||||
// result[1], 32 bytes hex encoded seed hash used for DAG
|
||||
// result[2], 32 bytes hex encoded boundary condition ("target"), 2^256/difficulty
|
||||
// result[3], hex encoded block number
|
||||
makeWork := func(block *types.Block) {
|
||||
hash := ethash.SealHash(block.Header())
|
||||
go s.loop()
|
||||
return s
|
||||
}
|
||||
|
||||
currentWork[0] = hash.Hex()
|
||||
currentWork[1] = common.BytesToHash(SeedHash(block.NumberU64())).Hex()
|
||||
currentWork[2] = common.BytesToHash(new(big.Int).Div(two256, block.Difficulty()).Bytes()).Hex()
|
||||
currentWork[3] = hexutil.EncodeBig(block.Number())
|
||||
|
||||
// Trace the seal work fetched by remote sealer.
|
||||
currentBlock = block
|
||||
works[hash] = block
|
||||
}
|
||||
// submitWork verifies the submitted pow solution, returning
|
||||
// whether the solution was accepted or not (not can be both a bad pow as well as
|
||||
// any other error, like no pending work or stale mining result).
|
||||
submitWork := func(nonce types.BlockNonce, mixDigest common.Hash, sealhash common.Hash) bool {
|
||||
if currentBlock == nil {
|
||||
log.Error("Pending work without block", "sealhash", sealhash)
|
||||
return false
|
||||
}
|
||||
// Make sure the work submitted is present
|
||||
block := works[sealhash]
|
||||
if block == nil {
|
||||
log.Warn("Work submitted but none pending", "sealhash", sealhash, "curnumber", currentBlock.NumberU64())
|
||||
return false
|
||||
}
|
||||
// Verify the correctness of submitted result.
|
||||
header := block.Header()
|
||||
header.Nonce = nonce
|
||||
header.MixDigest = mixDigest
|
||||
|
||||
start := time.Now()
|
||||
if !noverify {
|
||||
if err := ethash.verifySeal(nil, header, true); err != nil {
|
||||
log.Warn("Invalid proof-of-work submitted", "sealhash", sealhash, "elapsed", common.PrettyDuration(time.Since(start)), "err", err)
|
||||
return false
|
||||
}
|
||||
}
|
||||
// Make sure the result channel is assigned.
|
||||
if results == nil {
|
||||
log.Warn("Ethash result channel is empty, submitted mining result is rejected")
|
||||
return false
|
||||
}
|
||||
log.Trace("Verified correct proof-of-work", "sealhash", sealhash, "elapsed", common.PrettyDuration(time.Since(start)))
|
||||
|
||||
// Solutions seems to be valid, return to the miner and notify acceptance.
|
||||
solution := block.WithSeal(header)
|
||||
|
||||
// The submitted solution is within the scope of acceptance.
|
||||
if solution.NumberU64()+staleThreshold > currentBlock.NumberU64() {
|
||||
select {
|
||||
case results <- solution:
|
||||
log.Debug("Work submitted is acceptable", "number", solution.NumberU64(), "sealhash", sealhash, "hash", solution.Hash())
|
||||
return true
|
||||
default:
|
||||
log.Warn("Sealing result is not read by miner", "mode", "remote", "sealhash", sealhash)
|
||||
return false
|
||||
}
|
||||
}
|
||||
// The submitted block is too old to accept, drop it.
|
||||
log.Warn("Work submitted is too old", "number", solution.NumberU64(), "sealhash", sealhash, "hash", solution.Hash())
|
||||
return false
|
||||
}
|
||||
func (s *remoteSealer) loop() {
|
||||
defer func() {
|
||||
s.ethash.config.Log.Trace("Ethash remote sealer is exiting")
|
||||
s.cancelNotify()
|
||||
s.reqWG.Wait()
|
||||
close(s.exitCh)
|
||||
}()
|
||||
|
||||
ticker := time.NewTicker(5 * time.Second)
|
||||
defer ticker.Stop()
|
||||
|
||||
for {
|
||||
select {
|
||||
case work := <-ethash.workCh:
|
||||
case work := <-s.workCh:
|
||||
// Update current work with new received block.
|
||||
// Note that the same work can be passed in twice, which happens when changing CPU threads.
|
||||
results = work.results
|
||||
s.results = work.results
|
||||
s.makeWork(work.block)
|
||||
s.notifyWork()
|
||||
|
||||
makeWork(work.block)
|
||||
|
||||
// Notify and requested URLs of the new work availability
|
||||
notifyWork()
|
||||
|
||||
case work := <-ethash.fetchWorkCh:
|
||||
case work := <-s.fetchWorkCh:
|
||||
// Return current mining work to remote miner.
|
||||
if currentBlock == nil {
|
||||
if s.currentBlock == nil {
|
||||
work.errc <- errNoMiningWork
|
||||
} else {
|
||||
work.res <- currentWork
|
||||
work.res <- s.currentWork
|
||||
}
|
||||
|
||||
case result := <-ethash.submitWorkCh:
|
||||
case result := <-s.submitWorkCh:
|
||||
// Verify submitted PoW solution based on maintained mining blocks.
|
||||
if submitWork(result.nonce, result.mixDigest, result.hash) {
|
||||
if s.submitWork(result.nonce, result.mixDigest, result.hash) {
|
||||
result.errc <- nil
|
||||
} else {
|
||||
result.errc <- errInvalidSealResult
|
||||
}
|
||||
|
||||
case result := <-ethash.submitRateCh:
|
||||
case result := <-s.submitRateCh:
|
||||
// Trace remote sealer's hash rate by submitted value.
|
||||
rates[result.id] = hashrate{rate: result.rate, ping: time.Now()}
|
||||
s.rates[result.id] = hashrate{rate: result.rate, ping: time.Now()}
|
||||
close(result.done)
|
||||
|
||||
case req := <-ethash.fetchRateCh:
|
||||
case req := <-s.fetchRateCh:
|
||||
// Gather all hash rate submitted by remote sealer.
|
||||
var total uint64
|
||||
for _, rate := range rates {
|
||||
for _, rate := range s.rates {
|
||||
// this could overflow
|
||||
total += rate.rate
|
||||
}
|
||||
@@ -347,25 +315,126 @@ func (ethash *Ethash) remote(notify []string, noverify bool) {
|
||||
|
||||
case <-ticker.C:
|
||||
// Clear stale submitted hash rate.
|
||||
for id, rate := range rates {
|
||||
for id, rate := range s.rates {
|
||||
if time.Since(rate.ping) > 10*time.Second {
|
||||
delete(rates, id)
|
||||
delete(s.rates, id)
|
||||
}
|
||||
}
|
||||
// Clear stale pending blocks
|
||||
if currentBlock != nil {
|
||||
for hash, block := range works {
|
||||
if block.NumberU64()+staleThreshold <= currentBlock.NumberU64() {
|
||||
delete(works, hash)
|
||||
if s.currentBlock != nil {
|
||||
for hash, block := range s.works {
|
||||
if block.NumberU64()+staleThreshold <= s.currentBlock.NumberU64() {
|
||||
delete(s.works, hash)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
case errc := <-ethash.exitCh:
|
||||
// Exit remote loop if ethash is closed and return relevant error.
|
||||
errc <- nil
|
||||
log.Trace("Ethash remote sealer is exiting")
|
||||
case <-s.requestExit:
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// makeWork creates a work package for external miner.
|
||||
//
|
||||
// The work package consists of 4 strings:
|
||||
// result[0], 32 bytes hex encoded current block header pow-hash
|
||||
// result[1], 32 bytes hex encoded seed hash used for DAG
|
||||
// result[2], 32 bytes hex encoded boundary condition ("target"), 2^256/difficulty
|
||||
// result[3], hex encoded block number
|
||||
func (s *remoteSealer) makeWork(block *types.Block) {
|
||||
hash := s.ethash.SealHash(block.Header())
|
||||
s.currentWork[0] = hash.Hex()
|
||||
s.currentWork[1] = common.BytesToHash(SeedHash(block.NumberU64())).Hex()
|
||||
s.currentWork[2] = common.BytesToHash(new(big.Int).Div(two256, block.Difficulty()).Bytes()).Hex()
|
||||
s.currentWork[3] = hexutil.EncodeBig(block.Number())
|
||||
|
||||
// Trace the seal work fetched by remote sealer.
|
||||
s.currentBlock = block
|
||||
s.works[hash] = block
|
||||
}
|
||||
|
||||
// notifyWork notifies all the specified mining endpoints of the availability of
|
||||
// new work to be processed.
|
||||
func (s *remoteSealer) notifyWork() {
|
||||
work := s.currentWork
|
||||
blob, _ := json.Marshal(work)
|
||||
s.reqWG.Add(len(s.notifyURLs))
|
||||
for _, url := range s.notifyURLs {
|
||||
go s.sendNotification(s.notifyCtx, url, blob, work)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *remoteSealer) sendNotification(ctx context.Context, url string, json []byte, work [4]string) {
|
||||
defer s.reqWG.Done()
|
||||
|
||||
req, err := http.NewRequest("POST", url, bytes.NewReader(json))
|
||||
if err != nil {
|
||||
s.ethash.config.Log.Warn("Can't create remote miner notification", "err", err)
|
||||
return
|
||||
}
|
||||
ctx, cancel := context.WithTimeout(ctx, remoteSealerTimeout)
|
||||
defer cancel()
|
||||
req = req.WithContext(ctx)
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
|
||||
resp, err := http.DefaultClient.Do(req)
|
||||
if err != nil {
|
||||
s.ethash.config.Log.Warn("Failed to notify remote miner", "err", err)
|
||||
} else {
|
||||
s.ethash.config.Log.Trace("Notified remote miner", "miner", url, "hash", work[0], "target", work[2])
|
||||
resp.Body.Close()
|
||||
}
|
||||
}
|
||||
|
||||
// submitWork verifies the submitted pow solution, returning
|
||||
// whether the solution was accepted or not (a rejection can be caused by a bad pow as well as
|
||||
// any other error, like no pending work or a stale mining result).
|
||||
func (s *remoteSealer) submitWork(nonce types.BlockNonce, mixDigest common.Hash, sealhash common.Hash) bool {
|
||||
if s.currentBlock == nil {
|
||||
s.ethash.config.Log.Error("Pending work without block", "sealhash", sealhash)
|
||||
return false
|
||||
}
|
||||
// Make sure the work submitted is present
|
||||
block := s.works[sealhash]
|
||||
if block == nil {
|
||||
s.ethash.config.Log.Warn("Work submitted but none pending", "sealhash", sealhash, "curnumber", s.currentBlock.NumberU64())
|
||||
return false
|
||||
}
|
||||
// Verify the correctness of submitted result.
|
||||
header := block.Header()
|
||||
header.Nonce = nonce
|
||||
header.MixDigest = mixDigest
|
||||
|
||||
start := time.Now()
|
||||
if !s.noverify {
|
||||
if err := s.ethash.verifySeal(nil, header, true); err != nil {
|
||||
s.ethash.config.Log.Warn("Invalid proof-of-work submitted", "sealhash", sealhash, "elapsed", common.PrettyDuration(time.Since(start)), "err", err)
|
||||
return false
|
||||
}
|
||||
}
|
||||
// Make sure the result channel is assigned.
|
||||
if s.results == nil {
|
||||
s.ethash.config.Log.Warn("Ethash result channel is empty, submitted mining result is rejected")
|
||||
return false
|
||||
}
|
||||
s.ethash.config.Log.Trace("Verified correct proof-of-work", "sealhash", sealhash, "elapsed", common.PrettyDuration(time.Since(start)))
|
||||
|
||||
// The solution seems to be valid, return it to the miner and notify acceptance.
|
||||
solution := block.WithSeal(header)
|
||||
|
||||
// The submitted solution is within the scope of acceptance.
|
||||
if solution.NumberU64()+staleThreshold > s.currentBlock.NumberU64() {
|
||||
select {
|
||||
case s.results <- solution:
|
||||
s.ethash.config.Log.Debug("Work submitted is acceptable", "number", solution.NumberU64(), "sealhash", sealhash, "hash", solution.Hash())
|
||||
return true
|
||||
default:
|
||||
s.ethash.config.Log.Warn("Sealing result is not read by miner", "mode", "remote", "sealhash", sealhash)
|
||||
return false
|
||||
}
|
||||
}
|
||||
// The submitted block is too old to accept, drop it.
|
||||
s.ethash.config.Log.Warn("Work submitted is too old", "number", solution.NumberU64(), "sealhash", sealhash, "hash", solution.Hash())
|
||||
return false
|
||||
}
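The new notification path replaces per-request CancelRequest bookkeeping with a parent context that is cancelled on shutdown plus a per-request timeout, all tracked by a WaitGroup. A standalone sketch of that pattern; the URL and payload are placeholders.

```go
package main

import (
	"bytes"
	"context"
	"net/http"
	"sync"
	"time"
)

// notifyAll mirrors sendNotification above: the parent context bounds every
// in-flight request, and each request additionally gets its own timeout.
func notifyAll(parent context.Context, urls []string, payload []byte, wg *sync.WaitGroup) {
	wg.Add(len(urls))
	for _, url := range urls {
		go func(url string) {
			defer wg.Done()

			ctx, cancel := context.WithTimeout(parent, time.Second)
			defer cancel()

			req, err := http.NewRequest("POST", url, bytes.NewReader(payload))
			if err != nil {
				return
			}
			req = req.WithContext(ctx)
			req.Header.Set("Content-Type", "application/json")
			if resp, err := http.DefaultClient.Do(req); err == nil {
				resp.Body.Close()
			}
		}(url)
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	var wg sync.WaitGroup
	notifyAll(ctx, []string{"http://127.0.0.1:9999/notify"}, []byte(`["work"]`), &wg)
	cancel()  // on shutdown: abort anything still pending...
	wg.Wait() // ...and wait for the request goroutines to wind down
}
```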
|
||||
|
@@ -20,59 +20,39 @@ import (
|
||||
"encoding/json"
|
||||
"io/ioutil"
|
||||
"math/big"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/internal/testlog"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
)
|
||||
|
||||
// Tests whether remote HTTP servers are correctly notified of new work.
|
||||
func TestRemoteNotify(t *testing.T) {
|
||||
// Start a simple webserver to capture notifications
|
||||
// Start a simple web server to capture notifications.
|
||||
sink := make(chan [3]string)
|
||||
|
||||
server := &http.Server{
|
||||
Handler: http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
|
||||
blob, err := ioutil.ReadAll(req.Body)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to read miner notification: %v", err)
|
||||
}
|
||||
var work [3]string
|
||||
if err := json.Unmarshal(blob, &work); err != nil {
|
||||
t.Fatalf("failed to unmarshal miner notification: %v", err)
|
||||
}
|
||||
sink <- work
|
||||
}),
|
||||
}
|
||||
// Open a custom listener to extract its local address
|
||||
listener, err := net.Listen("tcp", "localhost:0")
|
||||
if err != nil {
|
||||
t.Fatalf("failed to open notification server: %v", err)
|
||||
}
|
||||
defer listener.Close()
|
||||
|
||||
go server.Serve(listener)
|
||||
|
||||
// Wait for server to start listening
|
||||
var tries int
|
||||
for tries = 0; tries < 10; tries++ {
|
||||
conn, _ := net.DialTimeout("tcp", listener.Addr().String(), 1*time.Second)
|
||||
if conn != nil {
|
||||
break
|
||||
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
|
||||
blob, err := ioutil.ReadAll(req.Body)
|
||||
if err != nil {
|
||||
t.Errorf("failed to read miner notification: %v", err)
|
||||
}
|
||||
}
|
||||
if tries == 10 {
|
||||
t.Fatal("tcp listener not ready for more than 10 seconds")
|
||||
}
|
||||
var work [3]string
|
||||
if err := json.Unmarshal(blob, &work); err != nil {
|
||||
t.Errorf("failed to unmarshal miner notification: %v", err)
|
||||
}
|
||||
sink <- work
|
||||
}))
|
||||
defer server.Close()
|
||||
|
||||
// Create the custom ethash engine
|
||||
ethash := NewTester([]string{"http://" + listener.Addr().String()}, false)
|
||||
// Create the custom ethash engine.
|
||||
ethash := NewTester([]string{server.URL}, false)
|
||||
defer ethash.Close()
|
||||
|
||||
// Stream a work task and ensure the notification bubbles out
|
||||
// Stream a work task and ensure the notification bubbles out.
|
||||
header := &types.Header{Number: big.NewInt(1), Difficulty: big.NewInt(100)}
|
||||
block := types.NewBlockWithHeader(header)
|
||||
|
||||
@@ -97,46 +77,37 @@ func TestRemoteNotify(t *testing.T) {
|
||||
// Tests that pushing work packages fast to the miner doesn't cause any data race
|
||||
// issues in the notifications.
|
||||
func TestRemoteMultiNotify(t *testing.T) {
|
||||
// Start a simple webserver to capture notifications
|
||||
// Start a simple web server to capture notifications.
|
||||
sink := make(chan [3]string, 64)
|
||||
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
|
||||
blob, err := ioutil.ReadAll(req.Body)
|
||||
if err != nil {
|
||||
t.Errorf("failed to read miner notification: %v", err)
|
||||
}
|
||||
var work [3]string
|
||||
if err := json.Unmarshal(blob, &work); err != nil {
|
||||
t.Errorf("failed to unmarshal miner notification: %v", err)
|
||||
}
|
||||
sink <- work
|
||||
}))
|
||||
defer server.Close()
|
||||
|
||||
server := &http.Server{
|
||||
Handler: http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
|
||||
blob, err := ioutil.ReadAll(req.Body)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to read miner notification: %v", err)
|
||||
}
|
||||
var work [3]string
|
||||
if err := json.Unmarshal(blob, &work); err != nil {
|
||||
t.Fatalf("failed to unmarshal miner notification: %v", err)
|
||||
}
|
||||
sink <- work
|
||||
}),
|
||||
}
|
||||
// Open a custom listener to extract its local address
|
||||
listener, err := net.Listen("tcp", "localhost:0")
|
||||
if err != nil {
|
||||
t.Fatalf("failed to open notification server: %v", err)
|
||||
}
|
||||
defer listener.Close()
|
||||
|
||||
go server.Serve(listener)
|
||||
|
||||
// Create the custom ethash engine
|
||||
ethash := NewTester([]string{"http://" + listener.Addr().String()}, false)
|
||||
// Create the custom ethash engine.
|
||||
ethash := NewTester([]string{server.URL}, false)
|
||||
ethash.config.Log = testlog.Logger(t, log.LvlWarn)
|
||||
defer ethash.Close()
|
||||
|
||||
// Stream a lot of work task and ensure all the notifications bubble out
|
||||
// Stream a lot of work tasks and ensure all the notifications bubble out.
|
||||
for i := 0; i < cap(sink); i++ {
|
||||
header := &types.Header{Number: big.NewInt(int64(i)), Difficulty: big.NewInt(100)}
|
||||
block := types.NewBlockWithHeader(header)
|
||||
|
||||
ethash.Seal(nil, block, nil, nil)
|
||||
}
|
||||
|
||||
for i := 0; i < cap(sink); i++ {
|
||||
select {
|
||||
case <-sink:
|
||||
case <-time.After(3 * time.Second):
|
||||
case <-time.After(10 * time.Second):
|
||||
t.Fatalf("notification %d timed out", i)
|
||||
}
|
||||
}
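The tests switch from a hand-rolled net.Listen setup with readiness polling to httptest.NewServer, which binds a free port itself and exposes it via server.URL. A minimal standalone sketch of that pattern, independent of the ethash code.

```go
package example

import (
	"net/http"
	"net/http/httptest"
	"testing"
)

// TestSketch shows the httptest idiom: no manual listener, no polling for
// readiness, just srv.URL handed straight to the code under test.
func TestSketch(t *testing.T) {
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusOK)
	}))
	defer srv.Close()

	resp, err := http.Get(srv.URL) // srv.URL is e.g. http://127.0.0.1:<port>
	if err != nil {
		t.Fatal(err)
	}
	resp.Body.Close()
}
```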
|
||||
@@ -206,10 +177,10 @@ func TestStaleSubmission(t *testing.T) {
|
||||
select {
|
||||
case res := <-results:
|
||||
if res.Header().Nonce != fakeNonce {
|
||||
t.Errorf("case %d block nonce mismatch, want %s, get %s", id+1, fakeNonce, res.Header().Nonce)
|
||||
t.Errorf("case %d block nonce mismatch, want %x, get %x", id+1, fakeNonce, res.Header().Nonce)
|
||||
}
|
||||
if res.Header().MixDigest != fakeDigest {
|
||||
t.Errorf("case %d block digest mismatch, want %s, get %s", id+1, fakeDigest, res.Header().MixDigest)
|
||||
t.Errorf("case %d block digest mismatch, want %x, get %x", id+1, fakeDigest, res.Header().MixDigest)
|
||||
}
|
||||
if res.Header().Difficulty.Uint64() != c.headers[c.submitIndex].Difficulty.Uint64() {
|
||||
t.Errorf("case %d block difficulty mismatch, want %d, get %d", id+1, c.headers[c.submitIndex].Difficulty, res.Header().Difficulty)
|
||||
|
@@ -20,14 +20,16 @@ import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"reflect"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/dop251/goja"
|
||||
"github.com/ethereum/go-ethereum/accounts/scwallet"
|
||||
"github.com/ethereum/go-ethereum/accounts/usbwallet"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/ethereum/go-ethereum/internal/jsre"
|
||||
"github.com/ethereum/go-ethereum/rpc"
|
||||
"github.com/robertkrimen/otto"
|
||||
)
|
||||
|
||||
// bridge is a collection of JavaScript utility methods to bridge the .js runtime
|
||||
@@ -47,10 +49,18 @@ func newBridge(client *rpc.Client, prompter UserPrompter, printer io.Writer) *br
|
||||
}
|
||||
}
|
||||
|
||||
func getJeth(vm *goja.Runtime) *goja.Object {
|
||||
jeth := vm.Get("jeth")
|
||||
if jeth == nil {
|
||||
panic(vm.ToValue("jeth object does not exist"))
|
||||
}
|
||||
return jeth.ToObject(vm)
|
||||
}
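getJeth and the rewritten bridge methods all follow the same goja calling convention: fetch a value from the VM, assert it is callable, then invoke it with an explicit `this` value. A self-contained sketch of that convention, unrelated to the console code itself.

```go
package main

import (
	"fmt"

	"github.com/dop251/goja"
)

func main() {
	vm := goja.New()
	if _, err := vm.RunString(`function twice(x) { return 2 * x }`); err != nil {
		panic(err)
	}
	// AssertFunction reports whether the value is callable and, if so, hands
	// back a Go-callable wrapper.
	twice, ok := goja.AssertFunction(vm.Get("twice"))
	if !ok {
		panic("twice is not callable")
	}
	res, err := twice(goja.Undefined(), vm.ToValue(21)) // first argument is `this`
	if err != nil {
		panic(err)
	}
	fmt.Println(res.Export()) // 42
}
```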
|
||||
|
||||
// NewAccount is a wrapper around the personal.newAccount RPC method that uses a
|
||||
// non-echoing password prompt to acquire the passphrase and executes the original
|
||||
// RPC method (saved in jeth.newAccount) with it to actually execute the RPC call.
|
||||
func (b *bridge) NewAccount(call otto.FunctionCall) (response otto.Value) {
|
||||
func (b *bridge) NewAccount(call jsre.Call) (goja.Value, error) {
|
||||
var (
|
||||
password string
|
||||
confirm string
|
||||
@@ -58,52 +68,57 @@ func (b *bridge) NewAccount(call otto.FunctionCall) (response otto.Value) {
|
||||
)
|
||||
switch {
|
||||
// No password was specified, prompt the user for it
|
||||
case len(call.ArgumentList) == 0:
|
||||
if password, err = b.prompter.PromptPassword("Password: "); err != nil {
|
||||
throwJSException(err.Error())
|
||||
case len(call.Arguments) == 0:
|
||||
if password, err = b.prompter.PromptPassword("Passphrase: "); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if confirm, err = b.prompter.PromptPassword("Repeat password: "); err != nil {
|
||||
throwJSException(err.Error())
|
||||
if confirm, err = b.prompter.PromptPassword("Repeat passphrase: "); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if password != confirm {
|
||||
throwJSException("passwords don't match!")
|
||||
return nil, fmt.Errorf("passwords don't match!")
|
||||
}
|
||||
|
||||
// A single string password was specified, use that
|
||||
case len(call.ArgumentList) == 1 && call.Argument(0).IsString():
|
||||
password, _ = call.Argument(0).ToString()
|
||||
|
||||
// Otherwise fail with some error
|
||||
case len(call.Arguments) == 1 && call.Argument(0).ToString() != nil:
|
||||
password = call.Argument(0).ToString().String()
|
||||
default:
|
||||
throwJSException("expected 0 or 1 string argument")
|
||||
return nil, fmt.Errorf("expected 0 or 1 string argument")
|
||||
}
|
||||
// Password acquired, execute the call and return
|
||||
ret, err := call.Otto.Call("jeth.newAccount", nil, password)
|
||||
if err != nil {
|
||||
throwJSException(err.Error())
|
||||
newAccount, callable := goja.AssertFunction(getJeth(call.VM).Get("newAccount"))
|
||||
if !callable {
|
||||
return nil, fmt.Errorf("jeth.newAccount is not callable")
|
||||
}
|
||||
return ret
|
||||
ret, err := newAccount(goja.Null(), call.VM.ToValue(password))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return ret, nil
|
||||
}
|
||||
|
||||
// OpenWallet is a wrapper around personal.openWallet which can interpret and
|
||||
// react to certain error messages, such as the Trezor PIN matrix request.
|
||||
func (b *bridge) OpenWallet(call otto.FunctionCall) (response otto.Value) {
|
||||
func (b *bridge) OpenWallet(call jsre.Call) (goja.Value, error) {
|
||||
// Make sure we have a wallet specified to open
|
||||
if !call.Argument(0).IsString() {
|
||||
throwJSException("first argument must be the wallet URL to open")
|
||||
if call.Argument(0).ToObject(call.VM).ClassName() != "String" {
|
||||
return nil, fmt.Errorf("first argument must be the wallet URL to open")
|
||||
}
|
||||
wallet := call.Argument(0)
|
||||
|
||||
var passwd otto.Value
|
||||
if call.Argument(1).IsUndefined() || call.Argument(1).IsNull() {
|
||||
passwd, _ = otto.ToValue("")
|
||||
var passwd goja.Value
|
||||
if goja.IsUndefined(call.Argument(1)) || goja.IsNull(call.Argument(1)) {
|
||||
passwd = call.VM.ToValue("")
|
||||
} else {
|
||||
passwd = call.Argument(1)
|
||||
}
|
||||
// Open the wallet and return if successful in itself
|
||||
val, err := call.Otto.Call("jeth.openWallet", nil, wallet, passwd)
|
||||
openWallet, callable := goja.AssertFunction(getJeth(call.VM).Get("openWallet"))
|
||||
if !callable {
|
||||
return nil, fmt.Errorf("jeth.openWallet is not callable")
|
||||
}
|
||||
val, err := openWallet(goja.Null(), wallet, passwd)
|
||||
if err == nil {
|
||||
return val
|
||||
return val, nil
|
||||
}
|
||||
|
||||
// Wallet open failed, report error unless it's a PIN or PUK entry
|
||||
@@ -111,32 +126,31 @@ func (b *bridge) OpenWallet(call otto.FunctionCall) (response otto.Value) {
|
||||
case strings.HasSuffix(err.Error(), usbwallet.ErrTrezorPINNeeded.Error()):
|
||||
val, err = b.readPinAndReopenWallet(call)
|
||||
if err == nil {
|
||||
return val
|
||||
return val, nil
|
||||
}
|
||||
val, err = b.readPassphraseAndReopenWallet(call)
|
||||
if err != nil {
|
||||
throwJSException(err.Error())
|
||||
return nil, err
|
||||
}
|
||||
|
||||
case strings.HasSuffix(err.Error(), scwallet.ErrPairingPasswordNeeded.Error()):
|
||||
// PUK input requested, fetch from the user and call open again
|
||||
if input, err := b.prompter.PromptPassword("Please enter the pairing password: "); err != nil {
|
||||
throwJSException(err.Error())
|
||||
} else {
|
||||
passwd, _ = otto.ToValue(input)
|
||||
input, err := b.prompter.PromptPassword("Please enter the pairing password: ")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if val, err = call.Otto.Call("jeth.openWallet", nil, wallet, passwd); err != nil {
|
||||
passwd = call.VM.ToValue(input)
|
||||
if val, err = openWallet(goja.Null(), wallet, passwd); err != nil {
|
||||
if !strings.HasSuffix(err.Error(), scwallet.ErrPINNeeded.Error()) {
|
||||
throwJSException(err.Error())
|
||||
return nil, err
|
||||
} else {
|
||||
// PIN input requested, fetch from the user and call open again
|
||||
if input, err := b.prompter.PromptPassword("Please enter current PIN: "); err != nil {
|
||||
throwJSException(err.Error())
|
||||
} else {
|
||||
passwd, _ = otto.ToValue(input)
|
||||
input, err := b.prompter.PromptPassword("Please enter current PIN: ")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if val, err = call.Otto.Call("jeth.openWallet", nil, wallet, passwd); err != nil {
|
||||
throwJSException(err.Error())
|
||||
if val, err = openWallet(goja.Null(), wallet, call.VM.ToValue(input)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -144,52 +158,52 @@ func (b *bridge) OpenWallet(call otto.FunctionCall) (response otto.Value) {
|
||||
case strings.HasSuffix(err.Error(), scwallet.ErrPINUnblockNeeded.Error()):
|
||||
// PIN unblock requested, fetch PUK and new PIN from the user
|
||||
var pukpin string
|
||||
if input, err := b.prompter.PromptPassword("Please enter current PUK: "); err != nil {
|
||||
throwJSException(err.Error())
|
||||
} else {
|
||||
pukpin = input
|
||||
input, err := b.prompter.PromptPassword("Please enter current PUK: ")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if input, err := b.prompter.PromptPassword("Please enter new PIN: "); err != nil {
|
||||
throwJSException(err.Error())
|
||||
} else {
|
||||
pukpin += input
|
||||
pukpin = input
|
||||
input, err = b.prompter.PromptPassword("Please enter new PIN: ")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
passwd, _ = otto.ToValue(pukpin)
|
||||
if val, err = call.Otto.Call("jeth.openWallet", nil, wallet, passwd); err != nil {
|
||||
throwJSException(err.Error())
|
||||
pukpin += input
|
||||
|
||||
if val, err = openWallet(goja.Null(), wallet, call.VM.ToValue(pukpin)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
case strings.HasSuffix(err.Error(), scwallet.ErrPINNeeded.Error()):
|
||||
// PIN input requested, fetch from the user and call open again
|
||||
if input, err := b.prompter.PromptPassword("Please enter current PIN: "); err != nil {
|
||||
throwJSException(err.Error())
|
||||
} else {
|
||||
passwd, _ = otto.ToValue(input)
|
||||
input, err := b.prompter.PromptPassword("Please enter current PIN: ")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if val, err = call.Otto.Call("jeth.openWallet", nil, wallet, passwd); err != nil {
|
||||
throwJSException(err.Error())
|
||||
if val, err = openWallet(goja.Null(), wallet, call.VM.ToValue(input)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
default:
|
||||
// Unknown error occurred, drop to the user
|
||||
throwJSException(err.Error())
|
||||
return nil, err
|
||||
}
|
||||
return val
|
||||
return val, nil
|
||||
}
|
||||
|
||||
func (b *bridge) readPassphraseAndReopenWallet(call otto.FunctionCall) (otto.Value, error) {
|
||||
var passwd otto.Value
|
||||
func (b *bridge) readPassphraseAndReopenWallet(call jsre.Call) (goja.Value, error) {
|
||||
wallet := call.Argument(0)
|
||||
if input, err := b.prompter.PromptPassword("Please enter your password: "); err != nil {
|
||||
throwJSException(err.Error())
|
||||
} else {
|
||||
passwd, _ = otto.ToValue(input)
|
||||
input, err := b.prompter.PromptPassword("Please enter your passphrase: ")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return call.Otto.Call("jeth.openWallet", nil, wallet, passwd)
|
||||
openWallet, callable := goja.AssertFunction(getJeth(call.VM).Get("openWallet"))
|
||||
if !callable {
|
||||
return nil, fmt.Errorf("jeth.openWallet is not callable")
|
||||
}
|
||||
return openWallet(goja.Null(), wallet, call.VM.ToValue(input))
|
||||
}
|
||||
|
||||
func (b *bridge) readPinAndReopenWallet(call otto.FunctionCall) (otto.Value, error) {
|
||||
var passwd otto.Value
|
||||
func (b *bridge) readPinAndReopenWallet(call jsre.Call) (goja.Value, error) {
|
||||
wallet := call.Argument(0)
|
||||
// Trezor PIN matrix input requested, display the matrix to the user and fetch the data
|
||||
fmt.Fprintf(b.printer, "Look at the device for number positions\n\n")
|
||||
@@ -199,155 +213,154 @@ func (b *bridge) readPinAndReopenWallet(call otto.FunctionCall) (otto.Value, err
|
||||
fmt.Fprintf(b.printer, "--+---+--\n")
|
||||
fmt.Fprintf(b.printer, "1 | 2 | 3\n\n")
|
||||
|
||||
if input, err := b.prompter.PromptPassword("Please enter current PIN: "); err != nil {
|
||||
throwJSException(err.Error())
|
||||
} else {
|
||||
passwd, _ = otto.ToValue(input)
|
||||
input, err := b.prompter.PromptPassword("Please enter current PIN: ")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return call.Otto.Call("jeth.openWallet", nil, wallet, passwd)
|
||||
openWallet, callable := goja.AssertFunction(getJeth(call.VM).Get("openWallet"))
|
||||
if !callable {
|
||||
return nil, fmt.Errorf("jeth.openWallet is not callable")
|
||||
}
|
||||
return openWallet(goja.Null(), wallet, call.VM.ToValue(input))
|
||||
}
|
||||
|
||||
// UnlockAccount is a wrapper around the personal.unlockAccount RPC method that
|
||||
// uses a non-echoing password prompt to acquire the passphrase and executes the
|
||||
// original RPC method (saved in jeth.unlockAccount) with it to actually execute
|
||||
// the RPC call.
|
||||
func (b *bridge) UnlockAccount(call otto.FunctionCall) (response otto.Value) {
|
||||
// Make sure we have an account specified to unlock
|
||||
if !call.Argument(0).IsString() {
|
||||
throwJSException("first argument must be the account to unlock")
|
||||
func (b *bridge) UnlockAccount(call jsre.Call) (goja.Value, error) {
|
||||
// Make sure we have an account specified to unlock.
|
||||
if call.Argument(0).ExportType().Kind() != reflect.String {
|
||||
return nil, fmt.Errorf("first argument must be the account to unlock")
|
||||
}
|
||||
account := call.Argument(0)
|
||||
|
||||
// If password is not given or is the null value, prompt the user for it
|
||||
var passwd otto.Value
|
||||
|
||||
if call.Argument(1).IsUndefined() || call.Argument(1).IsNull() {
|
||||
// If password is not given or is the null value, prompt the user for it.
|
||||
var passwd goja.Value
|
||||
if goja.IsUndefined(call.Argument(1)) || goja.IsNull(call.Argument(1)) {
|
||||
fmt.Fprintf(b.printer, "Unlock account %s\n", account)
|
||||
if input, err := b.prompter.PromptPassword("Password: "); err != nil {
|
||||
throwJSException(err.Error())
|
||||
} else {
|
||||
passwd, _ = otto.ToValue(input)
|
||||
input, err := b.prompter.PromptPassword("Passphrase: ")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
passwd = call.VM.ToValue(input)
|
||||
} else {
|
||||
if !call.Argument(1).IsString() {
|
||||
throwJSException("password must be a string")
|
||||
if call.Argument(1).ExportType().Kind() != reflect.String {
|
||||
return nil, fmt.Errorf("password must be a string")
|
||||
}
|
||||
passwd = call.Argument(1)
|
||||
}
|
||||
// Third argument is the duration how long the account must be unlocked.
|
||||
duration := otto.NullValue()
|
||||
if call.Argument(2).IsDefined() && !call.Argument(2).IsNull() {
|
||||
if !call.Argument(2).IsNumber() {
|
||||
throwJSException("unlock duration must be a number")
|
||||
|
||||
// Third argument is the duration how long the account should be unlocked.
|
||||
duration := goja.Null()
|
||||
if !goja.IsUndefined(call.Argument(2)) && !goja.IsNull(call.Argument(2)) {
|
||||
if !isNumber(call.Argument(2)) {
|
||||
return nil, fmt.Errorf("unlock duration must be a number")
|
||||
}
|
||||
duration = call.Argument(2)
|
||||
}
|
||||
// Send the request to the backend and return
|
||||
val, err := call.Otto.Call("jeth.unlockAccount", nil, account, passwd, duration)
|
||||
if err != nil {
|
||||
throwJSException(err.Error())
|
||||
|
||||
// Send the request to the backend and return.
|
||||
unlockAccount, callable := goja.AssertFunction(getJeth(call.VM).Get("unlockAccount"))
|
||||
if !callable {
|
||||
return nil, fmt.Errorf("jeth.unlockAccount is not callable")
|
||||
}
|
||||
return val
|
||||
return unlockAccount(goja.Null(), account, passwd, duration)
|
||||
}
|
||||
|
||||
// Sign is a wrapper around the personal.sign RPC method that uses a non-echoing password
|
||||
// prompt to acquire the passphrase and executes the original RPC method (saved in
|
||||
// jeth.sign) with it to actually execute the RPC call.
|
||||
func (b *bridge) Sign(call otto.FunctionCall) (response otto.Value) {
|
||||
func (b *bridge) Sign(call jsre.Call) (goja.Value, error) {
|
||||
var (
|
||||
message = call.Argument(0)
|
||||
account = call.Argument(1)
|
||||
passwd = call.Argument(2)
|
||||
)
|
||||
|
||||
if !message.IsString() {
|
||||
throwJSException("first argument must be the message to sign")
|
||||
if message.ExportType().Kind() != reflect.String {
|
||||
return nil, fmt.Errorf("first argument must be the message to sign")
|
||||
}
|
||||
if !account.IsString() {
|
||||
throwJSException("second argument must be the account to sign with")
|
||||
if account.ExportType().Kind() != reflect.String {
|
||||
return nil, fmt.Errorf("second argument must be the account to sign with")
|
||||
}
|
||||
|
||||
// if the password is not given or null ask the user and ensure password is a string
|
||||
if passwd.IsUndefined() || passwd.IsNull() {
|
||||
if goja.IsUndefined(passwd) || goja.IsNull(passwd) {
|
||||
fmt.Fprintf(b.printer, "Give password for account %s\n", account)
|
||||
if input, err := b.prompter.PromptPassword("Password: "); err != nil {
|
||||
throwJSException(err.Error())
|
||||
} else {
|
||||
passwd, _ = otto.ToValue(input)
|
||||
input, err := b.prompter.PromptPassword("Password: ")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
if !passwd.IsString() {
|
||||
throwJSException("third argument must be the password to unlock the account")
|
||||
passwd = call.VM.ToValue(input)
|
||||
} else if passwd.ExportType().Kind() != reflect.String {
|
||||
return nil, fmt.Errorf("third argument must be the password to unlock the account")
|
||||
}
|
||||
|
||||
// Send the request to the backend and return
|
||||
val, err := call.Otto.Call("jeth.sign", nil, message, account, passwd)
|
||||
if err != nil {
|
||||
throwJSException(err.Error())
|
||||
sign, callable := goja.AssertFunction(getJeth(call.VM).Get("sign"))
|
||||
if !callable {
|
||||
return nil, fmt.Errorf("jeth.unlockAccount is not callable")
|
||||
}
|
||||
return val
|
||||
return sign(goja.Null(), message, account, passwd)
|
||||
}
|
||||
|
||||
// Sleep will block the console for the specified number of seconds.
|
||||
func (b *bridge) Sleep(call otto.FunctionCall) (response otto.Value) {
|
||||
if call.Argument(0).IsNumber() {
|
||||
sleep, _ := call.Argument(0).ToInteger()
|
||||
time.Sleep(time.Duration(sleep) * time.Second)
|
||||
return otto.TrueValue()
|
||||
func (b *bridge) Sleep(call jsre.Call) (goja.Value, error) {
|
||||
if !isNumber(call.Argument(0)) {
|
||||
return nil, fmt.Errorf("usage: sleep(<number of seconds>)")
|
||||
}
|
||||
return throwJSException("usage: sleep(<number of seconds>)")
|
||||
sleep := call.Argument(0).ToFloat()
|
||||
time.Sleep(time.Duration(sleep * float64(time.Second)))
|
||||
return call.VM.ToValue(true), nil
|
||||
}
|
||||
|
||||
// SleepBlocks will block the console for a specified number of new blocks optionally
|
||||
// until the given timeout is reached.
|
||||
func (b *bridge) SleepBlocks(call otto.FunctionCall) (response otto.Value) {
|
||||
func (b *bridge) SleepBlocks(call jsre.Call) (goja.Value, error) {
|
||||
// Parse the input parameters for the sleep.
|
||||
var (
|
||||
blocks = int64(0)
|
||||
sleep = int64(9999999999999999) // indefinitely
|
||||
)
|
||||
// Parse the input parameters for the sleep
|
||||
nArgs := len(call.ArgumentList)
|
||||
nArgs := len(call.Arguments)
|
||||
if nArgs == 0 {
|
||||
throwJSException("usage: sleepBlocks(<n blocks>[, max sleep in seconds])")
|
||||
return nil, fmt.Errorf("usage: sleepBlocks(<n blocks>[, max sleep in seconds])")
|
||||
}
|
||||
if nArgs >= 1 {
|
||||
if call.Argument(0).IsNumber() {
|
||||
blocks, _ = call.Argument(0).ToInteger()
|
||||
} else {
|
||||
throwJSException("expected number as first argument")
|
||||
if !isNumber(call.Argument(0)) {
|
||||
return nil, fmt.Errorf("expected number as first argument")
|
||||
}
|
||||
blocks = call.Argument(0).ToInteger()
|
||||
}
|
||||
if nArgs >= 2 {
|
||||
if call.Argument(1).IsNumber() {
|
||||
sleep, _ = call.Argument(1).ToInteger()
|
||||
} else {
|
||||
throwJSException("expected number as second argument")
|
||||
if !isNumber(call.Argument(1)) {
|
||||
return nil, fmt.Errorf("expected number as second argument")
|
||||
}
|
||||
sleep = call.Argument(1).ToInteger()
|
||||
}
|
||||
// go through the console, this will allow web3 to call the appropriate
|
||||
// callbacks if a delayed response or notification is received.
|
||||
blockNumber := func() int64 {
|
||||
result, err := call.Otto.Run("eth.blockNumber")
|
||||
if err != nil {
|
||||
throwJSException(err.Error())
|
||||
}
|
||||
block, err := result.ToInteger()
|
||||
if err != nil {
|
||||
throwJSException(err.Error())
|
||||
}
|
||||
return block
|
||||
}
|
||||
// Poll the current block number until either it ot a timeout is reached
|
||||
targetBlockNr := blockNumber() + blocks
|
||||
deadline := time.Now().Add(time.Duration(sleep) * time.Second)
|
||||
|
||||
// Poll the current block number until either it or a timeout is reached.
|
||||
var (
|
||||
deadline = time.Now().Add(time.Duration(sleep) * time.Second)
|
||||
lastNumber = ^hexutil.Uint64(0)
|
||||
)
|
||||
for time.Now().Before(deadline) {
|
||||
if blockNumber() >= targetBlockNr {
|
||||
return otto.TrueValue()
|
||||
var number hexutil.Uint64
|
||||
err := b.client.Call(&number, "eth_blockNumber")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if number != lastNumber {
|
||||
lastNumber = number
|
||||
blocks--
|
||||
}
|
||||
if blocks <= 0 {
|
||||
break
|
||||
}
|
||||
time.Sleep(time.Second)
|
||||
}
|
||||
return otto.FalseValue()
|
||||
return call.VM.ToValue(true), nil
|
||||
}
|
||||
|
||||
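The reworked loop above no longer evaluates eth.blockNumber inside the JavaScript runtime; it polls the RPC client for eth_blockNumber and counts distinct block numbers until enough new blocks arrive or the deadline passes. A standalone sketch of that polling pattern (the endpoint URL, block count and timeout are placeholder values, not taken from this diff):

    package main

    import (
        "fmt"
        "log"
        "time"

        "github.com/ethereum/go-ethereum/common/hexutil"
        "github.com/ethereum/go-ethereum/rpc"
    )

    func main() {
        client, err := rpc.Dial("http://localhost:8545") // placeholder endpoint
        if err != nil {
            log.Fatal(err)
        }
        defer client.Close()

        blocks := int64(2)                           // wait for two new blocks
        deadline := time.Now().Add(60 * time.Second) // or give up after a minute
        lastNumber := ^hexutil.Uint64(0)

        for time.Now().Before(deadline) {
            var number hexutil.Uint64
            if err := client.Call(&number, "eth_blockNumber"); err != nil {
                log.Fatal(err)
            }
            if number != lastNumber {
                lastNumber = number
                blocks--
            }
            if blocks <= 0 {
                break
            }
            time.Sleep(time.Second)
        }
        fmt.Println("latest block:", uint64(lastNumber))
    }
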
 type jsonrpcCall struct {
@@ -357,15 +370,15 @@ type jsonrpcCall struct {
 }

 // Send implements the web3 provider "send" method.
-func (b *bridge) Send(call otto.FunctionCall) (response otto.Value) {
+func (b *bridge) Send(call jsre.Call) (goja.Value, error) {
     // Remarshal the request into a Go value.
-    JSON, _ := call.Otto.Object("JSON")
-    reqVal, err := JSON.Call("stringify", call.Argument(0))
+    reqVal, err := call.Argument(0).ToObject(call.VM).MarshalJSON()
     if err != nil {
-        throwJSException(err.Error())
+        return nil, err
     }
+
     var (
-        rawReq = reqVal.String()
+        rawReq = string(reqVal)
         dec    = json.NewDecoder(strings.NewReader(rawReq))
         reqs   []jsonrpcCall
         batch  bool
@@ -381,10 +394,12 @@ func (b *bridge) Send(call otto.FunctionCall) (response otto.Value) {
     }

     // Execute the requests.
-    resps, _ := call.Otto.Object("new Array()")
+    var resps []*goja.Object
     for _, req := range reqs {
-        resp, _ := call.Otto.Object(`({"jsonrpc":"2.0"})`)
+        resp := call.VM.NewObject()
+        resp.Set("jsonrpc", "2.0")
         resp.Set("id", req.ID)
+
         var result json.RawMessage
         err = b.client.Call(&result, req.Method, req.Params...)
         switch err := err.(type) {
@@ -392,9 +407,14 @@ func (b *bridge) Send(call otto.FunctionCall) (response otto.Value) {
             if result == nil {
                 // Special case null because it is decoded as an empty
                 // raw message for some reason.
-                resp.Set("result", otto.NullValue())
+                resp.Set("result", goja.Null())
             } else {
-                resultVal, err := JSON.Call("parse", string(result))
+                JSON := call.VM.Get("JSON").ToObject(call.VM)
+                parse, callable := goja.AssertFunction(JSON.Get("parse"))
+                if !callable {
+                    return nil, fmt.Errorf("JSON.parse is not a function")
+                }
+                resultVal, err := parse(goja.Null(), call.VM.ToValue(string(result)))
                 if err != nil {
                     setError(resp, -32603, err.Error())
                 } else {
@@ -406,33 +426,38 @@ func (b *bridge) Send(call otto.FunctionCall) (response otto.Value) {
         default:
             setError(resp, -32603, err.Error())
         }
-        resps.Call("push", resp)
+        resps = append(resps, resp)
     }

     // Return the responses either to the callback (if supplied)
     // or directly as the return value.
+    var result goja.Value
     if batch {
-        response = resps.Value()
+        result = call.VM.ToValue(resps)
     } else {
-        response, _ = resps.Get("0")
+        result = resps[0]
     }
-    if fn := call.Argument(1); fn.Class() == "Function" {
-        fn.Call(otto.NullValue(), otto.NullValue(), response)
-        return otto.UndefinedValue()
+    if fn, isFunc := goja.AssertFunction(call.Argument(1)); isFunc {
+        fn(goja.Null(), goja.Null(), result)
+        return goja.Undefined(), nil
     }
-    return response
+    return result, nil
 }

-func setError(resp *otto.Object, code int, msg string) {
+func setError(resp *goja.Object, code int, msg string) {
     resp.Set("error", map[string]interface{}{"code": code, "message": msg})
 }

-// throwJSException panics on an otto.Value. The Otto VM will recover from the
-// Go panic and throw msg as a JavaScript error.
-func throwJSException(msg interface{}) otto.Value {
-    val, err := otto.ToValue(msg)
-    if err != nil {
-        log.Error("Failed to serialize JavaScript exception", "exception", msg, "err", err)
-    }
-    panic(val)
+// isNumber returns true if input value is a JS number.
+func isNumber(v goja.Value) bool {
+    k := v.ExportType().Kind()
+    return k >= reflect.Int && k <= reflect.Float64
 }
+
+func getObject(vm *goja.Runtime, name string) *goja.Object {
+    v := vm.Get(name)
+    if v == nil {
+        return nil
+    }
+    return v.ToObject(vm)
+}

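Without otto's Object helper, Send marshals the incoming request object with goja's MarshalJSON and re-enters the runtime through its own JSON.parse, first asserting that it is callable. A self-contained sketch of that round trip using only the public goja API (the request payload below is invented for illustration):

    package main

    import (
        "fmt"
        "log"

        "github.com/dop251/goja"
    )

    func main() {
        vm := goja.New()

        // Marshal a JS value to raw JSON, as Send does for the incoming request.
        req, err := vm.RunString(`({jsonrpc: "2.0", id: 1, method: "eth_blockNumber", params: []})`)
        if err != nil {
            log.Fatal(err)
        }
        raw, err := req.ToObject(vm).MarshalJSON()
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(string(raw))

        // Parse raw JSON back into a JS value via the runtime's own JSON.parse.
        JSON := vm.Get("JSON").ToObject(vm)
        parse, callable := goja.AssertFunction(JSON.Get("parse"))
        if !callable {
            log.Fatal("JSON.parse is not a function")
        }
        result, err := parse(goja.Undefined(), vm.ToValue(`{"number":"0x10"}`))
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(result.ToObject(vm).Get("number")) // 0x10
    }
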
@@ -28,12 +28,13 @@ import (
     "strings"
     "syscall"

+    "github.com/dop251/goja"
     "github.com/ethereum/go-ethereum/internal/jsre"
+    "github.com/ethereum/go-ethereum/internal/jsre/deps"
     "github.com/ethereum/go-ethereum/internal/web3ext"
     "github.com/ethereum/go-ethereum/rpc"
     "github.com/mattn/go-colorable"
     "github.com/peterh/liner"
-    "github.com/robertkrimen/otto"
 )

 var (
@@ -86,6 +87,7 @@ func New(config Config) (*Console, error) {
     if config.Printer == nil {
         config.Printer = colorable.NewColorableStdout()
     }
+
     // Initialize the console and return
     console := &Console{
         client: config.Client,
@@ -107,110 +109,35 @@ func New(config Config) (*Console, error) {
 // init retrieves the available APIs from the remote RPC provider and initializes
 // the console's JavaScript namespaces based on the exposed modules.
 func (c *Console) init(preload []string) error {
-    // Initialize the JavaScript <-> Go RPC bridge
+    c.initConsoleObject()
+
+    // Initialize the JavaScript <-> Go RPC bridge.
     bridge := newBridge(c.client, c.prompter, c.printer)
-    c.jsre.Set("jeth", struct{}{})
-
-    jethObj, _ := c.jsre.Get("jeth")
-    jethObj.Object().Set("send", bridge.Send)
-    jethObj.Object().Set("sendAsync", bridge.Send)
-
-    consoleObj, _ := c.jsre.Get("console")
-    consoleObj.Object().Set("log", c.consoleOutput)
-    consoleObj.Object().Set("error", c.consoleOutput)
-
-    // Load all the internal utility JavaScript libraries
-    if err := c.jsre.Compile("bignumber.js", jsre.BignumberJs); err != nil {
-        return fmt.Errorf("bignumber.js: %v", err)
-    }
-    if err := c.jsre.Compile("web3.js", jsre.Web3Js); err != nil {
-        return fmt.Errorf("web3.js: %v", err)
-    }
-    if _, err := c.jsre.Run("var Web3 = require('web3');"); err != nil {
-        return fmt.Errorf("web3 require: %v", err)
-    }
-    if _, err := c.jsre.Run("var web3 = new Web3(jeth);"); err != nil {
-        return fmt.Errorf("web3 provider: %v", err)
-    }
-    // Load the supported APIs into the JavaScript runtime environment
-    apis, err := c.client.SupportedModules()
-    if err != nil {
-        return fmt.Errorf("api modules: %v", err)
-    }
-    flatten := "var eth = web3.eth; var personal = web3.personal; "
-    for api := range apis {
-        if api == "web3" {
-            continue // manually mapped or ignore
-        }
-        if file, ok := web3ext.Modules[api]; ok {
-            // Load our extension for the module.
-            if err = c.jsre.Compile(fmt.Sprintf("%s.js", api), file); err != nil {
-                return fmt.Errorf("%s.js: %v", api, err)
-            }
-            flatten += fmt.Sprintf("var %s = web3.%s; ", api, api)
-        } else if obj, err := c.jsre.Run("web3." + api); err == nil && obj.IsObject() {
-            // Enable web3.js built-in extension if available.
-            flatten += fmt.Sprintf("var %s = web3.%s; ", api, api)
-        }
-    }
-    if _, err = c.jsre.Run(flatten); err != nil {
-        return fmt.Errorf("namespace flattening: %v", err)
-    }
-    // Initialize the global name register (disabled for now)
-    //c.jsre.Run(`var GlobalRegistrar = eth.contract(` + registrar.GlobalRegistrarAbi + `); registrar = GlobalRegistrar.at("` + registrar.GlobalRegistrarAddr + `");`)
-
-    // If the console is in interactive mode, instrument password related methods to query the user
-    if c.prompter != nil {
-        // Retrieve the account management object to instrument
-        personal, err := c.jsre.Get("personal")
-        if err != nil {
-            return err
-        }
-        // Override the openWallet, unlockAccount, newAccount and sign methods since
-        // these require user interaction. Assign these method in the Console the
-        // original web3 callbacks. These will be called by the jeth.* methods after
-        // they got the password from the user and send the original web3 request to
-        // the backend.
-        if obj := personal.Object(); obj != nil { // make sure the personal api is enabled over the interface
-            if _, err = c.jsre.Run(`jeth.openWallet = personal.openWallet;`); err != nil {
-                return fmt.Errorf("personal.openWallet: %v", err)
-            }
-            if _, err = c.jsre.Run(`jeth.unlockAccount = personal.unlockAccount;`); err != nil {
-                return fmt.Errorf("personal.unlockAccount: %v", err)
-            }
-            if _, err = c.jsre.Run(`jeth.newAccount = personal.newAccount;`); err != nil {
-                return fmt.Errorf("personal.newAccount: %v", err)
-            }
-            if _, err = c.jsre.Run(`jeth.sign = personal.sign;`); err != nil {
-                return fmt.Errorf("personal.sign: %v", err)
-            }
-            obj.Set("openWallet", bridge.OpenWallet)
-            obj.Set("unlockAccount", bridge.UnlockAccount)
-            obj.Set("newAccount", bridge.NewAccount)
-            obj.Set("sign", bridge.Sign)
-        }
-    }
-    // The admin.sleep and admin.sleepBlocks are offered by the console and not by the RPC layer.
-    admin, err := c.jsre.Get("admin")
-    if err != nil {
+    if err := c.initWeb3(bridge); err != nil {
         return err
     }
-    if obj := admin.Object(); obj != nil { // make sure the admin api is enabled over the interface
-        obj.Set("sleepBlocks", bridge.SleepBlocks)
-        obj.Set("sleep", bridge.Sleep)
-        obj.Set("clearHistory", c.clearHistory)
+    if err := c.initExtensions(); err != nil {
+        return err
     }
-    // Preload any JavaScript files before starting the console
+
+    // Add bridge overrides for web3.js functionality.
+    c.jsre.Do(func(vm *goja.Runtime) {
+        c.initAdmin(vm, bridge)
+        c.initPersonal(vm, bridge)
+    })
+
+    // Preload JavaScript files.
     for _, path := range preload {
         if err := c.jsre.Exec(path); err != nil {
             failure := err.Error()
-            if ottoErr, ok := err.(*otto.Error); ok {
-                failure = ottoErr.String()
+            if gojaErr, ok := err.(*goja.Exception); ok {
+                failure = gojaErr.String()
             }
             return fmt.Errorf("%s: %v", path, failure)
         }
     }
-    // Configure the console's input prompter for scrollback and tab completion
+
+    // Configure the input prompter for history and tab completion.
     if c.prompter != nil {
         if content, err := ioutil.ReadFile(c.histPath); err != nil {
             c.prompter.SetHistory(nil)
@@ -223,6 +150,102 @@ func (c *Console) init(preload []string) error {
     return nil
 }

+func (c *Console) initConsoleObject() {
+    c.jsre.Do(func(vm *goja.Runtime) {
+        console := vm.NewObject()
+        console.Set("log", c.consoleOutput)
+        console.Set("error", c.consoleOutput)
+        vm.Set("console", console)
+    })
+}
+
+func (c *Console) initWeb3(bridge *bridge) error {
+    bnJS := string(deps.MustAsset("bignumber.js"))
+    web3JS := string(deps.MustAsset("web3.js"))
+    if err := c.jsre.Compile("bignumber.js", bnJS); err != nil {
+        return fmt.Errorf("bignumber.js: %v", err)
+    }
+    if err := c.jsre.Compile("web3.js", web3JS); err != nil {
+        return fmt.Errorf("web3.js: %v", err)
+    }
+    if _, err := c.jsre.Run("var Web3 = require('web3');"); err != nil {
+        return fmt.Errorf("web3 require: %v", err)
+    }
+    var err error
+    c.jsre.Do(func(vm *goja.Runtime) {
+        transport := vm.NewObject()
+        transport.Set("send", jsre.MakeCallback(vm, bridge.Send))
+        transport.Set("sendAsync", jsre.MakeCallback(vm, bridge.Send))
+        vm.Set("_consoleWeb3Transport", transport)
+        _, err = vm.RunString("var web3 = new Web3(_consoleWeb3Transport)")
+    })
+    return err
+}
+
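The helpers above run inside c.jsre.Do, which hands them the underlying *goja.Runtime so objects can be built directly instead of through otto-style value wrappers. A self-contained sketch of the same object-building pattern, wiring up a console.log that writes to a configurable stream (the names and the output stream are illustrative):

    package main

    import (
        "fmt"
        "os"
        "strings"

        "github.com/dop251/goja"
    )

    func main() {
        vm := goja.New()
        printer := os.Stdout // stand-in for the console's configured output stream

        consoleObj := vm.NewObject()
        output := func(call goja.FunctionCall) goja.Value {
            parts := make([]string, 0, len(call.Arguments))
            for _, arg := range call.Arguments {
                parts = append(parts, fmt.Sprintf("%v", arg))
            }
            fmt.Fprintln(printer, strings.Join(parts, " "))
            return goja.Undefined()
        }
        consoleObj.Set("log", output)
        consoleObj.Set("error", output)
        vm.Set("console", consoleObj)

        if _, err := vm.RunString(`console.log("hello from", "goja")`); err != nil {
            panic(err)
        }
    }
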
+// initExtensions loads and registers web3.js extensions.
+func (c *Console) initExtensions() error {
+    // Compute aliases from server-provided modules.
+    apis, err := c.client.SupportedModules()
+    if err != nil {
+        return fmt.Errorf("api modules: %v", err)
+    }
+    aliases := map[string]struct{}{"eth": {}, "personal": {}}
+    for api := range apis {
+        if api == "web3" {
+            continue
+        }
+        aliases[api] = struct{}{}
+        if file, ok := web3ext.Modules[api]; ok {
+            if err = c.jsre.Compile(api+".js", file); err != nil {
+                return fmt.Errorf("%s.js: %v", api, err)
+            }
+        }
+    }
+
+    // Apply aliases.
+    c.jsre.Do(func(vm *goja.Runtime) {
+        web3 := getObject(vm, "web3")
+        for name := range aliases {
+            if v := web3.Get(name); v != nil {
+                vm.Set(name, v)
+            }
+        }
+    })
+    return nil
+}
+
+// initAdmin creates additional admin APIs implemented by the bridge.
+func (c *Console) initAdmin(vm *goja.Runtime, bridge *bridge) {
+    if admin := getObject(vm, "admin"); admin != nil {
+        admin.Set("sleepBlocks", jsre.MakeCallback(vm, bridge.SleepBlocks))
+        admin.Set("sleep", jsre.MakeCallback(vm, bridge.Sleep))
+        admin.Set("clearHistory", c.clearHistory)
+    }
+}
+
+// initPersonal redirects account-related API methods through the bridge.
+//
+// If the console is in interactive mode and the 'personal' API is available, override
+// the openWallet, unlockAccount, newAccount and sign methods since these require user
+// interaction. The original web3 callbacks are stored in 'jeth'. These will be called
+// by the bridge after the prompt and send the original web3 request to the backend.
+func (c *Console) initPersonal(vm *goja.Runtime, bridge *bridge) {
+    personal := getObject(vm, "personal")
+    if personal == nil || c.prompter == nil {
+        return
+    }
+    jeth := vm.NewObject()
+    vm.Set("jeth", jeth)
+    jeth.Set("openWallet", personal.Get("openWallet"))
+    jeth.Set("unlockAccount", personal.Get("unlockAccount"))
+    jeth.Set("newAccount", personal.Get("newAccount"))
+    jeth.Set("sign", personal.Get("sign"))
+    personal.Set("openWallet", jsre.MakeCallback(vm, bridge.OpenWallet))
+    personal.Set("unlockAccount", jsre.MakeCallback(vm, bridge.UnlockAccount))
+    personal.Set("newAccount", jsre.MakeCallback(vm, bridge.NewAccount))
+    personal.Set("sign", jsre.MakeCallback(vm, bridge.Sign))
+}
+
 func (c *Console) clearHistory() {
     c.history = nil
     c.prompter.ClearHistory()
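jsre.MakeCallback, used above for the transport, admin and personal methods, adapts a bridge method of the form func(jsre.Call) (goja.Value, error) into a JavaScript-callable function. Its implementation is not shown in this diff; the sketch below is a hypothetical illustration of how such an adapter can turn a returned Go error into a thrown JS exception:

    package main

    import (
        "errors"
        "fmt"

        "github.com/dop251/goja"
    )

    // makeCallback is a hypothetical stand-in for jsre.MakeCallback: it wraps a
    // Go function returning (value, error) as a JS function that throws on error.
    func makeCallback(vm *goja.Runtime, fn func(goja.FunctionCall) (goja.Value, error)) goja.Value {
        return vm.ToValue(func(call goja.FunctionCall) goja.Value {
            result, err := fn(call)
            if err != nil {
                panic(vm.ToValue(err.Error())) // surfaces as a JS exception
            }
            return result
        })
    }

    func main() {
        vm := goja.New()
        unlock := func(call goja.FunctionCall) (goja.Value, error) {
            if len(call.Arguments) == 0 {
                return nil, errors.New("usage: unlockAccount(address)")
            }
            return vm.ToValue(true), nil
        }
        vm.Set("unlockAccount", makeCallback(vm, unlock))

        if _, err := vm.RunString(`unlockAccount()`); err != nil {
            fmt.Println("JS error:", err) // the Go error, rethrown as a JS exception
        }
    }
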
@@ -235,13 +258,13 @@ func (c *Console) clearHistory() {

 // consoleOutput is an override for the console.log and console.error methods to
 // stream the output into the configured output stream instead of stdout.
-func (c *Console) consoleOutput(call otto.FunctionCall) otto.Value {
+func (c *Console) consoleOutput(call goja.FunctionCall) goja.Value {
     var output []string
-    for _, argument := range call.ArgumentList {
+    for _, argument := range call.Arguments {
         output = append(output, fmt.Sprintf("%v", argument))
     }
     fmt.Fprintln(c.printer, strings.Join(output, " "))
-    return otto.Value{}
+    return goja.Null()
 }

 // AutoCompleteInput is a pre-assembled word completer to be used by the user
@@ -304,13 +327,13 @@ func (c *Console) Welcome() {

 // Evaluate executes code and pretty prints the result to the specified output
 // stream.
-func (c *Console) Evaluate(statement string) error {
+func (c *Console) Evaluate(statement string) {
     defer func() {
         if r := recover(); r != nil {
             fmt.Fprintf(c.printer, "[native] error: %v\n", r)
         }
     }()
-    return c.jsre.Evaluate(statement, c.printer)
+    c.jsre.Evaluate(statement, c.printer)
 }

 // Interactive starts an interactive user session, where input is prompted from

@@ -289,7 +289,7 @@ func TestPrettyError(t *testing.T) {
     defer tester.Close(t)
     tester.console.Evaluate("throw 'hello'")

-    want := jsre.ErrorColor("hello") + "\n"
+    want := jsre.ErrorColor("hello") + "\n\tat <eval>:1:7(1)\n\n"
     if output := tester.output.String(); output != want {
         t.Fatalf("pretty error mismatch: have %s, want %s", output, want)
     }

@@ -29,8 +29,9 @@ import (
     "github.com/ethereum/go-ethereum/core/types"
 )

-// CheckpointOracle is a Go wrapper around an on-chain light client checkpoint oracle.
+// CheckpointOracle is a Go wrapper around an on-chain checkpoint oracle contract.
 type CheckpointOracle struct {
+    address  common.Address
     contract *contract.CheckpointOracle
 }

@@ -40,7 +41,12 @@ func NewCheckpointOracle(contractAddr common.Address, backend bind.ContractBacke
     if err != nil {
         return nil, err
     }
-    return &CheckpointOracle{contract: c}, nil
+    return &CheckpointOracle{address: contractAddr, contract: c}, nil
 }

+// ContractAddr returns the address of contract.
+func (oracle *CheckpointOracle) ContractAddr() common.Address {
+    return oracle.address
+}
+
 // Contract returns the underlying contract instance.

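With the deployment address stored on the wrapper, callers can read it back from the Go object directly. A usage sketch, assuming a reachable node and a known oracle deployment (the endpoint and address below are placeholders, not values from this diff):

    package main

    import (
        "fmt"
        "log"

        "github.com/ethereum/go-ethereum/common"
        "github.com/ethereum/go-ethereum/contracts/checkpointoracle"
        "github.com/ethereum/go-ethereum/ethclient"
    )

    func main() {
        // Placeholder endpoint and contract address; substitute a real node and deployment.
        backend, err := ethclient.Dial("http://localhost:8545")
        if err != nil {
            log.Fatal(err)
        }
        oracleAddr := common.HexToAddress("0x0000000000000000000000000000000000000042")
        oracle, err := checkpointoracle.NewCheckpointOracle(oracleAddr, backend)
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println("oracle deployed at:", oracle.ContractAddr())
    }
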
@@ -57,6 +57,7 @@ func NewCompiler(debug bool) *Compiler {
 // second stage to push labels and determine the right
 // position.
 func (c *Compiler) Feed(ch <-chan token) {
+    var prev token
     for i := range ch {
         switch i.typ {
         case number:
@@ -73,10 +74,14 @@ func (c *Compiler) Feed(ch <-chan token) {
             c.labels[i.text] = c.pc
             c.pc++
         case label:
-            c.pc += 5
+            c.pc += 4
+            if prev.typ == element && isJump(prev.text) {
+                c.pc++
+            }
         }

         c.tokens = append(c.tokens, i)
+        prev = i
     }
     if c.debug {
         fmt.Fprintln(os.Stderr, "found", len(c.labels), "labels")
@@ -181,6 +186,8 @@ func (c *Compiler) compileElement(element token) error {
             pos := big.NewInt(int64(c.labels[rvalue.text])).Bytes()
             pos = append(make([]byte, 4-len(pos)), pos...)
             c.pushBin(pos)
+        case lineEnd:
+            c.pos--
         default:
             return compileErr(rvalue, rvalue.text, "number, string or label")
         }
@@ -201,8 +208,8 @@ func (c *Compiler) compileElement(element token) error {
         case stringValue:
             value = []byte(rvalue.text[1 : len(rvalue.text)-1])
         case label:
-            value = make([]byte, 4)
-            copy(value, big.NewInt(int64(c.labels[rvalue.text])).Bytes())
+            value = big.NewInt(int64(c.labels[rvalue.text])).Bytes()
+            value = append(make([]byte, 4-len(value)), value...)
         default:
             return compileErr(rvalue, rvalue.text, "number, string or label")
         }

Some files were not shown because too many files have changed in this diff.