Compare commits
481 Commits
.github/CODEOWNERS (12 changes)

@@ -3,21 +3,21 @@
 accounts/usbwallet @karalabe
 accounts/scwallet @gballet
-accounts/abi @gballet
+accounts/abi @gballet @MariusVanDerWijden
 cmd/clef @holiman
 cmd/puppeth @karalabe
 consensus @karalabe
 core/ @karalabe @holiman @rjl493456442
-dashboard/ @kurkomisi
 eth/ @karalabe @holiman @rjl493456442
 graphql/ @gballet
 les/ @zsfelfoldi @rjl493456442
 light/ @zsfelfoldi @rjl493456442
 mobile/ @karalabe @ligi
+node/ @fjl @renaynay
 p2p/ @fjl @zsfelfoldi
 rpc/ @fjl @holiman
-p2p/simulations @zelig @janos @justelad
-p2p/protocols @zelig @janos @justelad
-p2p/testing @zelig @janos @justelad
+p2p/simulations @fjl
+p2p/protocols @fjl
+p2p/testing @fjl
 signer/ @holiman
-whisper/ @gballet @gluk256
+whisper/ @gballet
.github/ISSUE_TEMPLATE.md (2 changes)

@@ -2,7 +2,7 @@ Hi there,
 
 Please note that this is an issue tracker reserved for bug reports and feature requests.
 
-For general questions please use the gitter channel or the Ethereum stack exchange at https://ethereum.stackexchange.com.
+For general questions please use [discord](https://discord.gg/nthXNEv) or the Ethereum stack exchange at https://ethereum.stackexchange.com.
 
 #### System information
.golangci.yml

@@ -1,11 +1,13 @@
 # This file configures github.com/golangci/golangci-lint.
 
 run:
-  timeout: 2m
+  timeout: 3m
   tests: true
   # default is true. Enables skipping of directories:
   #   vendor$, third_party$, testdata$, examples$, Godeps$, builtin$
   skip-dirs-use-default: true
+  skip-files:
+    - core/genesis_alloc.go
 
 linters:
   disable-all: true
@@ -43,3 +45,6 @@ issues:
     - path: core/vm/instructions_test.go
       linters:
         - goconst
+    - path: cmd/faucet/
+      linters:
+        - deadcode
.travis.yml (58 changes)

@@ -2,12 +2,21 @@ language: go
 go_import_path: github.com/ethereum/go-ethereum
 sudo: false
 jobs:
+  allow_failures:
+    - stage: build
+      os: osx
+      go: 1.14.x
+      env:
+        - azure-osx
+        - azure-ios
+        - cocoapods-ios
+
   include:
     # This builder only tests code linters on latest version of Go
     - stage: lint
       os: linux
       dist: xenial
-      go: 1.13.x
+      go: 1.14.x
      env:
        - lint
      git:
@@ -18,17 +27,7 @@ jobs:
     - stage: build
       os: linux
       dist: xenial
-      go: 1.11.x
-      env:
-        - GO111MODULE=on
-      script:
-        - go run build/ci.go install
-        - go run build/ci.go test -coverage $TEST_PACKAGES
-
-    - stage: build
-      os: linux
-      dist: xenial
-      go: 1.12.x
+      go: 1.13.x
       env:
         - GO111MODULE=on
       script:
@@ -40,7 +39,9 @@ jobs:
       os: linux
       arch: amd64
       dist: xenial
-      go: 1.13.x
+      go: 1.14.x
+      env:
+        - GO111MODULE=on
       script:
         - go run build/ci.go install
         - go run build/ci.go test -coverage $TEST_PACKAGES
@@ -50,14 +51,19 @@ jobs:
       os: linux
       arch: arm64
       dist: xenial
-      go: 1.13.x
+      go: 1.14.x
+      env:
+        - GO111MODULE=on
       script:
         - go run build/ci.go install
         - go run build/ci.go test -coverage $TEST_PACKAGES
 
     - stage: build
       os: osx
-      go: 1.13.x
+      osx_image: xcode11.3
+      go: 1.14.x
+      env:
+        - GO111MODULE=on
       script:
         - echo "Increase the maximum number of open file descriptors on macOS"
         - NOFILE=20480
@@ -76,9 +82,10 @@ jobs:
       if: type = push
       os: linux
       dist: xenial
-      go: 1.13.x
+      go: 1.14.x
       env:
         - ubuntu-ppa
+        - GO111MODULE=on
       git:
         submodules: false # avoid cloning ethereum/tests
       addons:
@@ -92,7 +99,7 @@ jobs:
           - python-paramiko
       script:
         - echo '|1|7SiYPr9xl3uctzovOTj4gMwAC1M=|t6ReES75Bo/PxlOPJ6/GsGbTrM0= ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA0aKz5UTUndYgIGG7dQBV+HaeuEZJ2xPHo2DS2iSKvUL4xNMSAY4UguNW+pX56nAQmZKIZZ8MaEvSj6zMEDiq6HFfn5JcTlM80UwlnyKe8B8p7Nk06PPQLrnmQt5fh0HmEcZx+JU9TZsfCHPnX7MNz4ELfZE6cFsclClrKim3BHUIGq//t93DllB+h4O9LHjEUsQ1Sr63irDLSutkLJD6RXchjROXkNirlcNVHH/jwLWR5RcYilNX7S5bIkK8NlWPjsn/8Ua5O7I9/YoE97PpO6i73DTGLh5H9JN/SITwCKBkgSDWUt61uPK3Y11Gty7o2lWsBjhBUm2Y38CBsoGmBw==' >> ~/.ssh/known_hosts
-        - go run build/ci.go debsrc -goversion 1.13.4 -upload ethereum/ethereum -sftp-user geth-ci -signer "Go Ethereum Linux Builder <geth-ci@ethereum.org>"
+        - go run build/ci.go debsrc -goversion 1.14.2 -upload ethereum/ethereum -sftp-user geth-ci -signer "Go Ethereum Linux Builder <geth-ci@ethereum.org>"
 
     # This builder does the Linux Azure uploads
     - stage: build
@@ -100,9 +107,10 @@ jobs:
       os: linux
       dist: xenial
       sudo: required
-      go: 1.13.x
+      go: 1.14.x
       env:
         - azure-linux
+        - GO111MODULE=on
       git:
         submodules: false # avoid cloning ethereum/tests
       addons:
@@ -136,9 +144,10 @@ jobs:
       dist: xenial
       services:
         - docker
-      go: 1.13.x
+      go: 1.14.x
       env:
         - azure-linux-mips
+        - GO111MODULE=on
       git:
         submodules: false # avoid cloning ethereum/tests
       script:
@@ -179,10 +188,11 @@ jobs:
       env:
         - azure-android
         - maven-android
+        - GO111MODULE=on
       git:
         submodules: false # avoid cloning ethereum/tests
       before_install:
-        - curl https://dl.google.com/go/go1.13.linux-amd64.tar.gz | tar -xz
+        - curl https://dl.google.com/go/go1.14.2.linux-amd64.tar.gz | tar -xz
         - export PATH=`pwd`/go/bin:$PATH
         - export GOROOT=`pwd`/go
         - export GOPATH=$HOME/go
@@ -200,11 +210,12 @@ jobs:
     - stage: build
       if: type = push
       os: osx
-      go: 1.13.x
+      go: 1.14.x
       env:
         - azure-osx
         - azure-ios
         - cocoapods-ios
+        - GO111MODULE=on
       git:
         submodules: false # avoid cloning ethereum/tests
       script:
@@ -231,9 +242,10 @@ jobs:
       if: type = cron
       os: linux
       dist: xenial
-      go: 1.13.x
+      go: 1.14.x
       env:
         - azure-purge
+        - GO111MODULE=on
       git:
         submodules: false # avoid cloning ethereum/tests
       script:
@@ -1,5 +1,5 @@
 # Build Geth in a stock Go builder container
-FROM golang:1.13-alpine as builder
+FROM golang:1.14-alpine as builder
 
 RUN apk add --no-cache make gcc musl-dev linux-headers git
 
@@ -1,5 +1,5 @@
 # Build Geth in a stock Go builder container
-FROM golang:1.13-alpine as builder
+FROM golang:1.14-alpine as builder
 
 RUN apk add --no-cache make gcc musl-dev linux-headers git
 
Makefile (43 changes)

@@ -10,33 +10,34 @@
 
 GOBIN = ./build/bin
 GO ?= latest
+GORUN = env GO111MODULE=on go run
 
 geth:
-	build/env.sh go run build/ci.go install ./cmd/geth
+	$(GORUN) build/ci.go install ./cmd/geth
 	@echo "Done building."
 	@echo "Run \"$(GOBIN)/geth\" to launch geth."
 
 all:
-	build/env.sh go run build/ci.go install
+	$(GORUN) build/ci.go install
 
 android:
-	build/env.sh go run build/ci.go aar --local
+	$(GORUN) build/ci.go aar --local
 	@echo "Done building."
 	@echo "Import \"$(GOBIN)/geth.aar\" to use the library."
 
 ios:
-	build/env.sh go run build/ci.go xcode --local
+	$(GORUN) build/ci.go xcode --local
 	@echo "Done building."
 	@echo "Import \"$(GOBIN)/Geth.framework\" to use the library."
 
 test: all
-	build/env.sh go run build/ci.go test
+	$(GORUN) build/ci.go test
 
 lint: ## Run linters.
-	build/env.sh go run build/ci.go lint
+	$(GORUN) build/ci.go lint
 
 clean:
-	go clean -cache
+	env GO111MODULE=on go clean -cache
 	rm -fr build/_workspace/pkg/ $(GOBIN)/*
 
 # The devtools target installs tools required for 'go generate'.
@@ -63,12 +64,12 @@ geth-linux: geth-linux-386 geth-linux-amd64 geth-linux-arm geth-linux-mips64 get
 	@ls -ld $(GOBIN)/geth-linux-*
 
 geth-linux-386:
-	build/env.sh go run build/ci.go xgo -- --go=$(GO) --targets=linux/386 -v ./cmd/geth
+	$(GORUN) build/ci.go xgo -- --go=$(GO) --targets=linux/386 -v ./cmd/geth
 	@echo "Linux 386 cross compilation done:"
 	@ls -ld $(GOBIN)/geth-linux-* | grep 386
 
 geth-linux-amd64:
-	build/env.sh go run build/ci.go xgo -- --go=$(GO) --targets=linux/amd64 -v ./cmd/geth
+	$(GORUN) build/ci.go xgo -- --go=$(GO) --targets=linux/amd64 -v ./cmd/geth
 	@echo "Linux amd64 cross compilation done:"
 	@ls -ld $(GOBIN)/geth-linux-* | grep amd64
 
@@ -77,42 +78,42 @@ geth-linux-arm: geth-linux-arm-5 geth-linux-arm-6 geth-linux-arm-7 geth-linux-ar
 	@ls -ld $(GOBIN)/geth-linux-* | grep arm
 
 geth-linux-arm-5:
-	build/env.sh go run build/ci.go xgo -- --go=$(GO) --targets=linux/arm-5 -v ./cmd/geth
+	$(GORUN) build/ci.go xgo -- --go=$(GO) --targets=linux/arm-5 -v ./cmd/geth
 	@echo "Linux ARMv5 cross compilation done:"
 	@ls -ld $(GOBIN)/geth-linux-* | grep arm-5
 
 geth-linux-arm-6:
-	build/env.sh go run build/ci.go xgo -- --go=$(GO) --targets=linux/arm-6 -v ./cmd/geth
+	$(GORUN) build/ci.go xgo -- --go=$(GO) --targets=linux/arm-6 -v ./cmd/geth
 	@echo "Linux ARMv6 cross compilation done:"
 	@ls -ld $(GOBIN)/geth-linux-* | grep arm-6
 
 geth-linux-arm-7:
-	build/env.sh go run build/ci.go xgo -- --go=$(GO) --targets=linux/arm-7 -v ./cmd/geth
+	$(GORUN) build/ci.go xgo -- --go=$(GO) --targets=linux/arm-7 -v ./cmd/geth
 	@echo "Linux ARMv7 cross compilation done:"
 	@ls -ld $(GOBIN)/geth-linux-* | grep arm-7
 
 geth-linux-arm64:
-	build/env.sh go run build/ci.go xgo -- --go=$(GO) --targets=linux/arm64 -v ./cmd/geth
+	$(GORUN) build/ci.go xgo -- --go=$(GO) --targets=linux/arm64 -v ./cmd/geth
 	@echo "Linux ARM64 cross compilation done:"
 	@ls -ld $(GOBIN)/geth-linux-* | grep arm64
 
 geth-linux-mips:
-	build/env.sh go run build/ci.go xgo -- --go=$(GO) --targets=linux/mips --ldflags '-extldflags "-static"' -v ./cmd/geth
+	$(GORUN) build/ci.go xgo -- --go=$(GO) --targets=linux/mips --ldflags '-extldflags "-static"' -v ./cmd/geth
 	@echo "Linux MIPS cross compilation done:"
 	@ls -ld $(GOBIN)/geth-linux-* | grep mips
 
 geth-linux-mipsle:
-	build/env.sh go run build/ci.go xgo -- --go=$(GO) --targets=linux/mipsle --ldflags '-extldflags "-static"' -v ./cmd/geth
+	$(GORUN) build/ci.go xgo -- --go=$(GO) --targets=linux/mipsle --ldflags '-extldflags "-static"' -v ./cmd/geth
 	@echo "Linux MIPSle cross compilation done:"
 	@ls -ld $(GOBIN)/geth-linux-* | grep mipsle
 
 geth-linux-mips64:
-	build/env.sh go run build/ci.go xgo -- --go=$(GO) --targets=linux/mips64 --ldflags '-extldflags "-static"' -v ./cmd/geth
+	$(GORUN) build/ci.go xgo -- --go=$(GO) --targets=linux/mips64 --ldflags '-extldflags "-static"' -v ./cmd/geth
 	@echo "Linux MIPS64 cross compilation done:"
 	@ls -ld $(GOBIN)/geth-linux-* | grep mips64
 
 geth-linux-mips64le:
-	build/env.sh go run build/ci.go xgo -- --go=$(GO) --targets=linux/mips64le --ldflags '-extldflags "-static"' -v ./cmd/geth
+	$(GORUN) build/ci.go xgo -- --go=$(GO) --targets=linux/mips64le --ldflags '-extldflags "-static"' -v ./cmd/geth
 	@echo "Linux MIPS64le cross compilation done:"
 	@ls -ld $(GOBIN)/geth-linux-* | grep mips64le
 
@@ -121,12 +122,12 @@ geth-darwin: geth-darwin-386 geth-darwin-amd64
 	@ls -ld $(GOBIN)/geth-darwin-*
 
 geth-darwin-386:
-	build/env.sh go run build/ci.go xgo -- --go=$(GO) --targets=darwin/386 -v ./cmd/geth
+	$(GORUN) build/ci.go xgo -- --go=$(GO) --targets=darwin/386 -v ./cmd/geth
 	@echo "Darwin 386 cross compilation done:"
 	@ls -ld $(GOBIN)/geth-darwin-* | grep 386
 
 geth-darwin-amd64:
-	build/env.sh go run build/ci.go xgo -- --go=$(GO) --targets=darwin/amd64 -v ./cmd/geth
+	$(GORUN) build/ci.go xgo -- --go=$(GO) --targets=darwin/amd64 -v ./cmd/geth
 	@echo "Darwin amd64 cross compilation done:"
 	@ls -ld $(GOBIN)/geth-darwin-* | grep amd64
 
@@ -135,11 +136,11 @@ geth-windows: geth-windows-386 geth-windows-amd64
 	@ls -ld $(GOBIN)/geth-windows-*
 
 geth-windows-386:
-	build/env.sh go run build/ci.go xgo -- --go=$(GO) --targets=windows/386 -v ./cmd/geth
+	$(GORUN) build/ci.go xgo -- --go=$(GO) --targets=windows/386 -v ./cmd/geth
 	@echo "Windows 386 cross compilation done:"
 	@ls -ld $(GOBIN)/geth-windows-* | grep 386
 
 geth-windows-amd64:
-	build/env.sh go run build/ci.go xgo -- --go=$(GO) --targets=windows/amd64 -v ./cmd/geth
+	$(GORUN) build/ci.go xgo -- --go=$(GO) --targets=windows/amd64 -v ./cmd/geth
 	@echo "Windows amd64 cross compilation done:"
 	@ls -ld $(GOBIN)/geth-windows-* | grep amd64
README.md (71 changes)

@@ -4,7 +4,7 @@ Official Golang implementation of the Ethereum protocol.
 
-[](https://godoc.org/github.com/ethereum/go-ethereum)
+[](https://pkg.go.dev/github.com/ethereum/go-ethereum?tab=doc)
 [](https://goreportcard.com/report/github.com/ethereum/go-ethereum)
 [](https://travis-ci.org/ethereum/go-ethereum)
 [](https://discord.gg/nthXNEv)
@@ -16,7 +16,7 @@ archives are published at https://geth.ethereum.org/downloads/.
 
 For prerequisites and detailed build instructions please read the [Installation Instructions](https://github.com/ethereum/go-ethereum/wiki/Building-Ethereum) on the wiki.
 
-Building `geth` requires both a Go (version 1.10 or later) and a C compiler. You can install
+Building `geth` requires both a Go (version 1.13 or later) and a C compiler. You can install
 them using your favourite package manager. Once the dependencies are installed, run
 
 ```shell
@@ -39,7 +39,7 @@ directory.
 | **`geth`** | Our main Ethereum CLI client. It is the entry point into the Ethereum network (main-, test- or private net), capable of running as a full node (default), archive node (retaining all historical state) or a light node (retrieving data live). It can be used by other processes as a gateway into the Ethereum network via JSON RPC endpoints exposed on top of HTTP, WebSocket and/or IPC transports. `geth --help` and the [CLI Wiki page](https://github.com/ethereum/go-ethereum/wiki/Command-Line-Options) for command line options. |
 | `abigen` | Source code generator to convert Ethereum contract definitions into easy to use, compile-time type-safe Go packages. It operates on plain [Ethereum contract ABIs](https://github.com/ethereum/wiki/wiki/Ethereum-Contract-ABI) with expanded functionality if the contract bytecode is also available. However, it also accepts Solidity source files, making development much more streamlined. Please see our [Native DApps](https://github.com/ethereum/go-ethereum/wiki/Native-DApps:-Go-bindings-to-Ethereum-contracts) wiki page for details. |
 | `bootnode` | Stripped down version of our Ethereum client implementation that only takes part in the network node discovery protocol, but does not run any of the higher level application protocols. It can be used as a lightweight bootstrap node to aid in finding peers in private networks. |
-| `evm` | Developer utility version of the EVM (Ethereum Virtual Machine) that is capable of running bytecode snippets within a configurable environment and execution mode. Its purpose is to allow isolated, fine-grained debugging of EVM opcodes (e.g. `evm --code 60ff60ff --debug`). |
+| `evm` | Developer utility version of the EVM (Ethereum Virtual Machine) that is capable of running bytecode snippets within a configurable environment and execution mode. Its purpose is to allow isolated, fine-grained debugging of EVM opcodes (e.g. `evm --code 60ff60ff --debug run`). |
 | `gethrpctest` | Developer utility tool to support our [ethereum/rpc-test](https://github.com/ethereum/rpc-tests) test suite which validates baseline conformity to the [Ethereum JSON RPC](https://github.com/ethereum/wiki/wiki/JSON-RPC) specs. Please see the [test suite's readme](https://github.com/ethereum/rpc-tests/blob/master/README.md) for details. |
 | `rlpdump` | Developer utility tool to convert binary RLP ([Recursive Length Prefix](https://github.com/ethereum/wiki/wiki/RLP)) dumps (data encoding used by the Ethereum protocol both network as well as consensus wise) to user-friendlier hierarchical representation (e.g. `rlpdump --hex CE0183FFFFFFC4C304050583616263`). |
 | `puppeth` | a CLI wizard that aids in creating a new Ethereum network. |
@@ -72,7 +72,7 @@ This command will:
 This tool is optional and if you leave it out you can always attach to an already running
 `geth` instance with `geth attach`.
 
-### A Full node on the Ethereum test network
+### A Full node on the Görli test network
 
 Transitioning towards developers, if you'd like to play around with creating Ethereum
 contracts, you almost certainly would like to do that without any real money involved until
@@ -81,23 +81,24 @@ network, you want to join the **test** network with your node, which is fully eq
 the main network, but with play-Ether only.
 
 ```shell
-$ geth --testnet console
+$ geth --goerli console
 ```
 
 The `console` subcommand has the exact same meaning as above and they are equally
-useful on the testnet too. Please see above for their explanations if you've skipped here.
+useful on the testnet too. Please, see above for their explanations if you've skipped here.
 
-Specifying the `--testnet` flag, however, will reconfigure your `geth` instance a bit:
+Specifying the `--goerli` flag, however, will reconfigure your `geth` instance a bit:
 
+ * Instead of connecting the main Ethereum network, the client will connect to the Görli
+   test network, which uses different P2P bootnodes, different network IDs and genesis
+   states.
  * Instead of using the default data directory (`~/.ethereum` on Linux for example), `geth`
-   will nest itself one level deeper into a `testnet` subfolder (`~/.ethereum/testnet` on
+   will nest itself one level deeper into a `goerli` subfolder (`~/.ethereum/goerli` on
    Linux). Note, on OSX and Linux this also means that attaching to a running testnet node
    requires the use of a custom endpoint since `geth attach` will try to attach to a
-   production node endpoint by default. E.g.
-   `geth attach <datadir>/testnet/geth.ipc`. Windows users are not affected by
+   production node endpoint by default, e.g.,
+   `geth attach <datadir>/goerli/geth.ipc`. Windows users are not affected by
    this.
- * Instead of connecting the main Ethereum network, the client will connect to the test
-   network, which uses different P2P bootnodes, different network IDs and genesis states.
 
 *Note: Although there are some internal protective measures to prevent transactions from
 crossing over between the main network and test network, you should make sure to always
@@ -107,17 +108,26 @@ accounts available between them.*
 
 ### Full node on the Rinkeby test network
 
-The above test network is a cross-client one based on the ethash proof-of-work consensus
-algorithm. As such, it has certain extra overhead and is more susceptible to reorganization
-attacks due to the network's low difficulty/security. Go Ethereum also supports connecting
-to a proof-of-authority based test network called [*Rinkeby*](https://www.rinkeby.io)
-(operated by members of the community). This network is lighter, more secure, but is only
-supported by go-ethereum.
+Go Ethereum also supports connecting to the older proof-of-authority based test network
+called [*Rinkeby*](https://www.rinkeby.io) which is operated by members of the community.
 
 ```shell
 $ geth --rinkeby console
 ```
 
+### Full node on the Ropsten test network
+
+In addition to Görli and Rinkeby, Geth also supports the ancient Ropsten testnet. The
+Ropsten test network is based on the Ethash proof-of-work consensus algorithm. As such,
+it has certain extra overhead and is more susceptible to reorganization attacks due to the
+network's low difficulty/security.
+
+```shell
+$ geth --ropsten console
+```
+
+*Note: Older Geth configurations store the Ropsten database in the `testnet` subdirectory.*
+
 ### Configuration
 
 As an alternative to passing the numerous flags to the `geth` binary, you can also pass a
@@ -152,7 +162,7 @@ above command does. It will also create a persistent volume in your home direct
 saving your blockchain as well as map the default ports. There is also an `alpine` tag
 available for a slim version of the image.
 
-Do not forget `--rpcaddr 0.0.0.0`, if you want to access RPC from other containers
+Do not forget `--http.addr 0.0.0.0`, if you want to access RPC from other containers
 and/or hosts. By default, `geth` binds to the local interface and RPC endpoints is not
 accessible from the outside.
 
@@ -172,16 +182,16 @@ you'd expect.
 
 HTTP based JSON-RPC API options:
 
-* `--rpc` Enable the HTTP-RPC server
-* `--rpcaddr` HTTP-RPC server listening interface (default: `localhost`)
-* `--rpcport` HTTP-RPC server listening port (default: `8545`)
-* `--rpcapi` API's offered over the HTTP-RPC interface (default: `eth,net,web3`)
-* `--rpccorsdomain` Comma separated list of domains from which to accept cross origin requests (browser enforced)
+* `--http` Enable the HTTP-RPC server
+* `--http.addr` HTTP-RPC server listening interface (default: `localhost`)
+* `--http.port` HTTP-RPC server listening port (default: `8545`)
+* `--http.api` API's offered over the HTTP-RPC interface (default: `eth,net,web3`)
+* `--http.corsdomain` Comma separated list of domains from which to accept cross origin requests (browser enforced)
 * `--ws` Enable the WS-RPC server
-* `--wsaddr` WS-RPC server listening interface (default: `localhost`)
-* `--wsport` WS-RPC server listening port (default: `8546`)
-* `--wsapi` API's offered over the WS-RPC interface (default: `eth,net,web3`)
-* `--wsorigins` Origins from which to accept websockets requests
+* `--ws.addr` WS-RPC server listening interface (default: `localhost`)
+* `--ws.port` WS-RPC server listening port (default: `8546`)
+* `--ws.api` API's offered over the WS-RPC interface (default: `eth,net,web3`)
+* `--ws.origins` Origins from which to accept websockets requests
 * `--ipcdisable` Disable the IPC-RPC server
 * `--ipcapi` API's offered over the IPC-RPC interface (default: `admin,debug,eth,miner,net,personal,shh,txpool,web3`)
 * `--ipcpath` Filename for IPC socket/pipe within the datadir (explicit paths escape it)
@@ -217,7 +227,8 @@ aware of and agree upon. This consists of a small JSON file (e.g. call it `genes
     "eip158Block": 0,
     "byzantiumBlock": 0,
     "constantinopleBlock": 0,
-    "petersburgBlock": 0
+    "petersburgBlock": 0,
+    "istanbulBlock": 0
   },
   "alloc": {},
   "coinbase": "0x0000000000000000000000000000000000000000",
@@ -294,7 +305,7 @@ also need to configure a miner to process transactions and create new blocks for
 Mining on the public Ethereum network is a complex task as it's only feasible using GPUs,
 requiring an OpenCL or CUDA enabled `ethminer` instance. For information on such a
 setup, please consult the [EtherMining subreddit](https://www.reddit.com/r/EtherMining/)
-and the [Genoil miner](https://github.com/Genoil/cpp-ethereum) repository.
+and the [ethminer](https://github.com/ethereum-mining/ethminer) repository.
 
 In a private network setting, however a single CPU miner instance is more than enough for
 practical purposes as it can produce a stable stream of blocks at the correct intervals
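The flag rename above only touches the CLI surface; the JSON-RPC protocol served on the default `localhost:8545` endpoint is unchanged. As an illustration (not part of this diff), a minimal Go sketch that calls the standard `eth_blockNumber` method against a node started with something like `geth --goerli --http`:

```go
// Minimal sketch: query the latest block number over HTTP JSON-RPC.
// Assumes a local geth started with the new --http flag (default port 8545).
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	// Standard JSON-RPC 2.0 request body.
	reqBody, _ := json.Marshal(map[string]interface{}{
		"jsonrpc": "2.0",
		"method":  "eth_blockNumber",
		"params":  []interface{}{},
		"id":      1,
	})
	resp, err := http.Post("http://localhost:8545", "application/json", bytes.NewReader(reqBody))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	var result struct {
		Result string `json:"result"` // hex-encoded block number, e.g. "0x2752e5"
	}
	if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
		panic(err)
	}
	fmt.Println("latest block:", result.Result)
}
```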
accounts/abi/abi.go

@@ -19,10 +19,12 @@ package abi
 import (
 	"bytes"
 	"encoding/json"
+	"errors"
 	"fmt"
 	"io"
 
 	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/crypto"
 )
 
 // The ABI holds information about a contract's context and available
@@ -32,6 +34,12 @@ type ABI struct {
 	Constructor Method
 	Methods     map[string]Method
 	Events      map[string]Event
+
+	// Additional "special" functions introduced in solidity v0.6.0.
+	// It's separated from the original default fallback. Each contract
+	// can only define one fallback and receive function.
+	Fallback Method // Note it's also used to represent legacy fallback before v0.6.0
+	Receive  Method
 }
 
 // JSON returns a parsed ABI interface and error if it failed.
@@ -42,7 +50,6 @@ func JSON(reader io.Reader) (ABI, error) {
 	if err := dec.Decode(&abi); err != nil {
 		return ABI{}, err
 	}
-
 	return abi, nil
 }
 
@@ -70,7 +77,7 @@ func (abi ABI) Pack(name string, args ...interface{}) ([]byte, error) {
 		return nil, err
 	}
 	// Pack up the method ID too if not a constructor and return
-	return append(method.ID(), arguments...), nil
+	return append(method.ID, arguments...), nil
 }
 
 // Unpack output in v according to the abi specification
@@ -108,12 +115,22 @@ func (abi ABI) UnpackIntoMap(v map[string]interface{}, name string, data []byte)
 // UnmarshalJSON implements json.Unmarshaler interface
 func (abi *ABI) UnmarshalJSON(data []byte) error {
 	var fields []struct {
 		Type    string
 		Name    string
-		Constant bool
+		Inputs  []Argument
+		Outputs []Argument
+
+		// Status indicator which can be: "pure", "view",
+		// "nonpayable" or "payable".
+		StateMutability string
+
+		// Deprecated Status indicators, but removed in v0.6.0.
+		Constant bool // True if function is either pure or view
+		Payable  bool // True if function is payable
+
+		// Event relevant indicator represents the event is
+		// declared as anonymous.
 		Anonymous bool
-		Inputs    []Argument
-		Outputs   []Argument
 	}
 	if err := json.Unmarshal(data, &fields); err != nil {
 		return err
@@ -123,43 +140,67 @@ func (abi *ABI) UnmarshalJSON(data []byte) error {
 	for _, field := range fields {
 		switch field.Type {
 		case "constructor":
-			abi.Constructor = Method{
-				Inputs: field.Inputs,
-			}
-			// empty defaults to function according to the abi spec
-		case "function", "":
-			name := field.Name
-			_, ok := abi.Methods[name]
-			for idx := 0; ok; idx++ {
-				name = fmt.Sprintf("%s%d", field.Name, idx)
-				_, ok = abi.Methods[name]
-			}
-			abi.Methods[name] = Method{
-				Name:    name,
-				RawName: field.Name,
-				Const:   field.Constant,
-				Inputs:  field.Inputs,
-				Outputs: field.Outputs,
-			}
+			abi.Constructor = NewMethod("", "", Constructor, field.StateMutability, field.Constant, field.Payable, field.Inputs, nil)
+		case "function":
+			name := abi.overloadedMethodName(field.Name)
+			abi.Methods[name] = NewMethod(name, field.Name, Function, field.StateMutability, field.Constant, field.Payable, field.Inputs, field.Outputs)
+		case "fallback":
+			// New introduced function type in v0.6.0, check more detail
+			// here https://solidity.readthedocs.io/en/v0.6.0/contracts.html#fallback-function
+			if abi.HasFallback() {
+				return errors.New("only single fallback is allowed")
+			}
+			abi.Fallback = NewMethod("", "", Fallback, field.StateMutability, field.Constant, field.Payable, nil, nil)
+		case "receive":
+			// New introduced function type in v0.6.0, check more detail
+			// here https://solidity.readthedocs.io/en/v0.6.0/contracts.html#fallback-function
+			if abi.HasReceive() {
+				return errors.New("only single receive is allowed")
+			}
+			if field.StateMutability != "payable" {
+				return errors.New("the statemutability of receive can only be payable")
+			}
+			abi.Receive = NewMethod("", "", Receive, field.StateMutability, field.Constant, field.Payable, nil, nil)
 		case "event":
-			name := field.Name
-			_, ok := abi.Events[name]
-			for idx := 0; ok; idx++ {
-				name = fmt.Sprintf("%s%d", field.Name, idx)
-				_, ok = abi.Events[name]
-			}
-			abi.Events[name] = Event{
-				Name:      name,
-				RawName:   field.Name,
-				Anonymous: field.Anonymous,
-				Inputs:    field.Inputs,
-			}
+			name := abi.overloadedEventName(field.Name)
+			abi.Events[name] = NewEvent(name, field.Name, field.Anonymous, field.Inputs)
+		default:
+			return fmt.Errorf("abi: could not recognize type %v of field %v", field.Type, field.Name)
 		}
 	}
 	return nil
 }
 
+// overloadedMethodName returns the next available name for a given function.
+// Needed since solidity allows for function overload.
+//
+// e.g. if the abi contains Methods send, send1
+// overloadedMethodName would return send2 for input send.
+func (abi *ABI) overloadedMethodName(rawName string) string {
+	name := rawName
+	_, ok := abi.Methods[name]
+	for idx := 0; ok; idx++ {
+		name = fmt.Sprintf("%s%d", rawName, idx)
+		_, ok = abi.Methods[name]
+	}
+	return name
+}
+
+// overloadedEventName returns the next available name for a given event.
+// Needed since solidity allows for event overload.
+//
+// e.g. if the abi contains events received, received1
+// overloadedEventName would return received2 for input received.
+func (abi *ABI) overloadedEventName(rawName string) string {
+	name := rawName
+	_, ok := abi.Events[name]
+	for idx := 0; ok; idx++ {
+		name = fmt.Sprintf("%s%d", rawName, idx)
+		_, ok = abi.Events[name]
+	}
+	return name
+}
+
 // MethodById looks up a method by the 4-byte id
 // returns nil if none found
 func (abi *ABI) MethodById(sigdata []byte) (*Method, error) {
@@ -167,7 +208,7 @@ func (abi *ABI) MethodById(sigdata []byte) (*Method, error) {
 		return nil, fmt.Errorf("data too short (%d bytes) for abi method lookup", len(sigdata))
 	}
 	for _, method := range abi.Methods {
-		if bytes.Equal(method.ID(), sigdata[:4]) {
+		if bytes.Equal(method.ID, sigdata[:4]) {
 			return &method, nil
 		}
 	}
@@ -178,9 +219,41 @@ func (abi *ABI) MethodById(sigdata []byte) (*Method, error) {
 // ABI and returns nil if none found.
 func (abi *ABI) EventByID(topic common.Hash) (*Event, error) {
 	for _, event := range abi.Events {
-		if bytes.Equal(event.ID().Bytes(), topic.Bytes()) {
+		if bytes.Equal(event.ID.Bytes(), topic.Bytes()) {
 			return &event, nil
 		}
 	}
 	return nil, fmt.Errorf("no event with id: %#x", topic.Hex())
 }
+
+// HasFallback returns an indicator whether a fallback function is included.
+func (abi *ABI) HasFallback() bool {
+	return abi.Fallback.Type == Fallback
+}
+
+// HasReceive returns an indicator whether a receive function is included.
+func (abi *ABI) HasReceive() bool {
+	return abi.Receive.Type == Receive
+}
+
+// revertSelector is a special function selector for revert reason unpacking.
+var revertSelector = crypto.Keccak256([]byte("Error(string)"))[:4]
+
+// UnpackRevert resolves the abi-encoded revert reason. According to the solidity
+// spec https://solidity.readthedocs.io/en/latest/control-structures.html#revert,
+// the provided revert reason is abi-encoded as if it were a call to a function
+// `Error(string)`. So it's a special tool for it.
+func UnpackRevert(data []byte) (string, error) {
+	if len(data) < 4 {
+		return "", errors.New("invalid data for unpacking")
+	}
+	if !bytes.Equal(data[:4], revertSelector) {
+		return "", errors.New("invalid data for unpacking")
+	}
+	var reason string
+	typ, _ := NewType("string", "", nil)
+	if err := (Arguments{{Type: typ}}).Unpack(&reason, data[4:]); err != nil {
+		return "", err
+	}
+	return reason, nil
+}
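The refactored `UnmarshalJSON` above resolves Solidity overloads through `overloadedMethodName`, and method identifiers become plain `ID` fields. A small usage sketch follows (illustration only, not part of the diff; it assumes these hunks belong to the `accounts/abi` package and imports it by that path, and the two-overload ABI JSON below is made up):

```go
package main

import (
	"fmt"
	"strings"

	"github.com/ethereum/go-ethereum/accounts/abi"
)

func main() {
	// Two Solidity overloads of `send`: per overloadedMethodName, the second
	// declaration is stored under the suffixed key "send0".
	const def = `[
		{ "type": "function", "name": "send", "inputs": [ { "name": "amount", "type": "uint256" } ] },
		{ "type": "function", "name": "send", "inputs": [ { "name": "amount", "type": "uint256" }, { "name": "memo", "type": "bytes" } ] }
	]`
	parsed, err := abi.JSON(strings.NewReader(def))
	if err != nil {
		panic(err)
	}
	for name := range parsed.Methods {
		fmt.Println("method key:", name) // prints "send" and "send0"
	}

	// Method IDs are now plain fields; MethodById resolves a method from the
	// first four bytes of calldata.
	id := parsed.Methods["send"].ID
	m, err := parsed.MethodById(id)
	if err != nil {
		panic(err)
	}
	fmt.Printf("selector %x resolves to %s\n", id, m.RawName)
}
```

The same parsed `ABI` sits next to the new revert helper: given the raw return data of a reverted call, `abi.UnpackRevert(data)` yields the human-readable reason string when the data is the ABI-encoded `Error(string)` payload.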
accounts/abi/abi_test.go

@@ -19,6 +19,7 @@ package abi
 import (
 	"bytes"
 	"encoding/hex"
+	"errors"
 	"fmt"
 	"math/big"
 	"reflect"
@@ -26,57 +27,105 @@ import (
 	"testing"
 
 	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/common/math"
 	"github.com/ethereum/go-ethereum/crypto"
 )
 
 const jsondata = `
 [
-	{ "type" : "function", "name" : "balance", "constant" : true },
-	{ "type" : "function", "name" : "send", "constant" : false, "inputs" : [ { "name" : "amount", "type" : "uint256" } ] }
+	{ "type" : "function", "name" : "", "stateMutability" : "view" },
+	{ "type" : "function", "name" : "balance", "stateMutability" : "view" },
+	{ "type" : "function", "name" : "send", "inputs" : [ { "name" : "amount", "type" : "uint256" } ] },
+	{ "type" : "function", "name" : "test", "inputs" : [ { "name" : "number", "type" : "uint32" } ] },
+	{ "type" : "function", "name" : "string", "inputs" : [ { "name" : "inputs", "type" : "string" } ] },
+	{ "type" : "function", "name" : "bool", "inputs" : [ { "name" : "inputs", "type" : "bool" } ] },
+	{ "type" : "function", "name" : "address", "inputs" : [ { "name" : "inputs", "type" : "address" } ] },
+	{ "type" : "function", "name" : "uint64[2]", "inputs" : [ { "name" : "inputs", "type" : "uint64[2]" } ] },
+	{ "type" : "function", "name" : "uint64[]", "inputs" : [ { "name" : "inputs", "type" : "uint64[]" } ] },
+	{ "type" : "function", "name" : "int8", "inputs" : [ { "name" : "inputs", "type" : "int8" } ] },
+	{ "type" : "function", "name" : "foo", "inputs" : [ { "name" : "inputs", "type" : "uint32" } ] },
+	{ "type" : "function", "name" : "bar", "inputs" : [ { "name" : "inputs", "type" : "uint32" }, { "name" : "string", "type" : "uint16" } ] },
+	{ "type" : "function", "name" : "slice", "inputs" : [ { "name" : "inputs", "type" : "uint32[2]" } ] },
+	{ "type" : "function", "name" : "slice256", "inputs" : [ { "name" : "inputs", "type" : "uint256[2]" } ] },
+	{ "type" : "function", "name" : "sliceAddress", "inputs" : [ { "name" : "inputs", "type" : "address[]" } ] },
+	{ "type" : "function", "name" : "sliceMultiAddress", "inputs" : [ { "name" : "a", "type" : "address[]" }, { "name" : "b", "type" : "address[]" } ] },
+	{ "type" : "function", "name" : "nestedArray", "inputs" : [ { "name" : "a", "type" : "uint256[2][2]" }, { "name" : "b", "type" : "address[]" } ] },
+	{ "type" : "function", "name" : "nestedArray2", "inputs" : [ { "name" : "a", "type" : "uint8[][2]" } ] },
+	{ "type" : "function", "name" : "nestedSlice", "inputs" : [ { "name" : "a", "type" : "uint8[][]" } ] },
+	{ "type" : "function", "name" : "receive", "inputs" : [ { "name" : "memo", "type" : "bytes" }], "outputs" : [], "payable" : true, "stateMutability" : "payable" },
+	{ "type" : "function", "name" : "fixedArrStr", "stateMutability" : "view", "inputs" : [ { "name" : "str", "type" : "string" }, { "name" : "fixedArr", "type" : "uint256[2]" } ] },
+	{ "type" : "function", "name" : "fixedArrBytes", "stateMutability" : "view", "inputs" : [ { "name" : "bytes", "type" : "bytes" }, { "name" : "fixedArr", "type" : "uint256[2]" } ] },
+	{ "type" : "function", "name" : "mixedArrStr", "stateMutability" : "view", "inputs" : [ { "name" : "str", "type" : "string" }, { "name" : "fixedArr", "type" : "uint256[2]" }, { "name" : "dynArr", "type" : "uint256[]" } ] },
+	{ "type" : "function", "name" : "doubleFixedArrStr", "stateMutability" : "view", "inputs" : [ { "name" : "str", "type" : "string" }, { "name" : "fixedArr1", "type" : "uint256[2]" }, { "name" : "fixedArr2", "type" : "uint256[3]" } ] },
|
{ "type" : "function", "name" : "fixedArrBytes", "stateMutability" : "view", "inputs" : [ { "name" : "bytes", "type" : "bytes" }, { "name" : "fixedArr", "type" : "uint256[2]" } ] },
|
||||||
|
{ "type" : "function", "name" : "mixedArrStr", "stateMutability" : "view", "inputs" : [ { "name" : "str", "type" : "string" }, { "name" : "fixedArr", "type" : "uint256[2]" }, { "name" : "dynArr", "type" : "uint256[]" } ] },
|
||||||
|
{ "type" : "function", "name" : "doubleFixedArrStr", "stateMutability" : "view", "inputs" : [ { "name" : "str", "type" : "string" }, { "name" : "fixedArr1", "type" : "uint256[2]" }, { "name" : "fixedArr2", "type" : "uint256[3]" } ] },
|
||||||
|
{ "type" : "function", "name" : "multipleMixedArrStr", "stateMutability" : "view", "inputs" : [ { "name" : "str", "type" : "string" }, { "name" : "fixedArr1", "type" : "uint256[2]" }, { "name" : "dynArr", "type" : "uint256[]" }, { "name" : "fixedArr2", "type" : "uint256[3]" } ] },
|
||||||
|
{ "type" : "function", "name" : "overloadedNames", "stateMutability" : "view", "inputs": [ { "components": [ { "internalType": "uint256", "name": "_f", "type": "uint256" }, { "internalType": "uint256", "name": "__f", "type": "uint256"}, { "internalType": "uint256", "name": "f", "type": "uint256"}],"internalType": "struct Overloader.F", "name": "f","type": "tuple"}]}
|
||||||
]`
|
]`
|
||||||
|
|
||||||
const jsondata2 = `
|
var (
|
||||||
[
|
Uint256, _ = NewType("uint256", "", nil)
|
||||||
{ "type" : "function", "name" : "balance", "constant" : true },
|
Uint32, _ = NewType("uint32", "", nil)
|
||||||
{ "type" : "function", "name" : "send", "constant" : false, "inputs" : [ { "name" : "amount", "type" : "uint256" } ] },
|
Uint16, _ = NewType("uint16", "", nil)
|
||||||
{ "type" : "function", "name" : "test", "constant" : false, "inputs" : [ { "name" : "number", "type" : "uint32" } ] },
|
String, _ = NewType("string", "", nil)
|
||||||
{ "type" : "function", "name" : "string", "constant" : false, "inputs" : [ { "name" : "inputs", "type" : "string" } ] },
|
Bool, _ = NewType("bool", "", nil)
|
||||||
{ "type" : "function", "name" : "bool", "constant" : false, "inputs" : [ { "name" : "inputs", "type" : "bool" } ] },
|
Bytes, _ = NewType("bytes", "", nil)
|
||||||
{ "type" : "function", "name" : "address", "constant" : false, "inputs" : [ { "name" : "inputs", "type" : "address" } ] },
|
Address, _ = NewType("address", "", nil)
|
||||||
{ "type" : "function", "name" : "uint64[2]", "constant" : false, "inputs" : [ { "name" : "inputs", "type" : "uint64[2]" } ] },
|
Uint64Arr, _ = NewType("uint64[]", "", nil)
|
||||||
{ "type" : "function", "name" : "uint64[]", "constant" : false, "inputs" : [ { "name" : "inputs", "type" : "uint64[]" } ] },
|
AddressArr, _ = NewType("address[]", "", nil)
|
||||||
{ "type" : "function", "name" : "foo", "constant" : false, "inputs" : [ { "name" : "inputs", "type" : "uint32" } ] },
|
Int8, _ = NewType("int8", "", nil)
|
||||||
{ "type" : "function", "name" : "bar", "constant" : false, "inputs" : [ { "name" : "inputs", "type" : "uint32" }, { "name" : "string", "type" : "uint16" } ] },
|
// Special types for testing
|
||||||
{ "type" : "function", "name" : "slice", "constant" : false, "inputs" : [ { "name" : "inputs", "type" : "uint32[2]" } ] },
|
Uint32Arr2, _ = NewType("uint32[2]", "", nil)
|
||||||
{ "type" : "function", "name" : "slice256", "constant" : false, "inputs" : [ { "name" : "inputs", "type" : "uint256[2]" } ] },
|
Uint64Arr2, _ = NewType("uint64[2]", "", nil)
|
||||||
{ "type" : "function", "name" : "sliceAddress", "constant" : false, "inputs" : [ { "name" : "inputs", "type" : "address[]" } ] },
|
Uint256Arr, _ = NewType("uint256[]", "", nil)
|
||||||
{ "type" : "function", "name" : "sliceMultiAddress", "constant" : false, "inputs" : [ { "name" : "a", "type" : "address[]" }, { "name" : "b", "type" : "address[]" } ] },
|
Uint256Arr2, _ = NewType("uint256[2]", "", nil)
|
||||||
{ "type" : "function", "name" : "nestedArray", "constant" : false, "inputs" : [ { "name" : "a", "type" : "uint256[2][2]" }, { "name" : "b", "type" : "address[]" } ] },
|
Uint256Arr3, _ = NewType("uint256[3]", "", nil)
|
||||||
{ "type" : "function", "name" : "nestedArray2", "constant" : false, "inputs" : [ { "name" : "a", "type" : "uint8[][2]" } ] },
|
Uint256ArrNested, _ = NewType("uint256[2][2]", "", nil)
|
||||||
{ "type" : "function", "name" : "nestedSlice", "constant" : false, "inputs" : [ { "name" : "a", "type" : "uint8[][]" } ] }
|
Uint8ArrNested, _ = NewType("uint8[][2]", "", nil)
|
||||||
]`
|
Uint8SliceNested, _ = NewType("uint8[][]", "", nil)
|
||||||
|
TupleF, _ = NewType("tuple", "struct Overloader.F", []ArgumentMarshaling{
|
||||||
|
{Name: "_f", Type: "uint256"},
|
||||||
|
{Name: "__f", Type: "uint256"},
|
||||||
|
{Name: "f", Type: "uint256"}})
|
||||||
|
)
|
||||||
|
|
||||||
|
var methods = map[string]Method{
|
||||||
|
"": NewMethod("", "", Function, "view", false, false, nil, nil),
|
||||||
|
"balance": NewMethod("balance", "balance", Function, "view", false, false, nil, nil),
|
||||||
|
"send": NewMethod("send", "send", Function, "", false, false, []Argument{{"amount", Uint256, false}}, nil),
|
||||||
|
"test": NewMethod("test", "test", Function, "", false, false, []Argument{{"number", Uint32, false}}, nil),
|
||||||
|
"string": NewMethod("string", "string", Function, "", false, false, []Argument{{"inputs", String, false}}, nil),
|
||||||
|
"bool": NewMethod("bool", "bool", Function, "", false, false, []Argument{{"inputs", Bool, false}}, nil),
|
||||||
|
"address": NewMethod("address", "address", Function, "", false, false, []Argument{{"inputs", Address, false}}, nil),
|
||||||
|
"uint64[]": NewMethod("uint64[]", "uint64[]", Function, "", false, false, []Argument{{"inputs", Uint64Arr, false}}, nil),
|
||||||
|
"uint64[2]": NewMethod("uint64[2]", "uint64[2]", Function, "", false, false, []Argument{{"inputs", Uint64Arr2, false}}, nil),
|
||||||
|
"int8": NewMethod("int8", "int8", Function, "", false, false, []Argument{{"inputs", Int8, false}}, nil),
|
||||||
|
"foo": NewMethod("foo", "foo", Function, "", false, false, []Argument{{"inputs", Uint32, false}}, nil),
|
||||||
|
"bar": NewMethod("bar", "bar", Function, "", false, false, []Argument{{"inputs", Uint32, false}, {"string", Uint16, false}}, nil),
|
||||||
|
"slice": NewMethod("slice", "slice", Function, "", false, false, []Argument{{"inputs", Uint32Arr2, false}}, nil),
|
||||||
|
"slice256": NewMethod("slice256", "slice256", Function, "", false, false, []Argument{{"inputs", Uint256Arr2, false}}, nil),
|
||||||
|
"sliceAddress": NewMethod("sliceAddress", "sliceAddress", Function, "", false, false, []Argument{{"inputs", AddressArr, false}}, nil),
|
||||||
|
"sliceMultiAddress": NewMethod("sliceMultiAddress", "sliceMultiAddress", Function, "", false, false, []Argument{{"a", AddressArr, false}, {"b", AddressArr, false}}, nil),
|
||||||
|
"nestedArray": NewMethod("nestedArray", "nestedArray", Function, "", false, false, []Argument{{"a", Uint256ArrNested, false}, {"b", AddressArr, false}}, nil),
|
||||||
|
"nestedArray2": NewMethod("nestedArray2", "nestedArray2", Function, "", false, false, []Argument{{"a", Uint8ArrNested, false}}, nil),
|
||||||
|
"nestedSlice": NewMethod("nestedSlice", "nestedSlice", Function, "", false, false, []Argument{{"a", Uint8SliceNested, false}}, nil),
|
||||||
|
"receive": NewMethod("receive", "receive", Function, "payable", false, true, []Argument{{"memo", Bytes, false}}, []Argument{}),
|
||||||
|
"fixedArrStr": NewMethod("fixedArrStr", "fixedArrStr", Function, "view", false, false, []Argument{{"str", String, false}, {"fixedArr", Uint256Arr2, false}}, nil),
|
||||||
|
"fixedArrBytes": NewMethod("fixedArrBytes", "fixedArrBytes", Function, "view", false, false, []Argument{{"bytes", Bytes, false}, {"fixedArr", Uint256Arr2, false}}, nil),
|
||||||
|
"mixedArrStr": NewMethod("mixedArrStr", "mixedArrStr", Function, "view", false, false, []Argument{{"str", String, false}, {"fixedArr", Uint256Arr2, false}, {"dynArr", Uint256Arr, false}}, nil),
|
||||||
|
"doubleFixedArrStr": NewMethod("doubleFixedArrStr", "doubleFixedArrStr", Function, "view", false, false, []Argument{{"str", String, false}, {"fixedArr1", Uint256Arr2, false}, {"fixedArr2", Uint256Arr3, false}}, nil),
|
||||||
|
"multipleMixedArrStr": NewMethod("multipleMixedArrStr", "multipleMixedArrStr", Function, "view", false, false, []Argument{{"str", String, false}, {"fixedArr1", Uint256Arr2, false}, {"dynArr", Uint256Arr, false}, {"fixedArr2", Uint256Arr3, false}}, nil),
|
||||||
|
"overloadedNames": NewMethod("overloadedNames", "overloadedNames", Function, "view", false, false, []Argument{{"f", TupleF, false}}, nil),
|
||||||
|
}
|
||||||
|
|
||||||
func TestReader(t *testing.T) {
-	Uint256, _ := NewType("uint256", "", nil)
-	exp := ABI{
-		Methods: map[string]Method{
-			"balance": {
-				"balance", "balance", true, nil, nil,
-			},
-			"send": {
-				"send", "send", false, []Argument{
-					{"amount", Uint256, false},
-				}, nil,
-			},
-		},
+	abi := ABI{
+		Methods: methods,
	}

-	abi, err := JSON(strings.NewReader(jsondata))
+	exp, err := JSON(strings.NewReader(jsondata))
	if err != nil {
-		t.Error(err)
+		t.Fatal(err)
	}

-	// deep equal fails for some reason
	for name, expM := range exp.Methods {
		gotM, exist := abi.Methods[name]
		if !exist {
@@ -98,8 +147,58 @@ func TestReader(t *testing.T) {
		}
	}
+
+func TestInvalidABI(t *testing.T) {
+	json := `[{ "type" : "function", "name" : "", "constant" : fals }]`
+	_, err := JSON(strings.NewReader(json))
+	if err == nil {
+		t.Fatal("invalid json should produce error")
+	}
+	json2 := `[{ "type" : "function", "name" : "send", "constant" : false, "inputs" : [ { "name" : "amount", "typ" : "uint256" } ] }]`
+	_, err = JSON(strings.NewReader(json2))
+	if err == nil {
+		t.Fatal("invalid json should produce error")
+	}
+}
+
+// TestConstructor tests a constructor function.
+// The test is based on the following contract:
+//	contract TestConstructor {
+//		constructor(uint256 a, uint256 b) public{}
+//	}
+func TestConstructor(t *testing.T) {
+	json := `[{ "inputs": [{"internalType": "uint256","name": "a","type": "uint256" },{ "internalType": "uint256","name": "b","type": "uint256"}],"stateMutability": "nonpayable","type": "constructor"}]`
+	method := NewMethod("", "", Constructor, "nonpayable", false, false, []Argument{{"a", Uint256, false}, {"b", Uint256, false}}, nil)
+	// Test from JSON
+	abi, err := JSON(strings.NewReader(json))
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !reflect.DeepEqual(abi.Constructor, method) {
+		t.Error("Missing expected constructor")
+	}
+	// Test pack/unpack
+	packed, err := abi.Pack("", big.NewInt(1), big.NewInt(2))
+	if err != nil {
+		t.Error(err)
+	}
+	v := struct {
+		A *big.Int
+		B *big.Int
+	}{new(big.Int), new(big.Int)}
+	//abi.Unpack(&v, "", packed)
+	if err := abi.Constructor.Inputs.Unpack(&v, packed); err != nil {
+		t.Error(err)
+	}
+	if !reflect.DeepEqual(v.A, big.NewInt(1)) {
+		t.Error("Unable to pack/unpack from constructor")
+	}
+	if !reflect.DeepEqual(v.B, big.NewInt(2)) {
+		t.Error("Unable to pack/unpack from constructor")
+	}
+}
+
func TestTestNumbers(t *testing.T) {
-	abi, err := JSON(strings.NewReader(jsondata2))
+	abi, err := JSON(strings.NewReader(jsondata))
	if err != nil {
		t.Fatal(err)
	}
@@ -135,60 +234,22 @@ func TestTestNumbers(t *testing.T) {
	}
}
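Outside the TestConstructor test above, the same round trip looks roughly like the sketch below. It assumes `parsed` is the ABI built from the constructor JSON shown in the test; an empty method name addresses the constructor.

// Sketch, assuming `parsed` was produced by abi.JSON from the constructor definition above.
packed, err := parsed.Pack("", big.NewInt(1), big.NewInt(2))
if err != nil {
	log.Fatal(err)
}
out := struct {
	A *big.Int
	B *big.Int
}{new(big.Int), new(big.Int)}
if err := parsed.Constructor.Inputs.Unpack(&out, packed); err != nil {
	log.Fatal(err)
}
// out.A is 1 and out.B is 2 after unpacking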
-func TestTestString(t *testing.T) {
-	abi, err := JSON(strings.NewReader(jsondata2))
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	if _, err := abi.Pack("string", "hello world"); err != nil {
-		t.Error(err)
-	}
-}
-
-func TestTestBool(t *testing.T) {
-	abi, err := JSON(strings.NewReader(jsondata2))
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	if _, err := abi.Pack("bool", true); err != nil {
-		t.Error(err)
-	}
-}
-
-func TestTestSlice(t *testing.T) {
-	abi, err := JSON(strings.NewReader(jsondata2))
-	if err != nil {
-		t.Fatal(err)
-	}
-	slice := make([]uint64, 2)
-	if _, err := abi.Pack("uint64[2]", slice); err != nil {
-		t.Error(err)
-	}
-	if _, err := abi.Pack("uint64[]", slice); err != nil {
-		t.Error(err)
-	}
-}
-
func TestMethodSignature(t *testing.T) {
-	String, _ := NewType("string", "", nil)
-	m := Method{"foo", "foo", false, []Argument{{"bar", String, false}, {"baz", String, false}}, nil}
+	m := NewMethod("foo", "foo", Function, "", false, false, []Argument{{"bar", String, false}, {"baz", String, false}}, nil)
	exp := "foo(string,string)"
-	if m.Sig() != exp {
-		t.Error("signature mismatch", exp, "!=", m.Sig())
+	if m.Sig != exp {
+		t.Error("signature mismatch", exp, "!=", m.Sig)
	}

	idexp := crypto.Keccak256([]byte(exp))[:4]
-	if !bytes.Equal(m.ID(), idexp) {
-		t.Errorf("expected ids to match %x != %x", m.ID(), idexp)
+	if !bytes.Equal(m.ID, idexp) {
+		t.Errorf("expected ids to match %x != %x", m.ID, idexp)
	}

-	uintt, _ := NewType("uint256", "", nil)
-	m = Method{"foo", "foo", false, []Argument{{"bar", uintt, false}}, nil}
+	m = NewMethod("foo", "foo", Function, "", false, false, []Argument{{"bar", Uint256, false}}, nil)
	exp = "foo(uint256)"
-	if m.Sig() != exp {
-		t.Error("signature mismatch", exp, "!=", m.Sig())
+	if m.Sig != exp {
+		t.Error("signature mismatch", exp, "!=", m.Sig)
	}

	// Method with tuple arguments
@@ -204,10 +265,10 @@ func TestMethodSignature(t *testing.T) {
			{Name: "y", Type: "int256"},
		}},
	})
-	m = Method{"foo", "foo", false, []Argument{{"s", s, false}, {"bar", String, false}}, nil}
+	m = NewMethod("foo", "foo", Function, "", false, false, []Argument{{"s", s, false}, {"bar", String, false}}, nil)
	exp = "foo((int256,int256[],(int256,int256)[],(int256,int256)[2]),string)"
-	if m.Sig() != exp {
-		t.Error("signature mismatch", exp, "!=", m.Sig())
+	if m.Sig != exp {
+		t.Error("signature mismatch", exp, "!=", m.Sig)
	}
}
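The Sig and ID fields exercised above replace the old Sig() and ID() methods, but they encode the same invariant: the selector is the first four bytes of the Keccak-256 hash of the canonical signature. A short sketch, using the package-level test types (Address, Uint256) defined earlier in this file:

// Sketch: the precomputed fields still match a hand-computed selector.
m := NewMethod("transfer", "transfer", Function, "", false, false,
	[]Argument{{"to", Address, false}, {"amount", Uint256, false}}, nil)
want := crypto.Keccak256([]byte("transfer(address,uint256)"))[:4]
if m.Sig != "transfer(address,uint256)" || !bytes.Equal(m.ID, want) {
	panic("signature/selector mismatch")
}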

@@ -219,12 +280,12 @@ func TestOverloadedMethodSignature(t *testing.T) {
	}
	check := func(name string, expect string, method bool) {
		if method {
-			if abi.Methods[name].Sig() != expect {
-				t.Fatalf("The signature of overloaded method mismatch, want %s, have %s", expect, abi.Methods[name].Sig())
+			if abi.Methods[name].Sig != expect {
+				t.Fatalf("The signature of overloaded method mismatch, want %s, have %s", expect, abi.Methods[name].Sig)
			}
		} else {
-			if abi.Events[name].Sig() != expect {
-				t.Fatalf("The signature of overloaded event mismatch, want %s, have %s", expect, abi.Events[name].Sig())
+			if abi.Events[name].Sig != expect {
+				t.Fatalf("The signature of overloaded event mismatch, want %s, have %s", expect, abi.Events[name].Sig)
			}
		}
	}
@@ -235,7 +296,7 @@ func TestOverloadedMethodSignature(t *testing.T) {
	}

func TestMultiPack(t *testing.T) {
-	abi, err := JSON(strings.NewReader(jsondata2))
+	abi, err := JSON(strings.NewReader(jsondata))
	if err != nil {
		t.Fatal(err)
	}
@@ -400,15 +461,7 @@ func TestInputVariableInputLength(t *testing.T) {
	}

func TestInputFixedArrayAndVariableInputLength(t *testing.T) {
-	const definition = `[
-	{ "type" : "function", "name" : "fixedArrStr", "constant" : true, "inputs" : [ { "name" : "str", "type" : "string" }, { "name" : "fixedArr", "type" : "uint256[2]" } ] },
-	{ "type" : "function", "name" : "fixedArrBytes", "constant" : true, "inputs" : [ { "name" : "str", "type" : "bytes" }, { "name" : "fixedArr", "type" : "uint256[2]" } ] },
-	{ "type" : "function", "name" : "mixedArrStr", "constant" : true, "inputs" : [ { "name" : "str", "type" : "string" }, { "name" : "fixedArr", "type": "uint256[2]" }, { "name" : "dynArr", "type": "uint256[]" } ] },
-	{ "type" : "function", "name" : "doubleFixedArrStr", "constant" : true, "inputs" : [ { "name" : "str", "type" : "string" }, { "name" : "fixedArr1", "type": "uint256[2]" }, { "name" : "fixedArr2", "type": "uint256[3]" } ] },
-	{ "type" : "function", "name" : "multipleMixedArrStr", "constant" : true, "inputs" : [ { "name" : "str", "type" : "string" }, { "name" : "fixedArr1", "type": "uint256[2]" }, { "name" : "dynArr", "type" : "uint256[]" }, { "name" : "fixedArr2", "type" : "uint256[3]" } ] }
-	]`
-
-	abi, err := JSON(strings.NewReader(definition))
+	abi, err := JSON(strings.NewReader(jsondata))
	if err != nil {
		t.Error(err)
	}
@@ -555,7 +608,7 @@ func TestInputFixedArrayAndVariableInputLength(t *testing.T) {
		strvalue           = common.RightPadBytes([]byte(strin), 32)
		fixedarrin1value1  = common.LeftPadBytes(fixedarrin1[0].Bytes(), 32)
		fixedarrin1value2  = common.LeftPadBytes(fixedarrin1[1].Bytes(), 32)
-		dynarroffset       = U256(big.NewInt(int64(256 + ((len(strin)/32)+1)*32)))
+		dynarroffset       = math.U256Bytes(big.NewInt(int64(256 + ((len(strin)/32)+1)*32)))
		dynarrlength       = make([]byte, 32)
		dynarrlength[31]   = byte(len(dynarrin))
		dynarrinvalue1     = common.LeftPadBytes(dynarrin[0].Bytes(), 32)
@@ -582,7 +635,7 @@ func TestInputFixedArrayAndVariableInputLength(t *testing.T) {
	}

func TestDefaultFunctionParsing(t *testing.T) {
-	const definition = `[{ "name" : "balance" }]`
+	const definition = `[{ "name" : "balance", "type" : "function" }]`

	abi, err := JSON(strings.NewReader(definition))
	if err != nil {
@@ -602,8 +655,6 @@ func TestBareEvents(t *testing.T) {
|
|||||||
{ "type" : "event", "name" : "tuple", "inputs" : [{ "indexed":false, "name":"t", "type":"tuple", "components":[{"name":"a", "type":"uint256"}] }, { "indexed":true, "name":"arg1", "type":"address" }] }
|
{ "type" : "event", "name" : "tuple", "inputs" : [{ "indexed":false, "name":"t", "type":"tuple", "components":[{"name":"a", "type":"uint256"}] }, { "indexed":true, "name":"arg1", "type":"address" }] }
|
||||||
]`
|
]`
|
||||||
|
|
||||||
arg0, _ := NewType("uint256", "", nil)
|
|
||||||
arg1, _ := NewType("address", "", nil)
|
|
||||||
tuple, _ := NewType("tuple", "", []ArgumentMarshaling{{Name: "a", Type: "uint256"}})
|
tuple, _ := NewType("tuple", "", []ArgumentMarshaling{{Name: "a", Type: "uint256"}})
|
||||||
|
|
||||||
expectedEvents := map[string]struct {
|
expectedEvents := map[string]struct {
|
||||||
@@ -613,12 +664,12 @@ func TestBareEvents(t *testing.T) {
|
|||||||
"balance": {false, nil},
|
"balance": {false, nil},
|
||||||
"anon": {true, nil},
|
"anon": {true, nil},
|
||||||
"args": {false, []Argument{
|
"args": {false, []Argument{
|
||||||
{Name: "arg0", Type: arg0, Indexed: false},
|
{Name: "arg0", Type: Uint256, Indexed: false},
|
||||||
{Name: "arg1", Type: arg1, Indexed: true},
|
{Name: "arg1", Type: Address, Indexed: true},
|
||||||
}},
|
}},
|
||||||
"tuple": {false, []Argument{
|
"tuple": {false, []Argument{
|
||||||
{Name: "t", Type: tuple, Indexed: false},
|
{Name: "t", Type: tuple, Indexed: false},
|
||||||
{Name: "arg1", Type: arg1, Indexed: true},
|
{Name: "arg1", Type: Address, Indexed: true},
|
||||||
}},
|
}},
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -891,45 +942,25 @@ func TestUnpackIntoMapNamingConflict(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestABI_MethodById(t *testing.T) {
|
func TestABI_MethodById(t *testing.T) {
|
||||||
const abiJSON = `[
|
abi, err := JSON(strings.NewReader(jsondata))
|
||||||
{"type":"function","name":"receive","constant":false,"inputs":[{"name":"memo","type":"bytes"}],"outputs":[],"payable":true,"stateMutability":"payable"},
|
|
||||||
{"type":"event","name":"received","anonymous":false,"inputs":[{"indexed":false,"name":"sender","type":"address"},{"indexed":false,"name":"amount","type":"uint256"},{"indexed":false,"name":"memo","type":"bytes"}]},
|
|
||||||
{"type":"function","name":"fixedArrStr","constant":true,"inputs":[{"name":"str","type":"string"},{"name":"fixedArr","type":"uint256[2]"}]},
|
|
||||||
{"type":"function","name":"fixedArrBytes","constant":true,"inputs":[{"name":"str","type":"bytes"},{"name":"fixedArr","type":"uint256[2]"}]},
|
|
||||||
{"type":"function","name":"mixedArrStr","constant":true,"inputs":[{"name":"str","type":"string"},{"name":"fixedArr","type":"uint256[2]"},{"name":"dynArr","type":"uint256[]"}]},
|
|
||||||
{"type":"function","name":"doubleFixedArrStr","constant":true,"inputs":[{"name":"str","type":"string"},{"name":"fixedArr1","type":"uint256[2]"},{"name":"fixedArr2","type":"uint256[3]"}]},
|
|
||||||
{"type":"function","name":"multipleMixedArrStr","constant":true,"inputs":[{"name":"str","type":"string"},{"name":"fixedArr1","type":"uint256[2]"},{"name":"dynArr","type":"uint256[]"},{"name":"fixedArr2","type":"uint256[3]"}]},
|
|
||||||
{"type":"function","name":"balance","constant":true},
|
|
||||||
{"type":"function","name":"send","constant":false,"inputs":[{"name":"amount","type":"uint256"}]},
|
|
||||||
{"type":"function","name":"test","constant":false,"inputs":[{"name":"number","type":"uint32"}]},
|
|
||||||
{"type":"function","name":"string","constant":false,"inputs":[{"name":"inputs","type":"string"}]},
|
|
||||||
{"type":"function","name":"bool","constant":false,"inputs":[{"name":"inputs","type":"bool"}]},
|
|
||||||
{"type":"function","name":"address","constant":false,"inputs":[{"name":"inputs","type":"address"}]},
|
|
||||||
{"type":"function","name":"uint64[2]","constant":false,"inputs":[{"name":"inputs","type":"uint64[2]"}]},
|
|
||||||
{"type":"function","name":"uint64[]","constant":false,"inputs":[{"name":"inputs","type":"uint64[]"}]},
|
|
||||||
{"type":"function","name":"foo","constant":false,"inputs":[{"name":"inputs","type":"uint32"}]},
|
|
||||||
{"type":"function","name":"bar","constant":false,"inputs":[{"name":"inputs","type":"uint32"},{"name":"string","type":"uint16"}]},
|
|
||||||
{"type":"function","name":"_slice","constant":false,"inputs":[{"name":"inputs","type":"uint32[2]"}]},
|
|
||||||
{"type":"function","name":"__slice256","constant":false,"inputs":[{"name":"inputs","type":"uint256[2]"}]},
|
|
||||||
{"type":"function","name":"sliceAddress","constant":false,"inputs":[{"name":"inputs","type":"address[]"}]},
|
|
||||||
{"type":"function","name":"sliceMultiAddress","constant":false,"inputs":[{"name":"a","type":"address[]"},{"name":"b","type":"address[]"}]}
|
|
||||||
]
|
|
||||||
`
|
|
||||||
abi, err := JSON(strings.NewReader(abiJSON))
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
for name, m := range abi.Methods {
|
for name, m := range abi.Methods {
|
||||||
a := fmt.Sprintf("%v", m)
|
a := fmt.Sprintf("%v", m)
|
||||||
m2, err := abi.MethodById(m.ID())
|
m2, err := abi.MethodById(m.ID)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("Failed to look up ABI method: %v", err)
|
t.Fatalf("Failed to look up ABI method: %v", err)
|
||||||
}
|
}
|
||||||
b := fmt.Sprintf("%v", m2)
|
b := fmt.Sprintf("%v", m2)
|
||||||
if a != b {
|
if a != b {
|
||||||
t.Errorf("Method %v (id %x) not 'findable' by id in ABI", name, m.ID())
|
t.Errorf("Method %v (id %x) not 'findable' by id in ABI", name, m.ID)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
// test unsuccessful lookups
|
||||||
|
if _, err = abi.MethodById(crypto.Keccak256()); err == nil {
|
||||||
|
t.Error("Expected error: no method with this id")
|
||||||
|
}
|
||||||
// Also test empty
|
// Also test empty
|
||||||
if _, err := abi.MethodById([]byte{0x00}); err == nil {
|
if _, err := abi.MethodById([]byte{0x00}); err == nil {
|
||||||
t.Errorf("Expected error, too short to decode data")
|
t.Errorf("Expected error, too short to decode data")
|
||||||
@@ -995,8 +1026,8 @@ func TestABI_EventById(t *testing.T) {
|
|||||||
t.Errorf("We should find a event for topic %s, test #%d", topicID.Hex(), testnum)
|
t.Errorf("We should find a event for topic %s, test #%d", topicID.Hex(), testnum)
|
||||||
}
|
}
|
||||||
|
|
||||||
if event.ID() != topicID {
|
if event.ID != topicID {
|
||||||
t.Errorf("Event id %s does not match topic %s, test #%d", event.ID().Hex(), topicID.Hex(), testnum)
|
t.Errorf("Event id %s does not match topic %s, test #%d", event.ID.Hex(), topicID.Hex(), testnum)
|
||||||
}
|
}
|
||||||
|
|
||||||
unknowntopicID := crypto.Keccak256Hash([]byte("unknownEvent"))
|
unknowntopicID := crypto.Keccak256Hash([]byte("unknownEvent"))
|
||||||
@@ -1010,26 +1041,6 @@ func TestABI_EventById(t *testing.T) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestDuplicateMethodNames(t *testing.T) {
|
|
||||||
abiJSON := `[{"constant":false,"inputs":[{"name":"to","type":"address"},{"name":"value","type":"uint256"}],"name":"transfer","outputs":[{"name":"ok","type":"bool"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"to","type":"address"},{"name":"value","type":"uint256"},{"name":"data","type":"bytes"}],"name":"transfer","outputs":[{"name":"ok","type":"bool"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"to","type":"address"},{"name":"value","type":"uint256"},{"name":"data","type":"bytes"},{"name":"customFallback","type":"string"}],"name":"transfer","outputs":[{"name":"ok","type":"bool"}],"payable":false,"stateMutability":"nonpayable","type":"function"}]`
|
|
||||||
contractAbi, err := JSON(strings.NewReader(abiJSON))
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
if _, ok := contractAbi.Methods["transfer"]; !ok {
|
|
||||||
t.Fatalf("Could not find original method")
|
|
||||||
}
|
|
||||||
if _, ok := contractAbi.Methods["transfer0"]; !ok {
|
|
||||||
t.Fatalf("Could not find duplicate method")
|
|
||||||
}
|
|
||||||
if _, ok := contractAbi.Methods["transfer1"]; !ok {
|
|
||||||
t.Fatalf("Could not find duplicate method")
|
|
||||||
}
|
|
||||||
if _, ok := contractAbi.Methods["transfer2"]; ok {
|
|
||||||
t.Fatalf("Should not have found extra method")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// TestDoubleDuplicateMethodNames checks that if transfer0 already exists, there won't be a name
|
// TestDoubleDuplicateMethodNames checks that if transfer0 already exists, there won't be a name
|
||||||
// conflict and that the second transfer method will be renamed transfer1.
|
// conflict and that the second transfer method will be renamed transfer1.
|
||||||
func TestDoubleDuplicateMethodNames(t *testing.T) {
|
func TestDoubleDuplicateMethodNames(t *testing.T) {
|
||||||
@@ -1051,3 +1062,87 @@ func TestDoubleDuplicateMethodNames(t *testing.T) {
|
|||||||
t.Fatalf("Should not have found extra method")
|
t.Fatalf("Should not have found extra method")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// TestDoubleDuplicateEventNames checks that if send0 already exists, there won't be a name
|
||||||
|
// conflict and that the second send event will be renamed send1.
|
||||||
|
// The test runs the abi of the following contract.
|
||||||
|
// contract DuplicateEvent {
|
||||||
|
// event send(uint256 a);
|
||||||
|
// event send0();
|
||||||
|
// event send();
|
||||||
|
// }
|
||||||
|
func TestDoubleDuplicateEventNames(t *testing.T) {
|
||||||
|
abiJSON := `[{"anonymous": false,"inputs": [{"indexed": false,"internalType": "uint256","name": "a","type": "uint256"}],"name": "send","type": "event"},{"anonymous": false,"inputs": [],"name": "send0","type": "event"},{ "anonymous": false, "inputs": [],"name": "send","type": "event"}]`
|
||||||
|
contractAbi, err := JSON(strings.NewReader(abiJSON))
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
if _, ok := contractAbi.Events["send"]; !ok {
|
||||||
|
t.Fatalf("Could not find original event")
|
||||||
|
}
|
||||||
|
if _, ok := contractAbi.Events["send0"]; !ok {
|
||||||
|
t.Fatalf("Could not find duplicate event")
|
||||||
|
}
|
||||||
|
if _, ok := contractAbi.Events["send1"]; !ok {
|
||||||
|
t.Fatalf("Could not find duplicate event")
|
||||||
|
}
|
||||||
|
if _, ok := contractAbi.Events["send2"]; ok {
|
||||||
|
t.Fatalf("Should not have found extra event")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestUnnamedEventParam checks that an event with unnamed parameters is
|
||||||
|
// correctly handled
|
||||||
|
// The test runs the abi of the following contract.
|
||||||
|
// contract TestEvent {
|
||||||
|
// event send(uint256, uint256);
|
||||||
|
// }
|
||||||
|
func TestUnnamedEventParam(t *testing.T) {
|
||||||
|
abiJSON := `[{ "anonymous": false, "inputs": [{ "indexed": false,"internalType": "uint256", "name": "","type": "uint256"},{"indexed": false,"internalType": "uint256","name": "","type": "uint256"}],"name": "send","type": "event"}]`
|
||||||
|
contractAbi, err := JSON(strings.NewReader(abiJSON))
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
event, ok := contractAbi.Events["send"]
|
||||||
|
if !ok {
|
||||||
|
t.Fatalf("Could not find event")
|
||||||
|
}
|
||||||
|
if event.Inputs[0].Name != "arg0" {
|
||||||
|
t.Fatalf("Could not find input")
|
||||||
|
}
|
||||||
|
if event.Inputs[1].Name != "arg1" {
|
||||||
|
t.Fatalf("Could not find input")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestUnpackRevert(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
|
||||||
|
var cases = []struct {
|
||||||
|
input string
|
||||||
|
expect string
|
||||||
|
expectErr error
|
||||||
|
}{
|
||||||
|
{"", "", errors.New("invalid data for unpacking")},
|
||||||
|
{"08c379a1", "", errors.New("invalid data for unpacking")},
|
||||||
|
{"08c379a00000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000d72657665727420726561736f6e00000000000000000000000000000000000000", "revert reason", nil},
|
||||||
|
}
|
||||||
|
for index, c := range cases {
|
||||||
|
t.Run(fmt.Sprintf("case %d", index), func(t *testing.T) {
|
||||||
|
got, err := UnpackRevert(common.Hex2Bytes(c.input))
|
||||||
|
if c.expectErr != nil {
|
||||||
|
if err == nil {
|
||||||
|
t.Fatalf("Expected non-nil error")
|
||||||
|
}
|
||||||
|
if err.Error() != c.expectErr.Error() {
|
||||||
|
t.Fatalf("Expected error mismatch, want %v, got %v", c.expectErr, err)
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if c.expect != got {
|
||||||
|
t.Fatalf("Output mismatch, want %v, got %v", c.expect, got)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
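The hex vectors in TestUnpackRevert above are just the `Error(string)` encoding with the revert selector prepended. A sketch of building such a payload by hand and decoding it, mirroring what UnpackRevert does internally (it runs inside package abi and reuses revertSelector and NewType):

// Sketch: round-tripping a revert reason through UnpackRevert.
typ, _ := NewType("string", "", nil)
encoded, _ := (Arguments{{Type: typ}}).Pack("revert reason")
payload := append(append([]byte{}, revertSelector...), encoded...) // copy the selector before appending
reason, err := UnpackRevert(payload)
// reason == "revert reason", err == nil
_, _ = reason, err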
|||||||
@@ -59,18 +59,6 @@ func (argument *Argument) UnmarshalJSON(data []byte) error {
	return nil
}

-// LengthNonIndexed returns the number of arguments when not counting 'indexed' ones. Only events
-// can ever have 'indexed' arguments, it should always be false on arguments for method input/output
-func (arguments Arguments) LengthNonIndexed() int {
-	out := 0
-	for _, arg := range arguments {
-		if !arg.Indexed {
-			out++
-		}
-	}
-	return out
-}
-
// NonIndexed returns the arguments with indexed arguments filtered out
func (arguments Arguments) NonIndexed() Arguments {
	var ret []Argument
@@ -92,9 +80,8 @@ func (arguments Arguments) Unpack(v interface{}, data []byte) error {
	if len(data) == 0 {
		if len(arguments) != 0 {
			return fmt.Errorf("abi: attempting to unmarshall an empty string while arguments are expected")
-		} else {
-			return nil // Nothing to unmarshal, return
		}
+		return nil // Nothing to unmarshal, return
	}
	// make sure the passed value is arguments pointer
	if reflect.Ptr != reflect.ValueOf(v).Kind() {
@@ -104,6 +91,9 @@ func (arguments Arguments) Unpack(v interface{}, data []byte) error {
	if err != nil {
		return err
	}
+	if len(marshalledValues) == 0 {
+		return fmt.Errorf("abi: Unpack(no-values unmarshalled %T)", v)
+	}
	if arguments.isTuple() {
		return arguments.unpackTuple(v, marshalledValues)
	}
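For context on the Unpack path guarded above, a typical struct unpack looks like the sketch below. It is illustrative only: `ret` is a hypothetical slice of ABI-encoded return data, and the Uint256/String type values are assumed to be constructed with NewType as in the test helpers.

// Sketch: unpacking a (uint256, string) pair into a struct via Arguments.Unpack.
args := Arguments{
	{Name: "amount", Type: Uint256},
	{Name: "memo", Type: String},
}
var out struct {
	Amount *big.Int
	Memo   string
}
if err := args.Unpack(&out, ret); err != nil {
	// empty input with non-empty args, or a zero-value decode, is rejected as shown above
	log.Fatal(err)
}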
@@ -112,96 +102,20 @@ func (arguments Arguments) Unpack(v interface{}, data []byte) error {
|
|||||||
|
|
||||||
// UnpackIntoMap performs the operation hexdata -> mapping of argument name to argument value
|
// UnpackIntoMap performs the operation hexdata -> mapping of argument name to argument value
|
||||||
func (arguments Arguments) UnpackIntoMap(v map[string]interface{}, data []byte) error {
|
func (arguments Arguments) UnpackIntoMap(v map[string]interface{}, data []byte) error {
|
||||||
|
// Make sure map is not nil
|
||||||
|
if v == nil {
|
||||||
|
return fmt.Errorf("abi: cannot unpack into a nil map")
|
||||||
|
}
|
||||||
if len(data) == 0 {
|
if len(data) == 0 {
|
||||||
if len(arguments) != 0 {
|
if len(arguments) != 0 {
|
||||||
return fmt.Errorf("abi: attempting to unmarshall an empty string while arguments are expected")
|
return fmt.Errorf("abi: attempting to unmarshall an empty string while arguments are expected")
|
||||||
} else {
|
|
||||||
return nil // Nothing to unmarshal, return
|
|
||||||
}
|
}
|
||||||
|
return nil // Nothing to unmarshal, return
|
||||||
}
|
}
|
||||||
marshalledValues, err := arguments.UnpackValues(data)
|
marshalledValues, err := arguments.UnpackValues(data)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
return arguments.unpackIntoMap(v, marshalledValues)
|
|
||||||
}
|
|
||||||
|
|
||||||
// unpack sets the unmarshalled value to go format.
|
|
||||||
// Note the dst here must be settable.
|
|
||||||
func unpack(t *Type, dst interface{}, src interface{}) error {
|
|
||||||
var (
|
|
||||||
dstVal = reflect.ValueOf(dst).Elem()
|
|
||||||
srcVal = reflect.ValueOf(src)
|
|
||||||
)
|
|
||||||
tuple, typ := false, t
|
|
||||||
for {
|
|
||||||
if typ.T == SliceTy || typ.T == ArrayTy {
|
|
||||||
typ = typ.Elem
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
tuple = typ.T == TupleTy
|
|
||||||
break
|
|
||||||
}
|
|
||||||
if !tuple {
|
|
||||||
return set(dstVal, srcVal)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Dereferences interface or pointer wrapper
|
|
||||||
dstVal = indirectInterfaceOrPtr(dstVal)
|
|
||||||
|
|
||||||
switch t.T {
|
|
||||||
case TupleTy:
|
|
||||||
if dstVal.Kind() != reflect.Struct {
|
|
||||||
return fmt.Errorf("abi: invalid dst value for unpack, want struct, got %s", dstVal.Kind())
|
|
||||||
}
|
|
||||||
fieldmap, err := mapArgNamesToStructFields(t.TupleRawNames, dstVal)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
for i, elem := range t.TupleElems {
|
|
||||||
fname := fieldmap[t.TupleRawNames[i]]
|
|
||||||
field := dstVal.FieldByName(fname)
|
|
||||||
if !field.IsValid() {
|
|
||||||
return fmt.Errorf("abi: field %s can't found in the given value", t.TupleRawNames[i])
|
|
||||||
}
|
|
||||||
if err := unpack(elem, field.Addr().Interface(), srcVal.Field(i).Interface()); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
case SliceTy:
|
|
||||||
if dstVal.Kind() != reflect.Slice {
|
|
||||||
return fmt.Errorf("abi: invalid dst value for unpack, want slice, got %s", dstVal.Kind())
|
|
||||||
}
|
|
||||||
slice := reflect.MakeSlice(dstVal.Type(), srcVal.Len(), srcVal.Len())
|
|
||||||
for i := 0; i < slice.Len(); i++ {
|
|
||||||
if err := unpack(t.Elem, slice.Index(i).Addr().Interface(), srcVal.Index(i).Interface()); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
dstVal.Set(slice)
|
|
||||||
case ArrayTy:
|
|
||||||
if dstVal.Kind() != reflect.Array {
|
|
||||||
return fmt.Errorf("abi: invalid dst value for unpack, want array, got %s", dstVal.Kind())
|
|
||||||
}
|
|
||||||
array := reflect.New(dstVal.Type()).Elem()
|
|
||||||
for i := 0; i < array.Len(); i++ {
|
|
||||||
if err := unpack(t.Elem, array.Index(i).Addr().Interface(), srcVal.Index(i).Interface()); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
dstVal.Set(array)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// unpackIntoMap unpacks marshalledValues into the provided map[string]interface{}
|
|
||||||
func (arguments Arguments) unpackIntoMap(v map[string]interface{}, marshalledValues []interface{}) error {
|
|
||||||
// Make sure map is not nil
|
|
||||||
if v == nil {
|
|
||||||
return fmt.Errorf("abi: cannot unpack into a nil map")
|
|
||||||
}
|
|
||||||
|
|
||||||
for i, arg := range arguments.NonIndexed() {
|
for i, arg := range arguments.NonIndexed() {
|
||||||
v[arg.Name] = marshalledValues[i]
|
v[arg.Name] = marshalledValues[i]
|
||||||
}
|
}
|
||||||
@@ -210,88 +124,63 @@ func (arguments Arguments) unpackIntoMap(v map[string]interface{}, marshalledVal

// unpackAtomic unpacks ( hexdata -> go ) a single value
func (arguments Arguments) unpackAtomic(v interface{}, marshalledValues interface{}) error {
-	if arguments.LengthNonIndexed() == 0 {
-		return nil
-	}
-	argument := arguments.NonIndexed()[0]
-	elem := reflect.ValueOf(v).Elem()
+	dst := reflect.ValueOf(v).Elem()
+	src := reflect.ValueOf(marshalledValues)

-	if elem.Kind() == reflect.Struct && argument.Type.T != TupleTy {
-		fieldmap, err := mapArgNamesToStructFields([]string{argument.Name}, elem)
-		if err != nil {
-			return err
-		}
-		field := elem.FieldByName(fieldmap[argument.Name])
-		if !field.IsValid() {
-			return fmt.Errorf("abi: field %s can't be found in the given value", argument.Name)
-		}
-		return unpack(&argument.Type, field.Addr().Interface(), marshalledValues)
+	if dst.Kind() == reflect.Struct && src.Kind() != reflect.Struct {
+		return set(dst.Field(0), src)
	}
-	return unpack(&argument.Type, elem.Addr().Interface(), marshalledValues)
+	return set(dst, src)
}
|
|
||||||
// unpackTuple unpacks ( hexdata -> go ) a batch of values.
|
// unpackTuple unpacks ( hexdata -> go ) a batch of values.
|
||||||
func (arguments Arguments) unpackTuple(v interface{}, marshalledValues []interface{}) error {
|
func (arguments Arguments) unpackTuple(v interface{}, marshalledValues []interface{}) error {
|
||||||
var (
|
value := reflect.ValueOf(v).Elem()
|
||||||
value = reflect.ValueOf(v).Elem()
|
nonIndexedArgs := arguments.NonIndexed()
|
||||||
typ = value.Type()
|
|
||||||
kind = value.Kind()
|
|
||||||
)
|
|
||||||
if err := requireUnpackKind(value, typ, kind, arguments); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// If the interface is a struct, get of abi->struct_field mapping
|
switch value.Kind() {
|
||||||
var abi2struct map[string]string
|
case reflect.Struct:
|
||||||
if kind == reflect.Struct {
|
argNames := make([]string, len(nonIndexedArgs))
|
||||||
var (
|
for i, arg := range nonIndexedArgs {
|
||||||
argNames []string
|
argNames[i] = arg.Name
|
||||||
err error
|
|
||||||
)
|
|
||||||
for _, arg := range arguments.NonIndexed() {
|
|
||||||
argNames = append(argNames, arg.Name)
|
|
||||||
}
|
}
|
||||||
abi2struct, err = mapArgNamesToStructFields(argNames, value)
|
var err error
|
||||||
|
abi2struct, err := mapArgNamesToStructFields(argNames, value)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
}
|
for i, arg := range nonIndexedArgs {
|
||||||
for i, arg := range arguments.NonIndexed() {
|
|
||||||
switch kind {
|
|
||||||
case reflect.Struct:
|
|
||||||
field := value.FieldByName(abi2struct[arg.Name])
|
field := value.FieldByName(abi2struct[arg.Name])
|
||||||
if !field.IsValid() {
|
if !field.IsValid() {
|
||||||
return fmt.Errorf("abi: field %s can't be found in the given value", arg.Name)
|
return fmt.Errorf("abi: field %s can't be found in the given value", arg.Name)
|
||||||
}
|
}
|
||||||
if err := unpack(&arg.Type, field.Addr().Interface(), marshalledValues[i]); err != nil {
|
if err := set(field, reflect.ValueOf(marshalledValues[i])); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
case reflect.Slice, reflect.Array:
|
|
||||||
if value.Len() < i {
|
|
||||||
return fmt.Errorf("abi: insufficient number of arguments for unpack, want %d, got %d", len(arguments), value.Len())
|
|
||||||
}
|
|
||||||
v := value.Index(i)
|
|
||||||
if err := requireAssignable(v, reflect.ValueOf(marshalledValues[i])); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if err := unpack(&arg.Type, v.Addr().Interface(), marshalledValues[i]); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
default:
|
|
||||||
return fmt.Errorf("abi:[2] cannot unmarshal tuple in to %v", typ)
|
|
||||||
}
|
}
|
||||||
|
case reflect.Slice, reflect.Array:
|
||||||
|
if value.Len() < len(marshalledValues) {
|
||||||
|
return fmt.Errorf("abi: insufficient number of arguments for unpack, want %d, got %d", len(arguments), value.Len())
|
||||||
|
}
|
||||||
|
for i := range nonIndexedArgs {
|
||||||
|
if err := set(value.Index(i), reflect.ValueOf(marshalledValues[i])); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
return fmt.Errorf("abi:[2] cannot unmarshal tuple in to %v", value.Type())
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
|
|
||||||
}
|
}
|
||||||
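With the set()-based unpackTuple above, a multi-value return with named outputs can be unpacked straight into a struct whose exported fields match the output names. The sketch below is illustrative: the ABI string, the method name `get` and the `ret` bytes are hypothetical, and it assumes the Unpack(v, name, data) form used elsewhere in this diff.

// Sketch: decoding a two-output method into a struct (runs inside package abi).
const getterABI = `[{"type":"function","name":"get","outputs":[{"name":"amount","type":"uint256"},{"name":"owner","type":"address"}]}]`
parsed, _ := JSON(strings.NewReader(getterABI))
var out struct {
	Amount *big.Int
	Owner  common.Address
}
if err := parsed.Unpack(&out, "get", ret); err != nil { // ret: hypothetical ABI-encoded return data
	log.Fatal(err)
}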

// UnpackValues can be used to unpack ABI-encoded hexdata according to the ABI-specification,
// without supplying a struct to unpack into. Instead, this method returns a list containing the
// values. An atomic argument will be a list with one element.
func (arguments Arguments) UnpackValues(data []byte) ([]interface{}, error) {
-	retval := make([]interface{}, 0, arguments.LengthNonIndexed())
+	nonIndexedArgs := arguments.NonIndexed()
+	retval := make([]interface{}, 0, len(nonIndexedArgs))
	virtualArgs := 0
-	for index, arg := range arguments.NonIndexed() {
+	for index, arg := range nonIndexedArgs {
		marshalledValue, err := toGoType((index+virtualArgs)*32, arg.Type, data)
		if arg.Type.T == ArrayTy && !isDynamicType(arg.Type) {
			// If we have a static array, like [3]uint256, these are coded as
@@ -329,7 +218,7 @@ func (arguments Arguments) Pack(args ...interface{}) ([]byte, error) {
	// Make sure arguments match up and pack them
	abiArgs := arguments
	if len(args) != len(abiArgs) {
-		return nil, fmt.Errorf("argument count mismatch: %d for %d", len(args), len(abiArgs))
+		return nil, fmt.Errorf("argument count mismatch: got %d for %d", len(args), len(abiArgs))
	}
	// variable input is the output appended at the end of packed
	// output. This is used for strings and bytes types input.
|
|||||||
@@ -25,8 +25,10 @@ import (
|
|||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum"
|
"github.com/ethereum/go-ethereum"
|
||||||
|
"github.com/ethereum/go-ethereum/accounts/abi"
|
||||||
"github.com/ethereum/go-ethereum/accounts/abi/bind"
|
"github.com/ethereum/go-ethereum/accounts/abi/bind"
|
||||||
"github.com/ethereum/go-ethereum/common"
|
"github.com/ethereum/go-ethereum/common"
|
||||||
|
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||||
"github.com/ethereum/go-ethereum/common/math"
|
"github.com/ethereum/go-ethereum/common/math"
|
||||||
"github.com/ethereum/go-ethereum/consensus/ethash"
|
"github.com/ethereum/go-ethereum/consensus/ethash"
|
||||||
"github.com/ethereum/go-ethereum/core"
|
"github.com/ethereum/go-ethereum/core"
|
||||||
@@ -38,6 +40,7 @@ import (
|
|||||||
"github.com/ethereum/go-ethereum/eth/filters"
|
"github.com/ethereum/go-ethereum/eth/filters"
|
||||||
"github.com/ethereum/go-ethereum/ethdb"
|
"github.com/ethereum/go-ethereum/ethdb"
|
||||||
"github.com/ethereum/go-ethereum/event"
|
"github.com/ethereum/go-ethereum/event"
|
||||||
|
"github.com/ethereum/go-ethereum/log"
|
||||||
"github.com/ethereum/go-ethereum/params"
|
"github.com/ethereum/go-ethereum/params"
|
||||||
"github.com/ethereum/go-ethereum/rpc"
|
"github.com/ethereum/go-ethereum/rpc"
|
||||||
)
|
)
|
||||||
@@ -46,19 +49,23 @@ import (
|
|||||||
var _ bind.ContractBackend = (*SimulatedBackend)(nil)
|
var _ bind.ContractBackend = (*SimulatedBackend)(nil)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
errBlockNumberUnsupported = errors.New("simulatedBackend cannot access blocks other than the latest block")
|
errBlockNumberUnsupported = errors.New("simulatedBackend cannot access blocks other than the latest block")
|
||||||
errGasEstimationFailed = errors.New("gas required exceeds allowance or always failing transaction")
|
errBlockDoesNotExist = errors.New("block does not exist in blockchain")
|
||||||
|
errTransactionDoesNotExist = errors.New("transaction does not exist")
|
||||||
)
|
)
|
||||||
|
|
||||||
// SimulatedBackend implements bind.ContractBackend, simulating a blockchain in
|
// SimulatedBackend implements bind.ContractBackend, simulating a blockchain in
|
||||||
// the background. Its main purpose is to allow easily testing contract bindings.
|
// the background. Its main purpose is to allow easily testing contract bindings.
|
||||||
|
// Simulated backend implements the following interfaces:
|
||||||
|
// ChainReader, ChainStateReader, ContractBackend, ContractCaller, ContractFilterer, ContractTransactor,
|
||||||
|
// DeployBackend, GasEstimator, GasPricer, LogFilterer, PendingContractCaller, TransactionReader, and TransactionSender
|
||||||
type SimulatedBackend struct {
|
type SimulatedBackend struct {
|
||||||
database ethdb.Database // In memory database to store our testing data
|
database ethdb.Database // In memory database to store our testing data
|
||||||
blockchain *core.BlockChain // Ethereum blockchain to handle the consensus
|
blockchain *core.BlockChain // Ethereum blockchain to handle the consensus
|
||||||
|
|
||||||
mu sync.Mutex
|
mu sync.Mutex
|
||||||
pendingBlock *types.Block // Currently pending block that will be imported on request
|
pendingBlock *types.Block // Currently pending block that will be imported on request
|
||||||
pendingState *state.StateDB // Currently pending state that will be the active on on request
|
pendingState *state.StateDB // Currently pending state that will be the active on request
|
||||||
|
|
||||||
events *filters.EventSystem // Event system for filtering log events live
|
events *filters.EventSystem // Event system for filtering log events live
|
||||||
|
|
||||||
@@ -70,13 +77,13 @@ type SimulatedBackend struct {
|
|||||||
func NewSimulatedBackendWithDatabase(database ethdb.Database, alloc core.GenesisAlloc, gasLimit uint64) *SimulatedBackend {
|
func NewSimulatedBackendWithDatabase(database ethdb.Database, alloc core.GenesisAlloc, gasLimit uint64) *SimulatedBackend {
|
||||||
genesis := core.Genesis{Config: params.AllEthashProtocolChanges, GasLimit: gasLimit, Alloc: alloc}
|
genesis := core.Genesis{Config: params.AllEthashProtocolChanges, GasLimit: gasLimit, Alloc: alloc}
|
||||||
genesis.MustCommit(database)
|
genesis.MustCommit(database)
|
||||||
blockchain, _ := core.NewBlockChain(database, nil, genesis.Config, ethash.NewFaker(), vm.Config{}, nil)
|
blockchain, _ := core.NewBlockChain(database, nil, genesis.Config, ethash.NewFaker(), vm.Config{}, nil, nil)
|
||||||
|
|
||||||
backend := &SimulatedBackend{
|
backend := &SimulatedBackend{
|
||||||
database: database,
|
database: database,
|
||||||
blockchain: blockchain,
|
blockchain: blockchain,
|
||||||
config: genesis.Config,
|
config: genesis.Config,
|
||||||
events: filters.NewEventSystem(new(event.TypeMux), &filterBackend{database, blockchain}, false),
|
events: filters.NewEventSystem(&filterBackend{database, blockchain}, false),
|
||||||
}
|
}
|
||||||
backend.rollback()
|
backend.rollback()
|
||||||
return backend
|
return backend
|
||||||
@@ -119,7 +126,19 @@ func (b *SimulatedBackend) rollback() {
	statedb, _ := b.blockchain.State()

	b.pendingBlock = blocks[0]
-	b.pendingState, _ = state.New(b.pendingBlock.Root(), statedb.Database())
+	b.pendingState, _ = state.New(b.pendingBlock.Root(), statedb.Database(), nil)
+}
+
+// stateByBlockNumber retrieves a state by a given blocknumber.
+func (b *SimulatedBackend) stateByBlockNumber(ctx context.Context, blockNumber *big.Int) (*state.StateDB, error) {
+	if blockNumber == nil || blockNumber.Cmp(b.blockchain.CurrentBlock().Number()) == 0 {
+		return b.blockchain.State()
+	}
+	block, err := b.blockByNumberNoLock(ctx, blockNumber)
+	if err != nil {
+		return nil, err
+	}
+	return b.blockchain.StateAt(block.Root())
}
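With stateByBlockNumber in place, the state accessors below can serve historical blocks instead of failing with errBlockNumberUnsupported. A rough usage sketch, not part of the diff: the address, balance and gas limit are hypothetical, and it assumes the package's usual NewSimulatedBackend constructor and Blockchain() accessor.

// Sketch: reading a balance at an older block (from within package backends).
sim := NewSimulatedBackend(core.GenesisAlloc{addr: {Balance: big.NewInt(1000000000)}}, 8000000)
genesisNumber := sim.Blockchain().CurrentBlock().Number()
// ... send a transaction from addr and sim.Commit() ...
oldBal, _ := sim.BalanceAt(context.Background(), addr, genesisNumber) // balance at the earlier block
curBal, _ := sim.BalanceAt(context.Background(), addr, nil)           // nil means latest
_, _ = oldBal, curBal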
|
|
||||||
// CodeAt returns the code associated with a certain account in the blockchain.
|
// CodeAt returns the code associated with a certain account in the blockchain.
|
||||||
@@ -127,10 +146,11 @@ func (b *SimulatedBackend) CodeAt(ctx context.Context, contract common.Address,
|
|||||||
b.mu.Lock()
|
b.mu.Lock()
|
||||||
defer b.mu.Unlock()
|
defer b.mu.Unlock()
|
||||||
|
|
||||||
if blockNumber != nil && blockNumber.Cmp(b.blockchain.CurrentBlock().Number()) != 0 {
|
statedb, err := b.stateByBlockNumber(ctx, blockNumber)
|
||||||
return nil, errBlockNumberUnsupported
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
}
|
}
|
||||||
statedb, _ := b.blockchain.State()
|
|
||||||
return statedb.GetCode(contract), nil
|
return statedb.GetCode(contract), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -139,10 +159,11 @@ func (b *SimulatedBackend) BalanceAt(ctx context.Context, contract common.Addres
|
|||||||
b.mu.Lock()
|
b.mu.Lock()
|
||||||
defer b.mu.Unlock()
|
defer b.mu.Unlock()
|
||||||
|
|
||||||
if blockNumber != nil && blockNumber.Cmp(b.blockchain.CurrentBlock().Number()) != 0 {
|
statedb, err := b.stateByBlockNumber(ctx, blockNumber)
|
||||||
return nil, errBlockNumberUnsupported
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
}
|
}
|
||||||
statedb, _ := b.blockchain.State()
|
|
||||||
return statedb.GetBalance(contract), nil
|
return statedb.GetBalance(contract), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -151,10 +172,11 @@ func (b *SimulatedBackend) NonceAt(ctx context.Context, contract common.Address,
|
|||||||
b.mu.Lock()
|
b.mu.Lock()
|
||||||
defer b.mu.Unlock()
|
defer b.mu.Unlock()
|
||||||
|
|
||||||
if blockNumber != nil && blockNumber.Cmp(b.blockchain.CurrentBlock().Number()) != 0 {
|
statedb, err := b.stateByBlockNumber(ctx, blockNumber)
|
||||||
return 0, errBlockNumberUnsupported
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
}
|
}
|
||||||
statedb, _ := b.blockchain.State()
|
|
||||||
return statedb.GetNonce(contract), nil
|
return statedb.GetNonce(contract), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -163,16 +185,20 @@ func (b *SimulatedBackend) StorageAt(ctx context.Context, contract common.Addres
|
|||||||
b.mu.Lock()
|
b.mu.Lock()
|
||||||
defer b.mu.Unlock()
|
defer b.mu.Unlock()
|
||||||
|
|
||||||
if blockNumber != nil && blockNumber.Cmp(b.blockchain.CurrentBlock().Number()) != 0 {
|
statedb, err := b.stateByBlockNumber(ctx, blockNumber)
|
||||||
return nil, errBlockNumberUnsupported
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
}
|
}
|
||||||
statedb, _ := b.blockchain.State()
|
|
||||||
val := statedb.GetState(contract, key)
|
val := statedb.GetState(contract, key)
|
||||||
return val[:], nil
|
return val[:], nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// TransactionReceipt returns the receipt of a transaction.
|
// TransactionReceipt returns the receipt of a transaction.
|
||||||
func (b *SimulatedBackend) TransactionReceipt(ctx context.Context, txHash common.Hash) (*types.Receipt, error) {
|
func (b *SimulatedBackend) TransactionReceipt(ctx context.Context, txHash common.Hash) (*types.Receipt, error) {
|
||||||
|
b.mu.Lock()
|
||||||
|
defer b.mu.Unlock()
|
||||||
|
|
||||||
receipt, _, _, _ := rawdb.ReadReceipt(b.database, txHash, b.config)
|
receipt, _, _, _ := rawdb.ReadReceipt(b.database, txHash, b.config)
|
||||||
return receipt, nil
|
return receipt, nil
|
||||||
}
|
}
|
||||||
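Editorial aside (not part of the patch): with stateByBlockNumber wired into the accessors above, state queries against the simulated backend can target past blocks instead of failing with errBlockNumberUnsupported. A minimal sketch, assuming a backend and funded address set up as in the earlier example.

```go
package example

import (
	"context"
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
	"github.com/ethereum/go-ethereum/common"
)

func showBalances(backend *backends.SimulatedBackend, addr common.Address) error {
	ctx := context.Background()
	backend.Commit() // mine an empty block so the head moves past genesis

	head, err := backend.BalanceAt(ctx, addr, nil) // nil block number = current head state
	if err != nil {
		return err
	}
	genesis, err := backend.BalanceAt(ctx, addr, big.NewInt(0)) // state at block #0
	if err != nil {
		return err
	}
	fmt.Println("head:", head, "genesis:", genesis)
	return nil
}
```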
@@ -196,6 +222,121 @@ func (b *SimulatedBackend) TransactionByHash(ctx context.Context, txHash common.Hash) (*types.Transaction, bool, error) {
 	return nil, false, ethereum.NotFound
 }

+// BlockByHash retrieves a block based on the block hash
+func (b *SimulatedBackend) BlockByHash(ctx context.Context, hash common.Hash) (*types.Block, error) {
+	b.mu.Lock()
+	defer b.mu.Unlock()
+
+	if hash == b.pendingBlock.Hash() {
+		return b.pendingBlock, nil
+	}
+
+	block := b.blockchain.GetBlockByHash(hash)
+	if block != nil {
+		return block, nil
+	}
+
+	return nil, errBlockDoesNotExist
+}
+
+// BlockByNumber retrieves a block from the database by number, caching it
+// (associated with its hash) if found.
+func (b *SimulatedBackend) BlockByNumber(ctx context.Context, number *big.Int) (*types.Block, error) {
+	b.mu.Lock()
+	defer b.mu.Unlock()
+
+	return b.blockByNumberNoLock(ctx, number)
+}
+
+// blockByNumberNoLock retrieves a block from the database by number, caching it
+// (associated with its hash) if found without Lock.
+func (b *SimulatedBackend) blockByNumberNoLock(ctx context.Context, number *big.Int) (*types.Block, error) {
+	if number == nil || number.Cmp(b.pendingBlock.Number()) == 0 {
+		return b.blockchain.CurrentBlock(), nil
+	}
+
+	block := b.blockchain.GetBlockByNumber(uint64(number.Int64()))
+	if block == nil {
+		return nil, errBlockDoesNotExist
+	}
+
+	return block, nil
+}
+
+// HeaderByHash returns a block header from the current canonical chain.
+func (b *SimulatedBackend) HeaderByHash(ctx context.Context, hash common.Hash) (*types.Header, error) {
+	b.mu.Lock()
+	defer b.mu.Unlock()
+
+	if hash == b.pendingBlock.Hash() {
+		return b.pendingBlock.Header(), nil
+	}
+
+	header := b.blockchain.GetHeaderByHash(hash)
+	if header == nil {
+		return nil, errBlockDoesNotExist
+	}
+
+	return header, nil
+}
+
+// HeaderByNumber returns a block header from the current canonical chain. If number is
+// nil, the latest known header is returned.
+func (b *SimulatedBackend) HeaderByNumber(ctx context.Context, block *big.Int) (*types.Header, error) {
+	b.mu.Lock()
+	defer b.mu.Unlock()
+
+	if block == nil || block.Cmp(b.pendingBlock.Number()) == 0 {
+		return b.blockchain.CurrentHeader(), nil
+	}
+
+	return b.blockchain.GetHeaderByNumber(uint64(block.Int64())), nil
+}
+
+// TransactionCount returns the number of transactions in a given block
+func (b *SimulatedBackend) TransactionCount(ctx context.Context, blockHash common.Hash) (uint, error) {
+	b.mu.Lock()
+	defer b.mu.Unlock()
+
+	if blockHash == b.pendingBlock.Hash() {
+		return uint(b.pendingBlock.Transactions().Len()), nil
+	}
+
+	block := b.blockchain.GetBlockByHash(blockHash)
+	if block == nil {
+		return uint(0), errBlockDoesNotExist
+	}
+
+	return uint(block.Transactions().Len()), nil
+}
+
+// TransactionInBlock returns the transaction for a specific block at a specific index
+func (b *SimulatedBackend) TransactionInBlock(ctx context.Context, blockHash common.Hash, index uint) (*types.Transaction, error) {
+	b.mu.Lock()
+	defer b.mu.Unlock()
+
+	if blockHash == b.pendingBlock.Hash() {
+		transactions := b.pendingBlock.Transactions()
+		if uint(len(transactions)) < index+1 {
+			return nil, errTransactionDoesNotExist
+		}
+
+		return transactions[index], nil
+	}
+
+	block := b.blockchain.GetBlockByHash(blockHash)
+	if block == nil {
+		return nil, errBlockDoesNotExist
+	}
+
+	transactions := block.Transactions()
+	if uint(len(transactions)) < index+1 {
+		return nil, errTransactionDoesNotExist
+	}
+
+	return transactions[index], nil
+}
+
 // PendingCodeAt returns the code associated with an account in the pending state.
 func (b *SimulatedBackend) PendingCodeAt(ctx context.Context, contract common.Address) ([]byte, error) {
 	b.mu.Lock()
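Editorial aside (not part of the patch): the new block, header and transaction accessors added above give the simulated backend rough read-path parity with ethclient. A small sketch of chaining them; error handling is kept minimal.

```go
package example

import (
	"context"
	"fmt"

	"github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
)

func inspectHead(backend *backends.SimulatedBackend) error {
	ctx := context.Background()

	header, err := backend.HeaderByNumber(ctx, nil) // nil means latest
	if err != nil {
		return err
	}
	block, err := backend.BlockByNumber(ctx, header.Number)
	if err != nil {
		return err
	}
	count, err := backend.TransactionCount(ctx, block.Hash())
	if err != nil {
		return err
	}
	fmt.Printf("block #%v (%s) holds %d transactions\n", block.Number(), block.Hash().Hex(), count)
	if count > 0 {
		tx, err := backend.TransactionInBlock(ctx, block.Hash(), 0)
		if err != nil {
			return err
		}
		fmt.Println("first tx:", tx.Hash().Hex())
	}
	return nil
}
```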
@@ -204,6 +345,36 @@ func (b *SimulatedBackend) PendingCodeAt(ctx context.Context, contract common.Address) ([]byte, error) {
 	return b.pendingState.GetCode(contract), nil
 }

+func newRevertError(result *core.ExecutionResult) *revertError {
+	reason, errUnpack := abi.UnpackRevert(result.Revert())
+	err := errors.New("execution reverted")
+	if errUnpack == nil {
+		err = fmt.Errorf("execution reverted: %v", reason)
+	}
+	return &revertError{
+		error:  err,
+		reason: hexutil.Encode(result.Revert()),
+	}
+}
+
+// revertError is an API error that encompassas an EVM revertal with JSON error
+// code and a binary data blob.
+type revertError struct {
+	error
+	reason string // revert reason hex encoded
+}
+
+// ErrorCode returns the JSON error code for a revertal.
+// See: https://github.com/ethereum/wiki/wiki/JSON-RPC-Error-Codes-Improvement-Proposal
+func (e *revertError) ErrorCode() int {
+	return 3
+}
+
+// ErrorData returns the hex encoded revert reason.
+func (e *revertError) ErrorData() interface{} {
+	return e.reason
+}
+
 // CallContract executes a contract call.
 func (b *SimulatedBackend) CallContract(ctx context.Context, call ethereum.CallMsg, blockNumber *big.Int) ([]byte, error) {
 	b.mu.Lock()
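Editorial aside (not part of the patch): revertError itself is unexported, so callers only ever see it through the error interface. A hedged sketch of pulling the revert payload back out; the rpcDataError interface below is declared locally purely for illustration (it mirrors the ErrorData method added above), and CallContract, updated in the next hunk, is what returns it.

```go
package example

import (
	"context"
	"fmt"

	"github.com/ethereum/go-ethereum"
	"github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
)

// rpcDataError is what a revert-aware caller can assert against: any error
// that also exposes hex-encoded return data (see ErrorData above).
type rpcDataError interface {
	error
	ErrorData() interface{}
}

func callAndExplain(backend *backends.SimulatedBackend, msg ethereum.CallMsg) {
	if _, err := backend.CallContract(context.Background(), msg, nil); err != nil {
		if de, ok := err.(rpcDataError); ok {
			fmt.Println("execution reverted, raw revert data:", de.ErrorData())
			return
		}
		fmt.Println("call failed:", err)
	}
}
```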
@@ -216,8 +387,15 @@ func (b *SimulatedBackend) CallContract(ctx context.Context, call ethereum.CallMsg, blockNumber *big.Int) ([]byte, error) {
 	if err != nil {
 		return nil, err
 	}
-	rval, _, _, err := b.callContract(ctx, call, b.blockchain.CurrentBlock(), state)
-	return rval, err
+	res, err := b.callContract(ctx, call, b.blockchain.CurrentBlock(), state)
+	if err != nil {
+		return nil, err
+	}
+	// If the result contains a revert reason, try to unpack and return it.
+	if len(res.Revert()) > 0 {
+		return nil, newRevertError(res)
+	}
+	return res.Return(), res.Err
 }

 // PendingCallContract executes a contract call on the pending state.
@@ -226,8 +404,15 @@ func (b *SimulatedBackend) PendingCallContract(ctx context.Context, call ethereum.CallMsg) ([]byte, error) {
 	defer b.mu.Unlock()
 	defer b.pendingState.RevertToSnapshot(b.pendingState.Snapshot())

-	rval, _, _, err := b.callContract(ctx, call, b.pendingBlock, b.pendingState)
-	return rval, err
+	res, err := b.callContract(ctx, call, b.pendingBlock, b.pendingState)
+	if err != nil {
+		return nil, err
+	}
+	// If the result contains a revert reason, try to unpack and return it.
+	if len(res.Revert()) > 0 {
+		return nil, newRevertError(res)
+	}
+	return res.Return(), res.Err
 }

 // PendingNonceAt implements PendingStateReader.PendingNonceAt, retrieving
@@ -262,25 +447,57 @@ func (b *SimulatedBackend) EstimateGas(ctx context.Context, call ethereum.CallMsg) (uint64, error) {
 	} else {
 		hi = b.pendingBlock.GasLimit()
 	}
+	// Recap the highest gas allowance with account's balance.
+	if call.GasPrice != nil && call.GasPrice.BitLen() != 0 {
+		balance := b.pendingState.GetBalance(call.From) // from can't be nil
+		available := new(big.Int).Set(balance)
+		if call.Value != nil {
+			if call.Value.Cmp(available) >= 0 {
+				return 0, errors.New("insufficient funds for transfer")
+			}
+			available.Sub(available, call.Value)
+		}
+		allowance := new(big.Int).Div(available, call.GasPrice)
+		if allowance.IsUint64() && hi > allowance.Uint64() {
+			transfer := call.Value
+			if transfer == nil {
+				transfer = new(big.Int)
+			}
+			log.Warn("Gas estimation capped by limited funds", "original", hi, "balance", balance,
+				"sent", transfer, "gasprice", call.GasPrice, "fundable", allowance)
+			hi = allowance.Uint64()
+		}
+	}
 	cap = hi

 	// Create a helper to check if a gas allowance results in an executable transaction
-	executable := func(gas uint64) bool {
+	executable := func(gas uint64) (bool, *core.ExecutionResult, error) {
 		call.Gas = gas

 		snapshot := b.pendingState.Snapshot()
-		_, _, failed, err := b.callContract(ctx, call, b.pendingBlock, b.pendingState)
+		res, err := b.callContract(ctx, call, b.pendingBlock, b.pendingState)
 		b.pendingState.RevertToSnapshot(snapshot)

-		if err != nil || failed {
-			return false
+		if err != nil {
+			if err == core.ErrIntrinsicGas {
+				return true, nil, nil // Special case, raise gas limit
+			}
+			return true, nil, err // Bail out
 		}
-		return true
+		return res.Failed(), res, nil
 	}
 	// Execute the binary search and hone in on an executable gas limit
 	for lo+1 < hi {
 		mid := (hi + lo) / 2
-		if !executable(mid) {
+		failed, _, err := executable(mid)
+
+		// If the error is not nil(consensus error), it means the provided message
+		// call or transaction will never be accepted no matter how much gas it is
+		// assigned. Return the error directly, don't struggle any more
+		if err != nil {
+			return 0, err
+		}
+		if failed {
 			lo = mid
 		} else {
 			hi = mid
@@ -288,8 +505,19 @@ func (b *SimulatedBackend) EstimateGas(ctx context.Context, call ethereum.CallMsg) (uint64, error) {
 		}
 	}
 	// Reject the transaction as invalid if it still fails at the highest allowance
 	if hi == cap {
-		if !executable(hi) {
-			return 0, errGasEstimationFailed
+		failed, result, err := executable(hi)
+		if err != nil {
+			return 0, err
+		}
+		if failed {
+			if result != nil && result.Err != vm.ErrOutOfGas {
+				if len(result.Revert()) > 0 {
+					return 0, newRevertError(result)
+				}
+				return 0, result.Err
+			}
+			// Otherwise, the specified gas cap is too low
+			return 0, fmt.Errorf("gas required exceeds allowance (%d)", cap)
 		}
 	}
 	return hi, nil
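Editorial aside (not part of the patch): the estimator still binary-searches between the intrinsic cost and the gas cap, but a call that fails at the cap now reports why. A minimal sketch of the caller side; the addresses and calldata are placeholders.

```go
package example

import (
	"context"
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum"
	"github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
	"github.com/ethereum/go-ethereum/common"
)

func estimate(backend *backends.SimulatedBackend, from, to common.Address, calldata []byte) {
	gas, err := backend.EstimateGas(context.Background(), ethereum.CallMsg{
		From:  from,
		To:    &to,
		Value: big.NewInt(0),
		Data:  calldata,
	})
	if err != nil {
		// With this change a permanently reverting call surfaces the decoded
		// revert reason (or "gas required exceeds allowance (N)") instead of
		// the old generic errGasEstimationFailed.
		fmt.Println("estimation failed:", err)
		return
	}
	fmt.Println("gas estimate:", gas)
}
```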
@@ -297,7 +525,7 @@ func (b *SimulatedBackend) EstimateGas(ctx context.Context, call ethereum.CallMsg) (uint64, error) {

 // callContract implements common code between normal and pending contract calls.
 // state is modified during execution, make sure to copy it if necessary.
-func (b *SimulatedBackend) callContract(ctx context.Context, call ethereum.CallMsg, block *types.Block, statedb *state.StateDB) ([]byte, uint64, bool, error) {
+func (b *SimulatedBackend) callContract(ctx context.Context, call ethereum.CallMsg, block *types.Block, statedb *state.StateDB) (*core.ExecutionResult, error) {
 	// Ensure message is initialized properly.
 	if call.GasPrice == nil {
 		call.GasPrice = big.NewInt(1)
@@ -347,7 +575,7 @@ func (b *SimulatedBackend) SendTransaction(ctx context.Context, tx *types.Transaction) error {
 	statedb, _ := b.blockchain.State()

 	b.pendingBlock = blocks[0]
-	b.pendingState, _ = state.New(b.pendingBlock.Root(), statedb.Database())
+	b.pendingState, _ = state.New(b.pendingBlock.Root(), statedb.Database(), nil)
 	return nil
 }

@@ -419,10 +647,38 @@ func (b *SimulatedBackend) SubscribeFilterLogs(ctx context.Context, query ethereum.FilterQuery, ch chan<- types.Log) (ethereum.Subscription, error) {
 	}), nil
 }

+// SubscribeNewHead returns an event subscription for a new header
+func (b *SimulatedBackend) SubscribeNewHead(ctx context.Context, ch chan<- *types.Header) (ethereum.Subscription, error) {
+	// subscribe to a new head
+	sink := make(chan *types.Header)
+	sub := b.events.SubscribeNewHeads(sink)
+
+	return event.NewSubscription(func(quit <-chan struct{}) error {
+		defer sub.Unsubscribe()
+		for {
+			select {
+			case head := <-sink:
+				select {
+				case ch <- head:
+				case err := <-sub.Err():
+					return err
+				case <-quit:
+					return nil
+				}
+			case err := <-sub.Err():
+				return err
+			case <-quit:
+				return nil
+			}
+		}
+	}), nil
+}
+
 // AdjustTime adds a time shift to the simulated clock.
 func (b *SimulatedBackend) AdjustTime(adjustment time.Duration) error {
 	b.mu.Lock()
 	defer b.mu.Unlock()

 	blocks, _ := core.GenerateChain(b.config, b.blockchain.CurrentBlock(), ethash.NewFaker(), b.database, 1, func(number int, block *core.BlockGen) {
 		for _, tx := range b.pendingBlock.Transactions() {
 			block.AddTx(tx)
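Editorial aside (not part of the patch): a hedged sketch of driving the new SubscribeNewHead API. It assumes that committing a block on the simulated backend feeds the filter event system, so a header eventually shows up on the channel.

```go
package example

import (
	"context"
	"fmt"

	"github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
	"github.com/ethereum/go-ethereum/core/types"
)

func watchHeads(backend *backends.SimulatedBackend) error {
	heads := make(chan *types.Header, 1)
	sub, err := backend.SubscribeNewHead(context.Background(), heads)
	if err != nil {
		return err
	}
	defer sub.Unsubscribe()

	backend.Commit() // mine a block; its header should be delivered below

	select {
	case head := <-heads:
		fmt.Println("new head:", head.Number)
	case err := <-sub.Err():
		return err
	}
	return nil
}
```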
@@ -432,7 +688,7 @@ func (b *SimulatedBackend) AdjustTime(adjustment time.Duration) error {
 	statedb, _ := b.blockchain.State()

 	b.pendingBlock = blocks[0]
-	b.pendingState, _ = state.New(b.pendingBlock.Root(), statedb.Database())
+	b.pendingState, _ = state.New(b.pendingBlock.Root(), statedb.Database(), nil)

 	return nil
 }
@@ -502,22 +758,34 @@ func (fb *filterBackend) GetLogs(ctx context.Context, hash common.Hash) ([][]*types.Log, error) {
 }

 func (fb *filterBackend) SubscribeNewTxsEvent(ch chan<- core.NewTxsEvent) event.Subscription {
+	return nullSubscription()
+}
+
+func (fb *filterBackend) SubscribeChainEvent(ch chan<- core.ChainEvent) event.Subscription {
+	return fb.bc.SubscribeChainEvent(ch)
+}
+
+func (fb *filterBackend) SubscribeRemovedLogsEvent(ch chan<- core.RemovedLogsEvent) event.Subscription {
+	return fb.bc.SubscribeRemovedLogsEvent(ch)
+}
+
+func (fb *filterBackend) SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscription {
+	return fb.bc.SubscribeLogsEvent(ch)
+}
+
+func (fb *filterBackend) SubscribePendingLogsEvent(ch chan<- []*types.Log) event.Subscription {
+	return nullSubscription()
+}
+
+func (fb *filterBackend) BloomStatus() (uint64, uint64) { return 4096, 0 }
+
+func (fb *filterBackend) ServiceFilter(ctx context.Context, ms *bloombits.MatcherSession) {
+	panic("not supported")
+}
+
+func nullSubscription() event.Subscription {
 	return event.NewSubscription(func(quit <-chan struct{}) error {
 		<-quit
 		return nil
 	})
 }
-
-func (fb *filterBackend) SubscribeChainEvent(ch chan<- core.ChainEvent) event.Subscription {
-	return fb.bc.SubscribeChainEvent(ch)
-}
-
-func (fb *filterBackend) SubscribeRemovedLogsEvent(ch chan<- core.RemovedLogsEvent) event.Subscription {
-	return fb.bc.SubscribeRemovedLogsEvent(ch)
-}
-
-func (fb *filterBackend) SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscription {
-	return fb.bc.SubscribeLogsEvent(ch)
-}
-
-func (fb *filterBackend) BloomStatus() (uint64, uint64) { return 4096, 0 }
-
-func (fb *filterBackend) ServiceFilter(ctx context.Context, ms *bloombits.MatcherSession) {
-	panic("not supported")
-}
File diff suppressed because it is too large
@@ -49,7 +49,7 @@ type TransactOpts struct {
 	Nonce  *big.Int // Nonce to use for the transaction execution (nil = use pending state)
 	Signer SignerFn // Method to use for signing the transaction (mandatory)

-	Value    *big.Int // Funds to transfer along along the transaction (nil = 0 = no funds)
+	Value    *big.Int // Funds to transfer along the transaction (nil = 0 = no funds)
 	GasPrice *big.Int // Gas price to use for the transaction execution (nil = gas price oracle)
 	GasLimit uint64   // Gas limit to set for the transaction execution (0 = estimate)

@@ -171,12 +171,24 @@ func (c *BoundContract) Transact(opts *TransactOpts, method string, params ...interface{}) (*types.Transaction, error) {
 	if err != nil {
 		return nil, err
 	}
+	// todo(rjl493456442) check the method is payable or not,
+	// reject invalid transaction at the first place
 	return c.transact(opts, &c.address, input)
 }

+// RawTransact initiates a transaction with the given raw calldata as the input.
+// It's usually used to initiates transaction for invoking **Fallback** function.
+func (c *BoundContract) RawTransact(opts *TransactOpts, calldata []byte) (*types.Transaction, error) {
+	// todo(rjl493456442) check the method is payable or not,
+	// reject invalid transaction at the first place
+	return c.transact(opts, &c.address, calldata)
+}
+
 // Transfer initiates a plain transaction to move funds to the contract, calling
 // its default method if one is available.
 func (c *BoundContract) Transfer(opts *TransactOpts) (*types.Transaction, error) {
+	// todo(rjl493456442) check the payable fallback or receive is defined
+	// or not, reject invalid transaction at the first place
 	return c.transact(opts, &c.address, nil)
 }

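Editorial aside (not part of the patch): a short sketch of the new RawTransact entry point; `boundContract` and `auth` are placeholders for a bound contract and prepared transact options.

```go
package example

import (
	"fmt"

	"github.com/ethereum/go-ethereum/accounts/abi/bind"
)

// pokeFallback sends raw calldata straight to the contract, which is the way
// to exercise a Solidity fallback function from a binding.
func pokeFallback(boundContract *bind.BoundContract, auth *bind.TransactOpts) error {
	tx, err := boundContract.RawTransact(auth, []byte{0xde, 0xad, 0xbe, 0xef})
	if err != nil {
		return err
	}
	fmt.Println("fallback transaction sent:", tx.Hash().Hex())
	return nil
}
```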
@@ -252,9 +264,9 @@ func (c *BoundContract) FilterLogs(opts *FilterOpts, name string, query ...[]interface{}) (chan types.Log, event.Subscription, error) {
 		opts = new(FilterOpts)
 	}
 	// Append the event selector to the query parameters and construct the topic set
-	query = append([][]interface{}{{c.abi.Events[name].ID()}}, query...)
+	query = append([][]interface{}{{c.abi.Events[name].ID}}, query...)

-	topics, err := makeTopics(query...)
+	topics, err := abi.MakeTopics(query...)
 	if err != nil {
 		return nil, nil, err
 	}
@@ -301,9 +313,9 @@ func (c *BoundContract) WatchLogs(opts *WatchOpts, name string, query ...[]interface{}) (chan types.Log, event.Subscription, error) {
 		opts = new(WatchOpts)
 	}
 	// Append the event selector to the query parameters and construct the topic set
-	query = append([][]interface{}{{c.abi.Events[name].ID()}}, query...)
+	query = append([][]interface{}{{c.abi.Events[name].ID}}, query...)

-	topics, err := makeTopics(query...)
+	topics, err := abi.MakeTopics(query...)
 	if err != nil {
 		return nil, nil, err
 	}
@@ -337,7 +349,7 @@ func (c *BoundContract) UnpackLog(out interface{}, event string, log types.Log) error {
 			indexed = append(indexed, arg)
 		}
 	}
-	return parseTopics(out, indexed, log.Topics[1:])
+	return abi.ParseTopics(out, indexed, log.Topics[1:])
 }

 // UnpackLogIntoMap unpacks a retrieved log into the provided map.
@@ -353,7 +365,7 @@ func (c *BoundContract) UnpackLogIntoMap(out map[string]interface{}, event string, log types.Log) error {
 			indexed = append(indexed, arg)
 		}
 	}
-	return parseTopicsIntoMap(out, indexed, log.Topics[1:])
+	return abi.ParseTopicsIntoMap(out, indexed, log.Topics[1:])
 }

 // ensureContext is a helper method to ensure a context is not nil, even if the
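Editorial aside (not part of the patch): the switch to the exported abi.MakeTopics/ParseTopics helpers does not change how callers consume logs. A minimal sketch of UnpackLogIntoMap on a fetched log; the `received` event name matches the ABI used in the tests that follow.

```go
package example

import (
	"fmt"

	"github.com/ethereum/go-ethereum/accounts/abi/bind"
	"github.com/ethereum/go-ethereum/core/types"
)

func decodeReceived(contract *bind.BoundContract, logEntry types.Log) error {
	fields := make(map[string]interface{})
	if err := contract.UnpackLogIntoMap(fields, "received", logEntry); err != nil {
		return err
	}
	fmt.Printf("sender=%v amount=%v memo=%x\n", fields["sender"], fields["amount"], fields["memo"])
	return nil
}
```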
@@ -17,9 +17,9 @@
 package bind_test

 import (
-	"bytes"
 	"context"
 	"math/big"
+	"reflect"
 	"strings"
 	"testing"

@@ -34,8 +34,10 @@ import (
 )

 type mockCaller struct {
 	codeAtBlockNumber       *big.Int
 	callContractBlockNumber *big.Int
+	pendingCodeAtCalled       bool
+	pendingCallContractCalled bool
 }

 func (mc *mockCaller) CodeAt(ctx context.Context, contract common.Address, blockNumber *big.Int) ([]byte, error) {
@@ -47,6 +49,16 @@ func (mc *mockCaller) CallContract(ctx context.Context, call ethereum.CallMsg, blockNumber *big.Int) ([]byte, error) {
 	mc.callContractBlockNumber = blockNumber
 	return nil, nil
 }
+
+func (mc *mockCaller) PendingCodeAt(ctx context.Context, contract common.Address) ([]byte, error) {
+	mc.pendingCodeAtCalled = true
+	return nil, nil
+}
+
+func (mc *mockCaller) PendingCallContract(ctx context.Context, call ethereum.CallMsg) ([]byte, error) {
+	mc.pendingCallContractCalled = true
+	return nil, nil
+}
 func TestPassingBlockNumber(t *testing.T) {

 	mc := &mockCaller{}
@@ -82,57 +94,39 @@ func TestPassingBlockNumber(t *testing.T) {
 	if mc.codeAtBlockNumber != nil {
 		t.Fatalf("CodeAt() was passed a block number when it should not have been")
 	}
+
+	bc.Call(&bind.CallOpts{BlockNumber: blockNumber, Pending: true}, &ret, "something")
+
+	if !mc.pendingCallContractCalled {
+		t.Fatalf("CallContract() was not passed the block number")
+	}
+
+	if !mc.pendingCodeAtCalled {
+		t.Fatalf("CodeAt() was not passed the block number")
+	}
 }

 const hexData = "0x000000000000000000000000376c47978271565f56deb45495afa69e59c16ab200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000000000158"

 func TestUnpackIndexedStringTyLogIntoMap(t *testing.T) {
 	hash := crypto.Keccak256Hash([]byte("testName"))
-	mockLog := types.Log{
-		Address: common.HexToAddress("0x0"),
-		Topics: []common.Hash{
-			common.HexToHash("0x0"),
-			hash,
-		},
-		Data:        hexutil.MustDecode(hexData),
-		BlockNumber: uint64(26),
-		TxHash:      common.HexToHash("0x0"),
-		TxIndex:     111,
-		BlockHash:   common.BytesToHash([]byte{1, 2, 3, 4, 5}),
-		Index:       7,
-		Removed:     false,
+	topics := []common.Hash{
+		common.HexToHash("0x0"),
+		hash,
 	}
+	mockLog := newMockLog(topics, common.HexToHash("0x0"))

 	abiString := `[{"anonymous":false,"inputs":[{"indexed":true,"name":"name","type":"string"},{"indexed":false,"name":"sender","type":"address"},{"indexed":false,"name":"amount","type":"uint256"},{"indexed":false,"name":"memo","type":"bytes"}],"name":"received","type":"event"}]`
 	parsedAbi, _ := abi.JSON(strings.NewReader(abiString))
 	bc := bind.NewBoundContract(common.HexToAddress("0x0"), parsedAbi, nil, nil, nil)

-	receivedMap := make(map[string]interface{})
 	expectedReceivedMap := map[string]interface{}{
 		"name":   hash,
 		"sender": common.HexToAddress("0x376c47978271565f56DEB45495afa69E59c16Ab2"),
 		"amount": big.NewInt(1),
 		"memo":   []byte{88},
 	}
-	if err := bc.UnpackLogIntoMap(receivedMap, "received", mockLog); err != nil {
-		t.Error(err)
-	}
-
-	if len(receivedMap) != 4 {
-		t.Fatal("unpacked map expected to have length 4")
-	}
-	if receivedMap["name"] != expectedReceivedMap["name"] {
-		t.Error("unpacked map does not match expected map")
-	}
-	if receivedMap["sender"] != expectedReceivedMap["sender"] {
-		t.Error("unpacked map does not match expected map")
-	}
-	if receivedMap["amount"].(*big.Int).Cmp(expectedReceivedMap["amount"].(*big.Int)) != 0 {
-		t.Error("unpacked map does not match expected map")
-	}
-	if !bytes.Equal(receivedMap["memo"].([]byte), expectedReceivedMap["memo"].([]byte)) {
-		t.Error("unpacked map does not match expected map")
-	}
+	unpackAndCheck(t, bc, expectedReceivedMap, mockLog)
 }

 func TestUnpackIndexedSliceTyLogIntoMap(t *testing.T) {
@@ -141,51 +135,23 @@ func TestUnpackIndexedSliceTyLogIntoMap(t *testing.T) {
 		t.Fatal(err)
 	}
 	hash := crypto.Keccak256Hash(sliceBytes)
-	mockLog := types.Log{
-		Address: common.HexToAddress("0x0"),
-		Topics: []common.Hash{
-			common.HexToHash("0x0"),
-			hash,
-		},
-		Data:        hexutil.MustDecode(hexData),
-		BlockNumber: uint64(26),
-		TxHash:      common.HexToHash("0x0"),
-		TxIndex:     111,
-		BlockHash:   common.BytesToHash([]byte{1, 2, 3, 4, 5}),
-		Index:       7,
-		Removed:     false,
+	topics := []common.Hash{
+		common.HexToHash("0x0"),
+		hash,
 	}
+	mockLog := newMockLog(topics, common.HexToHash("0x0"))

 	abiString := `[{"anonymous":false,"inputs":[{"indexed":true,"name":"names","type":"string[]"},{"indexed":false,"name":"sender","type":"address"},{"indexed":false,"name":"amount","type":"uint256"},{"indexed":false,"name":"memo","type":"bytes"}],"name":"received","type":"event"}]`
 	parsedAbi, _ := abi.JSON(strings.NewReader(abiString))
 	bc := bind.NewBoundContract(common.HexToAddress("0x0"), parsedAbi, nil, nil, nil)

-	receivedMap := make(map[string]interface{})
 	expectedReceivedMap := map[string]interface{}{
 		"names":  hash,
 		"sender": common.HexToAddress("0x376c47978271565f56DEB45495afa69E59c16Ab2"),
 		"amount": big.NewInt(1),
 		"memo":   []byte{88},
 	}
-	if err := bc.UnpackLogIntoMap(receivedMap, "received", mockLog); err != nil {
-		t.Error(err)
-	}
-
-	if len(receivedMap) != 4 {
-		t.Fatal("unpacked map expected to have length 4")
-	}
-	if receivedMap["names"] != expectedReceivedMap["names"] {
-		t.Error("unpacked map does not match expected map")
-	}
-	if receivedMap["sender"] != expectedReceivedMap["sender"] {
-		t.Error("unpacked map does not match expected map")
-	}
-	if receivedMap["amount"].(*big.Int).Cmp(expectedReceivedMap["amount"].(*big.Int)) != 0 {
-		t.Error("unpacked map does not match expected map")
-	}
-	if !bytes.Equal(receivedMap["memo"].([]byte), expectedReceivedMap["memo"].([]byte)) {
-		t.Error("unpacked map does not match expected map")
-	}
+	unpackAndCheck(t, bc, expectedReceivedMap, mockLog)
 }

 func TestUnpackIndexedArrayTyLogIntoMap(t *testing.T) {
@@ -194,51 +160,23 @@ func TestUnpackIndexedArrayTyLogIntoMap(t *testing.T) {
 		t.Fatal(err)
 	}
 	hash := crypto.Keccak256Hash(arrBytes)
-	mockLog := types.Log{
-		Address: common.HexToAddress("0x0"),
-		Topics: []common.Hash{
-			common.HexToHash("0x0"),
-			hash,
-		},
-		Data:        hexutil.MustDecode(hexData),
-		BlockNumber: uint64(26),
-		TxHash:      common.HexToHash("0x0"),
-		TxIndex:     111,
-		BlockHash:   common.BytesToHash([]byte{1, 2, 3, 4, 5}),
-		Index:       7,
-		Removed:     false,
+	topics := []common.Hash{
+		common.HexToHash("0x0"),
+		hash,
 	}
+	mockLog := newMockLog(topics, common.HexToHash("0x0"))

 	abiString := `[{"anonymous":false,"inputs":[{"indexed":true,"name":"addresses","type":"address[2]"},{"indexed":false,"name":"sender","type":"address"},{"indexed":false,"name":"amount","type":"uint256"},{"indexed":false,"name":"memo","type":"bytes"}],"name":"received","type":"event"}]`
 	parsedAbi, _ := abi.JSON(strings.NewReader(abiString))
 	bc := bind.NewBoundContract(common.HexToAddress("0x0"), parsedAbi, nil, nil, nil)

-	receivedMap := make(map[string]interface{})
 	expectedReceivedMap := map[string]interface{}{
 		"addresses": hash,
 		"sender":    common.HexToAddress("0x376c47978271565f56DEB45495afa69E59c16Ab2"),
 		"amount":    big.NewInt(1),
 		"memo":      []byte{88},
 	}
-	if err := bc.UnpackLogIntoMap(receivedMap, "received", mockLog); err != nil {
-		t.Error(err)
-	}
-
-	if len(receivedMap) != 4 {
-		t.Fatal("unpacked map expected to have length 4")
-	}
-	if receivedMap["addresses"] != expectedReceivedMap["addresses"] {
-		t.Error("unpacked map does not match expected map")
-	}
-	if receivedMap["sender"] != expectedReceivedMap["sender"] {
-		t.Error("unpacked map does not match expected map")
-	}
-	if receivedMap["amount"].(*big.Int).Cmp(expectedReceivedMap["amount"].(*big.Int)) != 0 {
-		t.Error("unpacked map does not match expected map")
-	}
-	if !bytes.Equal(receivedMap["memo"].([]byte), expectedReceivedMap["memo"].([]byte)) {
-		t.Error("unpacked map does not match expected map")
-	}
+	unpackAndCheck(t, bc, expectedReceivedMap, mockLog)
 }

 func TestUnpackIndexedFuncTyLogIntoMap(t *testing.T) {
@@ -249,99 +187,72 @@ func TestUnpackIndexedFuncTyLogIntoMap(t *testing.T) {
 	functionTyBytes := append(addrBytes, functionSelector...)
 	var functionTy [24]byte
 	copy(functionTy[:], functionTyBytes[0:24])
-	mockLog := types.Log{
-		Address: common.HexToAddress("0x0"),
-		Topics: []common.Hash{
-			common.HexToHash("0x99b5620489b6ef926d4518936cfec15d305452712b88bd59da2d9c10fb0953e8"),
-			common.BytesToHash(functionTyBytes),
-		},
-		Data:        hexutil.MustDecode(hexData),
-		BlockNumber: uint64(26),
-		TxHash:      common.HexToHash("0x5c698f13940a2153440c6d19660878bc90219d9298fdcf37365aa8d88d40fc42"),
-		TxIndex:     111,
-		BlockHash:   common.BytesToHash([]byte{1, 2, 3, 4, 5}),
-		Index:       7,
-		Removed:     false,
+	topics := []common.Hash{
+		common.HexToHash("0x99b5620489b6ef926d4518936cfec15d305452712b88bd59da2d9c10fb0953e8"),
+		common.BytesToHash(functionTyBytes),
 	}
+	mockLog := newMockLog(topics, common.HexToHash("0x5c698f13940a2153440c6d19660878bc90219d9298fdcf37365aa8d88d40fc42"))
 	abiString := `[{"anonymous":false,"inputs":[{"indexed":true,"name":"function","type":"function"},{"indexed":false,"name":"sender","type":"address"},{"indexed":false,"name":"amount","type":"uint256"},{"indexed":false,"name":"memo","type":"bytes"}],"name":"received","type":"event"}]`
 	parsedAbi, _ := abi.JSON(strings.NewReader(abiString))
 	bc := bind.NewBoundContract(common.HexToAddress("0x0"), parsedAbi, nil, nil, nil)

-	receivedMap := make(map[string]interface{})
 	expectedReceivedMap := map[string]interface{}{
 		"function": functionTy,
 		"sender":   common.HexToAddress("0x376c47978271565f56DEB45495afa69E59c16Ab2"),
 		"amount":   big.NewInt(1),
 		"memo":     []byte{88},
 	}
-	if err := bc.UnpackLogIntoMap(receivedMap, "received", mockLog); err != nil {
-		t.Error(err)
-	}
-
-	if len(receivedMap) != 4 {
-		t.Fatal("unpacked map expected to have length 4")
-	}
-	if receivedMap["function"] != expectedReceivedMap["function"] {
-		t.Error("unpacked map does not match expected map")
-	}
-	if receivedMap["sender"] != expectedReceivedMap["sender"] {
-		t.Error("unpacked map does not match expected map")
-	}
-	if receivedMap["amount"].(*big.Int).Cmp(expectedReceivedMap["amount"].(*big.Int)) != 0 {
-		t.Error("unpacked map does not match expected map")
-	}
-	if !bytes.Equal(receivedMap["memo"].([]byte), expectedReceivedMap["memo"].([]byte)) {
-		t.Error("unpacked map does not match expected map")
-	}
+	unpackAndCheck(t, bc, expectedReceivedMap, mockLog)
 }

 func TestUnpackIndexedBytesTyLogIntoMap(t *testing.T) {
-	byts := []byte{1, 2, 3, 4, 5}
-	hash := crypto.Keccak256Hash(byts)
-	mockLog := types.Log{
-		Address: common.HexToAddress("0x0"),
-		Topics: []common.Hash{
-			common.HexToHash("0x99b5620489b6ef926d4518936cfec15d305452712b88bd59da2d9c10fb0953e8"),
-			hash,
-		},
-		Data:        hexutil.MustDecode(hexData),
-		BlockNumber: uint64(26),
-		TxHash:      common.HexToHash("0x5c698f13940a2153440c6d19660878bc90219d9298fdcf37365aa8d88d40fc42"),
-		TxIndex:     111,
-		BlockHash:   common.BytesToHash([]byte{1, 2, 3, 4, 5}),
-		Index:       7,
-		Removed:     false,
+	bytes := []byte{1, 2, 3, 4, 5}
+	hash := crypto.Keccak256Hash(bytes)
+	topics := []common.Hash{
+		common.HexToHash("0x99b5620489b6ef926d4518936cfec15d305452712b88bd59da2d9c10fb0953e8"),
+		hash,
 	}
+	mockLog := newMockLog(topics, common.HexToHash("0x5c698f13940a2153440c6d19660878bc90219d9298fdcf37365aa8d88d40fc42"))

 	abiString := `[{"anonymous":false,"inputs":[{"indexed":true,"name":"content","type":"bytes"},{"indexed":false,"name":"sender","type":"address"},{"indexed":false,"name":"amount","type":"uint256"},{"indexed":false,"name":"memo","type":"bytes"}],"name":"received","type":"event"}]`
 	parsedAbi, _ := abi.JSON(strings.NewReader(abiString))
 	bc := bind.NewBoundContract(common.HexToAddress("0x0"), parsedAbi, nil, nil, nil)

-	receivedMap := make(map[string]interface{})
 	expectedReceivedMap := map[string]interface{}{
 		"content": hash,
 		"sender":  common.HexToAddress("0x376c47978271565f56DEB45495afa69E59c16Ab2"),
 		"amount":  big.NewInt(1),
 		"memo":    []byte{88},
 	}
-	if err := bc.UnpackLogIntoMap(receivedMap, "received", mockLog); err != nil {
-		t.Error(err)
-	}
-
-	if len(receivedMap) != 4 {
-		t.Fatal("unpacked map expected to have length 4")
-	}
-	if receivedMap["content"] != expectedReceivedMap["content"] {
-		t.Error("unpacked map does not match expected map")
-	}
-	if receivedMap["sender"] != expectedReceivedMap["sender"] {
-		t.Error("unpacked map does not match expected map")
-	}
-	if receivedMap["amount"].(*big.Int).Cmp(expectedReceivedMap["amount"].(*big.Int)) != 0 {
-		t.Error("unpacked map does not match expected map")
-	}
-	if !bytes.Equal(receivedMap["memo"].([]byte), expectedReceivedMap["memo"].([]byte)) {
-		t.Error("unpacked map does not match expected map")
-	}
+	unpackAndCheck(t, bc, expectedReceivedMap, mockLog)
+}
+
+func unpackAndCheck(t *testing.T, bc *bind.BoundContract, expected map[string]interface{}, mockLog types.Log) {
+	received := make(map[string]interface{})
+	if err := bc.UnpackLogIntoMap(received, "received", mockLog); err != nil {
+		t.Error(err)
+	}
+
+	if len(received) != len(expected) {
+		t.Fatalf("unpacked map length %v not equal expected length of %v", len(received), len(expected))
+	}
+	for name, elem := range expected {
+		if !reflect.DeepEqual(elem, received[name]) {
+			t.Errorf("field %v does not match expected, want %v, got %v", name, elem, received[name])
+		}
+	}
+}
+
+func newMockLog(topics []common.Hash, txHash common.Hash) types.Log {
+	return types.Log{
+		Address:     common.HexToAddress("0x0"),
+		Topics:      topics,
+		Data:        hexutil.MustDecode(hexData),
+		BlockNumber: uint64(26),
+		TxHash:      txHash,
+		TxIndex:     111,
+		BlockHash:   common.BytesToHash([]byte{1, 2, 3, 4, 5}),
+		Index:       7,
+		Removed:     false,
+	}
 }
@@ -77,6 +77,8 @@ func Bind(types []string, abis []string, bytecodes []string, fsigs []map[string]string, pkg string, lang Lang, libs map[string]string, aliases map[string]string) (string, error) {
 		calls     = make(map[string]*tmplMethod)
 		transacts = make(map[string]*tmplMethod)
 		events    = make(map[string]*tmplEvent)
+		fallback  *tmplMethod
+		receive   *tmplMethod

 		// identifiers are used to detect duplicated identifier of function
 		// and event. For all calls, transacts and events, abigen will generate
@@ -92,7 +94,7 @@ func Bind(types []string, abis []string, bytecodes []string, fsigs []map[string]string, pkg string, lang Lang, libs map[string]string, aliases map[string]string) (string, error) {
 		normalizedName := methodNormalizer[lang](alias(aliases, original.Name))
 		// Ensure there is no duplicated identifier
 		var identifiers = callIdentifiers
-		if !original.Const {
+		if !original.IsConstant() {
 			identifiers = transactIdentifiers
 		}
 		if identifiers[normalizedName] {
@@ -121,7 +123,7 @@ func Bind(types []string, abis []string, bytecodes []string, fsigs []map[string]string, pkg string, lang Lang, libs map[string]string, aliases map[string]string) (string, error) {
 			}
 		}
 		// Append the methods to the call or transact lists
-		if original.Const {
+		if original.IsConstant() {
 			calls[original.Name] = &tmplMethod{Original: original, Normalized: normalized, Structured: structured(original.Outputs)}
 		} else {
 			transacts[original.Name] = &tmplMethod{Original: original, Normalized: normalized, Structured: structured(original.Outputs)}
@@ -156,7 +158,13 @@ func Bind(types []string, abis []string, bytecodes []string, fsigs []map[string]string, pkg string, lang Lang, libs map[string]string, aliases map[string]string) (string, error) {
 		// Append the event to the accumulator list
 		events[original.Name] = &tmplEvent{Original: original, Normalized: normalized}
 	}
+	// Add two special fallback functions if they exist
+	if evmABI.HasFallback() {
+		fallback = &tmplMethod{Original: evmABI.Fallback}
+	}
+	if evmABI.HasReceive() {
+		receive = &tmplMethod{Original: evmABI.Receive}
+	}
 	// There is no easy way to pass arbitrary java objects to the Go side.
 	if len(structs) > 0 && lang == LangJava {
 		return "", errors.New("java binding for tuple arguments is not supported yet")
@@ -169,6 +177,8 @@ func Bind(types []string, abis []string, bytecodes []string, fsigs []map[string]string, pkg string, lang Lang, libs map[string]string, aliases map[string]string) (string, error) {
 		Constructor: evmABI.Constructor,
 		Calls:       calls,
 		Transacts:   transacts,
+		Fallback:    fallback,
+		Receive:     receive,
 		Events:      events,
 		Libraries:   make(map[string]string),
 	}
@@ -210,8 +220,6 @@ func Bind(types []string, abis []string, bytecodes []string, fsigs []map[string]string, pkg string, lang Lang, libs map[string]string, aliases map[string]string) (string, error) {
 		"bindtype":      bindType[lang],
 		"bindtopictype": bindTopicType[lang],
 		"namedtype":     namedType[lang],
-		"formatmethod":  formatMethod,
-		"formatevent":   formatEvent,
 		"capitalise":    capitalise,
 		"decapitalise":  decapitalise,
 	}
@@ -527,9 +535,7 @@ var methodNormalizer = map[Lang]func(string) string{
 }

 // capitalise makes a camel-case string which starts with an upper case character.
-func capitalise(input string) string {
-	return abi.ToCamelCase(input)
-}
+var capitalise = abi.ToCamelCase

 // decapitalise makes a camel-case string which starts with a lower case character.
 func decapitalise(input string) string {
@@ -578,63 +584,3 @@ func hasStruct(t abi.Type) bool {
 		return false
 	}
 }
-
-// resolveArgName converts a raw argument representation into a user friendly format.
-func resolveArgName(arg abi.Argument, structs map[string]*tmplStruct) string {
-	var (
-		prefix   string
-		embedded string
-		typ      = &arg.Type
-	)
-loop:
-	for {
-		switch typ.T {
-		case abi.SliceTy:
-			prefix += "[]"
-		case abi.ArrayTy:
-			prefix += fmt.Sprintf("[%d]", typ.Size)
-		default:
-			embedded = typ.TupleRawName + typ.String()
-			break loop
-		}
-		typ = typ.Elem
-	}
-	if s, exist := structs[embedded]; exist {
-		return prefix + s.Name
-	} else {
-		return arg.Type.String()
-	}
-}
-
-// formatMethod transforms raw method representation into a user friendly one.
-func formatMethod(method abi.Method, structs map[string]*tmplStruct) string {
-	inputs := make([]string, len(method.Inputs))
-	for i, input := range method.Inputs {
-		inputs[i] = fmt.Sprintf("%v %v", resolveArgName(input, structs), input.Name)
-	}
-	outputs := make([]string, len(method.Outputs))
-	for i, output := range method.Outputs {
-		outputs[i] = resolveArgName(output, structs)
-		if len(output.Name) > 0 {
-			outputs[i] += fmt.Sprintf(" %v", output.Name)
-		}
-	}
-	constant := ""
-	if method.Const {
-		constant = "constant "
-	}
-	return fmt.Sprintf("function %v(%v) %sreturns(%v)", method.RawName, strings.Join(inputs, ", "), constant, strings.Join(outputs, ", "))
-}
-
-// formatEvent transforms raw event representation into a user friendly one.
-func formatEvent(event abi.Event, structs map[string]*tmplStruct) string {
-	inputs := make([]string, len(event.Inputs))
-	for i, input := range event.Inputs {
-		if input.Indexed {
-			inputs[i] = fmt.Sprintf("%v indexed %v", resolveArgName(input, structs), input.Name)
-		} else {
-			inputs[i] = fmt.Sprintf("%v %v", resolveArgName(input, structs), input.Name)
-		}
-	}
-	return fmt.Sprintf("event %v(%v)", event.RawName, strings.Join(inputs, ", "))
-}
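Editorial aside (not part of the patch): with Fallback and Receive threaded through the binder, abigen's Go output gains wrappers that funnel into RawTransact. The snippet below is only a hand-written approximation of that generated shape for a hypothetical contract type `Example`, not the literal template output.

```go
package example

import (
	"github.com/ethereum/go-ethereum/accounts/abi/bind"
	"github.com/ethereum/go-ethereum/core/types"
)

// ExampleTransactor stands in for an abigen-generated transactor type.
type ExampleTransactor struct {
	contract *bind.BoundContract
}

// Fallback approximates the generated wrapper: raw calldata goes straight to
// the contract's fallback function via RawTransact.
func (e *ExampleTransactor) Fallback(opts *bind.TransactOpts, calldata []byte) (*types.Transaction, error) {
	return e.contract.RawTransact(opts, calldata)
}

// Receive approximates the generated wrapper for the receive function, which
// takes no calldata at all.
func (e *ExampleTransactor) Receive(opts *bind.TransactOpts) (*types.Transaction, error) {
	return e.contract.RawTransact(opts, nil)
}
```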
File diff suppressed because one or more lines are too long
@@ -35,6 +35,8 @@ type tmplContract struct {
|
|||||||
Constructor abi.Method // Contract constructor for deploy parametrization
|
Constructor abi.Method // Contract constructor for deploy parametrization
|
||||||
Calls map[string]*tmplMethod // Contract calls that only read state data
|
Calls map[string]*tmplMethod // Contract calls that only read state data
|
||||||
Transacts map[string]*tmplMethod // Contract calls that write state data
|
Transacts map[string]*tmplMethod // Contract calls that write state data
|
||||||
|
Fallback *tmplMethod // Additional special fallback function
|
||||||
|
Receive *tmplMethod // Additional special receive function
|
||||||
Events map[string]*tmplEvent // Contract events accessors
|
Events map[string]*tmplEvent // Contract events accessors
|
||||||
Libraries map[string]string // Same as tmplData, but filtered to only keep what the contract needs
|
Libraries map[string]string // Same as tmplData, but filtered to only keep what the contract needs
|
||||||
Library bool // Indicator whether the contract is a library
|
Library bool // Indicator whether the contract is a library
|
||||||
@@ -62,7 +64,7 @@ type tmplField struct {
 	SolKind abi.Type // Raw abi type information
 }
 
-// tmplStruct is a wrapper around an abi.tuple contains a auto-generated
+// tmplStruct is a wrapper around an abi.tuple contains an auto-generated
 // struct name.
 type tmplStruct struct {
 	Name string // Auto-generated struct name(before solidity v0.5.11) or raw name.
@@ -101,7 +103,6 @@ var (
 	_ = big.NewInt
 	_ = strings.NewReader
 	_ = ethereum.NotFound
-	_ = abi.U256
 	_ = bind.Bind
 	_ = common.Big1
 	_ = types.BloomLookup
@@ -296,7 +297,7 @@ var (
|
|||||||
{{range .Calls}}
|
{{range .Calls}}
|
||||||
// {{.Normalized.Name}} is a free data retrieval call binding the contract method 0x{{printf "%x" .Original.ID}}.
|
// {{.Normalized.Name}} is a free data retrieval call binding the contract method 0x{{printf "%x" .Original.ID}}.
|
||||||
//
|
//
|
||||||
// Solidity: {{formatmethod .Original $structs}}
|
// Solidity: {{.Original.String}}
|
||||||
func (_{{$contract.Type}} *{{$contract.Type}}Caller) {{.Normalized.Name}}(opts *bind.CallOpts {{range .Normalized.Inputs}}, {{.Name}} {{bindtype .Type $structs}} {{end}}) ({{if .Structured}}struct{ {{range .Normalized.Outputs}}{{.Name}} {{bindtype .Type $structs}};{{end}} },{{else}}{{range .Normalized.Outputs}}{{bindtype .Type $structs}},{{end}}{{end}} error) {
|
func (_{{$contract.Type}} *{{$contract.Type}}Caller) {{.Normalized.Name}}(opts *bind.CallOpts {{range .Normalized.Inputs}}, {{.Name}} {{bindtype .Type $structs}} {{end}}) ({{if .Structured}}struct{ {{range .Normalized.Outputs}}{{.Name}} {{bindtype .Type $structs}};{{end}} },{{else}}{{range .Normalized.Outputs}}{{bindtype .Type $structs}},{{end}}{{end}} error) {
|
||||||
{{if .Structured}}ret := new(struct{
|
{{if .Structured}}ret := new(struct{
|
||||||
{{range .Normalized.Outputs}}{{.Name}} {{bindtype .Type $structs}}
|
{{range .Normalized.Outputs}}{{.Name}} {{bindtype .Type $structs}}
|
||||||
@@ -315,14 +316,14 @@ var (
|
|||||||
|
|
||||||
// {{.Normalized.Name}} is a free data retrieval call binding the contract method 0x{{printf "%x" .Original.ID}}.
|
// {{.Normalized.Name}} is a free data retrieval call binding the contract method 0x{{printf "%x" .Original.ID}}.
|
||||||
//
|
//
|
||||||
// Solidity: {{formatmethod .Original $structs}}
|
// Solidity: {{.Original.String}}
|
||||||
func (_{{$contract.Type}} *{{$contract.Type}}Session) {{.Normalized.Name}}({{range $i, $_ := .Normalized.Inputs}}{{if ne $i 0}},{{end}} {{.Name}} {{bindtype .Type $structs}} {{end}}) ({{if .Structured}}struct{ {{range .Normalized.Outputs}}{{.Name}} {{bindtype .Type $structs}};{{end}} }, {{else}} {{range .Normalized.Outputs}}{{bindtype .Type $structs}},{{end}} {{end}} error) {
|
func (_{{$contract.Type}} *{{$contract.Type}}Session) {{.Normalized.Name}}({{range $i, $_ := .Normalized.Inputs}}{{if ne $i 0}},{{end}} {{.Name}} {{bindtype .Type $structs}} {{end}}) ({{if .Structured}}struct{ {{range .Normalized.Outputs}}{{.Name}} {{bindtype .Type $structs}};{{end}} }, {{else}} {{range .Normalized.Outputs}}{{bindtype .Type $structs}},{{end}} {{end}} error) {
|
||||||
return _{{$contract.Type}}.Contract.{{.Normalized.Name}}(&_{{$contract.Type}}.CallOpts {{range .Normalized.Inputs}}, {{.Name}}{{end}})
|
return _{{$contract.Type}}.Contract.{{.Normalized.Name}}(&_{{$contract.Type}}.CallOpts {{range .Normalized.Inputs}}, {{.Name}}{{end}})
|
||||||
}
|
}
|
||||||
|
|
||||||
// {{.Normalized.Name}} is a free data retrieval call binding the contract method 0x{{printf "%x" .Original.ID}}.
|
// {{.Normalized.Name}} is a free data retrieval call binding the contract method 0x{{printf "%x" .Original.ID}}.
|
||||||
//
|
//
|
||||||
// Solidity: {{formatmethod .Original $structs}}
|
// Solidity: {{.Original.String}}
|
||||||
func (_{{$contract.Type}} *{{$contract.Type}}CallerSession) {{.Normalized.Name}}({{range $i, $_ := .Normalized.Inputs}}{{if ne $i 0}},{{end}} {{.Name}} {{bindtype .Type $structs}} {{end}}) ({{if .Structured}}struct{ {{range .Normalized.Outputs}}{{.Name}} {{bindtype .Type $structs}};{{end}} }, {{else}} {{range .Normalized.Outputs}}{{bindtype .Type $structs}},{{end}} {{end}} error) {
|
func (_{{$contract.Type}} *{{$contract.Type}}CallerSession) {{.Normalized.Name}}({{range $i, $_ := .Normalized.Inputs}}{{if ne $i 0}},{{end}} {{.Name}} {{bindtype .Type $structs}} {{end}}) ({{if .Structured}}struct{ {{range .Normalized.Outputs}}{{.Name}} {{bindtype .Type $structs}};{{end}} }, {{else}} {{range .Normalized.Outputs}}{{bindtype .Type $structs}},{{end}} {{end}} error) {
|
||||||
return _{{$contract.Type}}.Contract.{{.Normalized.Name}}(&_{{$contract.Type}}.CallOpts {{range .Normalized.Inputs}}, {{.Name}}{{end}})
|
return _{{$contract.Type}}.Contract.{{.Normalized.Name}}(&_{{$contract.Type}}.CallOpts {{range .Normalized.Inputs}}, {{.Name}}{{end}})
|
||||||
}
|
}
|
||||||
@@ -331,26 +332,72 @@ var (
|
|||||||
{{range .Transacts}}
|
{{range .Transacts}}
|
||||||
// {{.Normalized.Name}} is a paid mutator transaction binding the contract method 0x{{printf "%x" .Original.ID}}.
|
// {{.Normalized.Name}} is a paid mutator transaction binding the contract method 0x{{printf "%x" .Original.ID}}.
|
||||||
//
|
//
|
||||||
// Solidity: {{formatmethod .Original $structs}}
|
// Solidity: {{.Original.String}}
|
||||||
func (_{{$contract.Type}} *{{$contract.Type}}Transactor) {{.Normalized.Name}}(opts *bind.TransactOpts {{range .Normalized.Inputs}}, {{.Name}} {{bindtype .Type $structs}} {{end}}) (*types.Transaction, error) {
|
func (_{{$contract.Type}} *{{$contract.Type}}Transactor) {{.Normalized.Name}}(opts *bind.TransactOpts {{range .Normalized.Inputs}}, {{.Name}} {{bindtype .Type $structs}} {{end}}) (*types.Transaction, error) {
|
||||||
return _{{$contract.Type}}.contract.Transact(opts, "{{.Original.Name}}" {{range .Normalized.Inputs}}, {{.Name}}{{end}})
|
return _{{$contract.Type}}.contract.Transact(opts, "{{.Original.Name}}" {{range .Normalized.Inputs}}, {{.Name}}{{end}})
|
||||||
}
|
}
|
||||||
|
|
||||||
// {{.Normalized.Name}} is a paid mutator transaction binding the contract method 0x{{printf "%x" .Original.ID}}.
|
// {{.Normalized.Name}} is a paid mutator transaction binding the contract method 0x{{printf "%x" .Original.ID}}.
|
||||||
//
|
//
|
||||||
// Solidity: {{formatmethod .Original $structs}}
|
// Solidity: {{.Original.String}}
|
||||||
func (_{{$contract.Type}} *{{$contract.Type}}Session) {{.Normalized.Name}}({{range $i, $_ := .Normalized.Inputs}}{{if ne $i 0}},{{end}} {{.Name}} {{bindtype .Type $structs}} {{end}}) (*types.Transaction, error) {
|
func (_{{$contract.Type}} *{{$contract.Type}}Session) {{.Normalized.Name}}({{range $i, $_ := .Normalized.Inputs}}{{if ne $i 0}},{{end}} {{.Name}} {{bindtype .Type $structs}} {{end}}) (*types.Transaction, error) {
|
||||||
return _{{$contract.Type}}.Contract.{{.Normalized.Name}}(&_{{$contract.Type}}.TransactOpts {{range $i, $_ := .Normalized.Inputs}}, {{.Name}}{{end}})
|
return _{{$contract.Type}}.Contract.{{.Normalized.Name}}(&_{{$contract.Type}}.TransactOpts {{range $i, $_ := .Normalized.Inputs}}, {{.Name}}{{end}})
|
||||||
}
|
}
|
||||||
|
|
||||||
// {{.Normalized.Name}} is a paid mutator transaction binding the contract method 0x{{printf "%x" .Original.ID}}.
|
// {{.Normalized.Name}} is a paid mutator transaction binding the contract method 0x{{printf "%x" .Original.ID}}.
|
||||||
//
|
//
|
||||||
// Solidity: {{formatmethod .Original $structs}}
|
// Solidity: {{.Original.String}}
|
||||||
func (_{{$contract.Type}} *{{$contract.Type}}TransactorSession) {{.Normalized.Name}}({{range $i, $_ := .Normalized.Inputs}}{{if ne $i 0}},{{end}} {{.Name}} {{bindtype .Type $structs}} {{end}}) (*types.Transaction, error) {
|
func (_{{$contract.Type}} *{{$contract.Type}}TransactorSession) {{.Normalized.Name}}({{range $i, $_ := .Normalized.Inputs}}{{if ne $i 0}},{{end}} {{.Name}} {{bindtype .Type $structs}} {{end}}) (*types.Transaction, error) {
|
||||||
return _{{$contract.Type}}.Contract.{{.Normalized.Name}}(&_{{$contract.Type}}.TransactOpts {{range $i, $_ := .Normalized.Inputs}}, {{.Name}}{{end}})
|
return _{{$contract.Type}}.Contract.{{.Normalized.Name}}(&_{{$contract.Type}}.TransactOpts {{range $i, $_ := .Normalized.Inputs}}, {{.Name}}{{end}})
|
||||||
}
|
}
|
||||||
{{end}}
|
{{end}}
|
||||||
|
|
||||||
|
{{if .Fallback}}
|
||||||
|
// Fallback is a paid mutator transaction binding the contract fallback function.
|
||||||
|
//
|
||||||
|
// Solidity: {{.Fallback.Original.String}}
|
||||||
|
func (_{{$contract.Type}} *{{$contract.Type}}Transactor) Fallback(opts *bind.TransactOpts, calldata []byte) (*types.Transaction, error) {
|
||||||
|
return _{{$contract.Type}}.contract.RawTransact(opts, calldata)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Fallback is a paid mutator transaction binding the contract fallback function.
|
||||||
|
//
|
||||||
|
// Solidity: {{.Fallback.Original.String}}
|
||||||
|
func (_{{$contract.Type}} *{{$contract.Type}}Session) Fallback(calldata []byte) (*types.Transaction, error) {
|
||||||
|
return _{{$contract.Type}}.Contract.Fallback(&_{{$contract.Type}}.TransactOpts, calldata)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Fallback is a paid mutator transaction binding the contract fallback function.
|
||||||
|
//
|
||||||
|
// Solidity: {{.Fallback.Original.String}}
|
||||||
|
func (_{{$contract.Type}} *{{$contract.Type}}TransactorSession) Fallback(calldata []byte) (*types.Transaction, error) {
|
||||||
|
return _{{$contract.Type}}.Contract.Fallback(&_{{$contract.Type}}.TransactOpts, calldata)
|
||||||
|
}
|
||||||
|
{{end}}
|
||||||
|
|
||||||
|
{{if .Receive}}
|
||||||
|
// Receive is a paid mutator transaction binding the contract receive function.
|
||||||
|
//
|
||||||
|
// Solidity: {{.Receive.Original.String}}
|
||||||
|
func (_{{$contract.Type}} *{{$contract.Type}}Transactor) Receive(opts *bind.TransactOpts) (*types.Transaction, error) {
|
||||||
|
return _{{$contract.Type}}.contract.RawTransact(opts, nil) // calldata is disallowed for receive function
|
||||||
|
}
|
||||||
|
|
||||||
|
// Receive is a paid mutator transaction binding the contract receive function.
|
||||||
|
//
|
||||||
|
// Solidity: {{.Receive.Original.String}}
|
||||||
|
func (_{{$contract.Type}} *{{$contract.Type}}Session) Receive() (*types.Transaction, error) {
|
||||||
|
return _{{$contract.Type}}.Contract.Receive(&_{{$contract.Type}}.TransactOpts)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Receive is a paid mutator transaction binding the contract receive function.
|
||||||
|
//
|
||||||
|
// Solidity: {{.Receive.Original.String}}
|
||||||
|
func (_{{$contract.Type}} *{{$contract.Type}}TransactorSession) Receive() (*types.Transaction, error) {
|
||||||
|
return _{{$contract.Type}}.Contract.Receive(&_{{$contract.Type}}.TransactOpts)
|
||||||
|
}
|
||||||
|
{{end}}
|
||||||
|
|
||||||
{{range .Events}}
|
{{range .Events}}
|
||||||
// {{$contract.Type}}{{.Normalized.Name}}Iterator is returned from Filter{{.Normalized.Name}} and is used to iterate over the raw logs and unpacked data for {{.Normalized.Name}} events raised by the {{$contract.Type}} contract.
|
// {{$contract.Type}}{{.Normalized.Name}}Iterator is returned from Filter{{.Normalized.Name}} and is used to iterate over the raw logs and unpacked data for {{.Normalized.Name}} events raised by the {{$contract.Type}} contract.
|
||||||
type {{$contract.Type}}{{.Normalized.Name}}Iterator struct {
|
type {{$contract.Type}}{{.Normalized.Name}}Iterator struct {
|
||||||
@@ -424,7 +471,7 @@ var (
|
|||||||
|
|
||||||
// Filter{{.Normalized.Name}} is a free log retrieval operation binding the contract event 0x{{printf "%x" .Original.ID}}.
|
// Filter{{.Normalized.Name}} is a free log retrieval operation binding the contract event 0x{{printf "%x" .Original.ID}}.
|
||||||
//
|
//
|
||||||
// Solidity: {{formatevent .Original $structs}}
|
// Solidity: {{.Original.String}}
|
||||||
func (_{{$contract.Type}} *{{$contract.Type}}Filterer) Filter{{.Normalized.Name}}(opts *bind.FilterOpts{{range .Normalized.Inputs}}{{if .Indexed}}, {{.Name}} []{{bindtype .Type $structs}}{{end}}{{end}}) (*{{$contract.Type}}{{.Normalized.Name}}Iterator, error) {
|
func (_{{$contract.Type}} *{{$contract.Type}}Filterer) Filter{{.Normalized.Name}}(opts *bind.FilterOpts{{range .Normalized.Inputs}}{{if .Indexed}}, {{.Name}} []{{bindtype .Type $structs}}{{end}}{{end}}) (*{{$contract.Type}}{{.Normalized.Name}}Iterator, error) {
|
||||||
{{range .Normalized.Inputs}}
|
{{range .Normalized.Inputs}}
|
||||||
{{if .Indexed}}var {{.Name}}Rule []interface{}
|
{{if .Indexed}}var {{.Name}}Rule []interface{}
|
||||||
@@ -441,7 +488,7 @@ var (
|
|||||||
|
|
||||||
// Watch{{.Normalized.Name}} is a free log subscription operation binding the contract event 0x{{printf "%x" .Original.ID}}.
|
// Watch{{.Normalized.Name}} is a free log subscription operation binding the contract event 0x{{printf "%x" .Original.ID}}.
|
||||||
//
|
//
|
||||||
// Solidity: {{formatevent .Original $structs}}
|
// Solidity: {{.Original.String}}
|
||||||
func (_{{$contract.Type}} *{{$contract.Type}}Filterer) Watch{{.Normalized.Name}}(opts *bind.WatchOpts, sink chan<- *{{$contract.Type}}{{.Normalized.Name}}{{range .Normalized.Inputs}}{{if .Indexed}}, {{.Name}} []{{bindtype .Type $structs}}{{end}}{{end}}) (event.Subscription, error) {
|
func (_{{$contract.Type}} *{{$contract.Type}}Filterer) Watch{{.Normalized.Name}}(opts *bind.WatchOpts, sink chan<- *{{$contract.Type}}{{.Normalized.Name}}{{range .Normalized.Inputs}}{{if .Indexed}}, {{.Name}} []{{bindtype .Type $structs}}{{end}}{{end}}) (event.Subscription, error) {
|
||||||
{{range .Normalized.Inputs}}
|
{{range .Normalized.Inputs}}
|
||||||
{{if .Indexed}}var {{.Name}}Rule []interface{}
|
{{if .Indexed}}var {{.Name}}Rule []interface{}
|
||||||
@@ -483,7 +530,7 @@ var (
|
|||||||
|
|
||||||
// Parse{{.Normalized.Name}} is a log parse operation binding the contract event 0x{{printf "%x" .Original.ID}}.
|
// Parse{{.Normalized.Name}} is a log parse operation binding the contract event 0x{{printf "%x" .Original.ID}}.
|
||||||
//
|
//
|
||||||
// Solidity: {{formatevent .Original $structs}}
|
// Solidity: {{.Original.String}}
|
||||||
func (_{{$contract.Type}} *{{$contract.Type}}Filterer) Parse{{.Normalized.Name}}(log types.Log) (*{{$contract.Type}}{{.Normalized.Name}}, error) {
|
func (_{{$contract.Type}} *{{$contract.Type}}Filterer) Parse{{.Normalized.Name}}(log types.Log) (*{{$contract.Type}}{{.Normalized.Name}}, error) {
|
||||||
event := new({{$contract.Type}}{{.Normalized.Name}})
|
event := new({{$contract.Type}}{{.Normalized.Name}})
|
||||||
if err := _{{$contract.Type}}.contract.UnpackLog(event, "{{.Original.Name}}", log); err != nil {
|
if err := _{{$contract.Type}}.contract.UnpackLog(event, "{{.Original.Name}}", log); err != nil {
|
||||||
@@ -577,7 +624,7 @@ import java.util.*;
|
|||||||
// {{.Normalized.Name}} is a free data retrieval call binding the contract method 0x{{printf "%x" .Original.ID}}.
|
// {{.Normalized.Name}} is a free data retrieval call binding the contract method 0x{{printf "%x" .Original.ID}}.
|
||||||
//
|
//
|
||||||
// Solidity: {{.Original.String}}
|
// Solidity: {{.Original.String}}
|
||||||
public {{if gt (len .Normalized.Outputs) 1}}{{capitalise .Normalized.Name}}Results{{else}}{{range .Normalized.Outputs}}{{bindtype .Type $structs}}{{end}}{{end}} {{.Normalized.Name}}(CallOpts opts{{range .Normalized.Inputs}}, {{bindtype .Type $structs}} {{.Name}}{{end}}) throws Exception {
|
public {{if gt (len .Normalized.Outputs) 1}}{{capitalise .Normalized.Name}}Results{{else if eq (len .Normalized.Outputs) 0}}void{{else}}{{range .Normalized.Outputs}}{{bindtype .Type $structs}}{{end}}{{end}} {{.Normalized.Name}}(CallOpts opts{{range .Normalized.Inputs}}, {{bindtype .Type $structs}} {{.Name}}{{end}}) throws Exception {
|
||||||
Interfaces args = Geth.newInterfaces({{(len .Normalized.Inputs)}});
|
Interfaces args = Geth.newInterfaces({{(len .Normalized.Inputs)}});
|
||||||
{{range $index, $item := .Normalized.Inputs}}Interface arg{{$index}} = Geth.newInterface();arg{{$index}}.set{{namedtype (bindtype .Type $structs) .Type}}({{.Name}});args.set({{$index}},arg{{$index}});
|
{{range $index, $item := .Normalized.Inputs}}Interface arg{{$index}} = Geth.newInterface();arg{{$index}}.set{{namedtype (bindtype .Type $structs) .Type}}({{.Name}});args.set({{$index}},arg{{$index}});
|
||||||
{{end}}
|
{{end}}
|
||||||
@@ -611,6 +658,24 @@ import java.util.*;
|
|||||||
return this.Contract.transact(opts, "{{.Original.Name}}" , args);
|
return this.Contract.transact(opts, "{{.Original.Name}}" , args);
|
||||||
}
|
}
|
||||||
{{end}}
|
{{end}}
|
||||||
|
|
||||||
|
{{if .Fallback}}
|
||||||
|
// Fallback is a paid mutator transaction binding the contract fallback function.
|
||||||
|
//
|
||||||
|
// Solidity: {{.Fallback.Original.String}}
|
||||||
|
public Transaction Fallback(TransactOpts opts, byte[] calldata) throws Exception {
|
||||||
|
return this.Contract.rawTransact(opts, calldata);
|
||||||
|
}
|
||||||
|
{{end}}
|
||||||
|
|
||||||
|
{{if .Receive}}
|
||||||
|
// Receive is a paid mutator transaction binding the contract receive function.
|
||||||
|
//
|
||||||
|
// Solidity: {{.Receive.Original.String}}
|
||||||
|
public Transaction Receive(TransactOpts opts) throws Exception {
|
||||||
|
return this.Contract.rawTransact(opts, null);
|
||||||
|
}
|
||||||
|
{{end}}
|
||||||
}
|
}
|
||||||
{{end}}
|
{{end}}
|
||||||
`
|
`
|
||||||
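The template additions above generate Go wrappers for Solidity's fallback and receive functions. A minimal usage sketch, assuming a binding named MyContract produced by abigen from this template (the name and the surrounding setup are illustrative, not part of this change):

	// sendSpecial exercises the two new wrappers on a deployed binding.
	func sendSpecial(contract *MyContract, auth *bind.TransactOpts) error {
		// Fallback routes raw calldata to the contract's fallback function.
		if _, err := contract.Fallback(auth, []byte{0xde, 0xad, 0xbe, 0xef}); err != nil {
			return err
		}
		// Receive sends a plain value transfer; calldata is disallowed for receive.
		_, err := contract.Receive(auth)
		return err
	}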
@@ -1,244 +0,0 @@
|
|||||||
// Copyright 2018 The go-ethereum Authors
|
|
||||||
// This file is part of the go-ethereum library.
|
|
||||||
//
|
|
||||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
|
||||||
// it under the terms of the GNU Lesser General Public License as published by
|
|
||||||
// the Free Software Foundation, either version 3 of the License, or
|
|
||||||
// (at your option) any later version.
|
|
||||||
//
|
|
||||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
|
||||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
// GNU Lesser General Public License for more details.
|
|
||||||
//
|
|
||||||
// You should have received a copy of the GNU Lesser General Public License
|
|
||||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
package bind
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/binary"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"math/big"
|
|
||||||
"reflect"
|
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum/accounts/abi"
|
|
||||||
"github.com/ethereum/go-ethereum/common"
|
|
||||||
"github.com/ethereum/go-ethereum/crypto"
|
|
||||||
)
|
|
||||||
|
|
||||||
// makeTopics converts a filter query argument list into a filter topic set.
|
|
||||||
func makeTopics(query ...[]interface{}) ([][]common.Hash, error) {
|
|
||||||
topics := make([][]common.Hash, len(query))
|
|
||||||
for i, filter := range query {
|
|
||||||
for _, rule := range filter {
|
|
||||||
var topic common.Hash
|
|
||||||
|
|
||||||
// Try to generate the topic based on simple types
|
|
||||||
switch rule := rule.(type) {
|
|
||||||
case common.Hash:
|
|
||||||
copy(topic[:], rule[:])
|
|
||||||
case common.Address:
|
|
||||||
copy(topic[common.HashLength-common.AddressLength:], rule[:])
|
|
||||||
case *big.Int:
|
|
||||||
blob := rule.Bytes()
|
|
||||||
copy(topic[common.HashLength-len(blob):], blob)
|
|
||||||
case bool:
|
|
||||||
if rule {
|
|
||||||
topic[common.HashLength-1] = 1
|
|
||||||
}
|
|
||||||
case int8:
|
|
||||||
blob := big.NewInt(int64(rule)).Bytes()
|
|
||||||
copy(topic[common.HashLength-len(blob):], blob)
|
|
||||||
case int16:
|
|
||||||
blob := big.NewInt(int64(rule)).Bytes()
|
|
||||||
copy(topic[common.HashLength-len(blob):], blob)
|
|
||||||
case int32:
|
|
||||||
blob := big.NewInt(int64(rule)).Bytes()
|
|
||||||
copy(topic[common.HashLength-len(blob):], blob)
|
|
||||||
case int64:
|
|
||||||
blob := big.NewInt(rule).Bytes()
|
|
||||||
copy(topic[common.HashLength-len(blob):], blob)
|
|
||||||
case uint8:
|
|
||||||
blob := new(big.Int).SetUint64(uint64(rule)).Bytes()
|
|
||||||
copy(topic[common.HashLength-len(blob):], blob)
|
|
||||||
case uint16:
|
|
||||||
blob := new(big.Int).SetUint64(uint64(rule)).Bytes()
|
|
||||||
copy(topic[common.HashLength-len(blob):], blob)
|
|
||||||
case uint32:
|
|
||||||
blob := new(big.Int).SetUint64(uint64(rule)).Bytes()
|
|
||||||
copy(topic[common.HashLength-len(blob):], blob)
|
|
||||||
case uint64:
|
|
||||||
blob := new(big.Int).SetUint64(rule).Bytes()
|
|
||||||
copy(topic[common.HashLength-len(blob):], blob)
|
|
||||||
case string:
|
|
||||||
hash := crypto.Keccak256Hash([]byte(rule))
|
|
||||||
copy(topic[:], hash[:])
|
|
||||||
case []byte:
|
|
||||||
hash := crypto.Keccak256Hash(rule)
|
|
||||||
copy(topic[:], hash[:])
|
|
||||||
|
|
||||||
default:
|
|
||||||
// todo(rjl493456442) according solidity documentation, indexed event
|
|
||||||
// parameters that are not value types i.e. arrays and structs are not
|
|
||||||
// stored directly but instead a keccak256-hash of an encoding is stored.
|
|
||||||
//
|
|
||||||
// We only convert stringS and bytes to hash, still need to deal with
|
|
||||||
// array(both fixed-size and dynamic-size) and struct.
|
|
||||||
|
|
||||||
// Attempt to generate the topic from funky types
|
|
||||||
val := reflect.ValueOf(rule)
|
|
||||||
switch {
|
|
||||||
// static byte array
|
|
||||||
case val.Kind() == reflect.Array && reflect.TypeOf(rule).Elem().Kind() == reflect.Uint8:
|
|
||||||
reflect.Copy(reflect.ValueOf(topic[:val.Len()]), val)
|
|
||||||
default:
|
|
||||||
return nil, fmt.Errorf("unsupported indexed type: %T", rule)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
topics[i] = append(topics[i], topic)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return topics, nil
|
|
||||||
}
|
|
||||||
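For reference, a usage sketch of the topic builder in the file removed here, written as a package-internal caller (the argument values are illustrative; the event-signature topic itself is added by the generated filterer, not by this helper):

	// exampleTopics builds one rule set per indexed argument position:
	// match `from == sender` and `id == 1`.
	func exampleTopics() ([][]common.Hash, error) {
		sender := common.HexToAddress("0x0000000000000000000000000000000000000001")
		return makeTopics(
			[]interface{}{sender},
			[]interface{}{big.NewInt(1)},
		)
	}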
|
|
||||||
// Big batch of reflect types for topic reconstruction.
|
|
||||||
var (
|
|
||||||
reflectHash = reflect.TypeOf(common.Hash{})
|
|
||||||
reflectAddress = reflect.TypeOf(common.Address{})
|
|
||||||
reflectBigInt = reflect.TypeOf(new(big.Int))
|
|
||||||
)
|
|
||||||
|
|
||||||
// parseTopics converts the indexed topic fields into actual log field values.
|
|
||||||
//
|
|
||||||
// Note, dynamic types cannot be reconstructed since they get mapped to Keccak256
|
|
||||||
// hashes as the topic value!
|
|
||||||
func parseTopics(out interface{}, fields abi.Arguments, topics []common.Hash) error {
|
|
||||||
// Sanity check that the fields and topics match up
|
|
||||||
if len(fields) != len(topics) {
|
|
||||||
return errors.New("topic/field count mismatch")
|
|
||||||
}
|
|
||||||
// Iterate over all the fields and reconstruct them from topics
|
|
||||||
for _, arg := range fields {
|
|
||||||
if !arg.Indexed {
|
|
||||||
return errors.New("non-indexed field in topic reconstruction")
|
|
||||||
}
|
|
||||||
field := reflect.ValueOf(out).Elem().FieldByName(capitalise(arg.Name))
|
|
||||||
|
|
||||||
// Try to parse the topic back into the fields based on primitive types
|
|
||||||
switch field.Kind() {
|
|
||||||
case reflect.Bool:
|
|
||||||
if topics[0][common.HashLength-1] == 1 {
|
|
||||||
field.Set(reflect.ValueOf(true))
|
|
||||||
}
|
|
||||||
case reflect.Int8:
|
|
||||||
num := new(big.Int).SetBytes(topics[0][:])
|
|
||||||
field.Set(reflect.ValueOf(int8(num.Int64())))
|
|
||||||
|
|
||||||
case reflect.Int16:
|
|
||||||
num := new(big.Int).SetBytes(topics[0][:])
|
|
||||||
field.Set(reflect.ValueOf(int16(num.Int64())))
|
|
||||||
|
|
||||||
case reflect.Int32:
|
|
||||||
num := new(big.Int).SetBytes(topics[0][:])
|
|
||||||
field.Set(reflect.ValueOf(int32(num.Int64())))
|
|
||||||
|
|
||||||
case reflect.Int64:
|
|
||||||
num := new(big.Int).SetBytes(topics[0][:])
|
|
||||||
field.Set(reflect.ValueOf(num.Int64()))
|
|
||||||
|
|
||||||
case reflect.Uint8:
|
|
||||||
num := new(big.Int).SetBytes(topics[0][:])
|
|
||||||
field.Set(reflect.ValueOf(uint8(num.Uint64())))
|
|
||||||
|
|
||||||
case reflect.Uint16:
|
|
||||||
num := new(big.Int).SetBytes(topics[0][:])
|
|
||||||
field.Set(reflect.ValueOf(uint16(num.Uint64())))
|
|
||||||
|
|
||||||
case reflect.Uint32:
|
|
||||||
num := new(big.Int).SetBytes(topics[0][:])
|
|
||||||
field.Set(reflect.ValueOf(uint32(num.Uint64())))
|
|
||||||
|
|
||||||
case reflect.Uint64:
|
|
||||||
num := new(big.Int).SetBytes(topics[0][:])
|
|
||||||
field.Set(reflect.ValueOf(num.Uint64()))
|
|
||||||
|
|
||||||
default:
|
|
||||||
// Ran out of plain primitive types, try custom types
|
|
||||||
|
|
||||||
switch field.Type() {
|
|
||||||
case reflectHash: // Also covers all dynamic types
|
|
||||||
field.Set(reflect.ValueOf(topics[0]))
|
|
||||||
|
|
||||||
case reflectAddress:
|
|
||||||
var addr common.Address
|
|
||||||
copy(addr[:], topics[0][common.HashLength-common.AddressLength:])
|
|
||||||
field.Set(reflect.ValueOf(addr))
|
|
||||||
|
|
||||||
case reflectBigInt:
|
|
||||||
num := new(big.Int).SetBytes(topics[0][:])
|
|
||||||
field.Set(reflect.ValueOf(num))
|
|
||||||
|
|
||||||
default:
|
|
||||||
// Ran out of custom types, try the crazies
|
|
||||||
switch {
|
|
||||||
// static byte array
|
|
||||||
case arg.Type.T == abi.FixedBytesTy:
|
|
||||||
reflect.Copy(field, reflect.ValueOf(topics[0][:arg.Type.Size]))
|
|
||||||
default:
|
|
||||||
return fmt.Errorf("unsupported indexed type: %v", arg.Type)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
topics = topics[1:]
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
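And the reverse direction, sketched for a hypothetical Transfer(address indexed from, address indexed to, uint256 value) event; indexedArgs is assumed to contain only the indexed abi.Arguments and vLog is a types.Log:

	// transferIndexed mirrors the indexed arguments; field names must match
	// the capitalised argument names.
	type transferIndexed struct {
		From common.Address
		To   common.Address
	}

	func indexedFieldsOf(vLog types.Log, indexedArgs abi.Arguments) (transferIndexed, error) {
		var into transferIndexed
		// Topics[0] is the event-signature hash, not an argument, so skip it.
		err := parseTopics(&into, indexedArgs, vLog.Topics[1:])
		return into, err
	}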
|
|
||||||
// parseTopicsIntoMap converts the indexed topic field-value pairs into map key-value pairs
|
|
||||||
func parseTopicsIntoMap(out map[string]interface{}, fields abi.Arguments, topics []common.Hash) error {
|
|
||||||
// Sanity check that the fields and topics match up
|
|
||||||
if len(fields) != len(topics) {
|
|
||||||
return errors.New("topic/field count mismatch")
|
|
||||||
}
|
|
||||||
// Iterate over all the fields and reconstruct them from topics
|
|
||||||
for _, arg := range fields {
|
|
||||||
if !arg.Indexed {
|
|
||||||
return errors.New("non-indexed field in topic reconstruction")
|
|
||||||
}
|
|
||||||
|
|
||||||
switch arg.Type.T {
|
|
||||||
case abi.BoolTy:
|
|
||||||
out[arg.Name] = topics[0][common.HashLength-1] == 1
|
|
||||||
case abi.IntTy, abi.UintTy:
|
|
||||||
num := new(big.Int).SetBytes(topics[0][:])
|
|
||||||
out[arg.Name] = num
|
|
||||||
case abi.AddressTy:
|
|
||||||
var addr common.Address
|
|
||||||
copy(addr[:], topics[0][common.HashLength-common.AddressLength:])
|
|
||||||
out[arg.Name] = addr
|
|
||||||
case abi.HashTy:
|
|
||||||
out[arg.Name] = topics[0]
|
|
||||||
case abi.FixedBytesTy:
|
|
||||||
out[arg.Name] = topics[0][:]
|
|
||||||
case abi.StringTy, abi.BytesTy, abi.SliceTy, abi.ArrayTy:
|
|
||||||
// Array types (including strings and bytes) have their keccak256 hashes stored in the topic- not a hash
|
|
||||||
// whose bytes can be decoded to the actual value- so the best we can do is retrieve that hash
|
|
||||||
out[arg.Name] = topics[0]
|
|
||||||
case abi.FunctionTy:
|
|
||||||
if garbage := binary.BigEndian.Uint64(topics[0][0:8]); garbage != 0 {
|
|
||||||
return fmt.Errorf("bind: got improperly encoded function type, got %v", topics[0].Bytes())
|
|
||||||
}
|
|
||||||
var tmp [24]byte
|
|
||||||
copy(tmp[:], topics[0][8:32])
|
|
||||||
out[arg.Name] = tmp
|
|
||||||
default: // Not handling tuples
|
|
||||||
return fmt.Errorf("unsupported indexed type: %v", arg.Type)
|
|
||||||
}
|
|
||||||
|
|
||||||
topics = topics[1:]
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
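The map-based variant, sketched with the same assumed indexedArgs and vLog:

	func indexedFieldMap(vLog types.Log, indexedArgs abi.Arguments) (map[string]interface{}, error) {
		out := make(map[string]interface{})
		err := parseTopicsIntoMap(out, indexedArgs, vLog.Topics[1:])
		return out, err // e.g. out["from"] holds a common.Address for an indexed address
	}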
||||||
@@ -1,103 +0,0 @@
|
|||||||
// Copyright 2019 The go-ethereum Authors
|
|
||||||
// This file is part of the go-ethereum library.
|
|
||||||
//
|
|
||||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
|
||||||
// it under the terms of the GNU Lesser General Public License as published by
|
|
||||||
// the Free Software Foundation, either version 3 of the License, or
|
|
||||||
// (at your option) any later version.
|
|
||||||
//
|
|
||||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
|
||||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
// GNU Lesser General Public License for more details.
|
|
||||||
//
|
|
||||||
// You should have received a copy of the GNU Lesser General Public License
|
|
||||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
package bind
|
|
||||||
|
|
||||||
import (
|
|
||||||
"reflect"
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum/accounts/abi"
|
|
||||||
"github.com/ethereum/go-ethereum/common"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestMakeTopics(t *testing.T) {
|
|
||||||
type args struct {
|
|
||||||
query [][]interface{}
|
|
||||||
}
|
|
||||||
tests := []struct {
|
|
||||||
name string
|
|
||||||
args args
|
|
||||||
want [][]common.Hash
|
|
||||||
wantErr bool
|
|
||||||
}{
|
|
||||||
{
|
|
||||||
"support fixed byte types, right padded to 32 bytes",
|
|
||||||
args{[][]interface{}{{[5]byte{1, 2, 3, 4, 5}}}},
|
|
||||||
[][]common.Hash{{common.Hash{1, 2, 3, 4, 5}}},
|
|
||||||
false,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
for _, tt := range tests {
|
|
||||||
t.Run(tt.name, func(t *testing.T) {
|
|
||||||
got, err := makeTopics(tt.args.query...)
|
|
||||||
if (err != nil) != tt.wantErr {
|
|
||||||
t.Errorf("makeTopics() error = %v, wantErr %v", err, tt.wantErr)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if !reflect.DeepEqual(got, tt.want) {
|
|
||||||
t.Errorf("makeTopics() = %v, want %v", got, tt.want)
|
|
||||||
}
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestParseTopics(t *testing.T) {
|
|
||||||
type bytesStruct struct {
|
|
||||||
StaticBytes [5]byte
|
|
||||||
}
|
|
||||||
bytesType, _ := abi.NewType("bytes5", "", nil)
|
|
||||||
type args struct {
|
|
||||||
createObj func() interface{}
|
|
||||||
resultObj func() interface{}
|
|
||||||
fields abi.Arguments
|
|
||||||
topics []common.Hash
|
|
||||||
}
|
|
||||||
tests := []struct {
|
|
||||||
name string
|
|
||||||
args args
|
|
||||||
wantErr bool
|
|
||||||
}{
|
|
||||||
{
|
|
||||||
name: "support fixed byte types, right padded to 32 bytes",
|
|
||||||
args: args{
|
|
||||||
createObj: func() interface{} { return &bytesStruct{} },
|
|
||||||
resultObj: func() interface{} { return &bytesStruct{StaticBytes: [5]byte{1, 2, 3, 4, 5}} },
|
|
||||||
fields: abi.Arguments{abi.Argument{
|
|
||||||
Name: "staticBytes",
|
|
||||||
Type: bytesType,
|
|
||||||
Indexed: true,
|
|
||||||
}},
|
|
||||||
topics: []common.Hash{
|
|
||||||
{1, 2, 3, 4, 5},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
wantErr: false,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
for _, tt := range tests {
|
|
||||||
t.Run(tt.name, func(t *testing.T) {
|
|
||||||
createObj := tt.args.createObj()
|
|
||||||
if err := parseTopics(createObj, tt.args.fields, tt.args.topics); (err != nil) != tt.wantErr {
|
|
||||||
t.Errorf("parseTopics() error = %v, wantErr %v", err, tt.wantErr)
|
|
||||||
}
|
|
||||||
resultObj := tt.args.resultObj()
|
|
||||||
if !reflect.DeepEqual(createObj, resultObj) {
|
|
||||||
t.Errorf("parseTopics() = %v, want %v", createObj, resultObj)
|
|
||||||
}
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -18,7 +18,7 @@ package bind
 
 import (
 	"context"
-	"fmt"
+	"errors"
 	"time"
 
 	"github.com/ethereum/go-ethereum/common"
@@ -56,14 +56,14 @@ func WaitMined(ctx context.Context, b DeployBackend, tx *types.Transaction) (*ty
 // contract address when it is mined. It stops waiting when ctx is canceled.
 func WaitDeployed(ctx context.Context, b DeployBackend, tx *types.Transaction) (common.Address, error) {
 	if tx.To() != nil {
-		return common.Address{}, fmt.Errorf("tx is not contract creation")
+		return common.Address{}, errors.New("tx is not contract creation")
 	}
 	receipt, err := WaitMined(ctx, b, tx)
 	if err != nil {
 		return common.Address{}, err
 	}
 	if receipt.ContractAddress == (common.Address{}) {
-		return common.Address{}, fmt.Errorf("zero address")
+		return common.Address{}, errors.New("zero address")
 	}
 	// Check that code has indeed been deployed at the address.
 	// This matters on pre-Homestead chains: OOG in the constructor
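WaitDeployed itself is unchanged apart from the error construction; for context, a small usage sketch where the backend and the signed contract-creation transaction are assumed to exist already (an ethclient.Client satisfies bind.DeployBackend):

	// waitForContract blocks until the creation transaction is mined and
	// returns the deployed contract address.
	func waitForContract(backend bind.DeployBackend, tx *types.Transaction) (common.Address, error) {
		ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
		defer cancel()
		return bind.WaitDeployed(ctx, backend, tx)
	}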
@@ -18,6 +18,7 @@ package bind_test
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
|
"errors"
|
||||||
"math/big"
|
"math/big"
|
||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
@@ -84,7 +85,7 @@ func TestWaitDeployed(t *testing.T) {
|
|||||||
select {
|
select {
|
||||||
case <-mined:
|
case <-mined:
|
||||||
if err != test.wantErr {
|
if err != test.wantErr {
|
||||||
t.Errorf("test %q: error mismatch: got %q, want %q", name, err, test.wantErr)
|
t.Errorf("test %q: error mismatch: want %q, got %q", name, test.wantErr, err)
|
||||||
}
|
}
|
||||||
if address != test.wantAddress {
|
if address != test.wantAddress {
|
||||||
t.Errorf("test %q: unexpected contract address %s", name, address.Hex())
|
t.Errorf("test %q: unexpected contract address %s", name, address.Hex())
|
||||||
@@ -94,3 +95,40 @@ func TestWaitDeployed(t *testing.T) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestWaitDeployedCornerCases(t *testing.T) {
|
||||||
|
backend := backends.NewSimulatedBackend(
|
||||||
|
core.GenesisAlloc{
|
||||||
|
crypto.PubkeyToAddress(testKey.PublicKey): {Balance: big.NewInt(10000000000)},
|
||||||
|
},
|
||||||
|
10000000,
|
||||||
|
)
|
||||||
|
defer backend.Close()
|
||||||
|
|
||||||
|
// Create a transaction to an account.
|
||||||
|
code := "6060604052600a8060106000396000f360606040526008565b00"
|
||||||
|
tx := types.NewTransaction(0, common.HexToAddress("0x01"), big.NewInt(0), 3000000, big.NewInt(1), common.FromHex(code))
|
||||||
|
tx, _ = types.SignTx(tx, types.HomesteadSigner{}, testKey)
|
||||||
|
ctx, cancel := context.WithCancel(context.Background())
|
||||||
|
defer cancel()
|
||||||
|
backend.SendTransaction(ctx, tx)
|
||||||
|
backend.Commit()
|
||||||
|
notContentCreation := errors.New("tx is not contract creation")
|
||||||
|
if _, err := bind.WaitDeployed(ctx, backend, tx); err.Error() != notContentCreation.Error() {
|
||||||
|
t.Errorf("error missmatch: want %q, got %q, ", notContentCreation, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create a transaction that is not mined.
|
||||||
|
tx = types.NewContractCreation(1, big.NewInt(0), 3000000, big.NewInt(1), common.FromHex(code))
|
||||||
|
tx, _ = types.SignTx(tx, types.HomesteadSigner{}, testKey)
|
||||||
|
|
||||||
|
go func() {
|
||||||
|
contextCanceled := errors.New("context canceled")
|
||||||
|
if _, err := bind.WaitDeployed(ctx, backend, tx); err.Error() != contextCanceled.Error() {
|
||||||
|
t.Errorf("error missmatch: want %q, got %q, ", contextCanceled, err)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
backend.SendTransaction(ctx, tx)
|
||||||
|
cancel()
|
||||||
|
}
|
||||||
|
|||||||
@@ -39,23 +39,21 @@ func formatSliceString(kind reflect.Kind, sliceSize int) string {
|
|||||||
// type in t.
|
// type in t.
|
||||||
func sliceTypeCheck(t Type, val reflect.Value) error {
|
func sliceTypeCheck(t Type, val reflect.Value) error {
|
||||||
if val.Kind() != reflect.Slice && val.Kind() != reflect.Array {
|
if val.Kind() != reflect.Slice && val.Kind() != reflect.Array {
|
||||||
return typeErr(formatSliceString(t.Kind, t.Size), val.Type())
|
return typeErr(formatSliceString(t.GetType().Kind(), t.Size), val.Type())
|
||||||
}
|
}
|
||||||
|
|
||||||
if t.T == ArrayTy && val.Len() != t.Size {
|
if t.T == ArrayTy && val.Len() != t.Size {
|
||||||
return typeErr(formatSliceString(t.Elem.Kind, t.Size), formatSliceString(val.Type().Elem().Kind(), val.Len()))
|
return typeErr(formatSliceString(t.Elem.GetType().Kind(), t.Size), formatSliceString(val.Type().Elem().Kind(), val.Len()))
|
||||||
}
|
}
|
||||||
|
|
||||||
if t.Elem.T == SliceTy {
|
if t.Elem.T == SliceTy || t.Elem.T == ArrayTy {
|
||||||
if val.Len() > 0 {
|
if val.Len() > 0 {
|
||||||
return sliceTypeCheck(*t.Elem, val.Index(0))
|
return sliceTypeCheck(*t.Elem, val.Index(0))
|
||||||
}
|
}
|
||||||
} else if t.Elem.T == ArrayTy {
|
|
||||||
return sliceTypeCheck(*t.Elem, val.Index(0))
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if elemKind := val.Type().Elem().Kind(); elemKind != t.Elem.Kind {
|
if elemKind := val.Type().Elem().Kind(); elemKind != t.Elem.GetType().Kind() {
|
||||||
return typeErr(formatSliceString(t.Elem.Kind, t.Size), val.Type())
|
return typeErr(formatSliceString(t.Elem.GetType().Kind(), t.Size), val.Type())
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
@@ -68,10 +66,10 @@ func typeCheck(t Type, value reflect.Value) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Check base type validity. Element types will be checked later on.
|
// Check base type validity. Element types will be checked later on.
|
||||||
if t.Kind != value.Kind() {
|
if t.GetType().Kind() != value.Kind() {
|
||||||
return typeErr(t.Kind, value.Kind())
|
return typeErr(t.GetType().Kind(), value.Kind())
|
||||||
} else if t.T == FixedBytesTy && t.Size != value.Len() {
|
} else if t.T == FixedBytesTy && t.Size != value.Len() {
|
||||||
return typeErr(t.Type, value.Type())
|
return typeErr(t.GetType(), value.Type())
|
||||||
} else {
|
} else {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -42,36 +42,59 @@ type Event struct {
|
|||||||
RawName string
|
RawName string
|
||||||
Anonymous bool
|
Anonymous bool
|
||||||
Inputs Arguments
|
Inputs Arguments
|
||||||
|
str string
|
||||||
|
// Sig contains the string signature according to the ABI spec.
|
||||||
|
// e.g. event foo(uint32 a, int b) = "foo(uint32,int256)"
|
||||||
|
// Please note that "int" is substitute for its canonical representation "int256"
|
||||||
|
Sig string
|
||||||
|
// ID returns the canonical representation of the event's signature used by the
|
||||||
|
// abi definition to identify event names and types.
|
||||||
|
ID common.Hash
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewEvent creates a new Event.
|
||||||
|
// It sanitizes the input arguments to remove unnamed arguments.
|
||||||
|
// It also precomputes the id, signature and string representation
|
||||||
|
// of the event.
|
||||||
|
func NewEvent(name, rawName string, anonymous bool, inputs Arguments) Event {
|
||||||
|
// sanitize inputs to remove inputs without names
|
||||||
|
// and precompute string and sig representation.
|
||||||
|
names := make([]string, len(inputs))
|
||||||
|
types := make([]string, len(inputs))
|
||||||
|
for i, input := range inputs {
|
||||||
|
if input.Name == "" {
|
||||||
|
inputs[i] = Argument{
|
||||||
|
Name: fmt.Sprintf("arg%d", i),
|
||||||
|
Indexed: input.Indexed,
|
||||||
|
Type: input.Type,
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
inputs[i] = input
|
||||||
|
}
|
||||||
|
// string representation
|
||||||
|
names[i] = fmt.Sprintf("%v %v", input.Type, inputs[i].Name)
|
||||||
|
if input.Indexed {
|
||||||
|
names[i] = fmt.Sprintf("%v indexed %v", input.Type, inputs[i].Name)
|
||||||
|
}
|
||||||
|
// sig representation
|
||||||
|
types[i] = input.Type.String()
|
||||||
|
}
|
||||||
|
|
||||||
|
str := fmt.Sprintf("event %v(%v)", rawName, strings.Join(names, ", "))
|
||||||
|
sig := fmt.Sprintf("%v(%v)", rawName, strings.Join(types, ","))
|
||||||
|
id := common.BytesToHash(crypto.Keccak256([]byte(sig)))
|
||||||
|
|
||||||
|
return Event{
|
||||||
|
Name: name,
|
||||||
|
RawName: rawName,
|
||||||
|
Anonymous: anonymous,
|
||||||
|
Inputs: inputs,
|
||||||
|
str: str,
|
||||||
|
Sig: sig,
|
||||||
|
ID: id,
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (e Event) String() string {
|
func (e Event) String() string {
|
||||||
inputs := make([]string, len(e.Inputs))
|
return e.str
|
||||||
for i, input := range e.Inputs {
|
|
||||||
inputs[i] = fmt.Sprintf("%v %v", input.Type, input.Name)
|
|
||||||
if input.Indexed {
|
|
||||||
inputs[i] = fmt.Sprintf("%v indexed %v", input.Type, input.Name)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return fmt.Sprintf("event %v(%v)", e.RawName, strings.Join(inputs, ", "))
|
|
||||||
}
|
|
||||||
|
|
||||||
// Sig returns the event string signature according to the ABI spec.
|
|
||||||
//
|
|
||||||
// Example
|
|
||||||
//
|
|
||||||
// event foo(uint32 a, int b) = "foo(uint32,int256)"
|
|
||||||
//
|
|
||||||
// Please note that "int" is substitute for its canonical representation "int256"
|
|
||||||
func (e Event) Sig() string {
|
|
||||||
types := make([]string, len(e.Inputs))
|
|
||||||
for i, input := range e.Inputs {
|
|
||||||
types[i] = input.Type.String()
|
|
||||||
}
|
|
||||||
return fmt.Sprintf("%v(%v)", e.RawName, strings.Join(types, ","))
|
|
||||||
}
|
|
||||||
|
|
||||||
// ID returns the canonical representation of the event's signature used by the
|
|
||||||
// abi definition to identify event names and types.
|
|
||||||
func (e Event) ID() common.Hash {
|
|
||||||
return common.BytesToHash(crypto.Keccak256([]byte(e.Sig())))
|
|
||||||
}
|
}
|
||||||
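A short sketch of the new constructor in use; the Transfer event and the ignored NewType errors are illustrative only:

	func exampleTransferEvent() {
		addrT, _ := abi.NewType("address", "", nil)
		uintT, _ := abi.NewType("uint256", "", nil)
		ev := abi.NewEvent("Transfer", "Transfer", false, abi.Arguments{
			{Name: "from", Type: addrT, Indexed: true},
			{Name: "value", Type: uintT},
		})
		fmt.Println(ev.Sig)        // Transfer(address,uint256)
		fmt.Printf("%x\n", ev.ID)  // keccak256 hash of the signature
		fmt.Println(ev)            // event Transfer(address indexed from, uint256 value)
	}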
@@ -104,8 +104,8 @@ func TestEventId(t *testing.T) {
 	}
 
 	for name, event := range abi.Events {
-		if event.ID() != test.expectations[name] {
-			t.Errorf("expected id to be %x, got %x", test.expectations[name], event.ID())
+		if event.ID != test.expectations[name] {
+			t.Errorf("expected id to be %x, got %x", test.expectations[name], event.ID)
 		}
 	}
 }
@@ -312,14 +312,14 @@ func TestEventTupleUnpack(t *testing.T) {
 		&[]interface{}{common.Address{}, new(big.Int)},
 		&[]interface{}{},
 		jsonEventPledge,
-		"abi: insufficient number of elements in the list/array for unpack, want 3, got 2",
+		"abi: insufficient number of arguments for unpack, want 3, got 2",
 		"Can not unpack Pledge event into too short slice",
 	}, {
 		pledgeData1,
 		new(map[string]interface{}),
 		&[]interface{}{},
 		jsonEventPledge,
-		"abi: cannot unmarshal tuple into map[string]interface {}",
+		"abi:[2] cannot unmarshal tuple in to map[string]interface {}",
 		"Can not unpack Pledge event into map",
 	}, {
 		mixedCaseData1,
@@ -23,6 +23,24 @@ import (
|
|||||||
"github.com/ethereum/go-ethereum/crypto"
|
"github.com/ethereum/go-ethereum/crypto"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// FunctionType represents different types of functions a contract might have.
|
||||||
|
type FunctionType int
|
||||||
|
|
||||||
|
const (
|
||||||
|
// Constructor represents the constructor of the contract.
|
||||||
|
// The constructor function is called while deploying a contract.
|
||||||
|
Constructor FunctionType = iota
|
||||||
|
// Fallback represents the fallback function.
|
||||||
|
// This function is executed if no other function matches the given function
|
||||||
|
// signature and no receive function is specified.
|
||||||
|
Fallback
|
||||||
|
// Receive represents the receive function.
|
||||||
|
// This function is executed on plain Ether transfers.
|
||||||
|
Receive
|
||||||
|
// Function represents a normal function.
|
||||||
|
Function
|
||||||
|
)
|
||||||
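A small sketch of how a caller might branch on the new Type field; the method value m is assumed to come from a parsed ABI:

	func describeMethod(m abi.Method) {
		switch m.Type {
		case abi.Constructor:
			fmt.Println("constructor:", m)
		case abi.Fallback:
			fmt.Println("fallback:", m)
		case abi.Receive:
			fmt.Println("receive:", m)
		default: // abi.Function
			fmt.Printf("function %s, selector %x\n", m.Sig, m.ID)
		}
	}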
|
|
||||||
// Method represents a callable given a `Name` and whether the method is a constant.
|
// Method represents a callable given a `Name` and whether the method is a constant.
|
||||||
// If the method is `Const` no transaction needs to be created for this
|
// If the method is `Const` no transaction needs to be created for this
|
||||||
// particular Method call. It can easily be simulated using a local VM.
|
// particular Method call. It can easily be simulated using a local VM.
|
||||||
@@ -41,50 +59,109 @@ type Method struct {
|
|||||||
// * foo(uint,uint)
|
// * foo(uint,uint)
|
||||||
// The method name of the first one will be resolved as foo while the second one
|
// The method name of the first one will be resolved as foo while the second one
|
||||||
// will be resolved as foo0.
|
// will be resolved as foo0.
|
||||||
Name string
|
Name string
|
||||||
// RawName is the raw method name parsed from ABI.
|
RawName string // RawName is the raw method name parsed from ABI
|
||||||
RawName string
|
|
||||||
Const bool
|
// Type indicates whether the method is a
|
||||||
|
// special fallback introduced in solidity v0.6.0
|
||||||
|
Type FunctionType
|
||||||
|
|
||||||
|
// StateMutability indicates the mutability state of method,
|
||||||
|
// the default value is nonpayable. It can be empty if the abi
|
||||||
|
// is generated by legacy compiler.
|
||||||
|
StateMutability string
|
||||||
|
|
||||||
|
// Legacy indicators generated by compiler before v0.6.0
|
||||||
|
Constant bool
|
||||||
|
Payable bool
|
||||||
|
|
||||||
Inputs Arguments
|
Inputs Arguments
|
||||||
Outputs Arguments
|
Outputs Arguments
|
||||||
|
str string
|
||||||
|
// Sig returns the methods string signature according to the ABI spec.
|
||||||
|
// e.g. function foo(uint32 a, int b) = "foo(uint32,int256)"
|
||||||
|
// Please note that "int" is substitute for its canonical representation "int256"
|
||||||
|
Sig string
|
||||||
|
// ID returns the canonical representation of the method's signature used by the
|
||||||
|
// abi definition to identify method names and types.
|
||||||
|
ID []byte
|
||||||
}
|
}
|
||||||
|
|
||||||
// Sig returns the methods string signature according to the ABI spec.
|
// NewMethod creates a new Method.
|
||||||
//
|
// A method should always be created using NewMethod.
|
||||||
// Example
|
// It also precomputes the sig representation and the string representation
|
||||||
//
|
// of the method.
|
||||||
// function foo(uint32 a, int b) = "foo(uint32,int256)"
|
func NewMethod(name string, rawName string, funType FunctionType, mutability string, isConst, isPayable bool, inputs Arguments, outputs Arguments) Method {
|
||||||
//
|
var (
|
||||||
// Please note that "int" is substitute for its canonical representation "int256"
|
types = make([]string, len(inputs))
|
||||||
func (method Method) Sig() string {
|
inputNames = make([]string, len(inputs))
|
||||||
types := make([]string, len(method.Inputs))
|
outputNames = make([]string, len(outputs))
|
||||||
for i, input := range method.Inputs {
|
)
|
||||||
|
for i, input := range inputs {
|
||||||
|
inputNames[i] = fmt.Sprintf("%v %v", input.Type, input.Name)
|
||||||
types[i] = input.Type.String()
|
types[i] = input.Type.String()
|
||||||
}
|
}
|
||||||
return fmt.Sprintf("%v(%v)", method.RawName, strings.Join(types, ","))
|
for i, output := range outputs {
|
||||||
|
outputNames[i] = output.Type.String()
|
||||||
|
if len(output.Name) > 0 {
|
||||||
|
outputNames[i] += fmt.Sprintf(" %v", output.Name)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// calculate the signature and method id. Note only function
|
||||||
|
// has meaningful signature and id.
|
||||||
|
var (
|
||||||
|
sig string
|
||||||
|
id []byte
|
||||||
|
)
|
||||||
|
if funType == Function {
|
||||||
|
sig = fmt.Sprintf("%v(%v)", rawName, strings.Join(types, ","))
|
||||||
|
id = crypto.Keccak256([]byte(sig))[:4]
|
||||||
|
}
|
||||||
|
// Extract meaningful state mutability of solidity method.
|
||||||
|
// If it's default value, never print it.
|
||||||
|
state := mutability
|
||||||
|
if state == "nonpayable" {
|
||||||
|
state = ""
|
||||||
|
}
|
||||||
|
if state != "" {
|
||||||
|
state = state + " "
|
||||||
|
}
|
||||||
|
identity := fmt.Sprintf("function %v", rawName)
|
||||||
|
if funType == Fallback {
|
||||||
|
identity = "fallback"
|
||||||
|
} else if funType == Receive {
|
||||||
|
identity = "receive"
|
||||||
|
} else if funType == Constructor {
|
||||||
|
identity = "constructor"
|
||||||
|
}
|
||||||
|
str := fmt.Sprintf("%v(%v) %sreturns(%v)", identity, strings.Join(inputNames, ", "), state, strings.Join(outputNames, ", "))
|
||||||
|
|
||||||
|
return Method{
|
||||||
|
Name: name,
|
||||||
|
RawName: rawName,
|
||||||
|
Type: funType,
|
||||||
|
StateMutability: mutability,
|
||||||
|
Constant: isConst,
|
||||||
|
Payable: isPayable,
|
||||||
|
Inputs: inputs,
|
||||||
|
Outputs: outputs,
|
||||||
|
str: str,
|
||||||
|
Sig: sig,
|
||||||
|
ID: id,
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (method Method) String() string {
|
func (method Method) String() string {
|
||||||
inputs := make([]string, len(method.Inputs))
|
return method.str
|
||||||
for i, input := range method.Inputs {
|
|
||||||
inputs[i] = fmt.Sprintf("%v %v", input.Type, input.Name)
|
|
||||||
}
|
|
||||||
outputs := make([]string, len(method.Outputs))
|
|
||||||
for i, output := range method.Outputs {
|
|
||||||
outputs[i] = output.Type.String()
|
|
||||||
if len(output.Name) > 0 {
|
|
||||||
outputs[i] += fmt.Sprintf(" %v", output.Name)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
constant := ""
|
|
||||||
if method.Const {
|
|
||||||
constant = "constant "
|
|
||||||
}
|
|
||||||
return fmt.Sprintf("function %v(%v) %sreturns(%v)", method.RawName, strings.Join(inputs, ", "), constant, strings.Join(outputs, ", "))
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// ID returns the canonical representation of the method's signature used by the
|
// IsConstant returns the indicator whether the method is read-only.
|
||||||
// abi definition to identify method names and types.
|
func (method Method) IsConstant() bool {
|
||||||
func (method Method) ID() []byte {
|
return method.StateMutability == "view" || method.StateMutability == "pure" || method.Constant
|
||||||
return crypto.Keccak256([]byte(method.Sig()))[:4]
|
}
|
||||||
|
|
||||||
|
// IsPayable returns the indicator whether the method can process
|
||||||
|
// plain ether transfers.
|
||||||
|
func (method Method) IsPayable() bool {
|
||||||
|
return method.StateMutability == "payable" || method.Payable
|
||||||
}
|
}
|
||||||
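A sketch of constructing a plain function through the new constructor and reading the precomputed metadata; the method definition is illustrative and the NewType error is ignored for brevity:

	func exampleSendMethod() {
		uint256T, _ := abi.NewType("uint256", "", nil)
		m := abi.NewMethod("send", "send", abi.Function, "nonpayable", false, false,
			abi.Arguments{{Name: "amount", Type: uint256T}}, nil)
		fmt.Println(m.Sig)          // send(uint256)
		fmt.Printf("%x\n", m.ID)    // 4-byte selector: keccak256("send(uint256)")[:4]
		fmt.Println(m)              // function send(uint256 amount) returns()
		fmt.Println(m.IsConstant()) // false
	}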
@@ -23,13 +23,15 @@ import (
|
|||||||
|
|
||||||
const methoddata = `
|
const methoddata = `
|
||||||
[
|
[
|
||||||
{"type": "function", "name": "balance", "constant": true },
|
{"type": "function", "name": "balance", "stateMutability": "view"},
|
||||||
{"type": "function", "name": "send", "constant": false, "inputs": [{ "name": "amount", "type": "uint256" }]},
|
{"type": "function", "name": "send", "inputs": [{ "name": "amount", "type": "uint256" }]},
|
||||||
{"type": "function", "name": "transfer", "constant": false, "inputs": [{"name": "from", "type": "address"}, {"name": "to", "type": "address"}, {"name": "value", "type": "uint256"}], "outputs": [{"name": "success", "type": "bool"}]},
|
{"type": "function", "name": "transfer", "inputs": [{"name": "from", "type": "address"}, {"name": "to", "type": "address"}, {"name": "value", "type": "uint256"}], "outputs": [{"name": "success", "type": "bool"}]},
|
||||||
{"constant":false,"inputs":[{"components":[{"name":"x","type":"uint256"},{"name":"y","type":"uint256"}],"name":"a","type":"tuple"}],"name":"tuple","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},
|
{"constant":false,"inputs":[{"components":[{"name":"x","type":"uint256"},{"name":"y","type":"uint256"}],"name":"a","type":"tuple"}],"name":"tuple","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},
|
||||||
{"constant":false,"inputs":[{"components":[{"name":"x","type":"uint256"},{"name":"y","type":"uint256"}],"name":"a","type":"tuple[]"}],"name":"tupleSlice","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},
|
{"constant":false,"inputs":[{"components":[{"name":"x","type":"uint256"},{"name":"y","type":"uint256"}],"name":"a","type":"tuple[]"}],"name":"tupleSlice","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},
|
||||||
{"constant":false,"inputs":[{"components":[{"name":"x","type":"uint256"},{"name":"y","type":"uint256"}],"name":"a","type":"tuple[5]"}],"name":"tupleArray","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},
|
{"constant":false,"inputs":[{"components":[{"name":"x","type":"uint256"},{"name":"y","type":"uint256"}],"name":"a","type":"tuple[5]"}],"name":"tupleArray","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},
|
||||||
{"constant":false,"inputs":[{"components":[{"name":"x","type":"uint256"},{"name":"y","type":"uint256"}],"name":"a","type":"tuple[5][]"}],"name":"complexTuple","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"}
|
{"constant":false,"inputs":[{"components":[{"name":"x","type":"uint256"},{"name":"y","type":"uint256"}],"name":"a","type":"tuple[5][]"}],"name":"complexTuple","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},
|
||||||
|
{"stateMutability":"nonpayable","type":"fallback"},
|
||||||
|
{"stateMutability":"payable","type":"receive"}
|
||||||
]`
|
]`
|
||||||
|
|
||||||
func TestMethodString(t *testing.T) {
|
func TestMethodString(t *testing.T) {
|
||||||
@@ -39,7 +41,7 @@ func TestMethodString(t *testing.T) {
|
|||||||
}{
|
}{
|
||||||
{
|
{
|
||||||
method: "balance",
|
method: "balance",
|
||||||
expectation: "function balance() constant returns()",
|
expectation: "function balance() view returns()",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
method: "send",
|
method: "send",
|
||||||
@@ -65,6 +67,14 @@ func TestMethodString(t *testing.T) {
|
|||||||
method: "complexTuple",
|
method: "complexTuple",
|
||||||
expectation: "function complexTuple((uint256,uint256)[5][] a) returns()",
|
expectation: "function complexTuple((uint256,uint256)[5][] a) returns()",
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
method: "fallback",
|
||||||
|
expectation: "fallback() returns()",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
method: "receive",
|
||||||
|
expectation: "receive() payable returns()",
|
||||||
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
abi, err := JSON(strings.NewReader(methoddata))
|
abi, err := JSON(strings.NewReader(methoddata))
|
||||||
@@ -73,7 +83,14 @@ func TestMethodString(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
for _, test := range table {
|
for _, test := range table {
|
||||||
got := abi.Methods[test.method].String()
|
var got string
|
||||||
|
if test.method == "fallback" {
|
||||||
|
got = abi.Fallback.String()
|
||||||
|
} else if test.method == "receive" {
|
||||||
|
got = abi.Receive.String()
|
||||||
|
} else {
|
||||||
|
got = abi.Methods[test.method].String()
|
||||||
|
}
|
||||||
if got != test.expectation {
|
if got != test.expectation {
|
||||||
t.Errorf("expected string to be %s, got %s", test.expectation, got)
|
t.Errorf("expected string to be %s, got %s", test.expectation, got)
|
||||||
}
|
}
|
||||||
@@ -120,7 +137,7 @@ func TestMethodSig(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
for _, test := range cases {
|
for _, test := range cases {
|
||||||
got := abi.Methods[test.method].Sig()
|
got := abi.Methods[test.method].Sig
|
||||||
if got != test.expect {
|
if got != test.expect {
|
||||||
t.Errorf("expected string to be %s, got %s", test.expect, got)
|
t.Errorf("expected string to be %s, got %s", test.expect, got)
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -69,11 +69,11 @@ func packElement(t Type, reflectValue reflect.Value) []byte {
 func packNum(value reflect.Value) []byte {
 	switch kind := value.Kind(); kind {
 	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
-		return U256(new(big.Int).SetUint64(value.Uint()))
+		return math.U256Bytes(new(big.Int).SetUint64(value.Uint()))
 	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
-		return U256(big.NewInt(value.Int()))
+		return math.U256Bytes(big.NewInt(value.Int()))
 	case reflect.Ptr:
-		return U256(value.Interface().(*big.Int))
+		return math.U256Bytes(new(big.Int).Set(value.Interface().(*big.Int)))
 	default:
 		panic("abi: fatal error")
 	}
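math.U256Bytes (from common/math) replaces the package-local U256 helper: it reduces a big.Int modulo 2^256 and returns the 32-byte big-endian word. Note that the Ptr case now packs a copy of the caller's value, presumably because U256Bytes, like the old helper, works on its argument in place. A small sketch, assuming the common/math import path:

package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/common/math"
)

func main() {
	// -1 becomes the all-ones 256-bit word, matching the "ffff..ff" values in the tests.
	fmt.Printf("%x\n", math.U256Bytes(big.NewInt(-1)))

	// Pack a copy so the caller's big.Int is left untouched.
	n := big.NewInt(2)
	fmt.Printf("%x\n", math.U256Bytes(new(big.Int).Set(n)))
	fmt.Println(n) // still 2
}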
@@ -18,623 +18,62 @@ package abi

 import (
 	"bytes"
+	"encoding/hex"
+	"fmt"
 	"math"
 	"math/big"
 	"reflect"
+	"strconv"
 	"strings"
 	"testing"

 	"github.com/ethereum/go-ethereum/common"
 )

+// TestPack tests the general pack/unpack tests in packing_test.go
 func TestPack(t *testing.T) {
-	for i, test := range []struct {
-		typ string
-		components []ArgumentMarshaling
-		input interface{}
-		output []byte
-	}{
-		{
-			"uint8",
-			nil,
-			uint8(2),
-			common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002"),
-		},
-		{
-			"uint8[]",
-			nil,
-			[]uint8{1, 2},
-			common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002"),
-		},
-		{
-			"uint16",
-			nil,
+	for i, test := range packUnpackTests {
+		t.Run(strconv.Itoa(i), func(t *testing.T) {
+			encb, err := hex.DecodeString(test.packed)
+			if err != nil {
+				t.Fatalf("invalid hex %s: %v", test.packed, err)
+			}
+			inDef := fmt.Sprintf(`[{ "name" : "method", "type": "function", "inputs": %s}]`, test.def)
+			inAbi, err := JSON(strings.NewReader(inDef))
+			if err != nil {
+				t.Fatalf("invalid ABI definition %s, %v", inDef, err)
+			}
+			var packed []byte
+			if reflect.TypeOf(test.unpacked).Kind() != reflect.Struct {
+				packed, err = inAbi.Pack("method", test.unpacked)
+			} else {
+				// if want is a struct we need to use the components.
+				elem := reflect.ValueOf(test.unpacked)
+				var values []interface{}
+				for i := 0; i < elem.NumField(); i++ {
+					field := elem.Field(i)
+					values = append(values, field.Interface())
+				}
uint16(2),
|
|
||||||
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002"),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"uint16[]",
|
|
||||||
nil,
|
|
||||||
[]uint16{1, 2},
|
|
||||||
common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002"),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"uint32",
|
|
||||||
nil,
|
|
||||||
uint32(2),
|
|
||||||
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002"),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"uint32[]",
|
|
||||||
nil,
|
|
||||||
[]uint32{1, 2},
|
|
||||||
common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002"),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"uint64",
|
|
||||||
nil,
|
|
||||||
uint64(2),
|
|
||||||
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002"),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"uint64[]",
|
|
||||||
nil,
|
|
||||||
[]uint64{1, 2},
|
|
||||||
common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002"),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"uint256",
|
|
||||||
nil,
|
|
||||||
big.NewInt(2),
|
|
||||||
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002"),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"uint256[]",
|
|
||||||
nil,
|
|
||||||
[]*big.Int{big.NewInt(1), big.NewInt(2)},
|
|
||||||
common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002"),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"int8",
|
|
||||||
nil,
|
|
||||||
int8(2),
|
|
||||||
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002"),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"int8[]",
|
|
||||||
nil,
|
|
||||||
[]int8{1, 2},
|
|
||||||
common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002"),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"int16",
|
|
||||||
nil,
|
|
||||||
int16(2),
|
|
||||||
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002"),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"int16[]",
|
|
||||||
nil,
|
|
||||||
[]int16{1, 2},
|
|
||||||
common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002"),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"int32",
|
|
||||||
nil,
|
|
||||||
int32(2),
|
|
||||||
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002"),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"int32[]",
|
|
||||||
nil,
|
|
||||||
[]int32{1, 2},
|
|
||||||
common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002"),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"int64",
|
|
||||||
nil,
|
|
||||||
int64(2),
|
|
||||||
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002"),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"int64[]",
|
|
||||||
nil,
|
|
||||||
[]int64{1, 2},
|
|
||||||
common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002"),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"int256",
|
|
||||||
nil,
|
|
||||||
big.NewInt(2),
|
|
||||||
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002"),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"int256[]",
|
|
||||||
nil,
|
|
||||||
[]*big.Int{big.NewInt(1), big.NewInt(2)},
|
|
||||||
common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002"),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"bytes1",
|
|
||||||
nil,
|
|
||||||
[1]byte{1},
|
|
||||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"bytes2",
|
|
||||||
nil,
|
|
||||||
[2]byte{1},
|
|
||||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"bytes3",
|
|
||||||
nil,
|
|
||||||
[3]byte{1},
|
|
||||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"bytes4",
|
|
||||||
nil,
|
|
||||||
[4]byte{1},
|
|
||||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"bytes5",
|
|
||||||
nil,
|
|
||||||
[5]byte{1},
|
|
||||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"bytes6",
|
|
||||||
nil,
|
|
||||||
[6]byte{1},
|
|
||||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"bytes7",
|
|
||||||
nil,
|
|
||||||
[7]byte{1},
|
|
||||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"bytes8",
|
|
||||||
nil,
|
|
||||||
[8]byte{1},
|
|
||||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"bytes9",
|
|
||||||
nil,
|
|
||||||
[9]byte{1},
|
|
||||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"bytes10",
|
|
||||||
nil,
|
|
||||||
[10]byte{1},
|
|
||||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"bytes11",
|
|
||||||
nil,
|
|
||||||
[11]byte{1},
|
|
||||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"bytes12",
|
|
||||||
nil,
|
|
||||||
[12]byte{1},
|
|
||||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"bytes13",
|
|
||||||
nil,
|
|
||||||
[13]byte{1},
|
|
||||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"bytes14",
|
|
||||||
nil,
|
|
||||||
[14]byte{1},
|
|
||||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"bytes15",
|
|
||||||
nil,
|
|
||||||
[15]byte{1},
|
|
||||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"bytes16",
|
|
||||||
nil,
|
|
||||||
[16]byte{1},
|
|
||||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"bytes17",
|
|
||||||
nil,
|
|
||||||
[17]byte{1},
|
|
||||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"bytes18",
|
|
||||||
nil,
|
|
||||||
[18]byte{1},
|
|
||||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"bytes19",
|
|
||||||
nil,
|
|
||||||
[19]byte{1},
|
|
||||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"bytes20",
|
|
||||||
nil,
|
|
||||||
[20]byte{1},
|
|
||||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"bytes21",
|
|
||||||
nil,
|
|
||||||
[21]byte{1},
|
|
||||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"bytes22",
|
|
||||||
nil,
|
|
||||||
[22]byte{1},
|
|
||||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"bytes23",
|
|
||||||
nil,
|
|
||||||
[23]byte{1},
|
|
||||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"bytes24",
|
|
||||||
nil,
|
|
||||||
[24]byte{1},
|
|
||||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"bytes25",
|
|
||||||
nil,
|
|
||||||
[25]byte{1},
|
|
||||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"bytes26",
|
|
||||||
nil,
|
|
||||||
[26]byte{1},
|
|
||||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"bytes27",
|
|
||||||
nil,
|
|
||||||
[27]byte{1},
|
|
||||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"bytes28",
|
|
||||||
nil,
|
|
||||||
[28]byte{1},
|
|
||||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"bytes29",
|
|
||||||
nil,
|
|
||||||
[29]byte{1},
|
|
||||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"bytes30",
|
|
||||||
nil,
|
|
||||||
[30]byte{1},
|
|
||||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"bytes31",
|
|
||||||
nil,
|
|
||||||
[31]byte{1},
|
|
||||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"bytes32",
|
|
||||||
nil,
|
|
||||||
[32]byte{1},
|
|
||||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"uint32[2][3][4]",
|
|
||||||
nil,
|
|
||||||
[4][3][2]uint32{{{1, 2}, {3, 4}, {5, 6}}, {{7, 8}, {9, 10}, {11, 12}}, {{13, 14}, {15, 16}, {17, 18}}, {{19, 20}, {21, 22}, {23, 24}}},
|
|
||||||
common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000003000000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000050000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000000700000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000009000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000b000000000000000000000000000000000000000000000000000000000000000c000000000000000000000000000000000000000000000000000000000000000d000000000000000000000000000000000000000000000000000000000000000e000000000000000000000000000000000000000000000000000000000000000f000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000110000000000000000000000000000000000000000000000000000000000000012000000000000000000000000000000000000000000000000000000000000001300000000000000000000000000000000000000000000000000000000000000140000000000000000000000000000000000000000000000000000000000000015000000000000000000000000000000000000000000000000000000000000001600000000000000000000000000000000000000000000000000000000000000170000000000000000000000000000000000000000000000000000000000000018"),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"address[]",
|
|
||||||
nil,
|
|
||||||
[]common.Address{{1}, {2}},
|
|
||||||
common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000200000000000000000000000001000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000"),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"bytes32[]",
|
|
||||||
nil,
|
|
||||||
[]common.Hash{{1}, {2}},
|
|
||||||
common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000201000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000"),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"function",
|
|
||||||
nil,
|
|
||||||
[24]byte{1},
|
|
||||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"string",
|
|
||||||
nil,
|
|
||||||
"foobar",
|
|
||||||
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000006666f6f6261720000000000000000000000000000000000000000000000000000"),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"string[]",
|
|
||||||
nil,
|
|
||||||
[]string{"hello", "foobar"},
|
|
||||||
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002" + // len(array) = 2
|
|
||||||
"0000000000000000000000000000000000000000000000000000000000000040" + // offset 64 to i = 0
|
|
||||||
"0000000000000000000000000000000000000000000000000000000000000080" + // offset 128 to i = 1
|
|
||||||
"0000000000000000000000000000000000000000000000000000000000000005" + // len(str[0]) = 5
|
|
||||||
"68656c6c6f000000000000000000000000000000000000000000000000000000" + // str[0]
|
|
||||||
"0000000000000000000000000000000000000000000000000000000000000006" + // len(str[1]) = 6
|
|
||||||
"666f6f6261720000000000000000000000000000000000000000000000000000"), // str[1]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"string[2]",
|
|
||||||
nil,
|
|
||||||
[]string{"hello", "foobar"},
|
|
||||||
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000040" + // offset to i = 0
|
|
||||||
"0000000000000000000000000000000000000000000000000000000000000080" + // offset to i = 1
|
|
||||||
"0000000000000000000000000000000000000000000000000000000000000005" + // len(str[0]) = 5
|
|
||||||
"68656c6c6f000000000000000000000000000000000000000000000000000000" + // str[0]
|
|
||||||
"0000000000000000000000000000000000000000000000000000000000000006" + // len(str[1]) = 6
|
|
||||||
"666f6f6261720000000000000000000000000000000000000000000000000000"), // str[1]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"bytes32[][]",
|
|
||||||
nil,
|
|
||||||
[][]common.Hash{{{1}, {2}}, {{3}, {4}, {5}}},
|
|
||||||
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002" + // len(array) = 2
|
|
||||||
"0000000000000000000000000000000000000000000000000000000000000040" + // offset 64 to i = 0
|
|
||||||
"00000000000000000000000000000000000000000000000000000000000000a0" + // offset 160 to i = 1
|
|
||||||
"0000000000000000000000000000000000000000000000000000000000000002" + // len(array[0]) = 2
|
|
||||||
"0100000000000000000000000000000000000000000000000000000000000000" + // array[0][0]
|
|
||||||
"0200000000000000000000000000000000000000000000000000000000000000" + // array[0][1]
|
|
||||||
"0000000000000000000000000000000000000000000000000000000000000003" + // len(array[1]) = 3
|
|
||||||
"0300000000000000000000000000000000000000000000000000000000000000" + // array[1][0]
|
|
||||||
"0400000000000000000000000000000000000000000000000000000000000000" + // array[1][1]
|
|
||||||
"0500000000000000000000000000000000000000000000000000000000000000"), // array[1][2]
|
|
||||||
},
|
|
||||||
|
|
||||||
{
|
|
||||||
"bytes32[][2]",
|
|
||||||
nil,
|
|
||||||
[][]common.Hash{{{1}, {2}}, {{3}, {4}, {5}}},
|
|
||||||
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000040" + // offset 64 to i = 0
|
|
||||||
"00000000000000000000000000000000000000000000000000000000000000a0" + // offset 160 to i = 1
|
|
||||||
"0000000000000000000000000000000000000000000000000000000000000002" + // len(array[0]) = 2
|
|
||||||
"0100000000000000000000000000000000000000000000000000000000000000" + // array[0][0]
|
|
||||||
"0200000000000000000000000000000000000000000000000000000000000000" + // array[0][1]
|
|
||||||
"0000000000000000000000000000000000000000000000000000000000000003" + // len(array[1]) = 3
|
|
||||||
"0300000000000000000000000000000000000000000000000000000000000000" + // array[1][0]
|
|
||||||
"0400000000000000000000000000000000000000000000000000000000000000" + // array[1][1]
|
|
||||||
"0500000000000000000000000000000000000000000000000000000000000000"), // array[1][2]
|
|
||||||
},
|
|
||||||
|
|
||||||
{
|
|
||||||
"bytes32[3][2]",
|
|
||||||
nil,
|
|
||||||
[][]common.Hash{{{1}, {2}, {3}}, {{3}, {4}, {5}}},
|
|
||||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000" + // array[0][0]
|
|
||||||
"0200000000000000000000000000000000000000000000000000000000000000" + // array[0][1]
|
|
||||||
"0300000000000000000000000000000000000000000000000000000000000000" + // array[0][2]
|
|
||||||
"0300000000000000000000000000000000000000000000000000000000000000" + // array[1][0]
|
|
||||||
"0400000000000000000000000000000000000000000000000000000000000000" + // array[1][1]
|
|
||||||
"0500000000000000000000000000000000000000000000000000000000000000"), // array[1][2]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
// static tuple
|
|
||||||
"tuple",
|
|
||||||
[]ArgumentMarshaling{
|
|
||||||
{Name: "a", Type: "int64"},
|
|
||||||
{Name: "b", Type: "int256"},
|
|
||||||
{Name: "c", Type: "int256"},
|
|
||||||
{Name: "d", Type: "bool"},
|
|
||||||
{Name: "e", Type: "bytes32[3][2]"},
|
|
||||||
},
|
|
||||||
struct {
|
|
||||||
A int64
|
|
||||||
B *big.Int
|
|
||||||
C *big.Int
|
|
||||||
D bool
|
|
||||||
E [][]common.Hash
|
|
||||||
}{1, big.NewInt(1), big.NewInt(-1), true, [][]common.Hash{{{1}, {2}, {3}}, {{3}, {4}, {5}}}},
|
|
||||||
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001" + // struct[a]
|
|
||||||
"0000000000000000000000000000000000000000000000000000000000000001" + // struct[b]
|
|
||||||
"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + // struct[c]
|
|
||||||
"0000000000000000000000000000000000000000000000000000000000000001" + // struct[d]
|
|
||||||
"0100000000000000000000000000000000000000000000000000000000000000" + // struct[e] array[0][0]
|
|
||||||
"0200000000000000000000000000000000000000000000000000000000000000" + // struct[e] array[0][1]
|
|
||||||
"0300000000000000000000000000000000000000000000000000000000000000" + // struct[e] array[0][2]
|
|
||||||
"0300000000000000000000000000000000000000000000000000000000000000" + // struct[e] array[1][0]
|
|
||||||
"0400000000000000000000000000000000000000000000000000000000000000" + // struct[e] array[1][1]
|
|
||||||
"0500000000000000000000000000000000000000000000000000000000000000"), // struct[e] array[1][2]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
// dynamic tuple
|
|
||||||
"tuple",
|
|
||||||
[]ArgumentMarshaling{
|
|
||||||
{Name: "a", Type: "string"},
|
|
||||||
{Name: "b", Type: "int64"},
|
|
||||||
{Name: "c", Type: "bytes"},
|
|
||||||
{Name: "d", Type: "string[]"},
|
|
||||||
{Name: "e", Type: "int256[]"},
|
|
||||||
{Name: "f", Type: "address[]"},
|
|
||||||
},
|
|
||||||
struct {
|
|
||||||
FieldA string `abi:"a"` // Test whether abi tag works
|
|
||||||
FieldB int64 `abi:"b"`
|
|
||||||
C []byte
|
|
||||||
D []string
|
|
||||||
E []*big.Int
|
|
||||||
F []common.Address
|
|
||||||
}{"foobar", 1, []byte{1}, []string{"foo", "bar"}, []*big.Int{big.NewInt(1), big.NewInt(-1)}, []common.Address{{1}, {2}}},
|
|
||||||
common.Hex2Bytes("00000000000000000000000000000000000000000000000000000000000000c0" + // struct[a] offset
|
|
||||||
"0000000000000000000000000000000000000000000000000000000000000001" + // struct[b]
|
|
||||||
"0000000000000000000000000000000000000000000000000000000000000100" + // struct[c] offset
|
|
||||||
"0000000000000000000000000000000000000000000000000000000000000140" + // struct[d] offset
|
|
||||||
"0000000000000000000000000000000000000000000000000000000000000220" + // struct[e] offset
|
|
||||||
"0000000000000000000000000000000000000000000000000000000000000280" + // struct[f] offset
|
|
||||||
"0000000000000000000000000000000000000000000000000000000000000006" + // struct[a] length
|
|
||||||
"666f6f6261720000000000000000000000000000000000000000000000000000" + // struct[a] "foobar"
|
|
||||||
"0000000000000000000000000000000000000000000000000000000000000001" + // struct[c] length
|
|
||||||
"0100000000000000000000000000000000000000000000000000000000000000" + // []byte{1}
|
|
||||||
"0000000000000000000000000000000000000000000000000000000000000002" + // struct[d] length
|
|
||||||
"0000000000000000000000000000000000000000000000000000000000000040" + // foo offset
|
|
||||||
"0000000000000000000000000000000000000000000000000000000000000080" + // bar offset
|
|
||||||
"0000000000000000000000000000000000000000000000000000000000000003" + // foo length
|
|
||||||
"666f6f0000000000000000000000000000000000000000000000000000000000" + // foo
|
|
||||||
"0000000000000000000000000000000000000000000000000000000000000003" + // bar offset
|
|
||||||
"6261720000000000000000000000000000000000000000000000000000000000" + // bar
|
|
||||||
"0000000000000000000000000000000000000000000000000000000000000002" + // struct[e] length
|
|
||||||
"0000000000000000000000000000000000000000000000000000000000000001" + // 1
|
|
||||||
"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + // -1
|
|
||||||
"0000000000000000000000000000000000000000000000000000000000000002" + // struct[f] length
|
|
||||||
"0000000000000000000000000100000000000000000000000000000000000000" + // common.Address{1}
|
|
||||||
"0000000000000000000000000200000000000000000000000000000000000000"), // common.Address{2}
|
|
||||||
},
|
|
||||||
{
|
|
||||||
// nested tuple
|
|
||||||
"tuple",
|
|
||||||
[]ArgumentMarshaling{
|
|
||||||
{Name: "a", Type: "tuple", Components: []ArgumentMarshaling{{Name: "a", Type: "uint256"}, {Name: "b", Type: "uint256[]"}}},
|
|
||||||
{Name: "b", Type: "int256[]"},
|
|
||||||
},
|
|
||||||
struct {
|
|
||||||
A struct {
|
|
||||||
FieldA *big.Int `abi:"a"`
|
|
||||||
B []*big.Int
|
|
||||||
-			}
-			B []*big.Int
-		}{
+				}
+				packed, err = inAbi.Pack("method", values...)
+			}
A: struct {
|
|
||||||
FieldA *big.Int `abi:"a"` // Test whether abi tag works for nested tuple
|
|
||||||
B []*big.Int
|
|
||||||
}{big.NewInt(1), []*big.Int{big.NewInt(1), big.NewInt(0)}},
|
|
||||||
B: []*big.Int{big.NewInt(1), big.NewInt(0)}},
|
|
||||||
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000040" + // a offset
|
|
||||||
"00000000000000000000000000000000000000000000000000000000000000e0" + // b offset
|
|
||||||
"0000000000000000000000000000000000000000000000000000000000000001" + // a.a value
|
|
||||||
"0000000000000000000000000000000000000000000000000000000000000040" + // a.b offset
|
|
||||||
"0000000000000000000000000000000000000000000000000000000000000002" + // a.b length
|
|
||||||
"0000000000000000000000000000000000000000000000000000000000000001" + // a.b[0] value
|
|
||||||
"0000000000000000000000000000000000000000000000000000000000000000" + // a.b[1] value
|
|
||||||
"0000000000000000000000000000000000000000000000000000000000000002" + // b length
|
|
||||||
"0000000000000000000000000000000000000000000000000000000000000001" + // b[0] value
|
|
||||||
"0000000000000000000000000000000000000000000000000000000000000000"), // b[1] value
|
|
||||||
},
|
|
||||||
{
|
|
||||||
// tuple slice
|
|
||||||
"tuple[]",
|
|
||||||
[]ArgumentMarshaling{
|
|
||||||
{Name: "a", Type: "int256"},
|
|
||||||
{Name: "b", Type: "int256[]"},
|
|
||||||
},
|
|
||||||
[]struct {
|
|
||||||
A *big.Int
|
|
||||||
B []*big.Int
|
|
||||||
}{
|
|
||||||
{big.NewInt(-1), []*big.Int{big.NewInt(1), big.NewInt(0)}},
|
|
||||||
{big.NewInt(1), []*big.Int{big.NewInt(2), big.NewInt(-1)}},
|
|
||||||
},
|
|
||||||
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002" + // tuple length
|
|
||||||
"0000000000000000000000000000000000000000000000000000000000000040" + // tuple[0] offset
|
|
||||||
"00000000000000000000000000000000000000000000000000000000000000e0" + // tuple[1] offset
|
|
||||||
"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + // tuple[0].A
|
|
||||||
"0000000000000000000000000000000000000000000000000000000000000040" + // tuple[0].B offset
|
|
||||||
"0000000000000000000000000000000000000000000000000000000000000002" + // tuple[0].B length
|
|
||||||
"0000000000000000000000000000000000000000000000000000000000000001" + // tuple[0].B[0] value
|
|
||||||
"0000000000000000000000000000000000000000000000000000000000000000" + // tuple[0].B[1] value
|
|
||||||
"0000000000000000000000000000000000000000000000000000000000000001" + // tuple[1].A
|
|
||||||
"0000000000000000000000000000000000000000000000000000000000000040" + // tuple[1].B offset
|
|
||||||
"0000000000000000000000000000000000000000000000000000000000000002" + // tuple[1].B length
|
|
||||||
"0000000000000000000000000000000000000000000000000000000000000002" + // tuple[1].B[0] value
|
|
||||||
"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"), // tuple[1].B[1] value
|
|
||||||
},
|
|
||||||
{
|
|
||||||
// static tuple array
|
|
||||||
"tuple[2]",
|
|
||||||
[]ArgumentMarshaling{
|
|
||||||
{Name: "a", Type: "int256"},
|
|
||||||
{Name: "b", Type: "int256"},
|
|
||||||
},
|
|
||||||
[2]struct {
|
|
||||||
A *big.Int
|
|
||||||
B *big.Int
|
|
||||||
}{
|
|
||||||
{big.NewInt(-1), big.NewInt(1)},
|
|
||||||
{big.NewInt(1), big.NewInt(-1)},
|
|
||||||
},
|
|
||||||
common.Hex2Bytes("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + // tuple[0].a
|
|
||||||
"0000000000000000000000000000000000000000000000000000000000000001" + // tuple[0].b
|
|
||||||
"0000000000000000000000000000000000000000000000000000000000000001" + // tuple[1].a
|
|
||||||
"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"), // tuple[1].b
|
|
||||||
},
|
|
||||||
{
|
|
||||||
// dynamic tuple array
|
|
||||||
"tuple[2]",
|
|
||||||
[]ArgumentMarshaling{
|
|
||||||
{Name: "a", Type: "int256[]"},
|
|
||||||
},
|
|
||||||
[2]struct {
|
|
||||||
A []*big.Int
|
|
||||||
}{
|
|
||||||
{[]*big.Int{big.NewInt(-1), big.NewInt(1)}},
|
|
||||||
{[]*big.Int{big.NewInt(1), big.NewInt(-1)}},
|
|
||||||
},
|
|
||||||
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000040" + // tuple[0] offset
|
|
||||||
"00000000000000000000000000000000000000000000000000000000000000c0" + // tuple[1] offset
|
|
||||||
"0000000000000000000000000000000000000000000000000000000000000020" + // tuple[0].A offset
|
|
||||||
"0000000000000000000000000000000000000000000000000000000000000002" + // tuple[0].A length
|
|
||||||
"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + // tuple[0].A[0]
|
|
||||||
"0000000000000000000000000000000000000000000000000000000000000001" + // tuple[0].A[1]
|
|
||||||
"0000000000000000000000000000000000000000000000000000000000000020" + // tuple[1].A offset
|
|
||||||
"0000000000000000000000000000000000000000000000000000000000000002" + // tuple[1].A length
|
|
||||||
"0000000000000000000000000000000000000000000000000000000000000001" + // tuple[1].A[0]
|
|
||||||
"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"), // tuple[1].A[1]
|
|
||||||
},
|
|
||||||
} {
|
|
||||||
typ, err := NewType(test.typ, "", test.components)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("%v failed. Unexpected parse error: %v", i, err)
|
|
||||||
}
|
|
||||||
output, err := typ.pack(reflect.ValueOf(test.input))
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("%v failed. Unexpected pack error: %v", i, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
-		if !bytes.Equal(output, test.output) {
-			t.Errorf("input %d for typ: %v failed. Expected bytes: '%x' Got: '%x'", i, typ.String(), test.output, output)
+			if err != nil {
+				t.Fatalf("test %d (%v) failed: %v", i, test.def, err)
 			}
+			if !reflect.DeepEqual(packed[4:], encb) {
+				t.Errorf("test %d (%v) failed: expected %v, got %v", i, test.def, encb, packed[4:])
+			}
+		})
 	}
 }
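Pack prepends the 4-byte method selector to the encoded arguments, which is why the rewritten TestPack compares encb against packed[4:]. A minimal usage sketch of the public API; the ABI definition string mirrors the one TestPack builds with fmt.Sprintf:

package main

import (
	"fmt"
	"strings"

	"github.com/ethereum/go-ethereum/accounts/abi"
)

func main() {
	def := `[{ "name" : "method", "type": "function", "inputs": [{ "type": "uint8" }]}]`
	parsed, err := abi.JSON(strings.NewReader(def))
	if err != nil {
		panic(err)
	}
	packed, err := parsed.Pack("method", uint8(2))
	if err != nil {
		panic(err)
	}
	fmt.Printf("selector: %x\n", packed[:4]) // first 4 bytes: Keccak-256 of "method(uint8)"
	fmt.Printf("argument: %x\n", packed[4:]) // one 32-byte word ending in 02
}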

 func TestMethodPack(t *testing.T) {
-	abi, err := JSON(strings.NewReader(jsondata2))
+	abi, err := JSON(strings.NewReader(jsondata))
 	if err != nil {
 		t.Fatal(err)
 	}

-	sig := abi.Methods["slice"].ID()
+	sig := abi.Methods["slice"].ID
 	sig = append(sig, common.LeftPadBytes([]byte{1}, 32)...)
 	sig = append(sig, common.LeftPadBytes([]byte{2}, 32)...)

@@ -648,7 +87,7 @@ func TestMethodPack(t *testing.T) {
 	}

 	var addrA, addrB = common.Address{1}, common.Address{2}
-	sig = abi.Methods["sliceAddress"].ID()
+	sig = abi.Methods["sliceAddress"].ID
 	sig = append(sig, common.LeftPadBytes([]byte{32}, 32)...)
 	sig = append(sig, common.LeftPadBytes([]byte{2}, 32)...)
 	sig = append(sig, common.LeftPadBytes(addrA[:], 32)...)

@@ -663,7 +102,7 @@ func TestMethodPack(t *testing.T) {
 	}

 	var addrC, addrD = common.Address{3}, common.Address{4}
-	sig = abi.Methods["sliceMultiAddress"].ID()
+	sig = abi.Methods["sliceMultiAddress"].ID
 	sig = append(sig, common.LeftPadBytes([]byte{64}, 32)...)
 	sig = append(sig, common.LeftPadBytes([]byte{160}, 32)...)
 	sig = append(sig, common.LeftPadBytes([]byte{2}, 32)...)

@@ -681,7 +120,7 @@ func TestMethodPack(t *testing.T) {
 		t.Errorf("expected %x got %x", sig, packed)
 	}

-	sig = abi.Methods["slice256"].ID()
+	sig = abi.Methods["slice256"].ID
 	sig = append(sig, common.LeftPadBytes([]byte{1}, 32)...)
 	sig = append(sig, common.LeftPadBytes([]byte{2}, 32)...)

@@ -695,7 +134,7 @@ func TestMethodPack(t *testing.T) {
 	}

 	a := [2][2]*big.Int{{big.NewInt(1), big.NewInt(1)}, {big.NewInt(2), big.NewInt(0)}}
-	sig = abi.Methods["nestedArray"].ID()
+	sig = abi.Methods["nestedArray"].ID
 	sig = append(sig, common.LeftPadBytes([]byte{1}, 32)...)
 	sig = append(sig, common.LeftPadBytes([]byte{1}, 32)...)
 	sig = append(sig, common.LeftPadBytes([]byte{2}, 32)...)

@@ -712,7 +151,7 @@ func TestMethodPack(t *testing.T) {
 		t.Errorf("expected %x got %x", sig, packed)
 	}

-	sig = abi.Methods["nestedArray2"].ID()
+	sig = abi.Methods["nestedArray2"].ID
 	sig = append(sig, common.LeftPadBytes([]byte{0x20}, 32)...)
 	sig = append(sig, common.LeftPadBytes([]byte{0x40}, 32)...)
 	sig = append(sig, common.LeftPadBytes([]byte{0x80}, 32)...)

@@ -728,7 +167,7 @@ func TestMethodPack(t *testing.T) {
 		t.Errorf("expected %x got %x", sig, packed)
 	}

-	sig = abi.Methods["nestedSlice"].ID()
+	sig = abi.Methods["nestedSlice"].ID
 	sig = append(sig, common.LeftPadBytes([]byte{0x20}, 32)...)
 	sig = append(sig, common.LeftPadBytes([]byte{0x02}, 32)...)
 	sig = append(sig, common.LeftPadBytes([]byte{0x40}, 32)...)
accounts/abi/packing_test.go (new file, 988 lines)
@@ -0,0 +1,988 @@
|
// Copyright 2020 The go-ethereum Authors
|
||||||
|
// This file is part of the go-ethereum library.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU Lesser General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU Lesser General Public License for more details.
|
||||||
|
//
|
||||||
|
// You should have received a copy of the GNU Lesser General Public License
|
||||||
|
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
package abi
|
||||||
|
|
||||||
|
import (
|
||||||
|
"math/big"
|
||||||
|
|
||||||
|
"github.com/ethereum/go-ethereum/common"
|
||||||
|
)
|
||||||
|
|
||||||
|
type packUnpackTest struct {
|
||||||
|
def string
|
||||||
|
unpacked interface{}
|
||||||
|
packed string
|
||||||
|
}
|
||||||
|
|
||||||
|
var packUnpackTests = []packUnpackTest{
|
||||||
|
// Booleans
|
||||||
|
{
|
||||||
|
def: `[{ "type": "bool" }]`,
|
||||||
|
packed: "0000000000000000000000000000000000000000000000000000000000000001",
|
||||||
|
unpacked: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
def: `[{ "type": "bool" }]`,
|
||||||
|
packed: "0000000000000000000000000000000000000000000000000000000000000000",
|
||||||
|
unpacked: false,
|
||||||
|
},
|
||||||
|
// Integers
|
||||||
|
{
|
||||||
|
def: `[{ "type": "uint8" }]`,
|
||||||
|
unpacked: uint8(2),
|
||||||
|
packed: "0000000000000000000000000000000000000000000000000000000000000002",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
def: `[{ "type": "uint8[]" }]`,
|
||||||
|
unpacked: []uint8{1, 2},
|
||||||
|
packed: "0000000000000000000000000000000000000000000000000000000000000020" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000002" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000001" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000002",
|
||||||
|
},
|
||||||
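The packed strings for dynamic types follow the usual head/tail layout: a 32-byte offset to the tail, then the element count, then each element left-padded to a full word. The uint8[] entry above can be reproduced by hand, for instance with common.LeftPadBytes; this is an illustrative sketch, not part of the test file:

package main

import (
	"encoding/hex"
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/common"
)

func main() {
	words := [][]byte{
		common.LeftPadBytes(big.NewInt(0x20).Bytes(), 32), // offset of the tail
		common.LeftPadBytes(big.NewInt(2).Bytes(), 32),    // len([]uint8{1, 2})
		common.LeftPadBytes([]byte{1}, 32),                // element 0
		common.LeftPadBytes([]byte{2}, 32),                // element 1
	}
	var packed []byte
	for _, w := range words {
		packed = append(packed, w...)
	}
	fmt.Println(hex.EncodeToString(packed)) // matches the "uint8[]" packed string above
}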
|
{
|
||||||
|
def: `[{ "type": "uint16" }]`,
|
||||||
|
unpacked: uint16(2),
|
||||||
|
packed: "0000000000000000000000000000000000000000000000000000000000000002",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
def: `[{ "type": "uint16[]" }]`,
|
||||||
|
unpacked: []uint16{1, 2},
|
||||||
|
packed: "0000000000000000000000000000000000000000000000000000000000000020" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000002" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000001" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000002",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
def: `[{"type": "uint17"}]`,
|
||||||
|
packed: "0000000000000000000000000000000000000000000000000000000000000001",
|
||||||
|
unpacked: big.NewInt(1),
|
||||||
|
},
|
||||||
|
{
|
||||||
|
def: `[{"type": "uint32"}]`,
|
||||||
|
packed: "0000000000000000000000000000000000000000000000000000000000000001",
|
||||||
|
unpacked: uint32(1),
|
||||||
|
},
|
||||||
|
{
|
||||||
|
def: `[{"type": "uint32[]"}]`,
|
||||||
|
unpacked: []uint32{1, 2},
|
||||||
|
packed: "0000000000000000000000000000000000000000000000000000000000000020" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000002" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000001" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000002",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
def: `[{"type": "uint64"}]`,
|
||||||
|
unpacked: uint64(2),
|
||||||
|
packed: "0000000000000000000000000000000000000000000000000000000000000002",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
def: `[{"type": "uint64[]"}]`,
|
||||||
|
unpacked: []uint64{1, 2},
|
||||||
|
packed: "0000000000000000000000000000000000000000000000000000000000000020" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000002" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000001" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000002",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
def: `[{"type": "uint256"}]`,
|
||||||
|
unpacked: big.NewInt(2),
|
||||||
|
packed: "0000000000000000000000000000000000000000000000000000000000000002",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
def: `[{"type": "uint256[]"}]`,
|
||||||
|
unpacked: []*big.Int{big.NewInt(1), big.NewInt(2)},
|
||||||
|
packed: "0000000000000000000000000000000000000000000000000000000000000020" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000002" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000001" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000002",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
def: `[{"type": "int8"}]`,
|
||||||
|
unpacked: int8(2),
|
||||||
|
packed: "0000000000000000000000000000000000000000000000000000000000000002",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
def: `[{"type": "int8[]"}]`,
|
||||||
|
unpacked: []int8{1, 2},
|
||||||
|
packed: "0000000000000000000000000000000000000000000000000000000000000020" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000002" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000001" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000002",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
def: `[{"type": "int16"}]`,
|
||||||
|
unpacked: int16(2),
|
||||||
|
packed: "0000000000000000000000000000000000000000000000000000000000000002",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
def: `[{"type": "int16[]"}]`,
|
||||||
|
unpacked: []int16{1, 2},
|
||||||
|
packed: "0000000000000000000000000000000000000000000000000000000000000020" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000002" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000001" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000002",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
def: `[{"type": "int17"}]`,
|
||||||
|
packed: "0000000000000000000000000000000000000000000000000000000000000001",
|
||||||
|
unpacked: big.NewInt(1),
|
||||||
|
},
|
||||||
|
{
|
||||||
|
def: `[{"type": "int32"}]`,
|
||||||
|
unpacked: int32(2),
|
||||||
|
packed: "0000000000000000000000000000000000000000000000000000000000000002",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
def: `[{"type": "int32"}]`,
|
||||||
|
packed: "0000000000000000000000000000000000000000000000000000000000000001",
|
||||||
|
unpacked: int32(1),
|
||||||
|
},
|
||||||
|
{
|
||||||
|
def: `[{"type": "int32[]"}]`,
|
||||||
|
unpacked: []int32{1, 2},
|
||||||
|
packed: "0000000000000000000000000000000000000000000000000000000000000020" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000002" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000001" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000002",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
def: `[{"type": "int64"}]`,
|
||||||
|
unpacked: int64(2),
|
||||||
|
packed: "0000000000000000000000000000000000000000000000000000000000000002",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
def: `[{"type": "int64[]"}]`,
|
||||||
|
unpacked: []int64{1, 2},
|
||||||
|
packed: "0000000000000000000000000000000000000000000000000000000000000020" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000002" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000001" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000002",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
def: `[{"type": "int256"}]`,
|
||||||
|
unpacked: big.NewInt(2),
|
||||||
|
packed: "0000000000000000000000000000000000000000000000000000000000000002",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
def: `[{"type": "int256"}]`,
|
||||||
|
packed: "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
|
||||||
|
unpacked: big.NewInt(-1),
|
||||||
|
},
|
||||||
|
{
|
||||||
|
def: `[{"type": "int256[]"}]`,
|
||||||
|
unpacked: []*big.Int{big.NewInt(1), big.NewInt(2)},
|
||||||
|
packed: "0000000000000000000000000000000000000000000000000000000000000020" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000002" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000001" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000002",
|
||||||
|
},
|
||||||
|
// Address
|
||||||
|
{
|
||||||
|
def: `[{"type": "address"}]`,
|
||||||
|
packed: "0000000000000000000000000100000000000000000000000000000000000000",
|
||||||
|
unpacked: common.Address{1},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
def: `[{"type": "address[]"}]`,
|
||||||
|
unpacked: []common.Address{{1}, {2}},
|
||||||
|
packed: "0000000000000000000000000000000000000000000000000000000000000020" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000002" +
|
||||||
|
"0000000000000000000000000100000000000000000000000000000000000000" +
|
||||||
|
"0000000000000000000000000200000000000000000000000000000000000000",
|
||||||
|
},
|
||||||
|
// Bytes
|
||||||
|
{
|
||||||
|
def: `[{"type": "bytes1"}]`,
|
||||||
|
unpacked: [1]byte{1},
|
||||||
|
packed: "0100000000000000000000000000000000000000000000000000000000000000",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
def: `[{"type": "bytes2"}]`,
|
||||||
|
unpacked: [2]byte{1},
|
||||||
|
packed: "0100000000000000000000000000000000000000000000000000000000000000",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
def: `[{"type": "bytes3"}]`,
|
||||||
|
unpacked: [3]byte{1},
|
||||||
|
packed: "0100000000000000000000000000000000000000000000000000000000000000",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
def: `[{"type": "bytes4"}]`,
|
||||||
|
unpacked: [4]byte{1},
|
||||||
|
packed: "0100000000000000000000000000000000000000000000000000000000000000",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
def: `[{"type": "bytes5"}]`,
|
||||||
|
unpacked: [5]byte{1},
|
||||||
|
packed: "0100000000000000000000000000000000000000000000000000000000000000",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
def: `[{"type": "bytes6"}]`,
|
||||||
|
unpacked: [6]byte{1},
|
||||||
|
packed: "0100000000000000000000000000000000000000000000000000000000000000",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
def: `[{"type": "bytes7"}]`,
|
||||||
|
unpacked: [7]byte{1},
|
||||||
|
packed: "0100000000000000000000000000000000000000000000000000000000000000",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
def: `[{"type": "bytes8"}]`,
|
||||||
|
unpacked: [8]byte{1},
|
||||||
|
packed: "0100000000000000000000000000000000000000000000000000000000000000",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
def: `[{"type": "bytes9"}]`,
|
||||||
|
unpacked: [9]byte{1},
|
||||||
|
packed: "0100000000000000000000000000000000000000000000000000000000000000",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
def: `[{"type": "bytes10"}]`,
|
||||||
|
unpacked: [10]byte{1},
|
||||||
|
packed: "0100000000000000000000000000000000000000000000000000000000000000",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
def: `[{"type": "bytes11"}]`,
|
||||||
|
unpacked: [11]byte{1},
|
||||||
|
packed: "0100000000000000000000000000000000000000000000000000000000000000",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
def: `[{"type": "bytes12"}]`,
|
||||||
|
unpacked: [12]byte{1},
|
||||||
|
packed: "0100000000000000000000000000000000000000000000000000000000000000",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
def: `[{"type": "bytes13"}]`,
|
||||||
|
unpacked: [13]byte{1},
|
||||||
|
packed: "0100000000000000000000000000000000000000000000000000000000000000",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
def: `[{"type": "bytes14"}]`,
|
||||||
|
unpacked: [14]byte{1},
|
||||||
|
packed: "0100000000000000000000000000000000000000000000000000000000000000",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
def: `[{"type": "bytes15"}]`,
|
||||||
|
unpacked: [15]byte{1},
|
||||||
|
packed: "0100000000000000000000000000000000000000000000000000000000000000",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
def: `[{"type": "bytes16"}]`,
|
||||||
|
unpacked: [16]byte{1},
|
||||||
|
packed: "0100000000000000000000000000000000000000000000000000000000000000",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
def: `[{"type": "bytes17"}]`,
|
||||||
|
unpacked: [17]byte{1},
|
||||||
|
packed: "0100000000000000000000000000000000000000000000000000000000000000",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
def: `[{"type": "bytes18"}]`,
|
||||||
|
unpacked: [18]byte{1},
|
||||||
|
packed: "0100000000000000000000000000000000000000000000000000000000000000",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
def: `[{"type": "bytes19"}]`,
|
||||||
|
unpacked: [19]byte{1},
|
||||||
|
packed: "0100000000000000000000000000000000000000000000000000000000000000",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
def: `[{"type": "bytes20"}]`,
|
||||||
|
unpacked: [20]byte{1},
|
||||||
|
packed: "0100000000000000000000000000000000000000000000000000000000000000",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
def: `[{"type": "bytes21"}]`,
|
||||||
|
unpacked: [21]byte{1},
|
||||||
|
packed: "0100000000000000000000000000000000000000000000000000000000000000",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
def: `[{"type": "bytes22"}]`,
|
||||||
|
unpacked: [22]byte{1},
|
||||||
|
packed: "0100000000000000000000000000000000000000000000000000000000000000",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
def: `[{"type": "bytes23"}]`,
|
||||||
|
unpacked: [23]byte{1},
|
||||||
|
packed: "0100000000000000000000000000000000000000000000000000000000000000",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
def: `[{"type": "bytes24"}]`,
|
||||||
|
unpacked: [24]byte{1},
|
||||||
|
packed: "0100000000000000000000000000000000000000000000000000000000000000",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
def: `[{"type": "bytes25"}]`,
|
||||||
|
unpacked: [25]byte{1},
|
||||||
|
packed: "0100000000000000000000000000000000000000000000000000000000000000",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
def: `[{"type": "bytes26"}]`,
|
||||||
|
unpacked: [26]byte{1},
|
||||||
|
packed: "0100000000000000000000000000000000000000000000000000000000000000",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
def: `[{"type": "bytes27"}]`,
|
||||||
|
unpacked: [27]byte{1},
|
||||||
|
packed: "0100000000000000000000000000000000000000000000000000000000000000",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
def: `[{"type": "bytes28"}]`,
|
||||||
|
unpacked: [28]byte{1},
|
||||||
|
packed: "0100000000000000000000000000000000000000000000000000000000000000",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
def: `[{"type": "bytes29"}]`,
|
||||||
|
unpacked: [29]byte{1},
|
||||||
|
packed: "0100000000000000000000000000000000000000000000000000000000000000",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
def: `[{"type": "bytes30"}]`,
|
||||||
|
unpacked: [30]byte{1},
|
||||||
|
packed: "0100000000000000000000000000000000000000000000000000000000000000",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
def: `[{"type": "bytes31"}]`,
|
||||||
|
unpacked: [31]byte{1},
|
||||||
|
packed: "0100000000000000000000000000000000000000000000000000000000000000",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
def: `[{"type": "bytes32"}]`,
|
||||||
|
unpacked: [32]byte{1},
|
||||||
|
packed: "0100000000000000000000000000000000000000000000000000000000000000",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
def: `[{"type": "bytes32"}]`,
|
||||||
|
packed: "0100000000000000000000000000000000000000000000000000000000000000",
|
||||||
|
unpacked: [32]byte{1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
def: `[{"type": "bytes"}]`,
|
||||||
|
packed: "0000000000000000000000000000000000000000000000000000000000000020" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000020" +
|
||||||
|
"0100000000000000000000000000000000000000000000000000000000000000",
|
||||||
|
unpacked: common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
||||||
|
},
|
||||||
|
{
|
||||||
|
def: `[{"type": "bytes32"}]`,
|
||||||
|
packed: "0100000000000000000000000000000000000000000000000000000000000000",
|
||||||
|
unpacked: [32]byte{1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
|
||||||
|
},
|
||||||
|
// Functions
|
||||||
|
{
|
||||||
|
def: `[{"type": "function"}]`,
|
||||||
|
packed: "0100000000000000000000000000000000000000000000000000000000000000",
|
||||||
|
unpacked: [24]byte{1},
|
||||||
|
},
|
||||||
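The ABI "function" type is a 24-byte value: a 20-byte contract address followed by the 4-byte selector of the target method, which is why the table unpacks it into a [24]byte and packs it right-padded to a full word. A hedged sketch of constructing such a value; the address and signature here are made up for illustration:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	var fn [24]byte
	addr := common.HexToAddress("0x0000000000000000000000000000000000000001")
	copy(fn[:20], addr[:])                                               // target contract
	copy(fn[20:], crypto.Keccak256([]byte("transfer(address,uint256)"))[:4]) // target selector
	fmt.Printf("%x\n", fn)
}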
|
// Slice and Array
|
||||||
|
{
|
||||||
|
def: `[{"type": "uint8[]"}]`,
|
||||||
|
packed: "0000000000000000000000000000000000000000000000000000000000000020" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000002" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000001" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000002",
|
||||||
|
unpacked: []uint8{1, 2},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
def: `[{"type": "uint8[]"}]`,
|
||||||
|
packed: "0000000000000000000000000000000000000000000000000000000000000020" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000000",
|
||||||
|
unpacked: []uint8{},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
def: `[{"type": "uint256[]"}]`,
|
||||||
|
packed: "0000000000000000000000000000000000000000000000000000000000000020" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000000",
|
||||||
|
unpacked: []*big.Int{},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
def: `[{"type": "uint8[2]"}]`,
|
||||||
|
packed: "0000000000000000000000000000000000000000000000000000000000000001" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000002",
|
||||||
|
unpacked: [2]uint8{1, 2},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
def: `[{"type": "int8[2]"}]`,
|
||||||
|
packed: "0000000000000000000000000000000000000000000000000000000000000001" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000002",
|
||||||
|
unpacked: [2]int8{1, 2},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
def: `[{"type": "int16[]"}]`,
|
||||||
|
packed: "0000000000000000000000000000000000000000000000000000000000000020" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000002" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000001" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000002",
|
||||||
|
unpacked: []int16{1, 2},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
def: `[{"type": "int16[2]"}]`,
|
||||||
|
packed: "0000000000000000000000000000000000000000000000000000000000000001" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000002",
|
||||||
|
unpacked: [2]int16{1, 2},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
def: `[{"type": "int32[]"}]`,
|
||||||
|
packed: "0000000000000000000000000000000000000000000000000000000000000020" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000002" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000001" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000002",
|
||||||
|
unpacked: []int32{1, 2},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
def: `[{"type": "int32[2]"}]`,
|
||||||
|
packed: "0000000000000000000000000000000000000000000000000000000000000001" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000002",
|
||||||
|
unpacked: [2]int32{1, 2},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
def: `[{"type": "int64[]"}]`,
|
||||||
|
packed: "0000000000000000000000000000000000000000000000000000000000000020" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000002" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000001" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000002",
|
||||||
|
unpacked: []int64{1, 2},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
def: `[{"type": "int64[2]"}]`,
|
||||||
|
packed: "0000000000000000000000000000000000000000000000000000000000000001" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000002",
|
||||||
|
unpacked: [2]int64{1, 2},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
def: `[{"type": "int256[]"}]`,
|
||||||
|
packed: "0000000000000000000000000000000000000000000000000000000000000020" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000002" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000001" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000002",
|
||||||
|
unpacked: []*big.Int{big.NewInt(1), big.NewInt(2)},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
def: `[{"type": "int256[3]"}]`,
|
||||||
|
packed: "0000000000000000000000000000000000000000000000000000000000000001" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000002" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000003",
|
||||||
|
unpacked: [3]*big.Int{big.NewInt(1), big.NewInt(2), big.NewInt(3)},
|
||||||
|
},
|
||||||
|
// multi dimensional, if these pass, all types that don't require length prefix should pass
|
||||||
|
{
|
||||||
|
def: `[{"type": "uint8[][]"}]`,
|
||||||
|
packed: "0000000000000000000000000000000000000000000000000000000000000020" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000000",
|
||||||
|
unpacked: [][]uint8{},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
def: `[{"type": "uint8[][]"}]`,
|
||||||
|
packed: "0000000000000000000000000000000000000000000000000000000000000020" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000002" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000040" +
|
||||||
|
"00000000000000000000000000000000000000000000000000000000000000a0" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000002" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000001" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000002" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000002" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000001" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000002",
|
||||||
|
unpacked: [][]uint8{{1, 2}, {1, 2}},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
def: `[{"type": "uint8[][]"}]`,
|
||||||
|
packed: "0000000000000000000000000000000000000000000000000000000000000020" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000002" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000040" +
|
||||||
|
"00000000000000000000000000000000000000000000000000000000000000a0" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000002" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000001" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000002" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000003" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000001" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000002" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000003",
|
||||||
|
unpacked: [][]uint8{{1, 2}, {1, 2, 3}},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
def: `[{"type": "uint8[2][2]"}]`,
|
||||||
|
packed: "0000000000000000000000000000000000000000000000000000000000000001" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000002" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000001" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000002",
|
||||||
|
unpacked: [2][2]uint8{{1, 2}, {1, 2}},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
def: `[{"type": "uint8[][2]"}]`,
|
||||||
|
packed: "0000000000000000000000000000000000000000000000000000000000000020" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000040" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000060" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000000" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000000",
|
||||||
|
unpacked: [2][]uint8{{}, {}},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
def: `[{"type": "uint8[][2]"}]`,
|
||||||
|
packed: "0000000000000000000000000000000000000000000000000000000000000020" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000040" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000080" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000001" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000001" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000001" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000001",
|
||||||
|
unpacked: [2][]uint8{{1}, {1}},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
def: `[{"type": "uint8[2][]"}]`,
|
||||||
|
packed: "0000000000000000000000000000000000000000000000000000000000000020" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000000",
|
||||||
|
unpacked: [][2]uint8{},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
def: `[{"type": "uint8[2][]"}]`,
|
||||||
|
packed: "0000000000000000000000000000000000000000000000000000000000000020" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000001" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000001" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000002",
|
||||||
|
unpacked: [][2]uint8{{1, 2}},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
def: `[{"type": "uint8[2][]"}]`,
|
||||||
|
packed: "0000000000000000000000000000000000000000000000000000000000000020" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000002" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000001" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000002" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000001" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000002",
|
||||||
|
unpacked: [][2]uint8{{1, 2}, {1, 2}},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
def: `[{"type": "uint16[]"}]`,
|
||||||
|
packed: "0000000000000000000000000000000000000000000000000000000000000020" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000002" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000001" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000002",
|
||||||
|
unpacked: []uint16{1, 2},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
def: `[{"type": "uint16[2]"}]`,
|
||||||
|
packed: "0000000000000000000000000000000000000000000000000000000000000001" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000002",
|
||||||
|
unpacked: [2]uint16{1, 2},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
def: `[{"type": "uint32[]"}]`,
|
||||||
|
packed: "0000000000000000000000000000000000000000000000000000000000000020" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000002" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000001" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000002",
|
||||||
|
unpacked: []uint32{1, 2},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
def: `[{"type": "uint32[2][3][4]"}]`,
|
||||||
|
unpacked: [4][3][2]uint32{{{1, 2}, {3, 4}, {5, 6}}, {{7, 8}, {9, 10}, {11, 12}}, {{13, 14}, {15, 16}, {17, 18}}, {{19, 20}, {21, 22}, {23, 24}}},
|
||||||
|
packed: "0000000000000000000000000000000000000000000000000000000000000001" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000002" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000003" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000004" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000005" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000006" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000007" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000008" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000009" +
|
||||||
|
"000000000000000000000000000000000000000000000000000000000000000a" +
|
||||||
|
"000000000000000000000000000000000000000000000000000000000000000b" +
|
||||||
|
"000000000000000000000000000000000000000000000000000000000000000c" +
|
||||||
|
"000000000000000000000000000000000000000000000000000000000000000d" +
|
||||||
|
"000000000000000000000000000000000000000000000000000000000000000e" +
|
||||||
|
"000000000000000000000000000000000000000000000000000000000000000f" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000010" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000011" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000012" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000013" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000014" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000015" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000016" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000017" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000018",
|
||||||
|
},
|
||||||
|
|
||||||
|
{
|
||||||
|
def: `[{"type": "bytes32[]"}]`,
|
||||||
|
unpacked: []common.Hash{{1}, {2}},
|
||||||
|
packed: "0000000000000000000000000000000000000000000000000000000000000020" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000002" +
|
||||||
|
"0100000000000000000000000000000000000000000000000000000000000000" +
|
||||||
|
"0200000000000000000000000000000000000000000000000000000000000000",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
def: `[{"type": "uint32[2]"}]`,
|
||||||
|
packed: "0000000000000000000000000000000000000000000000000000000000000001" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000002",
|
||||||
|
unpacked: [2]uint32{1, 2},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
def: `[{"type": "uint64[]"}]`,
|
||||||
|
packed: "0000000000000000000000000000000000000000000000000000000000000020" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000002" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000001" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000002",
|
||||||
|
unpacked: []uint64{1, 2},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
def: `[{"type": "uint64[2]"}]`,
|
||||||
|
packed: "0000000000000000000000000000000000000000000000000000000000000001" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000002",
|
||||||
|
unpacked: [2]uint64{1, 2},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
def: `[{"type": "uint256[]"}]`,
|
||||||
|
packed: "0000000000000000000000000000000000000000000000000000000000000020" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000002" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000001" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000002",
|
||||||
|
unpacked: []*big.Int{big.NewInt(1), big.NewInt(2)},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
def: `[{"type": "uint256[3]"}]`,
|
||||||
|
packed: "0000000000000000000000000000000000000000000000000000000000000001" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000002" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000003",
|
||||||
|
unpacked: [3]*big.Int{big.NewInt(1), big.NewInt(2), big.NewInt(3)},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
def: `[{"type": "string[4]"}]`,
|
||||||
|
packed: "0000000000000000000000000000000000000000000000000000000000000020" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000080" +
|
||||||
|
"00000000000000000000000000000000000000000000000000000000000000c0" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000100" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000140" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000005" +
|
||||||
|
"48656c6c6f000000000000000000000000000000000000000000000000000000" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000005" +
|
||||||
|
"576f726c64000000000000000000000000000000000000000000000000000000" +
|
||||||
|
"000000000000000000000000000000000000000000000000000000000000000b" +
|
||||||
|
"476f2d657468657265756d000000000000000000000000000000000000000000" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000008" +
|
||||||
|
"457468657265756d000000000000000000000000000000000000000000000000",
|
||||||
|
unpacked: [4]string{"Hello", "World", "Go-ethereum", "Ethereum"},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
def: `[{"type": "string[]"}]`,
|
||||||
|
packed: "0000000000000000000000000000000000000000000000000000000000000020" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000002" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000040" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000080" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000008" +
|
||||||
|
"457468657265756d000000000000000000000000000000000000000000000000" +
|
||||||
|
"000000000000000000000000000000000000000000000000000000000000000b" +
|
||||||
|
"676f2d657468657265756d000000000000000000000000000000000000000000",
|
||||||
|
unpacked: []string{"Ethereum", "go-ethereum"},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
def: `[{"type": "bytes[]"}]`,
|
||||||
|
packed: "0000000000000000000000000000000000000000000000000000000000000020" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000002" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000040" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000080" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000003" +
|
||||||
|
"f0f0f00000000000000000000000000000000000000000000000000000000000" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000003" +
|
||||||
|
"f0f0f00000000000000000000000000000000000000000000000000000000000",
|
||||||
|
unpacked: [][]byte{{0xf0, 0xf0, 0xf0}, {0xf0, 0xf0, 0xf0}},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
def: `[{"type": "uint256[2][][]"}]`,
|
||||||
|
packed: "0000000000000000000000000000000000000000000000000000000000000020" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000002" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000040" +
|
||||||
|
"00000000000000000000000000000000000000000000000000000000000000e0" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000002" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000001" +
|
||||||
|
"00000000000000000000000000000000000000000000000000000000000000c8" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000001" +
|
||||||
|
"00000000000000000000000000000000000000000000000000000000000003e8" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000002" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000001" +
|
||||||
|
"00000000000000000000000000000000000000000000000000000000000000c8" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000001" +
|
||||||
|
"00000000000000000000000000000000000000000000000000000000000003e8",
|
||||||
|
unpacked: [][][2]*big.Int{{{big.NewInt(1), big.NewInt(200)}, {big.NewInt(1), big.NewInt(1000)}}, {{big.NewInt(1), big.NewInt(200)}, {big.NewInt(1), big.NewInt(1000)}}},
|
||||||
|
},
|
||||||
|
// struct outputs
|
||||||
|
{
|
||||||
|
def: `[{"name":"int1","type":"int256"},{"name":"int2","type":"int256"}]`,
|
||||||
|
packed: "0000000000000000000000000000000000000000000000000000000000000001" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000002",
|
||||||
|
unpacked: struct {
|
||||||
|
Int1 *big.Int
|
||||||
|
Int2 *big.Int
|
||||||
|
}{big.NewInt(1), big.NewInt(2)},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
def: `[{"name":"int_one","type":"int256"}]`,
|
||||||
|
packed: "0000000000000000000000000000000000000000000000000000000000000001",
|
||||||
|
unpacked: struct {
|
||||||
|
IntOne *big.Int
|
||||||
|
}{big.NewInt(1)},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
def: `[{"name":"int__one","type":"int256"}]`,
|
||||||
|
packed: "0000000000000000000000000000000000000000000000000000000000000001",
|
||||||
|
unpacked: struct {
|
||||||
|
IntOne *big.Int
|
||||||
|
}{big.NewInt(1)},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
def: `[{"name":"int_one_","type":"int256"}]`,
|
||||||
|
packed: "0000000000000000000000000000000000000000000000000000000000000001",
|
||||||
|
unpacked: struct {
|
||||||
|
IntOne *big.Int
|
||||||
|
}{big.NewInt(1)},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
def: `[{"name":"int_one","type":"int256"}, {"name":"intone","type":"int256"}]`,
|
||||||
|
packed: "0000000000000000000000000000000000000000000000000000000000000001" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000002",
|
||||||
|
unpacked: struct {
|
||||||
|
IntOne *big.Int
|
||||||
|
Intone *big.Int
|
||||||
|
}{big.NewInt(1), big.NewInt(2)},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
def: `[{"type": "string"}]`,
|
||||||
|
unpacked: "foobar",
|
||||||
|
packed: "0000000000000000000000000000000000000000000000000000000000000020" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000006" +
|
||||||
|
"666f6f6261720000000000000000000000000000000000000000000000000000",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
def: `[{"type": "string[]"}]`,
|
||||||
|
unpacked: []string{"hello", "foobar"},
|
||||||
|
packed: "0000000000000000000000000000000000000000000000000000000000000020" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000002" + // len(array) = 2
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000040" + // offset 64 to i = 0
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000080" + // offset 128 to i = 1
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000005" + // len(str[0]) = 5
|
||||||
|
"68656c6c6f000000000000000000000000000000000000000000000000000000" + // str[0]
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000006" + // len(str[1]) = 6
|
||||||
|
"666f6f6261720000000000000000000000000000000000000000000000000000", // str[1]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
def: `[{"type": "string[2]"}]`,
|
||||||
|
unpacked: [2]string{"hello", "foobar"},
|
||||||
|
packed: "0000000000000000000000000000000000000000000000000000000000000020" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000040" + // offset to i = 0
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000080" + // offset to i = 1
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000005" + // len(str[0]) = 5
|
||||||
|
"68656c6c6f000000000000000000000000000000000000000000000000000000" + // str[0]
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000006" + // len(str[1]) = 6
|
||||||
|
"666f6f6261720000000000000000000000000000000000000000000000000000", // str[1]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
def: `[{"type": "bytes32[][]"}]`,
|
||||||
|
unpacked: [][][32]byte{{{1}, {2}}, {{3}, {4}, {5}}},
|
||||||
|
packed: "0000000000000000000000000000000000000000000000000000000000000020" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000002" + // len(array) = 2
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000040" + // offset 64 to i = 0
|
||||||
|
"00000000000000000000000000000000000000000000000000000000000000a0" + // offset 160 to i = 1
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000002" + // len(array[0]) = 2
|
||||||
|
"0100000000000000000000000000000000000000000000000000000000000000" + // array[0][0]
|
||||||
|
"0200000000000000000000000000000000000000000000000000000000000000" + // array[0][1]
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000003" + // len(array[1]) = 3
|
||||||
|
"0300000000000000000000000000000000000000000000000000000000000000" + // array[1][0]
|
||||||
|
"0400000000000000000000000000000000000000000000000000000000000000" + // array[1][1]
|
||||||
|
"0500000000000000000000000000000000000000000000000000000000000000", // array[1][2]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
def: `[{"type": "bytes32[][2]"}]`,
|
||||||
|
unpacked: [2][][32]byte{{{1}, {2}}, {{3}, {4}, {5}}},
|
||||||
|
packed: "0000000000000000000000000000000000000000000000000000000000000020" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000040" + // offset 64 to i = 0
|
||||||
|
"00000000000000000000000000000000000000000000000000000000000000a0" + // offset 160 to i = 1
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000002" + // len(array[0]) = 2
|
||||||
|
"0100000000000000000000000000000000000000000000000000000000000000" + // array[0][0]
|
||||||
|
"0200000000000000000000000000000000000000000000000000000000000000" + // array[0][1]
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000003" + // len(array[1]) = 3
|
||||||
|
"0300000000000000000000000000000000000000000000000000000000000000" + // array[1][0]
|
||||||
|
"0400000000000000000000000000000000000000000000000000000000000000" + // array[1][1]
|
||||||
|
"0500000000000000000000000000000000000000000000000000000000000000", // array[1][2]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
def: `[{"type": "bytes32[3][2]"}]`,
|
||||||
|
unpacked: [2][3][32]byte{{{1}, {2}, {3}}, {{3}, {4}, {5}}},
|
||||||
|
packed: "0100000000000000000000000000000000000000000000000000000000000000" + // array[0][0]
|
||||||
|
"0200000000000000000000000000000000000000000000000000000000000000" + // array[0][1]
|
||||||
|
"0300000000000000000000000000000000000000000000000000000000000000" + // array[0][2]
|
||||||
|
"0300000000000000000000000000000000000000000000000000000000000000" + // array[1][0]
|
||||||
|
"0400000000000000000000000000000000000000000000000000000000000000" + // array[1][1]
|
||||||
|
"0500000000000000000000000000000000000000000000000000000000000000", // array[1][2]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
// static tuple
|
||||||
|
def: `[{"name":"a","type":"int64"},
|
||||||
|
{"name":"b","type":"int256"},
|
||||||
|
{"name":"c","type":"int256"},
|
||||||
|
{"name":"d","type":"bool"},
|
||||||
|
{"name":"e","type":"bytes32[3][2]"}]`,
|
||||||
|
unpacked: struct {
|
||||||
|
A int64
|
||||||
|
B *big.Int
|
||||||
|
C *big.Int
|
||||||
|
D bool
|
||||||
|
E [2][3][32]byte
|
||||||
|
}{1, big.NewInt(1), big.NewInt(-1), true, [2][3][32]byte{{{1}, {2}, {3}}, {{3}, {4}, {5}}}},
|
||||||
|
packed: "0000000000000000000000000000000000000000000000000000000000000001" + // struct[a]
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000001" + // struct[b]
|
||||||
|
"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + // struct[c]
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000001" + // struct[d]
|
||||||
|
"0100000000000000000000000000000000000000000000000000000000000000" + // struct[e] array[0][0]
|
||||||
|
"0200000000000000000000000000000000000000000000000000000000000000" + // struct[e] array[0][1]
|
||||||
|
"0300000000000000000000000000000000000000000000000000000000000000" + // struct[e] array[0][2]
|
||||||
|
"0300000000000000000000000000000000000000000000000000000000000000" + // struct[e] array[1][0]
|
||||||
|
"0400000000000000000000000000000000000000000000000000000000000000" + // struct[e] array[1][1]
|
||||||
|
"0500000000000000000000000000000000000000000000000000000000000000", // struct[e] array[1][2]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
def: `[{"name":"a","type":"string"},
|
||||||
|
{"name":"b","type":"int64"},
|
||||||
|
{"name":"c","type":"bytes"},
|
||||||
|
{"name":"d","type":"string[]"},
|
||||||
|
{"name":"e","type":"int256[]"},
|
||||||
|
{"name":"f","type":"address[]"}]`,
|
||||||
|
unpacked: struct {
|
||||||
|
FieldA string `abi:"a"` // Test whether abi tag works
|
||||||
|
FieldB int64 `abi:"b"`
|
||||||
|
C []byte
|
||||||
|
D []string
|
||||||
|
E []*big.Int
|
||||||
|
F []common.Address
|
||||||
|
}{"foobar", 1, []byte{1}, []string{"foo", "bar"}, []*big.Int{big.NewInt(1), big.NewInt(-1)}, []common.Address{{1}, {2}}},
|
||||||
|
packed: "00000000000000000000000000000000000000000000000000000000000000c0" + // struct[a] offset
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000001" + // struct[b]
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000100" + // struct[c] offset
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000140" + // struct[d] offset
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000220" + // struct[e] offset
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000280" + // struct[f] offset
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000006" + // struct[a] length
|
||||||
|
"666f6f6261720000000000000000000000000000000000000000000000000000" + // struct[a] "foobar"
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000001" + // struct[c] length
|
||||||
|
"0100000000000000000000000000000000000000000000000000000000000000" + // []byte{1}
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000002" + // struct[d] length
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000040" + // foo offset
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000080" + // bar offset
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000003" + // foo length
|
||||||
|
"666f6f0000000000000000000000000000000000000000000000000000000000" + // foo
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000003" + // bar offset
|
||||||
|
"6261720000000000000000000000000000000000000000000000000000000000" + // bar
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000002" + // struct[e] length
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000001" + // 1
|
||||||
|
"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + // -1
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000002" + // struct[f] length
|
||||||
|
"0000000000000000000000000100000000000000000000000000000000000000" + // common.Address{1}
|
||||||
|
"0000000000000000000000000200000000000000000000000000000000000000", // common.Address{2}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
def: `[{"components": [{"name": "a","type": "uint256"},
|
||||||
|
{"name": "b","type": "uint256[]"}],
|
||||||
|
"name": "a","type": "tuple"},
|
||||||
|
{"name": "b","type": "uint256[]"}]`,
|
||||||
|
unpacked: struct {
|
||||||
|
A struct {
|
||||||
|
FieldA *big.Int `abi:"a"`
|
||||||
|
B []*big.Int
|
||||||
|
}
|
||||||
|
B []*big.Int
|
||||||
|
}{
|
||||||
|
A: struct {
|
||||||
|
FieldA *big.Int `abi:"a"` // Test whether abi tag works for nested tuple
|
||||||
|
B []*big.Int
|
||||||
|
}{big.NewInt(1), []*big.Int{big.NewInt(1), big.NewInt(2)}},
|
||||||
|
B: []*big.Int{big.NewInt(1), big.NewInt(2)}},
|
||||||
|
packed: "0000000000000000000000000000000000000000000000000000000000000040" + // a offset
|
||||||
|
"00000000000000000000000000000000000000000000000000000000000000e0" + // b offset
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000001" + // a.a value
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000040" + // a.b offset
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000002" + // a.b length
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000001" + // a.b[0] value
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000002" + // a.b[1] value
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000002" + // b length
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000001" + // b[0] value
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000002", // b[1] value
|
||||||
|
},
|
||||||
|
|
||||||
|
{
|
||||||
|
def: `[{"components": [{"name": "a","type": "int256"},
|
||||||
|
{"name": "b","type": "int256[]"}],
|
||||||
|
"name": "a","type": "tuple[]"}]`,
|
||||||
|
unpacked: []struct {
|
||||||
|
A *big.Int
|
||||||
|
B []*big.Int
|
||||||
|
}{
|
||||||
|
{big.NewInt(-1), []*big.Int{big.NewInt(1), big.NewInt(3)}},
|
||||||
|
{big.NewInt(1), []*big.Int{big.NewInt(2), big.NewInt(-1)}},
|
||||||
|
},
|
||||||
|
packed: "0000000000000000000000000000000000000000000000000000000000000020" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000002" + // tuple length
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000040" + // tuple[0] offset
|
||||||
|
"00000000000000000000000000000000000000000000000000000000000000e0" + // tuple[1] offset
|
||||||
|
"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + // tuple[0].A
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000040" + // tuple[0].B offset
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000002" + // tuple[0].B length
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000001" + // tuple[0].B[0] value
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000003" + // tuple[0].B[1] value
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000001" + // tuple[1].A
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000040" + // tuple[1].B offset
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000002" + // tuple[1].B length
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000002" + // tuple[1].B[0] value
|
||||||
|
"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", // tuple[1].B[1] value
|
||||||
|
},
|
||||||
|
{
|
||||||
|
def: `[{"components": [{"name": "a","type": "int256"},
|
||||||
|
{"name": "b","type": "int256"}],
|
||||||
|
"name": "a","type": "tuple[2]"}]`,
|
||||||
|
unpacked: [2]struct {
|
||||||
|
A *big.Int
|
||||||
|
B *big.Int
|
||||||
|
}{
|
||||||
|
{big.NewInt(-1), big.NewInt(1)},
|
||||||
|
{big.NewInt(1), big.NewInt(-1)},
|
||||||
|
},
|
||||||
|
packed: "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + // tuple[0].a
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000001" + // tuple[0].b
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000001" + // tuple[1].a
|
||||||
|
"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", // tuple[1].b
|
||||||
|
},
|
||||||
|
{
|
||||||
|
def: `[{"components": [{"name": "a","type": "int256[]"}],
|
||||||
|
"name": "a","type": "tuple[2]"}]`,
|
||||||
|
unpacked: [2]struct {
|
||||||
|
A []*big.Int
|
||||||
|
}{
|
||||||
|
{[]*big.Int{big.NewInt(-1), big.NewInt(1)}},
|
||||||
|
{[]*big.Int{big.NewInt(1), big.NewInt(-1)}},
|
||||||
|
},
|
||||||
|
packed: "0000000000000000000000000000000000000000000000000000000000000020" +
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000040" + // tuple[0] offset
|
||||||
|
"00000000000000000000000000000000000000000000000000000000000000c0" + // tuple[1] offset
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000020" + // tuple[0].A offset
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000002" + // tuple[0].A length
|
||||||
|
"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + // tuple[0].A[0]
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000001" + // tuple[0].A[1]
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000020" + // tuple[1].A offset
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000002" + // tuple[1].A length
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000001" + // tuple[1].A[0]
|
||||||
|
"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", // tuple[1].A[1]
|
||||||
|
},
|
||||||
|
}
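Every vector in the table above follows the same ABI head/tail layout: for a dynamic type the head holds a 32-byte offset to the tail, the tail starts with a 32-byte element count, and the elements follow as 32-byte words. A minimal standalone sketch (not part of the change set; plain Go standard library, hypothetical main package) that hand-decodes the `uint256[]` vector into the expected []*big.Int{1, 2}:

package main

import (
	"encoding/hex"
	"fmt"
	"math/big"
)

func main() {
	packed, _ := hex.DecodeString(
		"0000000000000000000000000000000000000000000000000000000000000020" + // offset to the dynamic tail
			"0000000000000000000000000000000000000000000000000000000000000002" + // len(array) = 2
			"0000000000000000000000000000000000000000000000000000000000000001" + // array[0]
			"0000000000000000000000000000000000000000000000000000000000000002") // array[1]

	offset := new(big.Int).SetBytes(packed[:32]).Uint64()                 // 0x20 = 32
	length := new(big.Int).SetBytes(packed[offset : offset+32]).Uint64()  // 2

	elems := make([]*big.Int, 0, length)
	for i := uint64(0); i < length; i++ {
		start := offset + 32 + i*32
		elems = append(elems, new(big.Int).SetBytes(packed[start:start+32]))
	}
	fmt.Println(elems) // prints [1 2]
}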
@@ -17,7 +17,9 @@
 package abi
 
 import (
+	"errors"
 	"fmt"
+	"math/big"
 	"reflect"
 	"strings"
 )
@@ -25,46 +27,38 @@ import (
 // indirect recursively dereferences the value until it either gets the value
 // or finds a big.Int
 func indirect(v reflect.Value) reflect.Value {
-	if v.Kind() == reflect.Ptr && v.Elem().Type() != derefbigT {
+	if v.Kind() == reflect.Ptr && v.Elem().Type() != reflect.TypeOf(big.Int{}) {
 		return indirect(v.Elem())
 	}
 	return v
 }
 
-// indirectInterfaceOrPtr recursively dereferences the value until value is not interface.
-func indirectInterfaceOrPtr(v reflect.Value) reflect.Value {
-	if (v.Kind() == reflect.Interface || v.Kind() == reflect.Ptr) && v.Elem().IsValid() {
-		return indirect(v.Elem())
-	}
-	return v
-}
-
-// reflectIntKind returns the reflect using the given size and
+// reflectIntType returns the reflect using the given size and
 // unsignedness.
-func reflectIntKindAndType(unsigned bool, size int) (reflect.Kind, reflect.Type) {
+func reflectIntType(unsigned bool, size int) reflect.Type {
+	if unsigned {
+		switch size {
+		case 8:
+			return reflect.TypeOf(uint8(0))
+		case 16:
+			return reflect.TypeOf(uint16(0))
+		case 32:
+			return reflect.TypeOf(uint32(0))
+		case 64:
+			return reflect.TypeOf(uint64(0))
+		}
+	}
 	switch size {
 	case 8:
-		if unsigned {
-			return reflect.Uint8, uint8T
-		}
-		return reflect.Int8, int8T
+		return reflect.TypeOf(int8(0))
 	case 16:
-		if unsigned {
-			return reflect.Uint16, uint16T
-		}
-		return reflect.Int16, int16T
+		return reflect.TypeOf(int16(0))
 	case 32:
-		if unsigned {
-			return reflect.Uint32, uint32T
-		}
-		return reflect.Int32, int32T
+		return reflect.TypeOf(int32(0))
 	case 64:
-		if unsigned {
-			return reflect.Uint64, uint64T
-		}
-		return reflect.Int64, int64T
+		return reflect.TypeOf(int64(0))
 	}
-	return reflect.Ptr, bigT
+	return reflect.TypeOf(&big.Int{})
 }
 
 // mustArrayToBytesSlice creates a new byte slice with the exact same size as value
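reflectIntType above maps an ABI integer width to the concrete Go type that the unpacker allocates. Since the function is unexported, the sketch below is a standalone, partial copy (unsigned branch plus the *big.Int fallback), not the library API; it only illustrates how such a reflect.Type is typically consumed: allocate a settable value with reflect.New and fill it.

package main

import (
	"fmt"
	"math/big"
	"reflect"
)

// localIntType mirrors the unsigned branch of reflectIntType (illustrative copy;
// the real function lives unexported in package abi).
func localIntType(unsigned bool, size int) reflect.Type {
	if unsigned {
		switch size {
		case 8:
			return reflect.TypeOf(uint8(0))
		case 16:
			return reflect.TypeOf(uint16(0))
		case 32:
			return reflect.TypeOf(uint32(0))
		case 64:
			return reflect.TypeOf(uint64(0))
		}
	}
	// anything wider than 64 bits is represented as *big.Int
	return reflect.TypeOf(&big.Int{})
}

func main() {
	// Allocate a settable value of the mapped type and fill it, as an unpacker
	// would after decoding a uint32 word.
	v := reflect.New(localIntType(true, 32)).Elem()
	v.SetUint(42)
	fmt.Println(v.Interface()) // prints 42 (as a uint32)
}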
@@ -84,12 +78,16 @@ func set(dst, src reflect.Value) error {
 	switch {
 	case dstType.Kind() == reflect.Interface && dst.Elem().IsValid():
 		return set(dst.Elem(), src)
-	case dstType.Kind() == reflect.Ptr && dstType.Elem() != derefbigT:
+	case dstType.Kind() == reflect.Ptr && dstType.Elem() != reflect.TypeOf(big.Int{}):
 		return set(dst.Elem(), src)
 	case srcType.AssignableTo(dstType) && dst.CanSet():
 		dst.Set(src)
-	case dstType.Kind() == reflect.Slice && srcType.Kind() == reflect.Slice:
+	case dstType.Kind() == reflect.Slice && srcType.Kind() == reflect.Slice && dst.CanSet():
 		return setSlice(dst, src)
+	case dstType.Kind() == reflect.Array:
+		return setArray(dst, src)
+	case dstType.Kind() == reflect.Struct:
+		return setStruct(dst, src)
 	default:
 		return fmt.Errorf("abi: cannot unmarshal %v in to %v", src.Type(), dst.Type())
 	}
@@ -98,38 +96,56 @@ func set(dst, src reflect.Value) error {
 
 // setSlice attempts to assign src to dst when slices are not assignable by default
 // e.g. src: [][]byte -> dst: [][15]byte
+// setSlice ignores if we cannot copy all of src' elements.
 func setSlice(dst, src reflect.Value) error {
 	slice := reflect.MakeSlice(dst.Type(), src.Len(), src.Len())
 	for i := 0; i < src.Len(); i++ {
-		v := src.Index(i)
-		reflect.Copy(slice.Index(i), v)
+		if src.Index(i).Kind() == reflect.Struct {
+			if err := set(slice.Index(i), src.Index(i)); err != nil {
+				return err
+			}
+		} else {
+			// e.g. [][32]uint8 to []common.Hash
+			if err := set(slice.Index(i), src.Index(i)); err != nil {
+				return err
+			}
+		}
 	}
-
-	dst.Set(slice)
-	return nil
-}
-
-// requireAssignable assures that `dest` is a pointer and it's not an interface.
-func requireAssignable(dst, src reflect.Value) error {
-	if dst.Kind() != reflect.Ptr && dst.Kind() != reflect.Interface {
-		return fmt.Errorf("abi: cannot unmarshal %v into %v", src.Type(), dst.Type())
+	if dst.CanSet() {
+		dst.Set(slice)
+		return nil
 	}
-	return nil
+	return errors.New("Cannot set slice, destination not settable")
 }
 
-// requireUnpackKind verifies preconditions for unpacking `args` into `kind`
-func requireUnpackKind(v reflect.Value, t reflect.Type, k reflect.Kind,
-	args Arguments) error {
-
-	switch k {
-	case reflect.Struct:
-	case reflect.Slice, reflect.Array:
-		if minLen := args.LengthNonIndexed(); v.Len() < minLen {
-			return fmt.Errorf("abi: insufficient number of elements in the list/array for unpack, want %d, got %d",
-				minLen, v.Len())
+func setArray(dst, src reflect.Value) error {
+	array := reflect.New(dst.Type()).Elem()
+	min := src.Len()
+	if src.Len() > dst.Len() {
+		min = dst.Len()
+	}
+	for i := 0; i < min; i++ {
+		if err := set(array.Index(i), src.Index(i)); err != nil {
+			return err
+		}
+	}
+	if dst.CanSet() {
+		dst.Set(array)
+		return nil
+	}
+	return errors.New("Cannot set array, destination not settable")
+}
+
+func setStruct(dst, src reflect.Value) error {
+	for i := 0; i < src.NumField(); i++ {
+		srcField := src.Field(i)
+		dstField := dst.Field(i)
+		if !dstField.IsValid() || !srcField.IsValid() {
+			return fmt.Errorf("Could not find src field: %v value: %v in destination", srcField.Type().Name(), srcField)
+		}
+		if err := set(dstField, srcField); err != nil {
+			return err
 		}
-	default:
-		return fmt.Errorf("abi: cannot unmarshal tuple into %v", t)
 	}
 	return nil
 }
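The new setSlice/setArray/setStruct helpers exist because reflect cannot assign, for example, a []*big.Int straight into a [2]*big.Int destination; each element has to be copied into a freshly allocated value of the destination type before the whole thing is set at once. A standalone illustration of that element-wise copy (standard library only, not the package's own code path):

package main

import (
	"fmt"
	"math/big"
	"reflect"
)

func main() {
	src := []*big.Int{big.NewInt(-1), big.NewInt(1)}
	var dst [2]*big.Int

	srcV := reflect.ValueOf(src)
	dstV := reflect.ValueOf(&dst).Elem() // addressable, so it can be set

	// Mirror the setArray idea: build a new array value, copy
	// min(len(src), len(dst)) elements one by one, then assign it in one go.
	array := reflect.New(dstV.Type()).Elem()
	min := srcV.Len()
	if min > dstV.Len() {
		min = dstV.Len()
	}
	for i := 0; i < min; i++ {
		array.Index(i).Set(srcV.Index(i))
	}
	dstV.Set(array)

	fmt.Println(dst[0], dst[1]) // prints -1 1
}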
@@ -156,9 +172,8 @@ func mapArgNamesToStructFields(argNames []string, value reflect.Value) (map[stri
 			continue
 		}
 		// skip fields that have no abi:"" tag.
-		var ok bool
-		var tagName string
-		if tagName, ok = typ.Field(i).Tag.Lookup("abi"); !ok {
+		tagName, ok := typ.Field(i).Tag.Lookup("abi")
+		if !ok {
 			continue
 		}
 		// check if tag is empty.
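The hunk above adjusts how mapArgNamesToStructFields looks up the abi:"" struct tag that pairs ABI argument names with Go struct fields; the packing test cases earlier rely on exactly this (e.g. FieldA string `abi:"a"`). A small standalone sketch of the same Tag.Lookup pattern (hypothetical output struct, standard library only):

package main

import (
	"fmt"
	"reflect"
)

type output struct {
	FieldA string `abi:"a"` // maps ABI argument "a" onto FieldA
	B      int64  // no tag: matched by its capitalised name instead
}

func main() {
	typ := reflect.TypeOf(output{})
	for i := 0; i < typ.NumField(); i++ {
		tagName, ok := typ.Field(i).Tag.Lookup("abi")
		if !ok {
			fmt.Printf("%s: no abi tag\n", typ.Field(i).Name)
			continue
		}
		fmt.Printf("%s: tagged as %q\n", typ.Field(i).Name, tagName)
	}
	// Output:
	// FieldA: tagged as "a"
	// B: no abi tag
}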
173	accounts/abi/topics.go	(new file)
@@ -0,0 +1,173 @@
|
|||||||
|
// Copyright 2018 The go-ethereum Authors
|
||||||
|
// This file is part of the go-ethereum library.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU Lesser General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU Lesser General Public License for more details.
|
||||||
|
//
|
||||||
|
// You should have received a copy of the GNU Lesser General Public License
|
||||||
|
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
package abi
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/binary"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"math/big"
|
||||||
|
"reflect"
|
||||||
|
|
||||||
|
"github.com/ethereum/go-ethereum/common"
|
||||||
|
"github.com/ethereum/go-ethereum/crypto"
|
||||||
|
)
|
||||||
|
|
||||||
|
// MakeTopics converts a filter query argument list into a filter topic set.
|
||||||
|
func MakeTopics(query ...[]interface{}) ([][]common.Hash, error) {
|
||||||
|
topics := make([][]common.Hash, len(query))
|
||||||
|
for i, filter := range query {
|
||||||
|
for _, rule := range filter {
|
||||||
|
var topic common.Hash
|
||||||
|
|
||||||
|
// Try to generate the topic based on simple types
|
||||||
|
switch rule := rule.(type) {
|
||||||
|
case common.Hash:
|
||||||
|
copy(topic[:], rule[:])
|
||||||
|
case common.Address:
|
||||||
|
copy(topic[common.HashLength-common.AddressLength:], rule[:])
|
||||||
|
case *big.Int:
|
||||||
|
blob := rule.Bytes()
|
||||||
|
copy(topic[common.HashLength-len(blob):], blob)
|
||||||
|
case bool:
|
||||||
|
if rule {
|
||||||
|
topic[common.HashLength-1] = 1
|
||||||
|
}
|
||||||
|
case int8:
|
||||||
|
copy(topic[:], genIntType(int64(rule), 1))
|
||||||
|
case int16:
|
||||||
|
copy(topic[:], genIntType(int64(rule), 2))
|
||||||
|
case int32:
|
||||||
|
copy(topic[:], genIntType(int64(rule), 4))
|
||||||
|
case int64:
|
||||||
|
copy(topic[:], genIntType(rule, 8))
|
||||||
|
case uint8:
|
||||||
|
blob := new(big.Int).SetUint64(uint64(rule)).Bytes()
|
||||||
|
copy(topic[common.HashLength-len(blob):], blob)
|
||||||
|
case uint16:
|
||||||
|
blob := new(big.Int).SetUint64(uint64(rule)).Bytes()
|
||||||
|
copy(topic[common.HashLength-len(blob):], blob)
|
||||||
|
case uint32:
|
||||||
|
blob := new(big.Int).SetUint64(uint64(rule)).Bytes()
|
||||||
|
copy(topic[common.HashLength-len(blob):], blob)
|
||||||
|
case uint64:
|
||||||
|
blob := new(big.Int).SetUint64(rule).Bytes()
|
||||||
|
copy(topic[common.HashLength-len(blob):], blob)
|
||||||
|
case string:
|
||||||
|
hash := crypto.Keccak256Hash([]byte(rule))
|
||||||
|
copy(topic[:], hash[:])
|
||||||
|
case []byte:
|
||||||
|
hash := crypto.Keccak256Hash(rule)
|
||||||
|
copy(topic[:], hash[:])
|
||||||
|
|
||||||
|
default:
|
||||||
|
// todo(rjl493456442) according solidity documentation, indexed event
|
||||||
|
// parameters that are not value types i.e. arrays and structs are not
|
||||||
|
// stored directly but instead a keccak256-hash of an encoding is stored.
|
||||||
|
//
|
||||||
|
// We only convert strings and bytes to hash, still need to deal with
|
||||||
|
// array(both fixed-size and dynamic-size) and struct.
|
||||||
|
|
||||||
|
// Attempt to generate the topic from funky types
|
||||||
|
val := reflect.ValueOf(rule)
|
||||||
|
switch {
|
||||||
|
// static byte array
|
||||||
|
case val.Kind() == reflect.Array && reflect.TypeOf(rule).Elem().Kind() == reflect.Uint8:
|
||||||
|
reflect.Copy(reflect.ValueOf(topic[:val.Len()]), val)
|
||||||
|
default:
|
||||||
|
return nil, fmt.Errorf("unsupported indexed type: %T", rule)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
topics[i] = append(topics[i], topic)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return topics, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func genIntType(rule int64, size uint) []byte {
|
||||||
|
var topic [common.HashLength]byte
|
||||||
|
if rule < 0 {
|
||||||
|
// if a rule is negative, we need to put it into two's complement,
// extended to common.HashLength bytes.
|
||||||
|
topic = [common.HashLength]byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}
|
||||||
|
}
|
||||||
|
for i := uint(0); i < size; i++ {
|
||||||
|
topic[common.HashLength-i-1] = byte(rule >> (i * 8))
|
||||||
|
}
|
||||||
|
return topic[:]
|
||||||
|
}
|
||||||
|
|
||||||
|
// ParseTopics converts the indexed topic fields into actual log field values.
|
||||||
|
func ParseTopics(out interface{}, fields Arguments, topics []common.Hash) error {
|
||||||
|
return parseTopicWithSetter(fields, topics,
|
||||||
|
func(arg Argument, reconstr interface{}) {
|
||||||
|
field := reflect.ValueOf(out).Elem().FieldByName(ToCamelCase(arg.Name))
|
||||||
|
field.Set(reflect.ValueOf(reconstr))
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// ParseTopicsIntoMap converts the indexed topic field-value pairs into map key-value pairs
|
||||||
|
func ParseTopicsIntoMap(out map[string]interface{}, fields Arguments, topics []common.Hash) error {
|
||||||
|
return parseTopicWithSetter(fields, topics,
|
||||||
|
func(arg Argument, reconstr interface{}) {
|
||||||
|
out[arg.Name] = reconstr
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// parseTopicWithSetter converts the indexed topic field-value pairs and stores them using the
|
||||||
|
// provided set function.
|
||||||
|
//
|
||||||
|
// Note, dynamic types cannot be reconstructed since they get mapped to Keccak256
|
||||||
|
// hashes as the topic value!
|
||||||
|
func parseTopicWithSetter(fields Arguments, topics []common.Hash, setter func(Argument, interface{})) error {
|
||||||
|
// Sanity check that the fields and topics match up
|
||||||
|
if len(fields) != len(topics) {
|
||||||
|
return errors.New("topic/field count mismatch")
|
||||||
|
}
|
||||||
|
// Iterate over all the fields and reconstruct them from topics
|
||||||
|
for i, arg := range fields {
|
||||||
|
if !arg.Indexed {
|
||||||
|
return errors.New("non-indexed field in topic reconstruction")
|
||||||
|
}
|
||||||
|
var reconstr interface{}
|
||||||
|
switch arg.Type.T {
|
||||||
|
case TupleTy:
|
||||||
|
return errors.New("tuple type in topic reconstruction")
|
||||||
|
case StringTy, BytesTy, SliceTy, ArrayTy:
|
||||||
|
// Array types (including strings and bytes) have their keccak256 hashes stored in the topic- not a hash
|
||||||
|
// whose bytes can be decoded to the actual value- so the best we can do is retrieve that hash
|
||||||
|
reconstr = topics[i]
|
||||||
|
case FunctionTy:
|
||||||
|
if garbage := binary.BigEndian.Uint64(topics[i][0:8]); garbage != 0 {
|
||||||
|
return fmt.Errorf("bind: got improperly encoded function type, got %v", topics[i].Bytes())
|
||||||
|
}
|
||||||
|
var tmp [24]byte
|
||||||
|
copy(tmp[:], topics[i][8:32])
|
||||||
|
reconstr = tmp
|
||||||
|
default:
|
||||||
|
var err error
|
||||||
|
reconstr, err = toGoType(0, arg.Type, topics[i].Bytes())
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// Use the setter function to store the value
|
||||||
|
setter(arg, reconstr)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
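A usage sketch for the two exported entry points defined above, MakeTopics and ParseTopicsIntoMap (illustrative values only; the Argument construction mirrors setupTopicsTests in the test file that follows):

package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/accounts/abi"
	"github.com/ethereum/go-ethereum/common"
)

func main() {
	// Build filter topics for an indexed address and an indexed int256.
	topics, err := abi.MakeTopics(
		[]interface{}{common.HexToAddress("0x0102030405060708091011121314151617181920")},
		[]interface{}{big.NewInt(42)},
	)
	if err != nil {
		panic(err)
	}
	fmt.Println(len(topics)) // 2 query positions, one candidate hash each

	// Reverse direction: recover the indexed int256 from a log topic.
	int256Type, _ := abi.NewType("int256", "", nil)
	fields := abi.Arguments{{Name: "int256Value", Type: int256Type, Indexed: true}}

	out := map[string]interface{}{}
	if err := abi.ParseTopicsIntoMap(out, fields, topics[1]); err != nil {
		panic(err)
	}
	fmt.Println(out["int256Value"]) // prints 42
}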
381	accounts/abi/topics_test.go	(new file)
@@ -0,0 +1,381 @@
|
|||||||
|
// Copyright 2019 The go-ethereum Authors
|
||||||
|
// This file is part of the go-ethereum library.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU Lesser General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU Lesser General Public License for more details.
|
||||||
|
//
|
||||||
|
// You should have received a copy of the GNU Lesser General Public License
|
||||||
|
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
package abi
|
||||||
|
|
||||||
|
import (
|
||||||
|
"math/big"
|
||||||
|
"reflect"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/ethereum/go-ethereum/common"
|
||||||
|
"github.com/ethereum/go-ethereum/crypto"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestMakeTopics(t *testing.T) {
|
||||||
|
type args struct {
|
||||||
|
query [][]interface{}
|
||||||
|
}
|
||||||
|
tests := []struct {
|
||||||
|
name string
|
||||||
|
args args
|
||||||
|
want [][]common.Hash
|
||||||
|
wantErr bool
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
"support fixed byte types, right padded to 32 bytes",
|
||||||
|
args{[][]interface{}{{[5]byte{1, 2, 3, 4, 5}}}},
|
||||||
|
[][]common.Hash{{common.Hash{1, 2, 3, 4, 5}}},
|
||||||
|
false,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"support common hash types in topics",
|
||||||
|
args{[][]interface{}{{common.Hash{1, 2, 3, 4, 5}}}},
|
||||||
|
[][]common.Hash{{common.Hash{1, 2, 3, 4, 5}}},
|
||||||
|
false,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"support address types in topics",
|
||||||
|
args{[][]interface{}{{common.Address{1, 2, 3, 4, 5}}}},
|
||||||
|
[][]common.Hash{{common.Hash{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 3, 4, 5}}},
|
||||||
|
false,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"support *big.Int types in topics",
|
||||||
|
args{[][]interface{}{{big.NewInt(1).Lsh(big.NewInt(2), 254)}}},
|
||||||
|
[][]common.Hash{{common.Hash{128}}},
|
||||||
|
false,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"support boolean types in topics",
|
||||||
|
args{[][]interface{}{
|
||||||
|
{true},
|
||||||
|
{false},
|
||||||
|
}},
|
||||||
|
[][]common.Hash{
|
||||||
|
{common.Hash{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1}},
|
||||||
|
{common.Hash{0}},
|
||||||
|
},
|
||||||
|
false,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"support int/uint(8/16/32/64) types in topics",
|
||||||
|
args{[][]interface{}{
|
||||||
|
{int8(-2)},
|
||||||
|
{int16(-3)},
|
||||||
|
{int32(-4)},
|
||||||
|
{int64(-5)},
|
||||||
|
{int8(1)},
|
||||||
|
{int16(256)},
|
||||||
|
{int32(65536)},
|
||||||
|
{int64(4294967296)},
|
||||||
|
{uint8(1)},
|
||||||
|
{uint16(256)},
|
||||||
|
{uint32(65536)},
|
||||||
|
{uint64(4294967296)},
|
||||||
|
}},
|
||||||
|
[][]common.Hash{
|
||||||
|
{common.Hash{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 254}},
|
||||||
|
{common.Hash{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 253}},
|
||||||
|
{common.Hash{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 252}},
|
||||||
|
{common.Hash{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 251}},
|
||||||
|
{common.Hash{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1}},
|
||||||
|
{common.Hash{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0}},
|
||||||
|
{common.Hash{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0}},
|
||||||
|
{common.Hash{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0}},
|
||||||
|
{common.Hash{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1}},
|
||||||
|
{common.Hash{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0}},
|
||||||
|
{common.Hash{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0}},
|
||||||
|
{common.Hash{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0}},
|
||||||
|
},
|
||||||
|
false,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"support string types in topics",
|
||||||
|
args{[][]interface{}{{"hello world"}}},
|
||||||
|
[][]common.Hash{{crypto.Keccak256Hash([]byte("hello world"))}},
|
||||||
|
false,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"support byte slice types in topics",
|
||||||
|
args{[][]interface{}{{[]byte{1, 2, 3}}}},
|
||||||
|
[][]common.Hash{{crypto.Keccak256Hash([]byte{1, 2, 3})}},
|
||||||
|
false,
|
||||||
|
},
|
||||||
|
}
|
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got, err := MakeTopics(tt.args.query...)
			if (err != nil) != tt.wantErr {
				t.Errorf("makeTopics() error = %v, wantErr %v", err, tt.wantErr)
				return
			}
			if !reflect.DeepEqual(got, tt.want) {
				t.Errorf("makeTopics() = %v, want %v", got, tt.want)
			}
		})
	}
}

type args struct {
	createObj func() interface{}
	resultObj func() interface{}
	resultMap func() map[string]interface{}
	fields    Arguments
	topics    []common.Hash
}

type bytesStruct struct {
	StaticBytes [5]byte
}
type int8Struct struct {
	Int8Value int8
}
type int256Struct struct {
	Int256Value *big.Int
}

type hashStruct struct {
	HashValue common.Hash
}

type funcStruct struct {
	FuncValue [24]byte
}

type topicTest struct {
	name    string
	args    args
	wantErr bool
}

func setupTopicsTests() []topicTest {
	bytesType, _ := NewType("bytes5", "", nil)
	int8Type, _ := NewType("int8", "", nil)
	int256Type, _ := NewType("int256", "", nil)
	tupleType, _ := NewType("tuple(int256,int8)", "", nil)
	stringType, _ := NewType("string", "", nil)
	funcType, _ := NewType("function", "", nil)

	tests := []topicTest{
		{
			name: "support fixed byte types, right padded to 32 bytes",
			args: args{
				createObj: func() interface{} { return &bytesStruct{} },
				resultObj: func() interface{} { return &bytesStruct{StaticBytes: [5]byte{1, 2, 3, 4, 5}} },
				resultMap: func() map[string]interface{} {
					return map[string]interface{}{"staticBytes": [5]byte{1, 2, 3, 4, 5}}
				},
				fields: Arguments{Argument{
					Name:    "staticBytes",
					Type:    bytesType,
					Indexed: true,
				}},
				topics: []common.Hash{
					{1, 2, 3, 4, 5},
				},
			},
			wantErr: false,
		},
		{
			name: "int8 with negative value",
			args: args{
				createObj: func() interface{} { return &int8Struct{} },
				resultObj: func() interface{} { return &int8Struct{Int8Value: -1} },
				resultMap: func() map[string]interface{} {
					return map[string]interface{}{"int8Value": int8(-1)}
				},
				fields: Arguments{Argument{
					Name:    "int8Value",
					Type:    int8Type,
					Indexed: true,
				}},
				topics: []common.Hash{
					{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
						255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255},
				},
			},
			wantErr: false,
		},
		{
			name: "int256 with negative value",
			args: args{
				createObj: func() interface{} { return &int256Struct{} },
				resultObj: func() interface{} { return &int256Struct{Int256Value: big.NewInt(-1)} },
				resultMap: func() map[string]interface{} {
					return map[string]interface{}{"int256Value": big.NewInt(-1)}
				},
				fields: Arguments{Argument{
					Name:    "int256Value",
					Type:    int256Type,
					Indexed: true,
				}},
				topics: []common.Hash{
					{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
						255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255},
				},
			},
			wantErr: false,
		},
		{
			name: "hash type",
			args: args{
				createObj: func() interface{} { return &hashStruct{} },
				resultObj: func() interface{} { return &hashStruct{crypto.Keccak256Hash([]byte("stringtopic"))} },
				resultMap: func() map[string]interface{} {
					return map[string]interface{}{"hashValue": crypto.Keccak256Hash([]byte("stringtopic"))}
				},
				fields: Arguments{Argument{
					Name:    "hashValue",
					Type:    stringType,
					Indexed: true,
				}},
				topics: []common.Hash{
					crypto.Keccak256Hash([]byte("stringtopic")),
				},
			},
			wantErr: false,
		},
		{
			name: "function type",
			args: args{
				createObj: func() interface{} { return &funcStruct{} },
				resultObj: func() interface{} {
					return &funcStruct{[24]byte{255, 255, 255, 255, 255, 255, 255, 255,
						255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}}
				},
				resultMap: func() map[string]interface{} {
					return map[string]interface{}{"funcValue": [24]byte{255, 255, 255, 255, 255, 255, 255, 255,
						255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}}
				},
				fields: Arguments{Argument{
					Name:    "funcValue",
					Type:    funcType,
					Indexed: true,
				}},
				topics: []common.Hash{
					{0, 0, 0, 0, 0, 0, 0, 0, 255, 255, 255, 255, 255, 255, 255, 255,
						255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255},
				},
			},
			wantErr: false,
		},
		{
			name: "error on topic/field count mismatch",
			args: args{
				createObj: func() interface{} { return nil },
				resultObj: func() interface{} { return nil },
				resultMap: func() map[string]interface{} { return make(map[string]interface{}) },
				fields: Arguments{Argument{
					Name:    "tupletype",
					Type:    tupleType,
					Indexed: true,
				}},
				topics: []common.Hash{},
			},
			wantErr: true,
		},
		{
			name: "error on unindexed arguments",
			args: args{
				createObj: func() interface{} { return &int256Struct{} },
				resultObj: func() interface{} { return &int256Struct{} },
				resultMap: func() map[string]interface{} { return make(map[string]interface{}) },
				fields: Arguments{Argument{
					Name:    "int256Value",
					Type:    int256Type,
					Indexed: false,
				}},
				topics: []common.Hash{
					{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
						255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255},
				},
			},
			wantErr: true,
		},
		{
			name: "error on tuple in topic reconstruction",
			args: args{
				createObj: func() interface{} { return &tupleType },
				resultObj: func() interface{} { return &tupleType },
				resultMap: func() map[string]interface{} { return make(map[string]interface{}) },
				fields: Arguments{Argument{
					Name:    "tupletype",
					Type:    tupleType,
					Indexed: true,
				}},
				topics: []common.Hash{{0}},
			},
			wantErr: true,
		},
		{
			name: "error on improper encoded function",
			args: args{
				createObj: func() interface{} { return &funcStruct{} },
				resultObj: func() interface{} { return &funcStruct{} },
				resultMap: func() map[string]interface{} {
					return make(map[string]interface{})
				},
				fields: Arguments{Argument{
					Name:    "funcValue",
					Type:    funcType,
					Indexed: true,
				}},
				topics: []common.Hash{
					{0, 0, 0, 0, 0, 0, 0, 128, 255, 255, 255, 255, 255, 255, 255, 255,
						255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255},
				},
			},
			wantErr: true,
		},
	}

	return tests
}

func TestParseTopics(t *testing.T) {
	tests := setupTopicsTests()

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			createObj := tt.args.createObj()
			if err := ParseTopics(createObj, tt.args.fields, tt.args.topics); (err != nil) != tt.wantErr {
				t.Errorf("parseTopics() error = %v, wantErr %v", err, tt.wantErr)
			}
			resultObj := tt.args.resultObj()
			if !reflect.DeepEqual(createObj, resultObj) {
				t.Errorf("parseTopics() = %v, want %v", createObj, resultObj)
			}
		})
	}
}

func TestParseTopicsIntoMap(t *testing.T) {
	tests := setupTopicsTests()

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			outMap := make(map[string]interface{})
			if err := ParseTopicsIntoMap(outMap, tt.args.fields, tt.args.topics); (err != nil) != tt.wantErr {
				t.Errorf("parseTopicsIntoMap() error = %v, wantErr %v", err, tt.wantErr)
			}
			resultMap := tt.args.resultMap()
			if !reflect.DeepEqual(outMap, resultMap) {
				t.Errorf("parseTopicsIntoMap() = %v, want %v", outMap, resultMap)
			}
		})
	}
}
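Note (not part of the diff): the tests above round-trip indexed values through MakeTopics and back through ParseTopics/ParseTopicsIntoMap. The sketch below is a minimal, hedged usage example of that round trip for a single indexed int8 argument; it assumes these helpers are exported from the accounts/abi package, as the test file above suggests, and mirrors the int8 table case rather than defining any new behaviour.

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/accounts/abi"
	"github.com/ethereum/go-ethereum/common"
)

func main() {
	// Describe one indexed int8 event input (assumption: same Argument layout as in the tests above).
	int8Type, _ := abi.NewType("int8", "", nil)
	fields := abi.Arguments{abi.Argument{Name: "int8Value", Type: int8Type, Indexed: true}}

	// MakeTopics packs each query position into 32-byte log topics.
	topics, err := abi.MakeTopics([]interface{}{int8(-1)})
	if err != nil {
		panic(err)
	}

	// ParseTopicsIntoMap reverses the packing, keyed by argument name.
	out := make(map[string]interface{})
	if err := abi.ParseTopicsIntoMap(out, fields, []common.Hash{topics[0][0]}); err != nil {
		panic(err)
	}
	fmt.Println(out["int8Value"]) // expected: -1, as in the "int8 with negative value" case
}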
@@ -23,6 +23,8 @@ import (
 	"regexp"
 	"strconv"
 	"strings"
+
+	"github.com/ethereum/go-ethereum/common"
 )
 
 // Type enumerator
@@ -45,17 +47,16 @@ const (
 // Type is the reflection of the supported argument type
 type Type struct {
 	Elem *Type
-	Kind reflect.Kind
-	Type reflect.Type
 	Size int
 	T    byte // Our own type checking
 
 	stringKind string // holds the unparsed string for deriving signatures
 
 	// Tuple relative fields
 	TupleRawName  string       // Raw struct name defined in source code, may be empty.
 	TupleElems    []*Type      // Type information of all tuple fields
 	TupleRawNames []string     // Raw field name of all tuple fields
+	TupleType     reflect.Type // Underlying struct of the tuple
 }
 
 var (
@@ -94,20 +95,16 @@ func NewType(t string, internalType string, components []ArgumentMarshaling) (ty
 		if len(intz) == 0 {
 			// is a slice
 			typ.T = SliceTy
-			typ.Kind = reflect.Slice
 			typ.Elem = &embeddedType
-			typ.Type = reflect.SliceOf(embeddedType.Type)
 			typ.stringKind = embeddedType.stringKind + sliced
 		} else if len(intz) == 1 {
-			// is a array
+			// is an array
 			typ.T = ArrayTy
-			typ.Kind = reflect.Array
 			typ.Elem = &embeddedType
 			typ.Size, err = strconv.Atoi(intz[0])
 			if err != nil {
 				return Type{}, fmt.Errorf("abi: error parsing variable size: %v", err)
 			}
-			typ.Type = reflect.ArrayOf(typ.Size, embeddedType.Type)
 			typ.stringKind = embeddedType.stringKind + sliced
 		} else {
 			return Type{}, fmt.Errorf("invalid formatting of array type")
@@ -139,36 +136,24 @@ func NewType(t string, internalType string, components []ArgumentMarshaling) (ty
 	// varType is the parsed abi type
 	switch varType := parsedType[1]; varType {
 	case "int":
-		typ.Kind, typ.Type = reflectIntKindAndType(false, varSize)
 		typ.Size = varSize
 		typ.T = IntTy
 	case "uint":
-		typ.Kind, typ.Type = reflectIntKindAndType(true, varSize)
 		typ.Size = varSize
 		typ.T = UintTy
 	case "bool":
-		typ.Kind = reflect.Bool
 		typ.T = BoolTy
-		typ.Type = reflect.TypeOf(bool(false))
 	case "address":
-		typ.Kind = reflect.Array
-		typ.Type = addressT
 		typ.Size = 20
 		typ.T = AddressTy
 	case "string":
-		typ.Kind = reflect.String
-		typ.Type = reflect.TypeOf("")
 		typ.T = StringTy
 	case "bytes":
 		if varSize == 0 {
 			typ.T = BytesTy
-			typ.Kind = reflect.Slice
-			typ.Type = reflect.SliceOf(reflect.TypeOf(byte(0)))
 		} else {
 			typ.T = FixedBytesTy
-			typ.Kind = reflect.Array
 			typ.Size = varSize
-			typ.Type = reflect.ArrayOf(varSize, reflect.TypeOf(byte(0)))
 		}
 	case "tuple":
 		var (
@@ -178,17 +163,20 @@ func NewType(t string, internalType string, components []ArgumentMarshaling) (ty
 			expression string // canonical parameter expression
 		)
 		expression += "("
+		overloadedNames := make(map[string]string)
 		for idx, c := range components {
 			cType, err := NewType(c.Type, c.InternalType, c.Components)
 			if err != nil {
 				return Type{}, err
 			}
-			if ToCamelCase(c.Name) == "" {
-				return Type{}, errors.New("abi: purely anonymous or underscored field is not supported")
+			fieldName, err := overloadedArgName(c.Name, overloadedNames)
+			if err != nil {
+				return Type{}, err
 			}
+			overloadedNames[fieldName] = fieldName
 			fields = append(fields, reflect.StructField{
-				Name: ToCamelCase(c.Name), // reflect.StructOf will panic for any exported field.
-				Type: cType.Type,
+				Name: fieldName, // reflect.StructOf will panic for any exported field.
+				Type: cType.GetType(),
 				Tag:  reflect.StructTag("json:\"" + c.Name + "\""),
 			})
 			elems = append(elems, &cType)
@@ -199,8 +187,8 @@ func NewType(t string, internalType string, components []ArgumentMarshaling) (ty
 			}
 		}
 		expression += ")"
-		typ.Kind = reflect.Struct
-		typ.Type = reflect.StructOf(fields)
+		typ.TupleType = reflect.StructOf(fields)
 		typ.TupleElems = elems
 		typ.TupleRawNames = names
 		typ.T = TupleTy
@@ -217,10 +205,8 @@ func NewType(t string, internalType string, components []ArgumentMarshaling) (ty
 		}
 
 	case "function":
-		typ.Kind = reflect.Array
 		typ.T = FunctionTy
 		typ.Size = 24
-		typ.Type = reflect.ArrayOf(24, reflect.TypeOf(byte(0)))
 	default:
 		return Type{}, fmt.Errorf("unsupported arg type: %s", t)
 	}
@@ -228,6 +214,56 @@ func NewType(t string, internalType string, components []ArgumentMarshaling) (ty
 	return
 }
 
+// GetType returns the reflection type of the ABI type.
+func (t Type) GetType() reflect.Type {
+	switch t.T {
+	case IntTy:
+		return reflectIntType(false, t.Size)
+	case UintTy:
+		return reflectIntType(true, t.Size)
+	case BoolTy:
+		return reflect.TypeOf(false)
+	case StringTy:
+		return reflect.TypeOf("")
+	case SliceTy:
+		return reflect.SliceOf(t.Elem.GetType())
+	case ArrayTy:
+		return reflect.ArrayOf(t.Size, t.Elem.GetType())
+	case TupleTy:
+		return t.TupleType
+	case AddressTy:
+		return reflect.TypeOf(common.Address{})
+	case FixedBytesTy:
+		return reflect.ArrayOf(t.Size, reflect.TypeOf(byte(0)))
+	case BytesTy:
+		return reflect.SliceOf(reflect.TypeOf(byte(0)))
+	case HashTy:
+		// hashtype currently not used
+		return reflect.ArrayOf(32, reflect.TypeOf(byte(0)))
+	case FixedPointTy:
+		// fixedpoint type currently not used
+		return reflect.ArrayOf(32, reflect.TypeOf(byte(0)))
+	case FunctionTy:
+		return reflect.ArrayOf(24, reflect.TypeOf(byte(0)))
+	default:
+		panic("Invalid type")
+	}
+}
+
+func overloadedArgName(rawName string, names map[string]string) (string, error) {
+	fieldName := ToCamelCase(rawName)
+	if fieldName == "" {
+		return "", errors.New("abi: purely anonymous or underscored field is not supported")
+	}
+	// Handle overloaded fieldNames
+	_, ok := names[fieldName]
+	for idx := 0; ok; idx++ {
+		fieldName = fmt.Sprintf("%s%d", ToCamelCase(rawName), idx)
+		_, ok = names[fieldName]
+	}
+	return fieldName, nil
+}
+
 // String implements Stringer
 func (t Type) String() (out string) {
 	return t.stringKind
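Note (not part of the diff): the hunks above drop the cached Kind and Type fields from Type and instead derive the Go representation on demand through the new GetType method, with tuples returning the cached TupleType and slices and arrays recursing into Elem. A small illustrative sketch of that lookup, written as a standalone program and only a sketch of how the new accessor is expected to behave:

package main

import (
	"fmt"
	"reflect"

	"github.com/ethereum/go-ethereum/accounts/abi"
)

func main() {
	// "uint16[2][]" is a slice of 2-element arrays of uint16, so GetType should
	// recurse SliceTy -> ArrayTy -> UintTy and produce [][2]uint16.
	typ, err := abi.NewType("uint16[2][]", "", nil)
	if err != nil {
		panic(err)
	}
	goType := typ.GetType()
	fmt.Println(goType)                                     // [][2]uint16
	fmt.Println(goType == reflect.TypeOf([][2]uint16(nil))) // true: reflect types are canonical
}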
@@ -36,58 +36,58 @@ func TestTypeRegexp(t *testing.T) {
 		components []ArgumentMarshaling
 		kind       Type
 	}{
-		{"bool", nil, Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}},
+		{"bool", nil, Type{T: BoolTy, stringKind: "bool"}},
-		{"bool[]", nil, Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]bool(nil)), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[]"}},
+		{"bool[]", nil, Type{T: SliceTy, Elem: &Type{T: BoolTy, stringKind: "bool"}, stringKind: "bool[]"}},
-		{"bool[2]", nil, Type{Size: 2, Kind: reflect.Array, T: ArrayTy, Type: reflect.TypeOf([2]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[2]"}},
+		{"bool[2]", nil, Type{Size: 2, T: ArrayTy, Elem: &Type{T: BoolTy, stringKind: "bool"}, stringKind: "bool[2]"}},
-		{"bool[2][]", nil, Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([][2]bool{}), Elem: &Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[2]"}, stringKind: "bool[2][]"}},
+		{"bool[2][]", nil, Type{T: SliceTy, Elem: &Type{T: ArrayTy, Size: 2, Elem: &Type{T: BoolTy, stringKind: "bool"}, stringKind: "bool[2]"}, stringKind: "bool[2][]"}},
-		{"bool[][]", nil, Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([][]bool{}), Elem: &Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[]"}, stringKind: "bool[][]"}},
+		{"bool[][]", nil, Type{T: SliceTy, Elem: &Type{T: SliceTy, Elem: &Type{T: BoolTy, stringKind: "bool"}, stringKind: "bool[]"}, stringKind: "bool[][]"}},
-		{"bool[][2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2][]bool{}), Elem: &Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[]"}, stringKind: "bool[][2]"}},
+		{"bool[][2]", nil, Type{T: ArrayTy, Size: 2, Elem: &Type{T: SliceTy, Elem: &Type{T: BoolTy, stringKind: "bool"}, stringKind: "bool[]"}, stringKind: "bool[][2]"}},
-		{"bool[2][2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2][2]bool{}), Elem: &Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[2]"}, stringKind: "bool[2][2]"}},
+		{"bool[2][2]", nil, Type{T: ArrayTy, Size: 2, Elem: &Type{T: ArrayTy, Size: 2, Elem: &Type{T: BoolTy, stringKind: "bool"}, stringKind: "bool[2]"}, stringKind: "bool[2][2]"}},
-		{"bool[2][][2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2][][2]bool{}), Elem: &Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([][2]bool{}), Elem: &Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[2]"}, stringKind: "bool[2][]"}, stringKind: "bool[2][][2]"}},
+		{"bool[2][][2]", nil, Type{T: ArrayTy, Size: 2, Elem: &Type{T: SliceTy, Elem: &Type{T: ArrayTy, Size: 2, Elem: &Type{T: BoolTy, stringKind: "bool"}, stringKind: "bool[2]"}, stringKind: "bool[2][]"}, stringKind: "bool[2][][2]"}},
-		{"bool[2][2][2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2][2][2]bool{}), Elem: &Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2][2]bool{}), Elem: &Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[2]"}, stringKind: "bool[2][2]"}, stringKind: "bool[2][2][2]"}},
+		{"bool[2][2][2]", nil, Type{T: ArrayTy, Size: 2, Elem: &Type{T: ArrayTy, Size: 2, Elem: &Type{T: ArrayTy, Size: 2, Elem: &Type{T: BoolTy, stringKind: "bool"}, stringKind: "bool[2]"}, stringKind: "bool[2][2]"}, stringKind: "bool[2][2][2]"}},
-		{"bool[][][]", nil, Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([][][]bool{}), Elem: &Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([][]bool{}), Elem: &Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[]"}, stringKind: "bool[][]"}, stringKind: "bool[][][]"}},
+		{"bool[][][]", nil, Type{T: SliceTy, Elem: &Type{T: SliceTy, Elem: &Type{T: SliceTy, Elem: &Type{T: BoolTy, stringKind: "bool"}, stringKind: "bool[]"}, stringKind: "bool[][]"}, stringKind: "bool[][][]"}},
-		{"bool[][2][]", nil, Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([][2][]bool{}), Elem: &Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2][]bool{}), Elem: &Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[]"}, stringKind: "bool[][2]"}, stringKind: "bool[][2][]"}},
+		{"bool[][2][]", nil, Type{T: SliceTy, Elem: &Type{T: ArrayTy, Size: 2, Elem: &Type{T: SliceTy, Elem: &Type{T: BoolTy, stringKind: "bool"}, stringKind: "bool[]"}, stringKind: "bool[][2]"}, stringKind: "bool[][2][]"}},
-		{"int8", nil, Type{Kind: reflect.Int8, Type: int8T, Size: 8, T: IntTy, stringKind: "int8"}},
+		{"int8", nil, Type{Size: 8, T: IntTy, stringKind: "int8"}},
-		{"int16", nil, Type{Kind: reflect.Int16, Type: int16T, Size: 16, T: IntTy, stringKind: "int16"}},
+		{"int16", nil, Type{Size: 16, T: IntTy, stringKind: "int16"}},
-		{"int32", nil, Type{Kind: reflect.Int32, Type: int32T, Size: 32, T: IntTy, stringKind: "int32"}},
+		{"int32", nil, Type{Size: 32, T: IntTy, stringKind: "int32"}},
-		{"int64", nil, Type{Kind: reflect.Int64, Type: int64T, Size: 64, T: IntTy, stringKind: "int64"}},
+		{"int64", nil, Type{Size: 64, T: IntTy, stringKind: "int64"}},
-		{"int256", nil, Type{Kind: reflect.Ptr, Type: bigT, Size: 256, T: IntTy, stringKind: "int256"}},
+		{"int256", nil, Type{Size: 256, T: IntTy, stringKind: "int256"}},
-		{"int8[]", nil, Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]int8{}), Elem: &Type{Kind: reflect.Int8, Type: int8T, Size: 8, T: IntTy, stringKind: "int8"}, stringKind: "int8[]"}},
+		{"int8[]", nil, Type{T: SliceTy, Elem: &Type{Size: 8, T: IntTy, stringKind: "int8"}, stringKind: "int8[]"}},
-		{"int8[2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]int8{}), Elem: &Type{Kind: reflect.Int8, Type: int8T, Size: 8, T: IntTy, stringKind: "int8"}, stringKind: "int8[2]"}},
+		{"int8[2]", nil, Type{T: ArrayTy, Size: 2, Elem: &Type{Size: 8, T: IntTy, stringKind: "int8"}, stringKind: "int8[2]"}},
-		{"int16[]", nil, Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]int16{}), Elem: &Type{Kind: reflect.Int16, Type: int16T, Size: 16, T: IntTy, stringKind: "int16"}, stringKind: "int16[]"}},
+		{"int16[]", nil, Type{T: SliceTy, Elem: &Type{Size: 16, T: IntTy, stringKind: "int16"}, stringKind: "int16[]"}},
-		{"int16[2]", nil, Type{Size: 2, Kind: reflect.Array, T: ArrayTy, Type: reflect.TypeOf([2]int16{}), Elem: &Type{Kind: reflect.Int16, Type: int16T, Size: 16, T: IntTy, stringKind: "int16"}, stringKind: "int16[2]"}},
+		{"int16[2]", nil, Type{Size: 2, T: ArrayTy, Elem: &Type{Size: 16, T: IntTy, stringKind: "int16"}, stringKind: "int16[2]"}},
-		{"int32[]", nil, Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]int32{}), Elem: &Type{Kind: reflect.Int32, Type: int32T, Size: 32, T: IntTy, stringKind: "int32"}, stringKind: "int32[]"}},
+		{"int32[]", nil, Type{T: SliceTy, Elem: &Type{Size: 32, T: IntTy, stringKind: "int32"}, stringKind: "int32[]"}},
-		{"int32[2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]int32{}), Elem: &Type{Kind: reflect.Int32, Type: int32T, Size: 32, T: IntTy, stringKind: "int32"}, stringKind: "int32[2]"}},
+		{"int32[2]", nil, Type{T: ArrayTy, Size: 2, Elem: &Type{Size: 32, T: IntTy, stringKind: "int32"}, stringKind: "int32[2]"}},
-		{"int64[]", nil, Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]int64{}), Elem: &Type{Kind: reflect.Int64, Type: int64T, Size: 64, T: IntTy, stringKind: "int64"}, stringKind: "int64[]"}},
+		{"int64[]", nil, Type{T: SliceTy, Elem: &Type{Size: 64, T: IntTy, stringKind: "int64"}, stringKind: "int64[]"}},
-		{"int64[2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]int64{}), Elem: &Type{Kind: reflect.Int64, Type: int64T, Size: 64, T: IntTy, stringKind: "int64"}, stringKind: "int64[2]"}},
+		{"int64[2]", nil, Type{T: ArrayTy, Size: 2, Elem: &Type{Size: 64, T: IntTy, stringKind: "int64"}, stringKind: "int64[2]"}},
-		{"int256[]", nil, Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]*big.Int{}), Elem: &Type{Kind: reflect.Ptr, Type: bigT, Size: 256, T: IntTy, stringKind: "int256"}, stringKind: "int256[]"}},
+		{"int256[]", nil, Type{T: SliceTy, Elem: &Type{Size: 256, T: IntTy, stringKind: "int256"}, stringKind: "int256[]"}},
-		{"int256[2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]*big.Int{}), Elem: &Type{Kind: reflect.Ptr, Type: bigT, Size: 256, T: IntTy, stringKind: "int256"}, stringKind: "int256[2]"}},
+		{"int256[2]", nil, Type{T: ArrayTy, Size: 2, Elem: &Type{Size: 256, T: IntTy, stringKind: "int256"}, stringKind: "int256[2]"}},
-		{"uint8", nil, Type{Kind: reflect.Uint8, Type: uint8T, Size: 8, T: UintTy, stringKind: "uint8"}},
+		{"uint8", nil, Type{Size: 8, T: UintTy, stringKind: "uint8"}},
-		{"uint16", nil, Type{Kind: reflect.Uint16, Type: uint16T, Size: 16, T: UintTy, stringKind: "uint16"}},
+		{"uint16", nil, Type{Size: 16, T: UintTy, stringKind: "uint16"}},
-		{"uint32", nil, Type{Kind: reflect.Uint32, Type: uint32T, Size: 32, T: UintTy, stringKind: "uint32"}},
+		{"uint32", nil, Type{Size: 32, T: UintTy, stringKind: "uint32"}},
-		{"uint64", nil, Type{Kind: reflect.Uint64, Type: uint64T, Size: 64, T: UintTy, stringKind: "uint64"}},
+		{"uint64", nil, Type{Size: 64, T: UintTy, stringKind: "uint64"}},
-		{"uint256", nil, Type{Kind: reflect.Ptr, Type: bigT, Size: 256, T: UintTy, stringKind: "uint256"}},
+		{"uint256", nil, Type{Size: 256, T: UintTy, stringKind: "uint256"}},
-		{"uint8[]", nil, Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]uint8{}), Elem: &Type{Kind: reflect.Uint8, Type: uint8T, Size: 8, T: UintTy, stringKind: "uint8"}, stringKind: "uint8[]"}},
+		{"uint8[]", nil, Type{T: SliceTy, Elem: &Type{Size: 8, T: UintTy, stringKind: "uint8"}, stringKind: "uint8[]"}},
-		{"uint8[2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]uint8{}), Elem: &Type{Kind: reflect.Uint8, Type: uint8T, Size: 8, T: UintTy, stringKind: "uint8"}, stringKind: "uint8[2]"}},
+		{"uint8[2]", nil, Type{T: ArrayTy, Size: 2, Elem: &Type{Size: 8, T: UintTy, stringKind: "uint8"}, stringKind: "uint8[2]"}},
-		{"uint16[]", nil, Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([]uint16{}), Elem: &Type{Kind: reflect.Uint16, Type: uint16T, Size: 16, T: UintTy, stringKind: "uint16"}, stringKind: "uint16[]"}},
+		{"uint16[]", nil, Type{T: SliceTy, Elem: &Type{Size: 16, T: UintTy, stringKind: "uint16"}, stringKind: "uint16[]"}},
-		{"uint16[2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]uint16{}), Elem: &Type{Kind: reflect.Uint16, Type: uint16T, Size: 16, T: UintTy, stringKind: "uint16"}, stringKind: "uint16[2]"}},
+		{"uint16[2]", nil, Type{T: ArrayTy, Size: 2, Elem: &Type{Size: 16, T: UintTy, stringKind: "uint16"}, stringKind: "uint16[2]"}},
-		{"uint32[]", nil, Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([]uint32{}), Elem: &Type{Kind: reflect.Uint32, Type: uint32T, Size: 32, T: UintTy, stringKind: "uint32"}, stringKind: "uint32[]"}},
+		{"uint32[]", nil, Type{T: SliceTy, Elem: &Type{Size: 32, T: UintTy, stringKind: "uint32"}, stringKind: "uint32[]"}},
-		{"uint32[2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]uint32{}), Elem: &Type{Kind: reflect.Uint32, Type: uint32T, Size: 32, T: UintTy, stringKind: "uint32"}, stringKind: "uint32[2]"}},
+		{"uint32[2]", nil, Type{T: ArrayTy, Size: 2, Elem: &Type{Size: 32, T: UintTy, stringKind: "uint32"}, stringKind: "uint32[2]"}},
-		{"uint64[]", nil, Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([]uint64{}), Elem: &Type{Kind: reflect.Uint64, Type: uint64T, Size: 64, T: UintTy, stringKind: "uint64"}, stringKind: "uint64[]"}},
+		{"uint64[]", nil, Type{T: SliceTy, Elem: &Type{Size: 64, T: UintTy, stringKind: "uint64"}, stringKind: "uint64[]"}},
-		{"uint64[2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]uint64{}), Elem: &Type{Kind: reflect.Uint64, Type: uint64T, Size: 64, T: UintTy, stringKind: "uint64"}, stringKind: "uint64[2]"}},
+		{"uint64[2]", nil, Type{T: ArrayTy, Size: 2, Elem: &Type{Size: 64, T: UintTy, stringKind: "uint64"}, stringKind: "uint64[2]"}},
-		{"uint256[]", nil, Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([]*big.Int{}), Elem: &Type{Kind: reflect.Ptr, Type: bigT, Size: 256, T: UintTy, stringKind: "uint256"}, stringKind: "uint256[]"}},
+		{"uint256[]", nil, Type{T: SliceTy, Elem: &Type{Size: 256, T: UintTy, stringKind: "uint256"}, stringKind: "uint256[]"}},
-		{"uint256[2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Type: reflect.TypeOf([2]*big.Int{}), Size: 2, Elem: &Type{Kind: reflect.Ptr, Type: bigT, Size: 256, T: UintTy, stringKind: "uint256"}, stringKind: "uint256[2]"}},
+		{"uint256[2]", nil, Type{T: ArrayTy, Size: 2, Elem: &Type{Size: 256, T: UintTy, stringKind: "uint256"}, stringKind: "uint256[2]"}},
-		{"bytes32", nil, Type{Kind: reflect.Array, T: FixedBytesTy, Size: 32, Type: reflect.TypeOf([32]byte{}), stringKind: "bytes32"}},
+		{"bytes32", nil, Type{T: FixedBytesTy, Size: 32, stringKind: "bytes32"}},
-		{"bytes[]", nil, Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([][]byte{}), Elem: &Type{Kind: reflect.Slice, Type: reflect.TypeOf([]byte{}), T: BytesTy, stringKind: "bytes"}, stringKind: "bytes[]"}},
+		{"bytes[]", nil, Type{T: SliceTy, Elem: &Type{T: BytesTy, stringKind: "bytes"}, stringKind: "bytes[]"}},
-		{"bytes[2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2][]byte{}), Elem: &Type{T: BytesTy, Type: reflect.TypeOf([]byte{}), Kind: reflect.Slice, stringKind: "bytes"}, stringKind: "bytes[2]"}},
+		{"bytes[2]", nil, Type{T: ArrayTy, Size: 2, Elem: &Type{T: BytesTy, stringKind: "bytes"}, stringKind: "bytes[2]"}},
-		{"bytes32[]", nil, Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([][32]byte{}), Elem: &Type{Kind: reflect.Array, Type: reflect.TypeOf([32]byte{}), T: FixedBytesTy, Size: 32, stringKind: "bytes32"}, stringKind: "bytes32[]"}},
+		{"bytes32[]", nil, Type{T: SliceTy, Elem: &Type{T: FixedBytesTy, Size: 32, stringKind: "bytes32"}, stringKind: "bytes32[]"}},
-		{"bytes32[2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2][32]byte{}), Elem: &Type{Kind: reflect.Array, T: FixedBytesTy, Size: 32, Type: reflect.TypeOf([32]byte{}), stringKind: "bytes32"}, stringKind: "bytes32[2]"}},
+		{"bytes32[2]", nil, Type{T: ArrayTy, Size: 2, Elem: &Type{T: FixedBytesTy, Size: 32, stringKind: "bytes32"}, stringKind: "bytes32[2]"}},
-		{"string", nil, Type{Kind: reflect.String, T: StringTy, Type: reflect.TypeOf(""), stringKind: "string"}},
+		{"string", nil, Type{T: StringTy, stringKind: "string"}},
-		{"string[]", nil, Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([]string{}), Elem: &Type{Kind: reflect.String, Type: reflect.TypeOf(""), T: StringTy, stringKind: "string"}, stringKind: "string[]"}},
+		{"string[]", nil, Type{T: SliceTy, Elem: &Type{T: StringTy, stringKind: "string"}, stringKind: "string[]"}},
-		{"string[2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]string{}), Elem: &Type{Kind: reflect.String, T: StringTy, Type: reflect.TypeOf(""), stringKind: "string"}, stringKind: "string[2]"}},
+		{"string[2]", nil, Type{T: ArrayTy, Size: 2, Elem: &Type{T: StringTy, stringKind: "string"}, stringKind: "string[2]"}},
-		{"address", nil, Type{Kind: reflect.Array, Type: addressT, Size: 20, T: AddressTy, stringKind: "address"}},
+		{"address", nil, Type{Size: 20, T: AddressTy, stringKind: "address"}},
-		{"address[]", nil, Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([]common.Address{}), Elem: &Type{Kind: reflect.Array, Type: addressT, Size: 20, T: AddressTy, stringKind: "address"}, stringKind: "address[]"}},
+		{"address[]", nil, Type{T: SliceTy, Elem: &Type{Size: 20, T: AddressTy, stringKind: "address"}, stringKind: "address[]"}},
-		{"address[2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]common.Address{}), Elem: &Type{Kind: reflect.Array, Type: addressT, Size: 20, T: AddressTy, stringKind: "address"}, stringKind: "address[2]"}},
+		{"address[2]", nil, Type{T: ArrayTy, Size: 2, Elem: &Type{Size: 20, T: AddressTy, stringKind: "address"}, stringKind: "address[2]"}},
 		// TODO when fixed types are implemented properly
 		// {"fixed", nil, Type{}},
 		// {"fixed128x128", nil, Type{}},
@@ -95,14 +95,14 @@ func TestTypeRegexp(t *testing.T) {
 		// {"fixed[2]", nil, Type{}},
 		// {"fixed128x128[]", nil, Type{}},
 		// {"fixed128x128[2]", nil, Type{}},
-		{"tuple", []ArgumentMarshaling{{Name: "a", Type: "int64"}}, Type{Kind: reflect.Struct, T: TupleTy, Type: reflect.TypeOf(struct {
+		{"tuple", []ArgumentMarshaling{{Name: "a", Type: "int64"}}, Type{T: TupleTy, TupleType: reflect.TypeOf(struct {
 			A int64 `json:"a"`
 		}{}), stringKind: "(int64)",
-			TupleElems: []*Type{{Kind: reflect.Int64, T: IntTy, Type: reflect.TypeOf(int64(0)), Size: 64, stringKind: "int64"}}, TupleRawNames: []string{"a"}}},
+			TupleElems: []*Type{{T: IntTy, Size: 64, stringKind: "int64"}}, TupleRawNames: []string{"a"}}},
-		{"tuple with long name", []ArgumentMarshaling{{Name: "aTypicalParamName", Type: "int64"}}, Type{Kind: reflect.Struct, T: TupleTy, Type: reflect.TypeOf(struct {
+		{"tuple with long name", []ArgumentMarshaling{{Name: "aTypicalParamName", Type: "int64"}}, Type{T: TupleTy, TupleType: reflect.TypeOf(struct {
 			ATypicalParamName int64 `json:"aTypicalParamName"`
 		}{}), stringKind: "(int64)",
-			TupleElems: []*Type{{Kind: reflect.Int64, T: IntTy, Type: reflect.TypeOf(int64(0)), Size: 64, stringKind: "int64"}}, TupleRawNames: []string{"aTypicalParamName"}}},
+			TupleElems: []*Type{{T: IntTy, Size: 64, stringKind: "int64"}}, TupleRawNames: []string{"aTypicalParamName"}}},
 	}
 
 	for _, tt := range tests {
@@ -306,3 +306,27 @@ func TestTypeCheck(t *testing.T) {
 		}
 	}
 }
+
+func TestInternalType(t *testing.T) {
+	components := []ArgumentMarshaling{{Name: "a", Type: "int64"}}
+	internalType := "struct a.b[]"
+	kind := Type{
+		T: TupleTy,
+		TupleType: reflect.TypeOf(struct {
+			A int64 `json:"a"`
+		}{}),
+		stringKind:    "(int64)",
+		TupleRawName:  "ab[]",
+		TupleElems:    []*Type{{T: IntTy, Size: 64, stringKind: "int64"}},
+		TupleRawNames: []string{"a"},
+	}
+
+	blob := "tuple"
+	typ, err := NewType(blob, internalType, components)
+	if err != nil {
+		t.Errorf("type %q: failed to parse type string: %v", blob, err)
+	}
+	if !reflect.DeepEqual(typ, kind) {
+		t.Errorf("type %q: parsed type mismatch:\nGOT %s\nWANT %s ", blob, spew.Sdump(typeWithoutStringer(typ)), spew.Sdump(typeWithoutStringer(kind)))
+	}
+}
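Note (not part of the diff): the tuple expectations above now carry a TupleType built by reflect.StructOf, with one exported Go field per component. The overloadedArgName helper introduced in type.go also renames colliding component names instead of letting StructOf fail. The following sketch is a hypothetical illustration of that behaviour; the duplicate "value" components and the suggested Value0 field name are assumptions, not cases from this change set.

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/accounts/abi"
)

func main() {
	// Two components deliberately share the raw name "value".
	typ, err := abi.NewType("tuple", "", []abi.ArgumentMarshaling{
		{Name: "value", Type: "uint256"},
		{Name: "value", Type: "uint256"},
	})
	if err != nil {
		panic(err)
	}
	// GetType returns the cached TupleType: an anonymous struct whose fields are
	// json-tagged with the raw names; the second field is expected to receive a
	// numeric suffix (e.g. Value0) from the overloaded-name handling.
	fmt.Println(typ.GetType())
}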
@@ -26,45 +26,47 @@ import (
 )
 
 var (
-	maxUint256 = big.NewInt(0).Add(
-		big.NewInt(0).Exp(big.NewInt(2), big.NewInt(256), nil),
-		big.NewInt(-1))
-	maxInt256 = big.NewInt(0).Add(
-		big.NewInt(0).Exp(big.NewInt(2), big.NewInt(255), nil),
-		big.NewInt(-1))
+	// MaxUint256 is the maximum value that can be represented by a uint256
+	MaxUint256 = new(big.Int).Sub(new(big.Int).Lsh(common.Big1, 256), common.Big1)
+	// MaxInt256 is the maximum value that can be represented by a int256
+	MaxInt256 = new(big.Int).Sub(new(big.Int).Lsh(common.Big1, 255), common.Big1)
 )
 
-// reads the integer based on its kind
-func readInteger(typ byte, kind reflect.Kind, b []byte) interface{} {
-	switch kind {
-	case reflect.Uint8:
-		return b[len(b)-1]
-	case reflect.Uint16:
-		return binary.BigEndian.Uint16(b[len(b)-2:])
-	case reflect.Uint32:
-		return binary.BigEndian.Uint32(b[len(b)-4:])
-	case reflect.Uint64:
-		return binary.BigEndian.Uint64(b[len(b)-8:])
-	case reflect.Int8:
+// ReadInteger reads the integer based on its kind and returns the appropriate value
+func ReadInteger(typ Type, b []byte) interface{} {
+	if typ.T == UintTy {
+		switch typ.Size {
+		case 8:
+			return b[len(b)-1]
+		case 16:
+			return binary.BigEndian.Uint16(b[len(b)-2:])
+		case 32:
+			return binary.BigEndian.Uint32(b[len(b)-4:])
+		case 64:
+			return binary.BigEndian.Uint64(b[len(b)-8:])
+		default:
+			// the only case left for unsigned integer is uint256.
+			return new(big.Int).SetBytes(b)
+		}
+	}
+	switch typ.Size {
+	case 8:
 		return int8(b[len(b)-1])
-	case reflect.Int16:
+	case 16:
 		return int16(binary.BigEndian.Uint16(b[len(b)-2:]))
-	case reflect.Int32:
+	case 32:
 		return int32(binary.BigEndian.Uint32(b[len(b)-4:]))
-	case reflect.Int64:
+	case 64:
 		return int64(binary.BigEndian.Uint64(b[len(b)-8:]))
 	default:
-		// the only case lefts for integer is int256/uint256.
-		// big.SetBytes can't tell if a number is negative, positive on itself.
+		// the only case left for integer is int256
+		// big.SetBytes can't tell if a number is negative or positive in itself.
 		// On EVM, if the returned number > max int256, it is negative.
+		// A number is > max int256 if the bit at position 255 is set.
 		ret := new(big.Int).SetBytes(b)
-		if typ == UintTy {
-			return ret
-		}
-
-		if ret.Cmp(maxInt256) > 0 {
-			ret.Add(maxUint256, big.NewInt(0).Neg(ret))
-			ret.Add(ret, big.NewInt(1))
+		if ret.Bit(255) == 1 {
+			ret.Add(MaxUint256, new(big.Int).Neg(ret))
+			ret.Add(ret, common.Big1)
 			ret.Neg(ret)
 		}
 		return ret
@@ -102,13 +104,13 @@ func readFunctionType(t Type, word []byte) (funcTy [24]byte, err error) {
 	return
 }
 
-// through reflection, creates a fixed array to be read from
-func readFixedBytes(t Type, word []byte) (interface{}, error) {
+// ReadFixedBytes uses reflection to create a fixed array to be read from
+func ReadFixedBytes(t Type, word []byte) (interface{}, error) {
 	if t.T != FixedBytesTy {
 		return nil, fmt.Errorf("abi: invalid type in call to make fixed byte array")
 	}
 	// convert
-	array := reflect.New(t.Type).Elem()
+	array := reflect.New(t.GetType()).Elem()
 
 	reflect.Copy(array, reflect.ValueOf(word[0:t.Size]))
 	return array.Interface(), nil
@@ -129,10 +131,10 @@ func forEachUnpack(t Type, output []byte, start, size int) (interface{}, error)
 
 	if t.T == SliceTy {
 		// declare our slice
-		refSlice = reflect.MakeSlice(t.Type, size, size)
+		refSlice = reflect.MakeSlice(t.GetType(), size, size)
 	} else if t.T == ArrayTy {
 		// declare our array
-		refSlice = reflect.New(t.Type).Elem()
+		refSlice = reflect.New(t.GetType()).Elem()
 	} else {
 		return nil, fmt.Errorf("abi: invalid type in array/slice unpacking stage")
 	}
@@ -156,7 +158,7 @@ func forEachUnpack(t Type, output []byte, start, size int) (interface{}, error)
 }
 
 func forTupleUnpack(t Type, output []byte) (interface{}, error) {
-	retval := reflect.New(t.Type).Elem()
+	retval := reflect.New(t.GetType()).Elem()
 	virtualArgs := 0
 	for index, elem := range t.TupleElems {
 		marshalledValue, err := toGoType((index+virtualArgs)*32, *elem, output)
@@ -216,9 +218,8 @@ func toGoType(index int, t Type, output []byte) (interface{}, error) {
 				return nil, err
 			}
 			return forTupleUnpack(t, output[begin:])
-		} else {
-			return forTupleUnpack(t, output[index:])
 		}
+		return forTupleUnpack(t, output[index:])
 	case SliceTy:
 		return forEachUnpack(t, output[begin:], 0, length)
 	case ArrayTy:
@@ -230,7 +231,7 @@ func toGoType(index int, t Type, output []byte) (interface{}, error) {
 	case StringTy: // variable arrays are written at the end of the return bytes
 		return string(output[begin : begin+length]), nil
 	case IntTy, UintTy:
-		return readInteger(t.T, t.Kind, returnOutput), nil
+		return ReadInteger(t, returnOutput), nil
 	case BoolTy:
 		return readBool(returnOutput)
 	case AddressTy:
@@ -240,7 +241,7 @@ func toGoType(index int, t Type, output []byte) (interface{}, error) {
 	case BytesTy:
 		return output[begin : begin+length], nil
 	case FixedBytesTy:
-		return readFixedBytes(t, returnOutput)
+		return ReadFixedBytes(t, returnOutput)
 	case FunctionTy:
 		return readFunctionType(t, returnOutput)
 	default:
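Note (not part of the diff): the rewritten integer decoder above determines the sign of an int256 word from bit 255 and reconstructs the negative value from its two's-complement encoding as value - 2^256. The standalone sketch below works through that arithmetic for a word of all 0xff bytes using plain math/big (the library's MaxUint256 and common.Big1 constants are replaced with locally computed equivalents purely for illustration).

package main

import (
	"fmt"
	"math/big"
)

func main() {
	// maxUint256 = 2^256 - 1, mirroring the MaxUint256 constant introduced above.
	maxUint256 := new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 256), big.NewInt(1))

	// A 32-byte word of all 0xff is the two's-complement encoding of -1.
	word := make([]byte, 32)
	for i := range word {
		word[i] = 0xff
	}

	ret := new(big.Int).SetBytes(word) // 2^256 - 1 as an unsigned value
	if ret.Bit(255) == 1 {             // sign bit set: value is negative
		ret.Add(maxUint256, new(big.Int).Neg(ret)) // (2^256 - 1) - ret = 0
		ret.Add(ret, big.NewInt(1))                // + 1              = 1
		ret.Neg(ret)                               // negate           = -1
	}
	fmt.Println(ret) // -1
}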
@@ -30,6 +30,34 @@ import (
|
|||||||
"github.com/stretchr/testify/require"
|
"github.com/stretchr/testify/require"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// TestUnpack tests the general pack/unpack tests in packing_test.go
|
||||||
|
func TestUnpack(t *testing.T) {
|
||||||
|
for i, test := range packUnpackTests {
|
||||||
|
t.Run(strconv.Itoa(i)+" "+test.def, func(t *testing.T) {
|
||||||
|
//Unpack
|
||||||
|
def := fmt.Sprintf(`[{ "name" : "method", "type": "function", "outputs": %s}]`, test.def)
|
||||||
|
abi, err := JSON(strings.NewReader(def))
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("invalid ABI definition %s: %v", def, err)
|
||||||
|
}
|
||||||
|
encb, err := hex.DecodeString(test.packed)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("invalid hex %s: %v", test.packed, err)
|
||||||
|
}
|
||||||
|
outptr := reflect.New(reflect.TypeOf(test.unpacked))
|
||||||
|
err = abi.Unpack(outptr.Interface(), "method", encb)
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("test %d (%v) failed: %v", i, test.def, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
out := outptr.Elem().Interface()
|
||||||
|
if !reflect.DeepEqual(test.unpacked, out) {
|
||||||
|
t.Errorf("test %d (%v) failed: expected %v, got %v", i, test.def, test.unpacked, out)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
type unpackTest struct {
|
type unpackTest struct {
|
||||||
def string // ABI definition JSON
|
def string // ABI definition JSON
|
||||||
enc string // evm return data
|
enc string // evm return data
|
||||||
@@ -52,16 +80,6 @@ func (test unpackTest) checkError(err error) error {
|
|||||||
|
|
||||||
var unpackTests = []unpackTest{
|
var unpackTests = []unpackTest{
|
||||||
// Bools
|
// Bools
|
||||||
{
|
|
||||||
def: `[{ "type": "bool" }]`,
|
|
||||||
enc: "0000000000000000000000000000000000000000000000000000000000000001",
|
|
||||||
want: true,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
def: `[{ "type": "bool" }]`,
|
|
||||||
enc: "0000000000000000000000000000000000000000000000000000000000000000",
|
|
||||||
want: false,
|
|
||||||
},
|
|
||||||
{
|
{
|
||||||
def: `[{ "type": "bool" }]`,
|
def: `[{ "type": "bool" }]`,
|
||||||
enc: "0000000000000000000000000000000000000000000000000001000000000001",
|
enc: "0000000000000000000000000000000000000000000000000001000000000001",
|
||||||
@@ -75,11 +93,6 @@ var unpackTests = []unpackTest{
|
|||||||
err: "abi: improperly encoded boolean value",
|
err: "abi: improperly encoded boolean value",
|
||||||
},
|
},
|
||||||
// Integers
|
// Integers
|
||||||
{
|
|
||||||
def: `[{"type": "uint32"}]`,
|
|
||||||
enc: "0000000000000000000000000000000000000000000000000000000000000001",
|
|
||||||
want: uint32(1),
|
|
||||||
},
|
|
||||||
{
|
{
|
||||||
def: `[{"type": "uint32"}]`,
|
def: `[{"type": "uint32"}]`,
|
||||||
enc: "0000000000000000000000000000000000000000000000000000000000000001",
|
enc: "0000000000000000000000000000000000000000000000000000000000000001",
|
||||||
@@ -92,16 +105,6 @@ var unpackTests = []unpackTest{
|
|||||||
want: uint16(0),
|
want: uint16(0),
|
||||||
err: "abi: cannot unmarshal *big.Int in to uint16",
|
err: "abi: cannot unmarshal *big.Int in to uint16",
|
||||||
},
|
},
|
||||||
{
|
|
||||||
def: `[{"type": "uint17"}]`,
|
|
||||||
enc: "0000000000000000000000000000000000000000000000000000000000000001",
|
|
||||||
want: big.NewInt(1),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
def: `[{"type": "int32"}]`,
|
|
||||||
enc: "0000000000000000000000000000000000000000000000000000000000000001",
|
|
||||||
want: int32(1),
|
|
||||||
},
|
|
||||||
{
|
{
|
||||||
def: `[{"type": "int32"}]`,
|
def: `[{"type": "int32"}]`,
|
||||||
enc: "0000000000000000000000000000000000000000000000000000000000000001",
|
enc: "0000000000000000000000000000000000000000000000000000000000000001",
|
||||||
@@ -114,38 +117,10 @@ var unpackTests = []unpackTest{
|
|||||||
want: int16(0),
|
want: int16(0),
|
||||||
err: "abi: cannot unmarshal *big.Int in to int16",
|
err: "abi: cannot unmarshal *big.Int in to int16",
|
||||||
},
|
},
|
||||||
{
|
|
||||||
def: `[{"type": "int17"}]`,
|
|
||||||
enc: "0000000000000000000000000000000000000000000000000000000000000001",
|
|
||||||
want: big.NewInt(1),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
def: `[{"type": "int256"}]`,
|
|
||||||
enc: "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
|
|
||||||
want: big.NewInt(-1),
|
|
||||||
},
|
|
||||||
// Address
|
|
||||||
{
|
|
||||||
def: `[{"type": "address"}]`,
|
|
||||||
enc: "0000000000000000000000000100000000000000000000000000000000000000",
|
|
||||||
want: common.Address{1},
|
|
||||||
},
|
|
||||||
// Bytes
|
|
||||||
{
|
|
||||||
def: `[{"type": "bytes32"}]`,
|
|
||||||
enc: "0100000000000000000000000000000000000000000000000000000000000000",
|
|
||||||
want: [32]byte{1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
|
|
||||||
},
|
|
||||||
{
|
{
|
||||||
def: `[{"type": "bytes"}]`,
|
def: `[{"type": "bytes"}]`,
|
||||||
enc: "000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000200100000000000000000000000000000000000000000000000000000000000000",
|
enc: "000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000200100000000000000000000000000000000000000000000000000000000000000",
|
||||||
want: common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
want: [32]byte{1},
|
||||||
},
|
|
||||||
{
|
|
||||||
def: `[{"type": "bytes"}]`,
|
|
||||||
enc: "000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000200100000000000000000000000000000000000000000000000000000000000000",
|
|
||||||
want: [32]byte{},
|
|
||||||
err: "abi: cannot unmarshal []uint8 in to [32]uint8",
|
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
def: `[{"type": "bytes32"}]`,
|
def: `[{"type": "bytes32"}]`,
|
||||||
@@ -153,245 +128,13 @@ var unpackTests = []unpackTest{
|
|||||||
want: []byte(nil),
|
want: []byte(nil),
|
||||||
err: "abi: cannot unmarshal [32]uint8 in to []uint8",
|
err: "abi: cannot unmarshal [32]uint8 in to []uint8",
|
||||||
},
|
},
|
||||||
{
|
|
||||||
def: `[{"type": "bytes32"}]`,
|
|
||||||
enc: "0100000000000000000000000000000000000000000000000000000000000000",
|
|
||||||
want: [32]byte{1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
|
|
||||||
},
|
|
||||||
// Functions
|
|
||||||
{
|
|
||||||
def: `[{"type": "function"}]`,
|
|
||||||
enc: "0100000000000000000000000000000000000000000000000000000000000000",
|
|
||||||
want: [24]byte{1},
|
|
||||||
},
|
|
||||||
// Slice and Array
|
|
||||||
{
|
|
||||||
def: `[{"type": "uint8[]"}]`,
|
|
||||||
enc: "0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
|
|
||||||
want: []uint8{1, 2},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
def: `[{"type": "uint8[]"}]`,
|
|
||||||
enc: "00000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000000",
|
|
||||||
want: []uint8{},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
def: `[{"type": "uint256[]"}]`,
|
|
||||||
enc: "00000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000000",
|
|
||||||
want: []*big.Int{},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
def: `[{"type": "uint8[2]"}]`,
|
|
||||||
enc: "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
|
|
||||||
want: [2]uint8{1, 2},
|
|
||||||
},
|
|
||||||
// multi dimensional, if these pass, all types that don't require length prefix should pass
|
|
||||||
{
|
|
||||||
def: `[{"type": "uint8[][]"}]`,
|
|
||||||
enc: "00000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000000",
|
|
||||||
want: [][]uint8{},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
def: `[{"type": "uint8[][]"}]`,
|
|
||||||
enc: "00000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
|
|
||||||
want: [][]uint8{{1, 2}, {1, 2}},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
def: `[{"type": "uint8[][]"}]`,
|
|
||||||
enc: "00000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000003000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000003",
|
|
||||||
want: [][]uint8{{1, 2}, {1, 2, 3}},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
def: `[{"type": "uint8[2][2]"}]`,
|
|
||||||
enc: "0000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
|
|
||||||
want: [2][2]uint8{{1, 2}, {1, 2}},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
def: `[{"type": "uint8[][2]"}]`,
|
|
||||||
enc: "00000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
|
|
||||||
want: [2][]uint8{{}, {}},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
def: `[{"type": "uint8[][2]"}]`,
|
|
||||||
enc: "0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000001",
|
|
||||||
want: [2][]uint8{{1}, {1}},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
def: `[{"type": "uint8[2][]"}]`,
|
|
||||||
enc: "00000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000000",
|
|
||||||
want: [][2]uint8{},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
def: `[{"type": "uint8[2][]"}]`,
|
|
||||||
enc: "0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
|
|
||||||
want: [][2]uint8{{1, 2}},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
def: `[{"type": "uint8[2][]"}]`,
|
|
||||||
enc: "000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
|
|
||||||
want: [][2]uint8{{1, 2}, {1, 2}},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
def: `[{"type": "uint16[]"}]`,
|
|
||||||
enc: "0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
|
|
||||||
want: []uint16{1, 2},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
def: `[{"type": "uint16[2]"}]`,
|
|
||||||
enc: "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
|
|
||||||
want: [2]uint16{1, 2},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
def: `[{"type": "uint32[]"}]`,
|
|
||||||
enc: "0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
|
|
||||||
want: []uint32{1, 2},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
def: `[{"type": "uint32[2]"}]`,
|
|
||||||
enc: "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
|
|
||||||
want: [2]uint32{1, 2},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
def: `[{"type": "uint32[2][3][4]"}]`,
|
|
||||||
enc: "000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000003000000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000050000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000000700000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000009000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000b000000000000000000000000000000000000000000000000000000000000000c000000000000000000000000000000000000000000000000000000000000000d000000000000000000000000000000000000000000000000000000000000000e000000000000000000000000000000000000000000000000000000000000000f000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000110000000000000000000000000000000000000000000000000000000000000012000000000000000000000000000000000000000000000000000000000000001300000000000000000000000000000000000000000000000000000000000000140000000000000000000000000000000000000000000000000000000000000015000000000000000000000000000000000000000000000000000000000000001600000000000000000000000000000000000000000000000000000000000000170000000000000000000000000000000000000000000000000000000000000018",
|
|
||||||
want: [4][3][2]uint32{{{1, 2}, {3, 4}, {5, 6}}, {{7, 8}, {9, 10}, {11, 12}}, {{13, 14}, {15, 16}, {17, 18}}, {{19, 20}, {21, 22}, {23, 24}}},
|
|
||||||
},
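Aside, illustrative only: the Solidity type `uint32[2][3][4]` unpacks into the Go type `[4][3][2]uint32`, so the dimensions read in the opposite order, and the 24 fixture words 1..24 fill the Go value in the loop order shown below.

```go
package main

import "fmt"

func main() {
	// How the flat word sequence 1..24 in the fixture fills the Go value
	// for the Solidity type uint32[2][3][4]: dimensions are read in reverse.
	var got [4][3][2]uint32
	v := uint32(1)
	for i := range got { // Solidity's outermost [4] varies slowest
		for j := range got[i] {
			for k := range got[i][j] {
				got[i][j][k] = v
				v++
			}
		}
	}
	fmt.Println(got[0]) // [[1 2] [3 4] [5 6]], matching the first block of want above
}
```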
|
|
||||||
{
|
|
||||||
def: `[{"type": "uint64[]"}]`,
|
|
||||||
enc: "0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
|
|
||||||
want: []uint64{1, 2},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
def: `[{"type": "uint64[2]"}]`,
|
|
||||||
enc: "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
|
|
||||||
want: [2]uint64{1, 2},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
def: `[{"type": "uint256[]"}]`,
|
|
||||||
enc: "0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
|
|
||||||
want: []*big.Int{big.NewInt(1), big.NewInt(2)},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
def: `[{"type": "uint256[3]"}]`,
|
|
||||||
enc: "000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000003",
|
|
||||||
want: [3]*big.Int{big.NewInt(1), big.NewInt(2), big.NewInt(3)},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
def: `[{"type": "string[4]"}]`,
|
|
||||||
enc: "0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000c000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000140000000000000000000000000000000000000000000000000000000000000000548656c6c6f0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000005576f726c64000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000b476f2d657468657265756d0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008457468657265756d000000000000000000000000000000000000000000000000",
|
|
||||||
want: [4]string{"Hello", "World", "Go-ethereum", "Ethereum"},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
def: `[{"type": "string[]"}]`,
|
|
||||||
enc: "00000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000008457468657265756d000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000b676f2d657468657265756d000000000000000000000000000000000000000000",
|
|
||||||
want: []string{"Ethereum", "go-ethereum"},
|
|
||||||
},
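Aside, not part of the test file: in the `string[4]` and `string[]` fixtures each element of the tail is encoded as a length word followed by the UTF-8 bytes right-padded to a 32-byte boundary. The sketch below decodes one such padded word by hand.

```go
package main

import (
	"encoding/hex"
	"fmt"
	"strings"
)

func main() {
	// One tail word from the string[] fixture above: 8 bytes of UTF-8
	// ("Ethereum") right-padded with zeros to a full 32-byte word; the
	// length word 0x08 immediately precedes it in the encoding.
	padded, _ := hex.DecodeString("457468657265756d" + strings.Repeat("0", 48))
	length := 8 // taken from the preceding length word
	fmt.Println(string(padded[:length])) // Ethereum
}
```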
|
|
||||||
{
|
|
||||||
def: `[{"type": "bytes[]"}]`,
|
|
||||||
enc: "00000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000003f0f0f000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003f0f0f00000000000000000000000000000000000000000000000000000000000",
|
|
||||||
want: [][]byte{{0xf0, 0xf0, 0xf0}, {0xf0, 0xf0, 0xf0}},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
def: `[{"type": "uint256[2][][]"}]`,
|
|
||||||
enc: "00000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000e00000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000c8000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000003e80000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000c8000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000003e8",
|
|
||||||
want: [][][2]*big.Int{{{big.NewInt(1), big.NewInt(200)}, {big.NewInt(1), big.NewInt(1000)}}, {{big.NewInt(1), big.NewInt(200)}, {big.NewInt(1), big.NewInt(1000)}}},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
def: `[{"type": "int8[]"}]`,
|
|
||||||
enc: "0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
|
|
||||||
want: []int8{1, 2},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
def: `[{"type": "int8[2]"}]`,
|
|
||||||
enc: "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
|
|
||||||
want: [2]int8{1, 2},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
def: `[{"type": "int16[]"}]`,
|
|
||||||
enc: "0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
|
|
||||||
want: []int16{1, 2},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
def: `[{"type": "int16[2]"}]`,
|
|
||||||
enc: "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
|
|
||||||
want: [2]int16{1, 2},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
def: `[{"type": "int32[]"}]`,
|
|
||||||
enc: "0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
|
|
||||||
want: []int32{1, 2},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
def: `[{"type": "int32[2]"}]`,
|
|
||||||
enc: "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
|
|
||||||
want: [2]int32{1, 2},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
def: `[{"type": "int64[]"}]`,
|
|
||||||
enc: "0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
|
|
||||||
want: []int64{1, 2},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
def: `[{"type": "int64[2]"}]`,
|
|
||||||
enc: "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
|
|
||||||
want: [2]int64{1, 2},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
def: `[{"type": "int256[]"}]`,
|
|
||||||
enc: "0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
|
|
||||||
want: []*big.Int{big.NewInt(1), big.NewInt(2)},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
def: `[{"type": "int256[3]"}]`,
|
|
||||||
enc: "000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000003",
|
|
||||||
want: [3]*big.Int{big.NewInt(1), big.NewInt(2), big.NewInt(3)},
|
|
||||||
},
|
|
||||||
// struct outputs
|
|
||||||
{
|
|
||||||
def: `[{"name":"int1","type":"int256"},{"name":"int2","type":"int256"}]`,
|
|
||||||
enc: "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
|
|
||||||
want: struct {
|
|
||||||
Int1 *big.Int
|
|
||||||
Int2 *big.Int
|
|
||||||
}{big.NewInt(1), big.NewInt(2)},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
def: `[{"name":"int_one","type":"int256"}]`,
|
|
||||||
enc: "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
|
|
||||||
want: struct {
|
|
||||||
IntOne *big.Int
|
|
||||||
}{big.NewInt(1)},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
def: `[{"name":"int__one","type":"int256"}]`,
|
|
||||||
enc: "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
|
|
||||||
want: struct {
|
|
||||||
IntOne *big.Int
|
|
||||||
}{big.NewInt(1)},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
def: `[{"name":"int_one_","type":"int256"}]`,
|
|
||||||
enc: "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
|
|
||||||
want: struct {
|
|
||||||
IntOne *big.Int
|
|
||||||
}{big.NewInt(1)},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
def: `[{"name":"int_one","type":"int256"}, {"name":"intone","type":"int256"}]`,
|
|
||||||
enc: "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
|
|
||||||
want: struct {
|
|
||||||
IntOne *big.Int
|
|
||||||
Intone *big.Int
|
|
||||||
}{big.NewInt(1), big.NewInt(2)},
|
|
||||||
},
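Aside, illustrative only: these struct-output cases exercise how ABI output names are mapped onto exported Go struct fields. The helper below is hypothetical (the abi package has its own implementation) but sketches the rule the fixtures imply: underscores are dropped and the following letter is upper-cased, so "int_one", "int__one" and "int_one_" all target the field IntOne.

```go
package main

import (
	"fmt"
	"strings"
)

// fieldName is a hypothetical sketch of the name mapping these cases test.
func fieldName(abiName string) string {
	var b strings.Builder
	for _, p := range strings.Split(abiName, "_") {
		if p == "" {
			continue
		}
		b.WriteString(strings.ToUpper(p[:1]) + p[1:])
	}
	return b.String()
}

func main() {
	fmt.Println(fieldName("int_one"), fieldName("int__one"), fieldName("int_one_")) // IntOne IntOne IntOne
	fmt.Println(fieldName("___") == "") // true: nothing exported remains, hence the error case below
}
```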
|
|
||||||
{
|
{
|
||||||
def: `[{"name":"___","type":"int256"}]`,
|
def: `[{"name":"___","type":"int256"}]`,
|
||||||
enc: "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
|
enc: "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
|
||||||
want: struct {
|
want: struct {
|
||||||
IntOne *big.Int
|
IntOne *big.Int
|
||||||
Intone *big.Int
|
Intone *big.Int
|
||||||
}{},
|
}{IntOne: big.NewInt(1)},
|
||||||
err: "abi: purely underscored output cannot unpack to struct",
|
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
def: `[{"name":"int_one","type":"int256"},{"name":"IntOne","type":"int256"}]`,
|
def: `[{"name":"int_one","type":"int256"},{"name":"IntOne","type":"int256"}]`,
|
||||||
@@ -438,12 +181,37 @@ var unpackTests = []unpackTest{
|
|||||||
}{},
|
}{},
|
||||||
err: "abi: purely underscored output cannot unpack to struct",
|
err: "abi: purely underscored output cannot unpack to struct",
|
||||||
},
|
},
|
||||||
|
// Make sure only the first argument is consumed
|
||||||
|
{
|
||||||
|
def: `[{"name":"int_one","type":"int256"}]`,
|
||||||
|
enc: "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
|
||||||
|
want: struct {
|
||||||
|
IntOne *big.Int
|
||||||
|
}{big.NewInt(1)},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
def: `[{"name":"int__one","type":"int256"}]`,
|
||||||
|
enc: "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
|
||||||
|
want: struct {
|
||||||
|
IntOne *big.Int
|
||||||
|
}{big.NewInt(1)},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
def: `[{"name":"int_one_","type":"int256"}]`,
|
||||||
|
enc: "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
|
||||||
|
want: struct {
|
||||||
|
IntOne *big.Int
|
||||||
|
}{big.NewInt(1)},
|
||||||
|
},
|
||||||
}
|
}
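Aside, not part of the test file: the three trailing cases under "Make sure only the first argument is consumed" encode two 32-byte words but declare a single int256 output, so only the first word should end up in IntOne and the spare word must be ignored. A tiny stand-alone illustration, without the abi package:

```go
package main

import (
	"fmt"
	"math/big"
)

func main() {
	// Two encoded words, one declared output: only word 0 is consumed.
	enc := make([]byte, 64)
	enc[31], enc[63] = 1, 2 // word 0 = 1, word 1 = 2
	fmt.Println(new(big.Int).SetBytes(enc[:32])) // 1; the second word is ignored
}
```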
|
||||||
|
|
||||||
func TestUnpack(t *testing.T) {
|
// TestLocalUnpackTests runs tests specially designed only for unpacking.
|
||||||
|
// All test cases that can be used to test packing and unpacking should move to packing_test.go
|
||||||
|
func TestLocalUnpackTests(t *testing.T) {
|
||||||
for i, test := range unpackTests {
|
for i, test := range unpackTests {
|
||||||
t.Run(strconv.Itoa(i), func(t *testing.T) {
|
t.Run(strconv.Itoa(i), func(t *testing.T) {
|
||||||
def := fmt.Sprintf(`[{ "name" : "method", "outputs": %s}]`, test.def)
|
// Unpack
|
||||||
|
def := fmt.Sprintf(`[{ "name" : "method", "type": "function", "outputs": %s}]`, test.def)
|
||||||
abi, err := JSON(strings.NewReader(def))
|
abi, err := JSON(strings.NewReader(def))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("invalid ABI definition %s: %v", def, err)
|
t.Fatalf("invalid ABI definition %s: %v", def, err)
|
||||||
@@ -522,7 +290,7 @@ type methodMultiOutput struct {
|
|||||||
|
|
||||||
func methodMultiReturn(require *require.Assertions) (ABI, []byte, methodMultiOutput) {
|
func methodMultiReturn(require *require.Assertions) (ABI, []byte, methodMultiOutput) {
|
||||||
const definition = `[
|
const definition = `[
|
||||||
{ "name" : "multi", "constant" : false, "outputs": [ { "name": "Int", "type": "uint256" }, { "name": "String", "type": "string" } ] }]`
|
{ "name" : "multi", "type": "function", "outputs": [ { "name": "Int", "type": "uint256" }, { "name": "String", "type": "string" } ] }]`
|
||||||
var expected = methodMultiOutput{big.NewInt(1), "hello"}
|
var expected = methodMultiOutput{big.NewInt(1), "hello"}
|
||||||
|
|
||||||
abi, err := JSON(strings.NewReader(definition))
|
abi, err := JSON(strings.NewReader(definition))
|
||||||
@@ -592,7 +360,7 @@ func TestMethodMultiReturn(t *testing.T) {
|
|||||||
}, {
|
}, {
|
||||||
&[]interface{}{new(int)},
|
&[]interface{}{new(int)},
|
||||||
&[]interface{}{},
|
&[]interface{}{},
|
||||||
"abi: insufficient number of elements in the list/array for unpack, want 2, got 1",
|
"abi: insufficient number of arguments for unpack, want 2, got 1",
|
||||||
"Can not unpack into a slice with wrong types",
|
"Can not unpack into a slice with wrong types",
|
||||||
}}
|
}}
|
||||||
for _, tc := range testCases {
|
for _, tc := range testCases {
|
||||||
@@ -611,7 +379,7 @@ func TestMethodMultiReturn(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestMultiReturnWithArray(t *testing.T) {
|
func TestMultiReturnWithArray(t *testing.T) {
|
||||||
const definition = `[{"name" : "multi", "outputs": [{"type": "uint64[3]"}, {"type": "uint64"}]}]`
|
const definition = `[{"name" : "multi", "type": "function", "outputs": [{"type": "uint64[3]"}, {"type": "uint64"}]}]`
|
||||||
abi, err := JSON(strings.NewReader(definition))
|
abi, err := JSON(strings.NewReader(definition))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
@@ -634,7 +402,7 @@ func TestMultiReturnWithArray(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestMultiReturnWithStringArray(t *testing.T) {
|
func TestMultiReturnWithStringArray(t *testing.T) {
|
||||||
const definition = `[{"name" : "multi", "outputs": [{"name": "","type": "uint256[3]"},{"name": "","type": "address"},{"name": "","type": "string[2]"},{"name": "","type": "bool"}]}]`
|
const definition = `[{"name" : "multi", "type": "function", "outputs": [{"name": "","type": "uint256[3]"},{"name": "","type": "address"},{"name": "","type": "string[2]"},{"name": "","type": "bool"}]}]`
|
||||||
abi, err := JSON(strings.NewReader(definition))
|
abi, err := JSON(strings.NewReader(definition))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
@@ -664,7 +432,7 @@ func TestMultiReturnWithStringArray(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestMultiReturnWithStringSlice(t *testing.T) {
|
func TestMultiReturnWithStringSlice(t *testing.T) {
|
||||||
const definition = `[{"name" : "multi", "outputs": [{"name": "","type": "string[]"},{"name": "","type": "uint256[]"}]}]`
|
const definition = `[{"name" : "multi", "type": "function", "outputs": [{"name": "","type": "string[]"},{"name": "","type": "uint256[]"}]}]`
|
||||||
abi, err := JSON(strings.NewReader(definition))
|
abi, err := JSON(strings.NewReader(definition))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
@@ -700,7 +468,7 @@ func TestMultiReturnWithDeeplyNestedArray(t *testing.T) {
|
|||||||
// values of nested static arrays count towards the size as well, and any element following
|
// values of nested static arrays count towards the size as well, and any element following
|
||||||
// after such nested array argument should be read with the correct offset,
|
// after such nested array argument should be read with the correct offset,
|
||||||
// so that it does not read content from the previous array argument.
|
// so that it does not read content from the previous array argument.
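A quick worked instance of that offset arithmetic (illustrative only, not from the test below): the static array in this test occupies its full element count in the head, so the argument after it starts well past the first word.

```go
package main

import "fmt"

func main() {
	// uint64[3][2][4] is fully static: 4*2*3 elements, one 32-byte word each,
	// all inlined in the head. The uint64 that follows therefore starts at
	// byte 768, not right after the first word.
	elems := 4 * 2 * 3
	fmt.Println(elems * 32) // 768
}
```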
|
||||||
const definition = `[{"name" : "multi", "outputs": [{"type": "uint64[3][2][4]"}, {"type": "uint64"}]}]`
|
const definition = `[{"name" : "multi", "type": "function", "outputs": [{"type": "uint64[3][2][4]"}, {"type": "uint64"}]}]`
|
||||||
abi, err := JSON(strings.NewReader(definition))
|
abi, err := JSON(strings.NewReader(definition))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
@@ -737,15 +505,15 @@ func TestMultiReturnWithDeeplyNestedArray(t *testing.T) {
|
|||||||
|
|
||||||
func TestUnmarshal(t *testing.T) {
|
func TestUnmarshal(t *testing.T) {
|
||||||
const definition = `[
|
const definition = `[
|
||||||
{ "name" : "int", "constant" : false, "outputs": [ { "type": "uint256" } ] },
|
{ "name" : "int", "type": "function", "outputs": [ { "type": "uint256" } ] },
|
||||||
{ "name" : "bool", "constant" : false, "outputs": [ { "type": "bool" } ] },
|
{ "name" : "bool", "type": "function", "outputs": [ { "type": "bool" } ] },
|
||||||
{ "name" : "bytes", "constant" : false, "outputs": [ { "type": "bytes" } ] },
|
{ "name" : "bytes", "type": "function", "outputs": [ { "type": "bytes" } ] },
|
||||||
{ "name" : "fixed", "constant" : false, "outputs": [ { "type": "bytes32" } ] },
|
{ "name" : "fixed", "type": "function", "outputs": [ { "type": "bytes32" } ] },
|
||||||
{ "name" : "multi", "constant" : false, "outputs": [ { "type": "bytes" }, { "type": "bytes" } ] },
|
{ "name" : "multi", "type": "function", "outputs": [ { "type": "bytes" }, { "type": "bytes" } ] },
|
||||||
{ "name" : "intArraySingle", "constant" : false, "outputs": [ { "type": "uint256[3]" } ] },
|
{ "name" : "intArraySingle", "type": "function", "outputs": [ { "type": "uint256[3]" } ] },
|
||||||
{ "name" : "addressSliceSingle", "constant" : false, "outputs": [ { "type": "address[]" } ] },
|
{ "name" : "addressSliceSingle", "type": "function", "outputs": [ { "type": "address[]" } ] },
|
||||||
{ "name" : "addressSliceDouble", "constant" : false, "outputs": [ { "name": "a", "type": "address[]" }, { "name": "b", "type": "address[]" } ] },
|
{ "name" : "addressSliceDouble", "type": "function", "outputs": [ { "name": "a", "type": "address[]" }, { "name": "b", "type": "address[]" } ] },
|
||||||
{ "name" : "mixedBytes", "constant" : true, "outputs": [ { "name": "a", "type": "bytes" }, { "name": "b", "type": "bytes32" } ] }]`
|
{ "name" : "mixedBytes", "type": "function", "stateMutability" : "view", "outputs": [ { "name": "a", "type": "bytes" }, { "name": "b", "type": "bytes32" } ] }]`
|
||||||
|
|
||||||
abi, err := JSON(strings.NewReader(definition))
|
abi, err := JSON(strings.NewReader(definition))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -985,7 +753,7 @@ func TestUnmarshal(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestUnpackTuple(t *testing.T) {
|
func TestUnpackTuple(t *testing.T) {
|
||||||
const simpleTuple = `[{"name":"tuple","constant":false,"outputs":[{"type":"tuple","name":"ret","components":[{"type":"int256","name":"a"},{"type":"int256","name":"b"}]}]}]`
|
const simpleTuple = `[{"name":"tuple","type":"function","outputs":[{"type":"tuple","name":"ret","components":[{"type":"int256","name":"a"},{"type":"int256","name":"b"}]}]}]`
|
||||||
abi, err := JSON(strings.NewReader(simpleTuple))
|
abi, err := JSON(strings.NewReader(simpleTuple))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
@@ -1009,12 +777,12 @@ func TestUnpackTuple(t *testing.T) {
|
|||||||
t.Errorf("unexpected value unpacked: want %x, got %x", 1, v.A)
|
t.Errorf("unexpected value unpacked: want %x, got %x", 1, v.A)
|
||||||
}
|
}
|
||||||
if v.B.Cmp(big.NewInt(-1)) != 0 {
|
if v.B.Cmp(big.NewInt(-1)) != 0 {
|
||||||
t.Errorf("unexpected value unpacked: want %x, got %x", v.B, -1)
|
t.Errorf("unexpected value unpacked: want %x, got %x", -1, v.B)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Test nested tuple
|
// Test nested tuple
|
||||||
const nestedTuple = `[{"name":"tuple","constant":false,"outputs":[
|
const nestedTuple = `[{"name":"tuple","type":"function","outputs":[
|
||||||
{"type":"tuple","name":"s","components":[{"type":"uint256","name":"a"},{"type":"uint256[]","name":"b"},{"type":"tuple[]","name":"c","components":[{"name":"x", "type":"uint256"},{"name":"y","type":"uint256"}]}]},
|
{"type":"tuple","name":"s","components":[{"type":"uint256","name":"a"},{"type":"uint256[]","name":"b"},{"type":"tuple[]","name":"c","components":[{"name":"x", "type":"uint256"},{"name":"y","type":"uint256"}]}]},
|
||||||
{"type":"tuple","name":"t","components":[{"name":"x", "type":"uint256"},{"name":"y","type":"uint256"}]},
|
{"type":"tuple","name":"t","components":[{"name":"x", "type":"uint256"},{"name":"y","type":"uint256"}]},
|
||||||
{"type":"uint256","name":"a"}
|
{"type":"uint256","name":"a"}
|
||||||
@@ -1136,7 +904,7 @@ func TestOOMMaliciousInput(t *testing.T) {
|
|||||||
},
|
},
|
||||||
}
|
}
|
||||||
for i, test := range oomTests {
|
for i, test := range oomTests {
|
||||||
def := fmt.Sprintf(`[{ "name" : "method", "outputs": %s}]`, test.def)
|
def := fmt.Sprintf(`[{ "name" : "method", "type": "function", "outputs": %s}]`, test.def)
|
||||||
abi, err := JSON(strings.NewReader(def))
|
abi, err := JSON(strings.NewReader(def))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("invalid ABI definition %s: %v", def, err)
|
t.Fatalf("invalid ABI definition %s: %v", def, err)
|
||||||
|
|||||||
@@ -129,6 +129,8 @@ type Wallet interface {
|
|||||||
// about which fields or actions are needed. The user may retry by providing
|
// about which fields or actions are needed. The user may retry by providing
|
||||||
// the needed details via SignHashWithPassphrase, or by other means (e.g. unlock
|
// the needed details via SignHashWithPassphrase, or by other means (e.g. unlock
|
||||||
// the account in a keystore).
|
// the account in a keystore).
|
||||||
|
//
|
||||||
|
// This method should return the signature in 'canonical' format, with v 0 or 1
|
||||||
SignText(account Account, text []byte) ([]byte, error)
|
SignText(account Account, text []byte) ([]byte, error)
|
||||||
|
|
||||||
// SignTextWithPassphrase is identical to SignText, but also takes a password
|
// SignTextWithPassphrase is identical to SignText, but also takes a password
|
||||||
|
|||||||
28  accounts/external/backend.go  (vendored)
@@ -27,7 +27,6 @@ import (
|
|||||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||||
"github.com/ethereum/go-ethereum/core/types"
|
"github.com/ethereum/go-ethereum/core/types"
|
||||||
"github.com/ethereum/go-ethereum/event"
|
"github.com/ethereum/go-ethereum/event"
|
||||||
"github.com/ethereum/go-ethereum/internal/ethapi"
|
|
||||||
"github.com/ethereum/go-ethereum/log"
|
"github.com/ethereum/go-ethereum/log"
|
||||||
"github.com/ethereum/go-ethereum/rpc"
|
"github.com/ethereum/go-ethereum/rpc"
|
||||||
"github.com/ethereum/go-ethereum/signer/core"
|
"github.com/ethereum/go-ethereum/signer/core"
|
||||||
@@ -131,6 +130,12 @@ func (api *ExternalSigner) Accounts() []accounts.Account {
|
|||||||
func (api *ExternalSigner) Contains(account accounts.Account) bool {
|
func (api *ExternalSigner) Contains(account accounts.Account) bool {
|
||||||
api.cacheMu.RLock()
|
api.cacheMu.RLock()
|
||||||
defer api.cacheMu.RUnlock()
|
defer api.cacheMu.RUnlock()
|
||||||
|
if api.cache == nil {
|
||||||
|
// If we haven't already fetched the accounts, it's time to do so now
|
||||||
|
api.cacheMu.RUnlock()
|
||||||
|
api.Accounts()
|
||||||
|
api.cacheMu.RLock()
|
||||||
|
}
|
||||||
for _, a := range api.cache {
|
for _, a := range api.cache {
|
||||||
if a.Address == account.Address && (account.URL == (accounts.URL{}) || account.URL == api.URL()) {
|
if a.Address == account.Address && (account.URL == (accounts.URL{}) || account.URL == api.URL()) {
|
||||||
return true
|
return true
|
||||||
@@ -161,7 +166,7 @@ func (api *ExternalSigner) SignData(account accounts.Account, mimeType string, d
|
|||||||
hexutil.Encode(data)); err != nil {
|
hexutil.Encode(data)); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
// If V is on 27/28-form, convert to to 0/1 for Clique
|
// If V is on 27/28-form, convert to 0/1 for Clique
|
||||||
if mimeType == accounts.MimetypeClique && (res[64] == 27 || res[64] == 28) {
|
if mimeType == accounts.MimetypeClique && (res[64] == 27 || res[64] == 28) {
|
||||||
res[64] -= 27 // Transform V from 27/28 to 0/1 for Clique use
|
res[64] -= 27 // Transform V from 27/28 to 0/1 for Clique use
|
||||||
}
|
}
|
||||||
@@ -169,19 +174,29 @@ func (api *ExternalSigner) SignData(account accounts.Account, mimeType string, d
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (api *ExternalSigner) SignText(account accounts.Account, text []byte) ([]byte, error) {
|
func (api *ExternalSigner) SignText(account accounts.Account, text []byte) ([]byte, error) {
|
||||||
var res hexutil.Bytes
|
var signature hexutil.Bytes
|
||||||
var signAddress = common.NewMixedcaseAddress(account.Address)
|
var signAddress = common.NewMixedcaseAddress(account.Address)
|
||||||
if err := api.client.Call(&res, "account_signData",
|
if err := api.client.Call(&signature, "account_signData",
|
||||||
accounts.MimetypeTextPlain,
|
accounts.MimetypeTextPlain,
|
||||||
&signAddress, // Need to use the pointer here, because of how MarshalJSON is defined
|
&signAddress, // Need to use the pointer here, because of how MarshalJSON is defined
|
||||||
hexutil.Encode(text)); err != nil {
|
hexutil.Encode(text)); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
return res, nil
|
if signature[64] == 27 || signature[64] == 28 {
|
||||||
|
// If clef is used as a backend, it may already have transformed
|
||||||
|
// the signature to ethereum-type signature.
|
||||||
|
signature[64] -= 27 // Transform V from Ethereum-legacy to 0/1
|
||||||
|
}
|
||||||
|
return signature, nil
|
||||||
|
}
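Aside on the change above: the external backend may hand back either the legacy 27/28 V encoding or the 0/1 form, and the added check makes SignText's result canonical (v = 0 or 1), as the Wallet interface comment introduced earlier requires. A minimal sketch with a hypothetical helper name:

```go
package main

import "fmt"

// normalizeV is a hypothetical helper mirroring the check above: accept
// either the legacy 27/28 V encoding or the already-canonical 0/1 form,
// and return 0/1.
func normalizeV(sig []byte) []byte {
	if len(sig) == 65 && (sig[64] == 27 || sig[64] == 28) {
		sig[64] -= 27
	}
	return sig
}

func main() {
	sig := make([]byte, 65)
	sig[64] = 28
	fmt.Println(normalizeV(sig)[64]) // 1
}
```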
|
||||||
|
|
||||||
|
// signTransactionResult represents the signing result returned by clef.
|
||||||
|
type signTransactionResult struct {
|
||||||
|
Raw hexutil.Bytes `json:"raw"`
|
||||||
|
Tx *types.Transaction `json:"tx"`
|
||||||
}
|
}
|
||||||
|
|
||||||
func (api *ExternalSigner) SignTx(account accounts.Account, tx *types.Transaction, chainID *big.Int) (*types.Transaction, error) {
|
func (api *ExternalSigner) SignTx(account accounts.Account, tx *types.Transaction, chainID *big.Int) (*types.Transaction, error) {
|
||||||
res := ethapi.SignTransactionResult{}
|
|
||||||
data := hexutil.Bytes(tx.Data())
|
data := hexutil.Bytes(tx.Data())
|
||||||
var to *common.MixedcaseAddress
|
var to *common.MixedcaseAddress
|
||||||
if tx.To() != nil {
|
if tx.To() != nil {
|
||||||
@@ -197,6 +212,7 @@ func (api *ExternalSigner) SignTx(account accounts.Account, tx *types.Transactio
|
|||||||
To: to,
|
To: to,
|
||||||
From: common.NewMixedcaseAddress(account.Address),
|
From: common.NewMixedcaseAddress(account.Address),
|
||||||
}
|
}
|
||||||
|
var res signTransactionResult
|
||||||
if err := api.client.Call(&res, "account_signTransaction", args); err != nil {
|
if err := api.client.Call(&res, "account_signTransaction", args); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -61,7 +61,7 @@ func TestHDPathParsing(t *testing.T) {
|
|||||||
// Weird inputs just to ensure they work
|
// Weird inputs just to ensure they work
|
||||||
{" m / 44 '\n/\n 60 \n\n\t' /\n0 ' /\t\t 0", DerivationPath{0x80000000 + 44, 0x80000000 + 60, 0x80000000 + 0, 0}},
|
{" m / 44 '\n/\n 60 \n\n\t' /\n0 ' /\t\t 0", DerivationPath{0x80000000 + 44, 0x80000000 + 60, 0x80000000 + 0, 0}},
|
||||||
|
|
||||||
// Invaid derivation paths
|
// Invalid derivation paths
|
||||||
{"", nil}, // Empty relative derivation path
|
{"", nil}, // Empty relative derivation path
|
||||||
{"m", nil}, // Empty absolute derivation path
|
{"m", nil}, // Empty absolute derivation path
|
||||||
{"m/", nil}, // Missing last derivation component
|
{"m/", nil}, // Missing last derivation component
|
||||||
|
|||||||
@@ -24,7 +24,6 @@ import (
|
|||||||
"crypto/ecdsa"
|
"crypto/ecdsa"
|
||||||
crand "crypto/rand"
|
crand "crypto/rand"
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
|
||||||
"math/big"
|
"math/big"
|
||||||
"os"
|
"os"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
@@ -44,6 +43,10 @@ var (
|
|||||||
ErrLocked = accounts.NewAuthNeededError("password or unlock")
|
ErrLocked = accounts.NewAuthNeededError("password or unlock")
|
||||||
ErrNoMatch = errors.New("no key for given address or file")
|
ErrNoMatch = errors.New("no key for given address or file")
|
||||||
ErrDecrypt = errors.New("could not decrypt key with given password")
|
ErrDecrypt = errors.New("could not decrypt key with given password")
|
||||||
|
|
||||||
|
// ErrAccountAlreadyExists is returned if an account attempted to import is
|
||||||
|
// already present in the keystore.
|
||||||
|
ErrAccountAlreadyExists = errors.New("account already exists")
|
||||||
)
|
)
|
||||||
|
|
||||||
// KeyStoreType is the reflect type of a keystore backend.
|
// KeyStoreType is the reflect type of a keystore backend.
|
||||||
@@ -67,7 +70,8 @@ type KeyStore struct {
|
|||||||
updateScope event.SubscriptionScope // Subscription scope tracking current live listeners
|
updateScope event.SubscriptionScope // Subscription scope tracking current live listeners
|
||||||
updating bool // Whether the event notification loop is running
|
updating bool // Whether the event notification loop is running
|
||||||
|
|
||||||
mu sync.RWMutex
|
mu sync.RWMutex
|
||||||
|
importMu sync.Mutex // Import Mutex locks the import to prevent two insertions from racing
|
||||||
}
|
}
|
||||||
|
|
||||||
type unlocked struct {
|
type unlocked struct {
|
||||||
@@ -443,14 +447,27 @@ func (ks *KeyStore) Import(keyJSON []byte, passphrase, newPassphrase string) (ac
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return accounts.Account{}, err
|
return accounts.Account{}, err
|
||||||
}
|
}
|
||||||
|
ks.importMu.Lock()
|
||||||
|
defer ks.importMu.Unlock()
|
||||||
|
|
||||||
|
if ks.cache.hasAddress(key.Address) {
|
||||||
|
return accounts.Account{
|
||||||
|
Address: key.Address,
|
||||||
|
}, ErrAccountAlreadyExists
|
||||||
|
}
|
||||||
return ks.importKey(key, newPassphrase)
|
return ks.importKey(key, newPassphrase)
|
||||||
}
|
}
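Aside: with importMu serialising the check-and-insert, a duplicate import now reports ErrAccountAlreadyExists together with the colliding address instead of an ad-hoc error string. A minimal sketch of how a caller might consume that; importOnce is a hypothetical wrapper, and ks is assumed to be an existing *keystore.KeyStore.

```go
package example

import (
	"errors"
	"log"

	"github.com/ethereum/go-ethereum/accounts/keystore"
)

// importOnce sketches how a caller can use the duplicate reporting added above.
func importOnce(ks *keystore.KeyStore, keyJSON []byte, oldPass, newPass string) {
	acc, err := ks.Import(keyJSON, oldPass, newPass)
	switch {
	case errors.Is(err, keystore.ErrAccountAlreadyExists):
		log.Printf("key already present as %s, skipping", acc.Address.Hex())
	case err != nil:
		log.Fatalf("import failed: %v", err)
	default:
		log.Printf("imported %s", acc.Address.Hex())
	}
}
```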
|
||||||
|
|
||||||
// ImportECDSA stores the given key into the key directory, encrypting it with the passphrase.
|
// ImportECDSA stores the given key into the key directory, encrypting it with the passphrase.
|
||||||
func (ks *KeyStore) ImportECDSA(priv *ecdsa.PrivateKey, passphrase string) (accounts.Account, error) {
|
func (ks *KeyStore) ImportECDSA(priv *ecdsa.PrivateKey, passphrase string) (accounts.Account, error) {
|
||||||
|
ks.importMu.Lock()
|
||||||
|
defer ks.importMu.Unlock()
|
||||||
|
|
||||||
key := newKeyFromECDSA(priv)
|
key := newKeyFromECDSA(priv)
|
||||||
if ks.cache.hasAddress(key.Address) {
|
if ks.cache.hasAddress(key.Address) {
|
||||||
return accounts.Account{}, fmt.Errorf("account already exists")
|
return accounts.Account{
|
||||||
|
Address: key.Address,
|
||||||
|
}, ErrAccountAlreadyExists
|
||||||
}
|
}
|
||||||
return ks.importKey(key, passphrase)
|
return ks.importKey(key, passphrase)
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -23,11 +23,14 @@ import (
|
|||||||
"runtime"
|
"runtime"
|
||||||
"sort"
|
"sort"
|
||||||
"strings"
|
"strings"
|
||||||
|
"sync"
|
||||||
|
"sync/atomic"
|
||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum/accounts"
|
"github.com/ethereum/go-ethereum/accounts"
|
||||||
"github.com/ethereum/go-ethereum/common"
|
"github.com/ethereum/go-ethereum/common"
|
||||||
|
"github.com/ethereum/go-ethereum/crypto"
|
||||||
"github.com/ethereum/go-ethereum/event"
|
"github.com/ethereum/go-ethereum/event"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -338,6 +341,88 @@ func TestWalletNotifications(t *testing.T) {
|
|||||||
checkEvents(t, wantEvents, events)
|
checkEvents(t, wantEvents, events)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// TestImportECDSA tests the import functionality of a keystore.
|
||||||
|
func TestImportECDSA(t *testing.T) {
|
||||||
|
dir, ks := tmpKeyStore(t, true)
|
||||||
|
defer os.RemoveAll(dir)
|
||||||
|
key, err := crypto.GenerateKey()
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("failed to generate key: %v", key)
|
||||||
|
}
|
||||||
|
if _, err = ks.ImportECDSA(key, "old"); err != nil {
|
||||||
|
t.Errorf("importing failed: %v", err)
|
||||||
|
}
|
||||||
|
if _, err = ks.ImportECDSA(key, "old"); err == nil {
|
||||||
|
t.Errorf("importing same key twice succeeded")
|
||||||
|
}
|
||||||
|
if _, err = ks.ImportECDSA(key, "new"); err == nil {
|
||||||
|
t.Errorf("importing same key twice succeeded")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestImportExport tests the import and export functionality of a keystore.
|
||||||
|
func TestImportExport(t *testing.T) {
|
||||||
|
dir, ks := tmpKeyStore(t, true)
|
||||||
|
defer os.RemoveAll(dir)
|
||||||
|
acc, err := ks.NewAccount("old")
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("failed to create account: %v", acc)
|
||||||
|
}
|
||||||
|
json, err := ks.Export(acc, "old", "new")
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("failed to export account: %v", acc)
|
||||||
|
}
|
||||||
|
dir2, ks2 := tmpKeyStore(t, true)
|
||||||
|
defer os.RemoveAll(dir2)
|
||||||
|
if _, err = ks2.Import(json, "old", "old"); err == nil {
|
||||||
|
t.Errorf("importing with invalid password succeeded")
|
||||||
|
}
|
||||||
|
acc2, err := ks2.Import(json, "new", "new")
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("importing failed: %v", err)
|
||||||
|
}
|
||||||
|
if acc.Address != acc2.Address {
|
||||||
|
t.Error("imported account does not match exported account")
|
||||||
|
}
|
||||||
|
if _, err = ks2.Import(json, "new", "new"); err == nil {
|
||||||
|
t.Errorf("importing a key twice succeeded")
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestImportRace tests the keystore on races.
|
||||||
|
// This test should fail under -race if importing races.
|
||||||
|
func TestImportRace(t *testing.T) {
|
||||||
|
dir, ks := tmpKeyStore(t, true)
|
||||||
|
defer os.RemoveAll(dir)
|
||||||
|
acc, err := ks.NewAccount("old")
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("failed to create account: %v", acc)
|
||||||
|
}
|
||||||
|
json, err := ks.Export(acc, "old", "new")
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("failed to export account: %v", acc)
|
||||||
|
}
|
||||||
|
dir2, ks2 := tmpKeyStore(t, true)
|
||||||
|
defer os.RemoveAll(dir2)
|
||||||
|
var atom uint32
|
||||||
|
var wg sync.WaitGroup
|
||||||
|
wg.Add(2)
|
||||||
|
for i := 0; i < 2; i++ {
|
||||||
|
go func() {
|
||||||
|
defer wg.Done()
|
||||||
|
if _, err := ks2.Import(json, "new", "new"); err != nil {
|
||||||
|
atomic.AddUint32(&atom, 1)
|
||||||
|
}
|
||||||
|
|
||||||
|
}()
|
||||||
|
}
|
||||||
|
wg.Wait()
|
||||||
|
if atom != 1 {
|
||||||
|
t.Errorf("Import is racy")
|
||||||
|
}
|
||||||
|
}
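Aside: TestImportRace expects exactly one of the two concurrent imports to succeed; with importMu in place the loser hits the hasAddress check and fails, so the atomic counter ends at one. The sketch below uses hypothetical names, not code from the repository, to show the unguarded check-then-act shape the new mutex prevents.

```go
package example

import "sync"

// set demonstrates check-then-act: without a lock, two goroutines can both
// observe "absent" and both insert, which is what importMu now rules out.
type set struct {
	mu    sync.Mutex
	items map[string]bool
}

func newSet() *set {
	return &set{items: make(map[string]bool)}
}

// addUnsafe races: the membership check and the insert are not atomic.
func (s *set) addUnsafe(k string) bool {
	if s.items[k] {
		return false
	}
	s.items[k] = true
	return true
}

// addSafe serialises the whole check-then-insert, so only one caller wins.
func (s *set) addSafe(k string) bool {
	s.mu.Lock()
	defer s.mu.Unlock()
	return s.addUnsafe(k)
}
```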
|
||||||
|
|
||||||
// checkAccounts checks that all known live accounts are present in the wallet list.
|
// checkAccounts checks that all known live accounts are present in the wallet list.
|
||||||
func checkAccounts(t *testing.T, live map[common.Address]accounts.Account, wallets []accounts.Wallet) {
|
func checkAccounts(t *testing.T, live map[common.Address]accounts.Account, wallets []accounts.Wallet) {
|
||||||
if len(live) != len(wallets) {
|
if len(live) != len(wallets) {
|
||||||
|
|||||||
@@ -141,6 +141,11 @@ func (am *Manager) Wallets() []Wallet {
|
|||||||
am.lock.RLock()
|
am.lock.RLock()
|
||||||
defer am.lock.RUnlock()
|
defer am.lock.RUnlock()
|
||||||
|
|
||||||
|
return am.walletsNoLock()
|
||||||
|
}
|
||||||
|
|
||||||
|
// walletsNoLock returns all registered wallets. Callers must hold am.lock.
|
||||||
|
func (am *Manager) walletsNoLock() []Wallet {
|
||||||
cpy := make([]Wallet, len(am.wallets))
|
cpy := make([]Wallet, len(am.wallets))
|
||||||
copy(cpy, am.wallets)
|
copy(cpy, am.wallets)
|
||||||
return cpy
|
return cpy
|
||||||
@@ -155,7 +160,7 @@ func (am *Manager) Wallet(url string) (Wallet, error) {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
for _, wallet := range am.Wallets() {
|
for _, wallet := range am.walletsNoLock() {
|
||||||
if wallet.URL() == parsed {
|
if wallet.URL() == parsed {
|
||||||
return wallet, nil
|
return wallet, nil
|
||||||
}
|
}
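Aside: switching Wallet() from Wallets() to walletsNoLock() avoids taking the read lock twice in the same goroutine; with sync.RWMutex that can deadlock once a writer queues between the two RLock calls. A minimal sketch of the pattern, with hypothetical names standing in for Manager, Wallets and walletsNoLock:

```go
package example

import "sync"

// registry shows the pattern: public methods take the read lock once and
// delegate to a *NoLock helper, so they never re-enter the RWMutex while a
// writer may be queued in between.
type registry struct {
	lock  sync.RWMutex
	items []string
}

func (r *registry) Items() []string {
	r.lock.RLock()
	defer r.lock.RUnlock()
	return r.itemsNoLock()
}

// itemsNoLock returns a copy of the items. Callers must hold r.lock.
func (r *registry) itemsNoLock() []string {
	cpy := make([]string, len(r.items))
	copy(cpy, r.items)
	return cpy
}

func (r *registry) Find(name string) bool {
	r.lock.RLock()
	defer r.lock.RUnlock()
	for _, it := range r.itemsNoLock() { // not r.Items(): avoid nested RLock
		if it == name {
			return true
		}
	}
	return false
}
```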
|
||||||
|
|||||||
@@ -220,7 +220,7 @@ func (hub *Hub) refreshWallets() {
|
|||||||
// Mark the reader as present
|
// Mark the reader as present
|
||||||
seen[reader] = struct{}{}
|
seen[reader] = struct{}{}
|
||||||
|
|
||||||
// If we alreay know about this card, skip to the next reader, otherwise clean up
|
// If we already know about this card, skip to the next reader, otherwise clean up
|
||||||
if wallet, ok := hub.wallets[reader]; ok {
|
if wallet, ok := hub.wallets[reader]; ok {
|
||||||
if err := wallet.ping(); err == nil {
|
if err := wallet.ping(); err == nil {
|
||||||
continue
|
continue
|
||||||
|
|||||||
@@ -71,7 +71,7 @@ func NewSecureChannelSession(card *pcsc.Card, keyData []byte) (*SecureChannelSes
|
|||||||
|
|
||||||
cardPublic, ok := gen.Unmarshal(keyData)
|
cardPublic, ok := gen.Unmarshal(keyData)
|
||||||
if !ok {
|
if !ok {
|
||||||
return nil, fmt.Errorf("Could not unmarshal public key from card")
|
return nil, fmt.Errorf("could not unmarshal public key from card")
|
||||||
}
|
}
|
||||||
|
|
||||||
secret, err := gen.GenerateSharedSecret(private, cardPublic)
|
secret, err := gen.GenerateSharedSecret(private, cardPublic)
|
||||||
@@ -109,7 +109,7 @@ func (s *SecureChannelSession) Pair(pairingPassword []byte) error {
|
|||||||
cardChallenge := response.Data[32:64]
|
cardChallenge := response.Data[32:64]
|
||||||
|
|
||||||
if !bytes.Equal(expectedCryptogram, cardCryptogram) {
|
if !bytes.Equal(expectedCryptogram, cardCryptogram) {
|
||||||
return fmt.Errorf("Invalid card cryptogram %v != %v", expectedCryptogram, cardCryptogram)
|
return fmt.Errorf("invalid card cryptogram %v != %v", expectedCryptogram, cardCryptogram)
|
||||||
}
|
}
|
||||||
|
|
||||||
md.Reset()
|
md.Reset()
|
||||||
@@ -132,7 +132,7 @@ func (s *SecureChannelSession) Pair(pairingPassword []byte) error {
|
|||||||
// Unpair disestablishes an existing pairing.
|
// Unpair disestablishes an existing pairing.
|
||||||
func (s *SecureChannelSession) Unpair() error {
|
func (s *SecureChannelSession) Unpair() error {
|
||||||
if s.PairingKey == nil {
|
if s.PairingKey == nil {
|
||||||
return fmt.Errorf("Cannot unpair: not paired")
|
return fmt.Errorf("cannot unpair: not paired")
|
||||||
}
|
}
|
||||||
|
|
||||||
_, err := s.transmitEncrypted(claSCWallet, insUnpair, s.PairingIndex, 0, []byte{})
|
_, err := s.transmitEncrypted(claSCWallet, insUnpair, s.PairingIndex, 0, []byte{})
|
||||||
@@ -148,7 +148,7 @@ func (s *SecureChannelSession) Unpair() error {
|
|||||||
// Open initializes the secure channel.
|
// Open initializes the secure channel.
|
||||||
func (s *SecureChannelSession) Open() error {
|
func (s *SecureChannelSession) Open() error {
|
||||||
if s.iv != nil {
|
if s.iv != nil {
|
||||||
return fmt.Errorf("Session already opened")
|
return fmt.Errorf("session already opened")
|
||||||
}
|
}
|
||||||
|
|
||||||
response, err := s.open()
|
response, err := s.open()
|
||||||
@@ -185,11 +185,11 @@ func (s *SecureChannelSession) mutuallyAuthenticate() error {
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if response.Sw1 != 0x90 || response.Sw2 != 0x00 {
|
if response.Sw1 != 0x90 || response.Sw2 != 0x00 {
|
||||||
return fmt.Errorf("Got unexpected response from MUTUALLY_AUTHENTICATE: 0x%x%x", response.Sw1, response.Sw2)
|
return fmt.Errorf("got unexpected response from MUTUALLY_AUTHENTICATE: 0x%x%x", response.Sw1, response.Sw2)
|
||||||
}
|
}
|
||||||
|
|
||||||
if len(response.Data) != scSecretLength {
|
if len(response.Data) != scSecretLength {
|
||||||
return fmt.Errorf("Response from MUTUALLY_AUTHENTICATE was %d bytes, expected %d", len(response.Data), scSecretLength)
|
return fmt.Errorf("response from MUTUALLY_AUTHENTICATE was %d bytes, expected %d", len(response.Data), scSecretLength)
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
@@ -222,7 +222,7 @@ func (s *SecureChannelSession) pair(p1 uint8, data []byte) (*responseAPDU, error
|
|||||||
// transmitEncrypted sends an encrypted message, and decrypts and returns the response.
|
// transmitEncrypted sends an encrypted message, and decrypts and returns the response.
|
||||||
func (s *SecureChannelSession) transmitEncrypted(cla, ins, p1, p2 byte, data []byte) (*responseAPDU, error) {
|
func (s *SecureChannelSession) transmitEncrypted(cla, ins, p1, p2 byte, data []byte) (*responseAPDU, error) {
|
||||||
if s.iv == nil {
|
if s.iv == nil {
|
||||||
return nil, fmt.Errorf("Channel not open")
|
return nil, fmt.Errorf("channel not open")
|
||||||
}
|
}
|
||||||
|
|
||||||
data, err := s.encryptAPDU(data)
|
data, err := s.encryptAPDU(data)
|
||||||
@@ -261,14 +261,14 @@ func (s *SecureChannelSession) transmitEncrypted(cla, ins, p1, p2 byte, data []b
|
|||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
if !bytes.Equal(s.iv, rmac) {
|
if !bytes.Equal(s.iv, rmac) {
|
||||||
return nil, fmt.Errorf("Invalid MAC in response")
|
return nil, fmt.Errorf("invalid MAC in response")
|
||||||
}
|
}
|
||||||
|
|
||||||
rapdu := &responseAPDU{}
|
rapdu := &responseAPDU{}
|
||||||
rapdu.deserialize(plainData)
|
rapdu.deserialize(plainData)
|
||||||
|
|
||||||
if rapdu.Sw1 != sw1Ok {
|
if rapdu.Sw1 != sw1Ok {
|
||||||
return nil, fmt.Errorf("Unexpected response status Cla=0x%x, Ins=0x%x, Sw=0x%x%x", cla, ins, rapdu.Sw1, rapdu.Sw2)
|
return nil, fmt.Errorf("unexpected response status Cla=0x%x, Ins=0x%x, Sw=0x%x%x", cla, ins, rapdu.Sw1, rapdu.Sw2)
|
||||||
}
|
}
|
||||||
|
|
||||||
return rapdu, nil
|
return rapdu, nil
|
||||||
@@ -277,7 +277,7 @@ func (s *SecureChannelSession) transmitEncrypted(cla, ins, p1, p2 byte, data []b
|
|||||||
// encryptAPDU is an internal method that serializes and encrypts an APDU.
|
// encryptAPDU is an internal method that serializes and encrypts an APDU.
|
||||||
func (s *SecureChannelSession) encryptAPDU(data []byte) ([]byte, error) {
|
func (s *SecureChannelSession) encryptAPDU(data []byte) ([]byte, error) {
|
||||||
if len(data) > maxPayloadSize {
|
if len(data) > maxPayloadSize {
|
||||||
return nil, fmt.Errorf("Payload of %d bytes exceeds maximum of %d", len(data), maxPayloadSize)
|
return nil, fmt.Errorf("payload of %d bytes exceeds maximum of %d", len(data), maxPayloadSize)
|
||||||
}
|
}
|
||||||
data = pad(data, 0x80)
|
data = pad(data, 0x80)
|
||||||
|
|
||||||
@@ -323,10 +323,10 @@ func unpad(data []byte, terminator byte) ([]byte, error) {
|
|||||||
case terminator:
|
case terminator:
|
||||||
return data[:len(data)-i], nil
|
return data[:len(data)-i], nil
|
||||||
default:
|
default:
|
||||||
return nil, fmt.Errorf("Expected end of padding, got %d", data[len(data)-i])
|
return nil, fmt.Errorf("expected end of padding, got %d", data[len(data)-i])
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return nil, fmt.Errorf("Expected end of padding, got 0")
|
return nil, fmt.Errorf("expected end of padding, got 0")
|
||||||
}
|
}
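Aside: the lower-casing sweep through this file follows the Go convention (flagged by staticcheck as ST1005) that error strings are uncapitalised and unpunctuated, because they are usually embedded in a larger message; the Ledger error later in the diff keeps its capital letter since it names the brand and carries an explicit lint ignore. A small sketch of why the convention matters when errors are wrapped, using hypothetical names:

```go
package example

import "fmt"

// open returns a lower-cased error string so it composes cleanly.
func open(paired bool) error {
	if !paired {
		return fmt.Errorf("wallet not paired")
	}
	return nil
}

func openOrReport() error {
	if err := open(false); err != nil {
		return fmt.Errorf("smartcard session failed: %w", err) // reads as one sentence
	}
	return nil
}
```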
|
||||||
|
|
||||||
// updateIV is an internal method that updates the initialization vector after
|
// updateIV is an internal method that updates the initialization vector after
|
||||||
|
|||||||
@@ -167,7 +167,7 @@ func transmit(card *pcsc.Card, command *commandAPDU) (*responseAPDU, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
if response.Sw1 != sw1Ok {
|
if response.Sw1 != sw1Ok {
|
||||||
return nil, fmt.Errorf("Unexpected insecure response status Cla=0x%x, Ins=0x%x, Sw=0x%x%x", command.Cla, command.Ins, response.Sw1, response.Sw2)
|
return nil, fmt.Errorf("unexpected insecure response status Cla=0x%x, Ins=0x%x, Sw=0x%x%x", command.Cla, command.Ins, response.Sw1, response.Sw2)
|
||||||
}
|
}
|
||||||
|
|
||||||
return response, nil
|
return response, nil
|
||||||
@@ -252,7 +252,7 @@ func (w *Wallet) release() error {
|
|||||||
// with the wallet.
|
// with the wallet.
|
||||||
func (w *Wallet) pair(puk []byte) error {
|
func (w *Wallet) pair(puk []byte) error {
|
||||||
if w.session.paired() {
|
if w.session.paired() {
|
||||||
return fmt.Errorf("Wallet already paired")
|
return fmt.Errorf("wallet already paired")
|
||||||
}
|
}
|
||||||
pairing, err := w.session.pair(puk)
|
pairing, err := w.session.pair(puk)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -312,15 +312,15 @@ func (w *Wallet) Status() (string, error) {
|
|||||||
}
|
}
|
||||||
switch {
|
switch {
|
||||||
case !w.session.verified && status.PinRetryCount == 0 && status.PukRetryCount == 0:
|
case !w.session.verified && status.PinRetryCount == 0 && status.PukRetryCount == 0:
|
||||||
return fmt.Sprintf("Bricked, waiting for full wipe"), nil
|
return "Bricked, waiting for full wipe", nil
|
||||||
case !w.session.verified && status.PinRetryCount == 0:
|
case !w.session.verified && status.PinRetryCount == 0:
|
||||||
return fmt.Sprintf("Blocked, waiting for PUK (%d attempts left) and new PIN", status.PukRetryCount), nil
|
return fmt.Sprintf("Blocked, waiting for PUK (%d attempts left) and new PIN", status.PukRetryCount), nil
|
||||||
case !w.session.verified:
|
case !w.session.verified:
|
||||||
return fmt.Sprintf("Locked, waiting for PIN (%d attempts left)", status.PinRetryCount), nil
|
return fmt.Sprintf("Locked, waiting for PIN (%d attempts left)", status.PinRetryCount), nil
|
||||||
case !status.Initialized:
|
case !status.Initialized:
|
||||||
return fmt.Sprintf("Empty, waiting for initialization"), nil
|
return "Empty, waiting for initialization", nil
|
||||||
default:
|
default:
|
||||||
return fmt.Sprintf("Online"), nil
|
return "Online", nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -362,7 +362,7 @@ func (w *Wallet) Open(passphrase string) error {
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
// Pairing succeeded, fall through to PIN checks. This will of course fail,
|
// Pairing succeeded, fall through to PIN checks. This will of course fail,
|
||||||
// but we can't return ErrPINNeeded directly here becase we don't know whether
|
// but we can't return ErrPINNeeded directly here because we don't know whether
|
||||||
// a PIN check or a PIN reset is needed.
|
// a PIN check or a PIN reset is needed.
|
||||||
passphrase = ""
|
passphrase = ""
|
||||||
}
|
}
|
||||||
@@ -773,12 +773,12 @@ func (w *Wallet) findAccountPath(account accounts.Account) (accounts.DerivationP
|
|||||||
|
|
||||||
// Look for the path in the URL
|
// Look for the path in the URL
|
||||||
if account.URL.Scheme != w.Hub.scheme {
|
if account.URL.Scheme != w.Hub.scheme {
|
||||||
return nil, fmt.Errorf("Scheme %s does not match wallet scheme %s", account.URL.Scheme, w.Hub.scheme)
|
return nil, fmt.Errorf("scheme %s does not match wallet scheme %s", account.URL.Scheme, w.Hub.scheme)
|
||||||
}
|
}
|
||||||
|
|
||||||
parts := strings.SplitN(account.URL.Path, "/", 2)
|
parts := strings.SplitN(account.URL.Path, "/", 2)
|
||||||
if len(parts) != 2 {
|
if len(parts) != 2 {
|
||||||
return nil, fmt.Errorf("Invalid URL format: %s", account.URL)
|
return nil, fmt.Errorf("invalid URL format: %s", account.URL)
|
||||||
}
|
}
|
||||||
|
|
||||||
if parts[0] != fmt.Sprintf("%x", w.PublicKey[1:3]) {
|
if parts[0] != fmt.Sprintf("%x", w.PublicKey[1:3]) {
|
||||||
@@ -813,7 +813,7 @@ func (s *Session) pair(secret []byte) (smartcardPairing, error) {
|
|||||||
// unpair deletes an existing pairing.
|
// unpair deletes an existing pairing.
|
||||||
func (s *Session) unpair() error {
|
func (s *Session) unpair() error {
|
||||||
if !s.verified {
|
if !s.verified {
|
||||||
return fmt.Errorf("Unpair requires that the PIN be verified")
|
return fmt.Errorf("unpair requires that the PIN be verified")
|
||||||
}
|
}
|
||||||
return s.Channel.Unpair()
|
return s.Channel.Unpair()
|
||||||
}
|
}
|
||||||
@@ -850,7 +850,7 @@ func (s *Session) paired() bool {
|
|||||||
// authenticate uses an existing pairing to establish a secure channel.
|
// authenticate uses an existing pairing to establish a secure channel.
|
||||||
func (s *Session) authenticate(pairing smartcardPairing) error {
|
func (s *Session) authenticate(pairing smartcardPairing) error {
|
||||||
if !bytes.Equal(s.Wallet.PublicKey, pairing.PublicKey) {
|
if !bytes.Equal(s.Wallet.PublicKey, pairing.PublicKey) {
|
||||||
return fmt.Errorf("Cannot pair using another wallet's pairing; %x != %x", s.Wallet.PublicKey, pairing.PublicKey)
|
return fmt.Errorf("cannot pair using another wallet's pairing; %x != %x", s.Wallet.PublicKey, pairing.PublicKey)
|
||||||
}
|
}
|
||||||
s.Channel.PairingKey = pairing.PairingKey
|
s.Channel.PairingKey = pairing.PairingKey
|
||||||
s.Channel.PairingIndex = pairing.PairingIndex
|
s.Channel.PairingIndex = pairing.PairingIndex
|
||||||
@@ -879,6 +879,7 @@ func (s *Session) walletStatus() (*walletStatus, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// derivationPath fetches the wallet's current derivation path from the card.
|
// derivationPath fetches the wallet's current derivation path from the card.
|
||||||
|
//lint:ignore U1000 needs to be added to the console interface
|
||||||
func (s *Session) derivationPath() (accounts.DerivationPath, error) {
|
func (s *Session) derivationPath() (accounts.DerivationPath, error) {
|
||||||
response, err := s.Channel.transmitEncrypted(claSCWallet, insStatus, statusP1Path, 0, nil)
|
response, err := s.Channel.transmitEncrypted(claSCWallet, insStatus, statusP1Path, 0, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -993,12 +994,14 @@ func (s *Session) derive(path accounts.DerivationPath) (accounts.Account, error)
|
|||||||
}
|
}
|
||||||
|
|
||||||
// keyExport contains information on an exported keypair.
|
// keyExport contains information on an exported keypair.
|
||||||
|
//lint:ignore U1000 needs to be added to the console interface
|
||||||
type keyExport struct {
|
type keyExport struct {
|
||||||
PublicKey []byte `asn1:"tag:0"`
|
PublicKey []byte `asn1:"tag:0"`
|
||||||
PrivateKey []byte `asn1:"tag:1,optional"`
|
PrivateKey []byte `asn1:"tag:1,optional"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// publicKey returns the public key for the current derivation path.
|
// publicKey returns the public key for the current derivation path.
|
||||||
|
//lint:ignore U1000 needs to be added to the console interface
|
||||||
func (s *Session) publicKey() ([]byte, error) {
|
func (s *Session) publicKey() ([]byte, error) {
|
||||||
response, err := s.Channel.transmitEncrypted(claSCWallet, insExportKey, exportP1Any, exportP2Pubkey, nil)
|
response, err := s.Channel.transmitEncrypted(claSCWallet, insExportKey, exportP1Any, exportP2Pubkey, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
|||||||
@@ -162,7 +162,8 @@ func (w *ledgerDriver) SignTx(path accounts.DerivationPath, tx *types.Transactio
|
|||||||
return common.Address{}, nil, accounts.ErrWalletClosed
|
return common.Address{}, nil, accounts.ErrWalletClosed
|
||||||
}
|
}
|
||||||
// Ensure the wallet is capable of signing the given transaction
|
// Ensure the wallet is capable of signing the given transaction
|
||||||
if chainID != nil && w.version[0] <= 1 && w.version[1] <= 0 && w.version[2] <= 2 {
|
if chainID != nil && w.version[0] <= 1 && w.version[2] <= 2 {
|
||||||
|
//lint:ignore ST1005 brand name displayed on the console
|
||||||
return common.Address{}, nil, fmt.Errorf("Ledger v%d.%d.%d doesn't support signing this transaction, please update to v1.0.3 at least", w.version[0], w.version[1], w.version[2])
|
return common.Address{}, nil, fmt.Errorf("Ledger v%d.%d.%d doesn't support signing this transaction, please update to v1.0.3 at least", w.version[0], w.version[1], w.version[2])
|
||||||
}
|
}
|
||||||
// All infos gathered and metadata checks out, request signing
|
// All infos gathered and metadata checks out, request signing
|
||||||
|
|||||||
@@ -6,6 +6,7 @@ clone_depth: 5
|
|||||||
version: "{branch}.{build}"
|
version: "{branch}.{build}"
|
||||||
environment:
|
environment:
|
||||||
global:
|
global:
|
||||||
|
GO111MODULE: on
|
||||||
GOPATH: C:\gopath
|
GOPATH: C:\gopath
|
||||||
CC: gcc.exe
|
CC: gcc.exe
|
||||||
matrix:
|
matrix:
|
||||||
@@ -23,8 +24,8 @@ environment:
|
|||||||
install:
|
install:
|
||||||
- git submodule update --init
|
- git submodule update --init
|
||||||
- rmdir C:\go /s /q
|
- rmdir C:\go /s /q
|
||||||
- appveyor DownloadFile https://dl.google.com/go/go1.13.4.windows-%GETH_ARCH%.zip
|
- appveyor DownloadFile https://dl.google.com/go/go1.14.2.windows-%GETH_ARCH%.zip
|
||||||
- 7z x go1.13.4.windows-%GETH_ARCH%.zip -y -oC:\ > NUL
|
- 7z x go1.14.2.windows-%GETH_ARCH%.zip -y -oC:\ > NUL
|
||||||
- go version
|
- go version
|
||||||
- gcc --version
|
- gcc --version
|
||||||
|
|
||||||
|
|||||||
@@ -1,19 +1,20 @@
|
|||||||
# This file contains sha256 checksums of optional build dependencies.
|
# This file contains sha256 checksums of optional build dependencies.
|
||||||
|
|
||||||
95dbeab442ee2746b9acf0934c8e2fc26414a0565c008631b04addb8c02e7624 go1.13.4.src.tar.gz
|
98de84e69726a66da7b4e58eac41b99cbe274d7e8906eeb8a5b7eb0aadee7f7c go1.14.2.src.tar.gz
|
||||||
|
|
||||||
1fcbc9e36f4319eeed02beb8cfd1b3d425ffc2f90ddf09a80f18d5064c51e0cb golangci-lint-1.21.0-linux-386.tar.gz
|
d998a84eea42f2271aca792a7b027ca5c1edfcba229e8e5a844c9ac3f336df35 golangci-lint-1.27.0-linux-armv7.tar.gz
|
||||||
267b4066e67139a38d29499331a002d6a29ad5be7aafc83db3b1e88f1b027f90 golangci-lint-1.21.0-linux-armv6.tar.gz
|
bf781f05b0d393b4bf0a327d9e62926949a4f14d7774d950c4e009fc766ed1d4 golangci-lint.exe-1.27.0-windows-amd64.zip
|
||||||
a602c1f25f90e46e621019cff0a8cb3f4e1837011f3537f15e730d6a9ebf507b golangci-lint-1.21.0-freebsd-armv7.tar.gz
|
bf781f05b0d393b4bf0a327d9e62926949a4f14d7774d950c4e009fc766ed1d4 golangci-lint-1.27.0-windows-amd64.zip
|
||||||
2c861f8dc56b560474aa27cab0c075991628cc01af3451e27ac82f5d10d5106b golangci-lint-1.21.0-linux-amd64.tar.gz
|
0e2a57d6ba709440d3ed018ef1037465fa010ed02595829092860e5cf863042e golangci-lint-1.27.0-freebsd-386.tar.gz
|
||||||
a1c39e055280e755acaa906e7abfc20b99a5c28be8af541c57fbc44abbb20dde golangci-lint-1.21.0-linux-arm64.tar.gz
|
90205fc42ab5ed0096413e790d88ac9b4ed60f4c47e576d13dc0660f7ed4b013 golangci-lint-1.27.0-linux-arm64.tar.gz
|
||||||
a8f8bda8c6a4136acf858091077830b1e83ad5612606cb69d5dced869ce00bd8 golangci-lint-1.21.0-linux-ppc64le.tar.gz
|
8d345e4e88520e21c113d81978e89ad77fc5b13bfdf20e5bca86b83fc4261272 golangci-lint-1.27.0-linux-amd64.tar.gz
|
||||||
0a8a8c3bc660ccbca668897ab520f7ee9878f16cc8e4dd24fe46236ceec97ba3 golangci-lint-1.21.0-freebsd-armv6.tar.gz
|
cc619634a77f18dc73df2a0725be13116d64328dc35131ca1737a850d6f76a59 golangci-lint-1.27.0-freebsd-armv7.tar.gz
|
||||||
699b07f45e216571f54002bcbd83b511c4801464a422162158e299587b095b18 golangci-lint-1.21.0-freebsd-amd64.tar.gz
|
fe683583cfc9eeec83e498c0d6159d87b5e1919dbe4b6c3b3913089642906069 golangci-lint-1.27.0-linux-s390x.tar.gz
|
||||||
980fb4993942154bb5c8129ea3b86de09574fe81b24384ebb58cd7a9d2f04483 golangci-lint-1.21.0-linux-armv7.tar.gz
|
058f5579bee75bdaacbaf75b75e1369f7ad877fd8b3b145aed17a17545de913e golangci-lint-1.27.0-freebsd-armv6.tar.gz
|
||||||
f15b689088a47f20d5d3c1d945e9ee7c6238f2b84ea468b5f886cf8713dce62e golangci-lint-1.21.0-windows-386.zip
|
38e1e3dadbe3f56ab62b4de82ee0b88e8fad966d8dfd740a26ef94c2edef9818 golangci-lint-1.27.0-linux-armv6.tar.gz
|
||||||
2e40ded7adcf11e59013cb15c24438b15a86526ca241edfcfdf1abd73a5280a8 golangci-lint-1.21.0-windows-amd64.zip
|
071b34af5516f4e1ddcaea6011e18208f4f043e1af8ba21eeccad4585cb3d095 golangci-lint.exe-1.27.0-windows-386.zip
|
||||||
6052c7cfea4d6dc2fc722f6c12792a5ec087420198db495afffbc22052653bf7 golangci-lint-1.21.0-freebsd-386.tar.gz
|
071b34af5516f4e1ddcaea6011e18208f4f043e1af8ba21eeccad4585cb3d095 golangci-lint-1.27.0-windows-386.zip
|
||||||
ca00b8eacf9af14a71b908b4149606c762aa5c0eac781e74ca0abedfdfdf6c8c golangci-lint-1.21.0-linux-s390x.tar.gz
|
5f37e2b33914ecddb7cad38186ef4ec61d88172fc04f930fa0267c91151ff306 golangci-lint-1.27.0-linux-386.tar.gz
|
||||||
1365455940c342f95718159d89d66ad2eef19f0846c3e87023e915a3527b929f golangci-lint-1.21.0-darwin-386.tar.gz
|
4d94cfb51fdebeb205f1d5a349ac2b683c30591c5150708073c1c329e15965f0 golangci-lint-1.27.0-freebsd-amd64.tar.gz
|
||||||
2b2713ec5007e67883aa501eebb81f22abfab0cf0909134ba90f60a066db3760 golangci-lint-1.21.0-darwin-amd64.tar.gz
|
52572ba8ff07d5169c2365d3de3fec26dc55a97522094d13d1596199580fa281 golangci-lint-1.27.0-linux-ppc64le.tar.gz
|
||||||
|
3fb1a1683a29c6c0a8cd76135f62b606fbdd538d5a7aeab94af1af70ffdc2fd4 golangci-lint-1.27.0-darwin-amd64.tar.gz
|
||||||
|
|||||||
24
build/ci.go
24
build/ci.go
@@ -145,6 +145,7 @@ var (
|
|||||||
"bionic": "golang-go",
|
"bionic": "golang-go",
|
||||||
"disco": "golang-go",
|
"disco": "golang-go",
|
||||||
"eoan": "golang-go",
|
"eoan": "golang-go",
|
||||||
|
"focal": "golang-go",
|
||||||
}
|
}
|
||||||
|
|
||||||
debGoBootPaths = map[string]string{
|
debGoBootPaths = map[string]string{
|
||||||
@@ -214,9 +215,9 @@ func doInstall(cmdline []string) {
|
|||||||
var minor int
|
var minor int
|
||||||
fmt.Sscanf(strings.TrimPrefix(runtime.Version(), "go1."), "%d", &minor)
|
fmt.Sscanf(strings.TrimPrefix(runtime.Version(), "go1."), "%d", &minor)
|
||||||
|
|
||||||
if minor < 9 {
|
if minor < 11 {
|
||||||
log.Println("You have Go version", runtime.Version())
|
log.Println("You have Go version", runtime.Version())
|
||||||
log.Println("go-ethereum requires at least Go version 1.9 and cannot")
|
log.Println("go-ethereum requires at least Go version 1.11 and cannot")
|
||||||
log.Println("be compiled with an earlier version. Please upgrade your Go installation.")
|
log.Println("be compiled with an earlier version. Please upgrade your Go installation.")
|
||||||
os.Exit(1)
|
os.Exit(1)
|
||||||
}
|
}
|
||||||
@@ -237,13 +238,6 @@ func doInstall(cmdline []string) {
|
|||||||
build.MustRun(goinstall)
|
build.MustRun(goinstall)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
// If we are cross compiling to ARMv5 ARMv6 or ARMv7, clean any previous builds
|
|
||||||
if *arch == "arm" {
|
|
||||||
os.RemoveAll(filepath.Join(runtime.GOROOT(), "pkg", runtime.GOOS+"_arm"))
|
|
||||||
for _, path := range filepath.SplitList(build.GOPATH()) {
|
|
||||||
os.RemoveAll(filepath.Join(path, "pkg", runtime.GOOS+"_arm"))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Seems we are cross compiling, work around forbidden GOBIN
|
// Seems we are cross compiling, work around forbidden GOBIN
|
||||||
goinstall := goToolArch(*arch, *cc, "install", buildFlags(env)...)
|
goinstall := goToolArch(*arch, *cc, "install", buildFlags(env)...)
|
||||||
@@ -294,7 +288,6 @@ func goTool(subcmd string, args ...string) *exec.Cmd {
|
|||||||
|
|
||||||
func goToolArch(arch string, cc string, subcmd string, args ...string) *exec.Cmd {
|
func goToolArch(arch string, cc string, subcmd string, args ...string) *exec.Cmd {
|
||||||
cmd := build.GoTool(subcmd, args...)
|
cmd := build.GoTool(subcmd, args...)
|
||||||
cmd.Env = []string{"GOPATH=" + build.GOPATH()}
|
|
||||||
if arch == "" || arch == runtime.GOARCH {
|
if arch == "" || arch == runtime.GOARCH {
|
||||||
cmd.Env = append(cmd.Env, "GOBIN="+GOBIN)
|
cmd.Env = append(cmd.Env, "GOBIN="+GOBIN)
|
||||||
} else {
|
} else {
|
||||||
@@ -305,7 +298,7 @@ func goToolArch(arch string, cc string, subcmd string, args ...string) *exec.Cmd
|
|||||||
cmd.Env = append(cmd.Env, "CC="+cc)
|
cmd.Env = append(cmd.Env, "CC="+cc)
|
||||||
}
|
}
|
||||||
for _, e := range os.Environ() {
|
for _, e := range os.Environ() {
|
||||||
if strings.HasPrefix(e, "GOPATH=") || strings.HasPrefix(e, "GOBIN=") {
|
if strings.HasPrefix(e, "GOBIN=") {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
cmd.Env = append(cmd.Env, e)
|
cmd.Env = append(cmd.Env, e)
|
||||||
@@ -332,7 +325,7 @@ func doTest(cmdline []string) {
|
|||||||
// Test a single package at a time. CI builders are slow
|
// Test a single package at a time. CI builders are slow
|
||||||
// and some tests run into timeouts under load.
|
// and some tests run into timeouts under load.
|
||||||
gotest := goTool("test", buildFlags(env)...)
|
gotest := goTool("test", buildFlags(env)...)
|
||||||
gotest.Args = append(gotest.Args, "-p", "1", "-timeout", "5m")
|
gotest.Args = append(gotest.Args, "-p", "1")
|
||||||
if *coverage {
|
if *coverage {
|
||||||
gotest.Args = append(gotest.Args, "-covermode=atomic", "-cover")
|
gotest.Args = append(gotest.Args, "-covermode=atomic", "-cover")
|
||||||
}
|
}
|
||||||
@@ -363,7 +356,7 @@ func doLint(cmdline []string) {
|
|||||||
|
|
||||||
// downloadLinter downloads and unpacks golangci-lint.
|
// downloadLinter downloads and unpacks golangci-lint.
|
||||||
func downloadLinter(cachedir string) string {
|
func downloadLinter(cachedir string) string {
|
||||||
const version = "1.21.0"
|
const version = "1.27.0"
|
||||||
|
|
||||||
csdb := build.MustLoadChecksums("build/checksums.txt")
|
csdb := build.MustLoadChecksums("build/checksums.txt")
|
||||||
base := fmt.Sprintf("golangci-lint-%s-%s-%s", version, runtime.GOOS, runtime.GOARCH)
|
base := fmt.Sprintf("golangci-lint-%s-%s-%s", version, runtime.GOOS, runtime.GOARCH)
|
||||||
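The downloaded linter archive is checked against an entry in `build/checksums.txt` before use. As a hedged, standalone sketch of that idea (the `verify` helper below is illustrative and not the repo's internal `build` API):

```go
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"io/ioutil"
	"strings"
)

// verify checks file contents against a "checksums.txt"-style database whose
// lines look like "<sha256-hex>  <filename>".
func verify(db, filename string, content []byte) error {
	sum := sha256.Sum256(content)
	want := hex.EncodeToString(sum[:])
	for _, line := range strings.Split(db, "\n") {
		fields := strings.Fields(line)
		if len(fields) == 2 && fields[1] == filename {
			if fields[0] == want {
				return nil
			}
			return fmt.Errorf("checksum mismatch for %s: have %s, want %s", filename, want, fields[0])
		}
	}
	return fmt.Errorf("no checksum entry for %s", filename)
}

func main() {
	db, err := ioutil.ReadFile("build/checksums.txt")
	if err != nil {
		fmt.Println("cannot read checksum database:", err)
		return
	}
	// Hypothetical archive bytes; in practice this is the downloaded tarball.
	archive := []byte("example archive bytes")
	fmt.Println(verify(string(db), "golangci-lint-1.27.0-linux-amd64.tar.gz", archive))
}
```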
@@ -888,7 +881,6 @@ func gomobileTool(subcmd string, args ...string) *exec.Cmd {
|
|||||||
cmd := exec.Command(filepath.Join(GOBIN, "gomobile"), subcmd)
|
cmd := exec.Command(filepath.Join(GOBIN, "gomobile"), subcmd)
|
||||||
cmd.Args = append(cmd.Args, args...)
|
cmd.Args = append(cmd.Args, args...)
|
||||||
cmd.Env = []string{
|
cmd.Env = []string{
|
||||||
"GOPATH=" + build.GOPATH(),
|
|
||||||
"PATH=" + GOBIN + string(os.PathListSeparator) + os.Getenv("PATH"),
|
"PATH=" + GOBIN + string(os.PathListSeparator) + os.Getenv("PATH"),
|
||||||
}
|
}
|
||||||
for _, e := range os.Environ() {
|
for _, e := range os.Environ() {
|
||||||
@@ -1078,7 +1070,6 @@ func xgoTool(args []string) *exec.Cmd {
|
|||||||
cmd := exec.Command(filepath.Join(GOBIN, "xgo"), args...)
|
cmd := exec.Command(filepath.Join(GOBIN, "xgo"), args...)
|
||||||
cmd.Env = os.Environ()
|
cmd.Env = os.Environ()
|
||||||
cmd.Env = append(cmd.Env, []string{
|
cmd.Env = append(cmd.Env, []string{
|
||||||
"GOPATH=" + build.GOPATH(),
|
|
||||||
"GOBIN=" + GOBIN,
|
"GOBIN=" + GOBIN,
|
||||||
}...)
|
}...)
|
||||||
return cmd
|
return cmd
|
||||||
@@ -1107,6 +1098,8 @@ func doPurge(cmdline []string) {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
log.Fatal(err)
|
log.Fatal(err)
|
||||||
}
|
}
|
||||||
|
fmt.Printf("Found %d blobs\n", len(blobs))
|
||||||
|
|
||||||
// Iterate over the blobs, collect and sort all unstable builds
|
// Iterate over the blobs, collect and sort all unstable builds
|
||||||
for i := 0; i < len(blobs); i++ {
|
for i := 0; i < len(blobs); i++ {
|
||||||
if !strings.Contains(blobs[i].Name, "unstable") {
|
if !strings.Contains(blobs[i].Name, "unstable") {
|
||||||
@@ -1128,6 +1121,7 @@ func doPurge(cmdline []string) {
|
|||||||
break
|
break
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
fmt.Printf("Deleting %d blobs\n", len(blobs))
|
||||||
// Delete all marked as such and return
|
// Delete all marked as such and return
|
||||||
if err := build.AzureBlobstoreDelete(auth, blobs); err != nil {
|
if err := build.AzureBlobstoreDelete(auth, blobs); err != nil {
|
||||||
log.Fatal(err)
|
log.Fatal(err)
|
||||||
|
|||||||
@@ -6,6 +6,7 @@
|
|||||||
|
|
||||||
# Launchpad rejects Go's access to $HOME, use custom folders
|
# Launchpad rejects Go's access to $HOME, use custom folders
|
||||||
export GOCACHE=/tmp/go-build
|
export GOCACHE=/tmp/go-build
|
||||||
|
export GOPATH=/tmp/gopath
|
||||||
export GOROOT_BOOTSTRAP={{.GoBootPath}}
|
export GOROOT_BOOTSTRAP={{.GoBootPath}}
|
||||||
|
|
||||||
override_dh_auto_clean:
|
override_dh_auto_clean:
|
||||||
@@ -19,10 +20,11 @@ override_dh_auto_build:
|
|||||||
|
|
||||||
# We can't download external go modules within Launchpad, so we're shipping the
|
# We can't download external go modules within Launchpad, so we're shipping the
|
||||||
# entire dependency source cache with go-ethereum.
|
# entire dependency source cache with go-ethereum.
|
||||||
(mkdir -p build/_workspace/pkg/mod && mv .mod/* build/_workspace/pkg/mod)
|
mkdir -p $(GOPATH)/pkg
|
||||||
|
mv .mod $(GOPATH)/pkg/mod
|
||||||
|
|
||||||
# A fresh Go was built, all dependency downloads faked, hope build works now
|
# A fresh Go was built, all dependency downloads faked, hope build works now
|
||||||
build/env.sh ../.go/bin/go run build/ci.go install -git-commit={{.Env.Commit}} -git-branch={{.Env.Branch}} -git-tag={{.Env.Tag}} -buildnum={{.Env.Buildnum}} -pull-request={{.Env.IsPullRequest}}
|
../.go/bin/go run build/ci.go install -git-commit={{.Env.Commit}} -git-branch={{.Env.Branch}} -git-tag={{.Env.Tag}} -buildnum={{.Env.Buildnum}} -pull-request={{.Env.IsPullRequest}}
|
||||||
|
|
||||||
override_dh_auto_test:
|
override_dh_auto_test:
|
||||||
|
|
||||||
|
|||||||
30
build/env.sh
30
build/env.sh
@@ -1,30 +0,0 @@
|
|||||||
#!/bin/sh
|
|
||||||
|
|
||||||
set -e
|
|
||||||
|
|
||||||
if [ ! -f "build/env.sh" ]; then
|
|
||||||
echo "$0 must be run from the root of the repository."
|
|
||||||
exit 2
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Create fake Go workspace if it doesn't exist yet.
|
|
||||||
workspace="$PWD/build/_workspace"
|
|
||||||
root="$PWD"
|
|
||||||
ethdir="$workspace/src/github.com/ethereum"
|
|
||||||
if [ ! -L "$ethdir/go-ethereum" ]; then
|
|
||||||
mkdir -p "$ethdir"
|
|
||||||
cd "$ethdir"
|
|
||||||
ln -s ../../../../../. go-ethereum
|
|
||||||
cd "$root"
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Set up the environment to use the workspace.
|
|
||||||
GOPATH="$workspace"
|
|
||||||
export GOPATH
|
|
||||||
|
|
||||||
# Run the command inside the workspace.
|
|
||||||
cd "$ethdir/go-ethereum"
|
|
||||||
PWD="$ethdir/go-ethereum"
|
|
||||||
|
|
||||||
# Launch the arguments with the configured environment.
|
|
||||||
exec "$@"
|
|
||||||
@@ -19,9 +19,9 @@ Section "Geth" GETH_IDX
|
|||||||
|
|
||||||
# Create start menu launcher
|
# Create start menu launcher
|
||||||
createDirectory "$SMPROGRAMS\${APPNAME}"
|
createDirectory "$SMPROGRAMS\${APPNAME}"
|
||||||
createShortCut "$SMPROGRAMS\${APPNAME}\${APPNAME}.lnk" "$INSTDIR\geth.exe" "--fast" "--cache=512"
|
createShortCut "$SMPROGRAMS\${APPNAME}\${APPNAME}.lnk" "$INSTDIR\geth.exe"
|
||||||
createShortCut "$SMPROGRAMS\${APPNAME}\Attach.lnk" "$INSTDIR\geth.exe" "attach" "" ""
|
createShortCut "$SMPROGRAMS\${APPNAME}\Attach.lnk" "$INSTDIR\geth.exe" "attach"
|
||||||
createShortCut "$SMPROGRAMS\${APPNAME}\Uninstall.lnk" "$INSTDIR\uninstall.exe" "" "" ""
|
createShortCut "$SMPROGRAMS\${APPNAME}\Uninstall.lnk" "$INSTDIR\uninstall.exe"
|
||||||
|
|
||||||
# Firewall - remove rules (if exists)
|
# Firewall - remove rules (if exists)
|
||||||
SimpleFC::AdvRemoveRule "Geth incoming peers (TCP:30303)"
|
SimpleFC::AdvRemoveRule "Geth incoming peers (TCP:30303)"
|
||||||
|
|||||||
74
cmd/abidump/main.go
Normal file
74
cmd/abidump/main.go
Normal file
@@ -0,0 +1,74 @@
|
|||||||
|
// Copyright 2019 The go-ethereum Authors
|
||||||
|
// This file is part of the go-ethereum library.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU Lesser General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU Lesser General Public License for more details.
|
||||||
|
//
|
||||||
|
// You should have received a copy of the GNU Lesser General Public License
|
||||||
|
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/hex"
|
||||||
|
"flag"
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/ethereum/go-ethereum/signer/core"
|
||||||
|
"github.com/ethereum/go-ethereum/signer/fourbyte"
|
||||||
|
)
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
flag.Usage = func() {
|
||||||
|
fmt.Fprintln(os.Stderr, "Usage:", os.Args[0], "<hexdata>")
|
||||||
|
flag.PrintDefaults()
|
||||||
|
fmt.Fprintln(os.Stderr, `
|
||||||
|
Parses the given ABI data and tries to interpret it from the fourbyte database.`)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func parse(data []byte) {
|
||||||
|
db, err := fourbyte.New()
|
||||||
|
if err != nil {
|
||||||
|
die(err)
|
||||||
|
}
|
||||||
|
messages := core.ValidationMessages{}
|
||||||
|
db.ValidateCallData(nil, data, &messages)
|
||||||
|
for _, m := range messages.Messages {
|
||||||
|
fmt.Printf("%v: %v\n", m.Typ, m.Message)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Example
|
||||||
|
// ./abidump a9059cbb000000000000000000000000ea0e2dc7d65a50e77fc7e84bff3fd2a9e781ff5c0000000000000000000000000000000000000000000000015af1d78b58c40000
|
||||||
|
func main() {
|
||||||
|
flag.Parse()
|
||||||
|
|
||||||
|
switch {
|
||||||
|
case flag.NArg() == 1:
|
||||||
|
hexdata := flag.Arg(0)
|
||||||
|
data, err := hex.DecodeString(strings.TrimPrefix(hexdata, "0x"))
|
||||||
|
if err != nil {
|
||||||
|
die(err)
|
||||||
|
}
|
||||||
|
parse(data)
|
||||||
|
default:
|
||||||
|
fmt.Fprintln(os.Stderr, "Error: one argument needed")
|
||||||
|
flag.Usage()
|
||||||
|
os.Exit(2)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func die(args ...interface{}) {
|
||||||
|
fmt.Fprintln(os.Stderr, args...)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
@@ -21,30 +21,20 @@ import (
|
|||||||
"fmt"
|
"fmt"
|
||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
"os"
|
"os"
|
||||||
|
"path/filepath"
|
||||||
"regexp"
|
"regexp"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
|
"github.com/ethereum/go-ethereum/accounts/abi"
|
||||||
"github.com/ethereum/go-ethereum/accounts/abi/bind"
|
"github.com/ethereum/go-ethereum/accounts/abi/bind"
|
||||||
"github.com/ethereum/go-ethereum/cmd/utils"
|
"github.com/ethereum/go-ethereum/cmd/utils"
|
||||||
"github.com/ethereum/go-ethereum/common/compiler"
|
"github.com/ethereum/go-ethereum/common/compiler"
|
||||||
"github.com/ethereum/go-ethereum/crypto"
|
"github.com/ethereum/go-ethereum/crypto"
|
||||||
|
"github.com/ethereum/go-ethereum/internal/flags"
|
||||||
"github.com/ethereum/go-ethereum/log"
|
"github.com/ethereum/go-ethereum/log"
|
||||||
"gopkg.in/urfave/cli.v1"
|
"gopkg.in/urfave/cli.v1"
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
|
||||||
commandHelperTemplate = `{{.Name}}{{if .Subcommands}} command{{end}}{{if .Flags}} [command options]{{end}} [arguments...]
|
|
||||||
{{if .Description}}{{.Description}}
|
|
||||||
{{end}}{{if .Subcommands}}
|
|
||||||
SUBCOMMANDS:
|
|
||||||
{{range .Subcommands}}{{.Name}}{{with .ShortName}}, {{.}}{{end}}{{ "\t" }}{{.Usage}}
|
|
||||||
{{end}}{{end}}{{if .Flags}}
|
|
||||||
OPTIONS:
|
|
||||||
{{range $.Flags}}{{"\t"}}{{.}}
|
|
||||||
{{end}}
|
|
||||||
{{end}}`
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
var (
|
||||||
// Git SHA1 commit hash of the release (set via linker flags)
|
// Git SHA1 commit hash of the release (set via linker flags)
|
||||||
gitCommit = ""
|
gitCommit = ""
|
||||||
@@ -111,7 +101,7 @@ var (
|
|||||||
)
|
)
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
app = utils.NewApp(gitCommit, gitDate, "ethereum checkpoint helper tool")
|
app = flags.NewApp(gitCommit, gitDate, "ethereum checkpoint helper tool")
|
||||||
app.Flags = []cli.Flag{
|
app.Flags = []cli.Flag{
|
||||||
abiFlag,
|
abiFlag,
|
||||||
binFlag,
|
binFlag,
|
||||||
@@ -128,7 +118,7 @@ func init() {
|
|||||||
aliasFlag,
|
aliasFlag,
|
||||||
}
|
}
|
||||||
app.Action = utils.MigrateFlags(abigen)
|
app.Action = utils.MigrateFlags(abigen)
|
||||||
cli.CommandHelpTemplate = commandHelperTemplate
|
cli.CommandHelpTemplate = flags.OriginCommandHelpTemplate
|
||||||
}
|
}
|
||||||
|
|
||||||
func abigen(c *cli.Context) error {
|
func abigen(c *cli.Context) error {
|
||||||
@@ -206,10 +196,22 @@ func abigen(c *cli.Context) error {
|
|||||||
utils.Fatalf("Failed to build Solidity contract: %v", err)
|
utils.Fatalf("Failed to build Solidity contract: %v", err)
|
||||||
}
|
}
|
||||||
case c.GlobalIsSet(vyFlag.Name):
|
case c.GlobalIsSet(vyFlag.Name):
|
||||||
contracts, err = compiler.CompileVyper(c.GlobalString(vyperFlag.Name), c.GlobalString(vyFlag.Name))
|
output, err := compiler.CompileVyper(c.GlobalString(vyperFlag.Name), c.GlobalString(vyFlag.Name))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
utils.Fatalf("Failed to build Vyper contract: %v", err)
|
utils.Fatalf("Failed to build Vyper contract: %v", err)
|
||||||
}
|
}
|
||||||
|
contracts = make(map[string]*compiler.Contract)
|
||||||
|
for n, contract := range output {
|
||||||
|
name := n
|
||||||
|
// Sanitize the combined json names to match the
|
||||||
|
// format expected by solidity.
|
||||||
|
if !strings.Contains(n, ":") {
|
||||||
|
// Remove extra path components
|
||||||
|
name = abi.ToCamelCase(strings.TrimSuffix(filepath.Base(name), ".vy"))
|
||||||
|
}
|
||||||
|
contracts[name] = contract
|
||||||
|
}
|
||||||
|
|
||||||
case c.GlobalIsSet(jsonFlag.Name):
|
case c.GlobalIsSet(jsonFlag.Name):
|
||||||
jsonOutput, err := ioutil.ReadFile(c.GlobalString(jsonFlag.Name))
|
jsonOutput, err := ioutil.ReadFile(c.GlobalString(jsonFlag.Name))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
|||||||
103
cmd/checkpoint-admin/README.md
Normal file
103
cmd/checkpoint-admin/README.md
Normal file
@@ -0,0 +1,103 @@
|
|||||||
|
## Checkpoint-admin
|
||||||
|
|
||||||
|
Checkpoint-admin is a tool for updating the checkpoint oracle status. It provides a series of functions, including deploying the checkpoint oracle contract, signing new checkpoints, and updating checkpoints in the checkpoint oracle contract.
|
||||||
|
|
||||||
|
### Checkpoint
|
||||||
|
|
||||||
|
In the LES protocol, there is an important concept called a checkpoint. In simple terms, whenever a certain number of blocks have been generated on the blockchain, a new checkpoint is generated which contains some important information, such as:
|
||||||
|
|
||||||
|
* Block hash at checkpoint
|
||||||
|
* Canonical hash trie root at checkpoint
|
||||||
|
* Bloom trie root at checkpoint
|
||||||
|
|
||||||
|
*For a more detailed introduction to checkpoints, please see the LES [spec](https://github.com/ethereum/devp2p/blob/master/caps/les.md).*
|
||||||
|
|
||||||
|
Using this information, light clients can skip all historical block headers when synchronizing data and start synchronization from this checkpoint. Therefore, as long as the light client can obtain a recent and correct checkpoint, the amount of data and time needed for synchronization is greatly reduced.
|
||||||
|
|
||||||
|
However, from a security perspective, the most critical step in a synchronization algorithm based on checkpoints is to determine whether the checkpoint used by the light client is correct. Otherwise, all blockchain data synchronized based on this checkpoint may be wrong. For this we provide two different ways to ensure the correctness of the checkpoint used by the light client.
|
||||||
|
|
||||||
|
#### Hardcoded checkpoint
|
||||||
|
|
||||||
|
There are several hardcoded checkpoints in the [source code](https://github.com/ethereum/go-ethereum/blob/master/params/config.go#L38) of the go-ethereum project. These checkpoints are updated by go-ethereum developers when new versions of the software are released. Because light client users trust Geth developers to some extent, hardcoded checkpoints in the code can also be considered correct.
|
||||||
|
|
||||||
|
#### Checkpoint oracle
|
||||||
|
|
||||||
|
Hardcoded checkpoints can solve the problem of verifying the correctness of checkpoints (although this is a more centralized solution). But the pain point of this solution is that developers can only update checkpoints when a new version of the software is released. In addition, light client users usually do not keep the Geth version they use up to date, so the hardcoded checkpoints they rely on are generally stale. Therefore, the light client still needs to download a large amount of blockchain data during synchronization.
|
||||||
|
|
||||||
|
The checkpoint oracle is a more flexible solution. In simple terms, it is a smart contract deployed on the blockchain which records several designated trusted signers. Whenever enough trusted signers have issued their signatures for the same checkpoint, that checkpoint can be considered authenticated by the signers. Checkpoints authenticated by trusted signers can be considered correct.
|
||||||
|
|
||||||
|
This way, even without updating the software version, as long as the trusted signers regularly update the checkpoint in the oracle, the light client can always use the latest verified checkpoint for data synchronization.
|
||||||
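As a rough illustration of the threshold rule the oracle enforces (a hedged sketch with made-up types and addresses, not the on-chain contract code):

```go
package main

import "fmt"

// authenticated reports whether a checkpoint hash has gathered votes from at
// least `threshold` distinct trusted signers.
func authenticated(votes map[string]string, trusted map[string]bool, hash string, threshold int) bool {
	count := 0
	for signer, votedHash := range votes {
		if trusted[signer] && votedHash == hash {
			count++
		}
	}
	return count >= threshold
}

func main() {
	trusted := map[string]bool{"0xaaaa": true, "0xbbbb": true, "0xcccc": true}
	votes := map[string]string{
		"0xaaaa": "0x01", // trusted signer agrees
		"0xbbbb": "0x01", // trusted signer agrees
		"0xdddd": "0x01", // untrusted signer, ignored
	}
	fmt.Println(authenticated(votes, trusted, "0x01", 2)) // true: two trusted votes meet the threshold
}
```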
|
|
||||||
|
### Usage
|
||||||
|
|
||||||
|
Checkpoint-admin is a command line tool designed for the checkpoint oracle. Users can easily deploy contracts and update checkpoints through this tool.
|
||||||
|
|
||||||
|
#### Install
|
||||||
|
|
||||||
|
```shell
|
||||||
|
go get github.com/ethereum/go-ethereum/cmd/checkpoint-admin
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Deploy
|
||||||
|
|
||||||
|
Deploys the checkpoint oracle contract. `--signers` specifies the trusted signers, and `--threshold` indicates the minimum number of trusted-signer signatures required to update a checkpoint.
|
||||||
|
|
||||||
|
```shell
|
||||||
|
checkpoint-admin deploy --rpc <NODE_RPC_ENDPOINT> --clef <CLEF_ENDPOINT> --signer <SIGNER_TO_SIGN_TX> --signers <TRUSTED_SIGNER_LIST> --threshold 1
|
||||||
|
```
|
||||||
|
|
||||||
|
It is worth noting that checkpoint-admin only supports clef as a signer for transactions and plain text (checkpoints). For more clef usage, please see the clef [tutorial](https://geth.ethereum.org/docs/clef/tutorial).
|
||||||
|
|
||||||
|
#### Sign
|
||||||
|
|
||||||
|
Checkpoint-admin provides two different modes of signing. You can automatically obtain the current stable checkpoint and sign it interactively, or you can use the information provided via command line flags to sign a checkpoint offline.
|
||||||
|
|
||||||
|
**Interactive mode**
|
||||||
|
|
||||||
|
```shell
|
||||||
|
checkpoint-admin sign --clef <CLEF_ENDPOINT> --signer <SIGNER_TO_SIGN_CHECKPOINT> --rpc <NODE_RPC_ENDPOINT>
|
||||||
|
```
|
||||||
|
|
||||||
|
*It is worth noting that the connected Geth node can be a full node or a light client. If it is a full node, you must enable the LES protocol, e.g. by adding `--light.serv 50` to the startup command line flags.*
|
||||||
|
|
||||||
|
**Offline mode**
|
||||||
|
|
||||||
|
```shell
|
||||||
|
checkpoint-admin sign --clef <CLEF_ENDPOINT> --signer <SIGNER_TO_SIGN_CHECKPOINT> --index <CHECKPOINT_INDEX> --hash <CHECKPOINT_HASH> --oracle <CHECKPOINT_ORACLE_ADDRESS>
|
||||||
|
```
|
||||||
|
|
||||||
|
*CHECKPOINT_HASH is obtained based on this [calculation method](https://github.com/ethereum/go-ethereum/blob/master/params/config.go#L251).*
|
||||||
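For illustration, here is a hedged Go sketch of that calculation, assuming the checkpoint hash is keccak256 over the big-endian section index followed by the section head, CHT root and bloom trie root; consult the linked `params/config.go` for the authoritative field order:

```go
package main

import (
	"encoding/binary"
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/crypto"
)

// checkpointHash sketches how a checkpoint hash could be derived; the field
// order and encoding here are assumptions, not the authoritative definition.
func checkpointHash(sectionIndex uint64, sectionHead, chtRoot, bloomRoot common.Hash) common.Hash {
	var index [8]byte
	binary.BigEndian.PutUint64(index[:], sectionIndex)
	return crypto.Keccak256Hash(index[:], sectionHead.Bytes(), chtRoot.Bytes(), bloomRoot.Bytes())
}

func main() {
	// Hypothetical values; real ones come from a connected LES-enabled node.
	h := checkpointHash(256, common.HexToHash("0x01"), common.HexToHash("0x02"), common.HexToHash("0x03"))
	fmt.Println(h.Hex())
}
```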
|
|
||||||
|
#### Publish
|
||||||
|
|
||||||
|
Collect enough signatures from different trusted signers for the same checkpoint and submit them to the oracle to update the "authenticated" checkpoint in the contract.
|
||||||
|
|
||||||
|
```shell
|
||||||
|
checkpoint-admin publish --clef <CLEF_ENDPOINT> --rpc <NODE_RPC_ENDPOINT> --signer <SIGNER_TO_SIGN_TX> --index <CHECKPOINT_INDEX> --signatures <CHECKPOINT_SIGNATURE_LIST>
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Status query
|
||||||
|
|
||||||
|
Check the latest status of checkpoint oracle.
|
||||||
|
|
||||||
|
```shell
|
||||||
|
checkpoint-admin status --rpc <NODE_RPC_ENDPOINT>
|
||||||
|
```
|
||||||
|
|
||||||
|
### Enable checkpoint oracle in your private network
|
||||||
|
|
||||||
|
Currently, only the Ethereum mainnet and the default supported test networks (ropsten, rinkeby, goerli) activate this feature. If you want to activate this feature in your private network, you can overwrite the relevant checkpoint oracle settings through the configuration file after deploying the oracle contract.
|
||||||
|
|
||||||
|
* Get your node configuration file `geth dumpconfig OTHER_COMMAND_LINE_OPTIONS > config.toml`
|
||||||
|
* Edit the configuration file and add the following information
|
||||||
|
|
||||||
|
```toml
|
||||||
|
[Eth.CheckpointOracle]
|
||||||
|
Address = CHECKPOINT_ORACLE_ADDRESS
|
||||||
|
Signers = [TRUSTED_SIGNER_1, ..., TRUSTED_SIGNER_N]
|
||||||
|
Threshold = THRESHOLD
|
||||||
|
```
|
||||||
|
|
||||||
|
* Start geth with the modified configuration file
|
||||||
|
|
||||||
|
*In a private network, all full nodes and light clients need to be started with the same checkpoint oracle settings.*
|
||||||
@@ -22,25 +22,12 @@ import (
|
|||||||
"fmt"
|
"fmt"
|
||||||
"os"
|
"os"
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum/cmd/utils"
|
|
||||||
"github.com/ethereum/go-ethereum/common/fdlimit"
|
"github.com/ethereum/go-ethereum/common/fdlimit"
|
||||||
|
"github.com/ethereum/go-ethereum/internal/flags"
|
||||||
"github.com/ethereum/go-ethereum/log"
|
"github.com/ethereum/go-ethereum/log"
|
||||||
"gopkg.in/urfave/cli.v1"
|
"gopkg.in/urfave/cli.v1"
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
|
||||||
commandHelperTemplate = `{{.Name}}{{if .Subcommands}} command{{end}}{{if .Flags}} [command options]{{end}} [arguments...]
|
|
||||||
{{if .Description}}{{.Description}}
|
|
||||||
{{end}}{{if .Subcommands}}
|
|
||||||
SUBCOMMANDS:
|
|
||||||
{{range .Subcommands}}{{.Name}}{{with .ShortName}}, {{.}}{{end}}{{ "\t" }}{{.Usage}}
|
|
||||||
{{end}}{{end}}{{if .Flags}}
|
|
||||||
OPTIONS:
|
|
||||||
{{range $.Flags}}{{"\t"}}{{.}}
|
|
||||||
{{end}}
|
|
||||||
{{end}}`
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
var (
|
||||||
// Git SHA1 commit hash of the release (set via linker flags)
|
// Git SHA1 commit hash of the release (set via linker flags)
|
||||||
gitCommit = ""
|
gitCommit = ""
|
||||||
@@ -50,7 +37,7 @@ var (
|
|||||||
var app *cli.App
|
var app *cli.App
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
app = utils.NewApp(gitCommit, gitDate, "ethereum checkpoint helper tool")
|
app = flags.NewApp(gitCommit, gitDate, "ethereum checkpoint helper tool")
|
||||||
app.Commands = []cli.Command{
|
app.Commands = []cli.Command{
|
||||||
commandStatus,
|
commandStatus,
|
||||||
commandDeploy,
|
commandDeploy,
|
||||||
@@ -61,7 +48,7 @@ func init() {
|
|||||||
oracleFlag,
|
oracleFlag,
|
||||||
nodeURLFlag,
|
nodeURLFlag,
|
||||||
}
|
}
|
||||||
cli.CommandHelpTemplate = commandHelperTemplate
|
cli.CommandHelpTemplate = flags.OriginCommandHelpTemplate
|
||||||
}
|
}
|
||||||
|
|
||||||
// Commonly used command line flags.
|
// Commonly used command line flags.
|
||||||
|
|||||||
@@ -9,7 +9,7 @@ Clef can run as a daemon on the same machine, off a usb-stick like [USB armory](
|
|||||||
Check out the
|
Check out the
|
||||||
|
|
||||||
* [CLI tutorial](tutorial.md) for some concrete examples on how Clef works.
|
* [CLI tutorial](tutorial.md) for some concrete examples on how Clef works.
|
||||||
* [Setup docs](docs/setup.md) for infos on how to configure Clef on QubesOS or USB Armory.
|
* [Setup docs](docs/setup.md) for information on how to configure Clef on QubesOS or USB Armory.
|
||||||
* [Data types](datatypes.md) for details on the communication messages between Clef and an external UI.
|
* [Data types](datatypes.md) for details on the communication messages between Clef and an external UI.
|
||||||
|
|
||||||
## Command line flags
|
## Command line flags
|
||||||
@@ -33,12 +33,12 @@ GLOBAL OPTIONS:
|
|||||||
--lightkdf Reduce key-derivation RAM & CPU usage at some expense of KDF strength
|
--lightkdf Reduce key-derivation RAM & CPU usage at some expense of KDF strength
|
||||||
--nousb Disables monitoring for and managing USB hardware wallets
|
--nousb Disables monitoring for and managing USB hardware wallets
|
||||||
--pcscdpath value Path to the smartcard daemon (pcscd) socket file (default: "/run/pcscd/pcscd.comm")
|
--pcscdpath value Path to the smartcard daemon (pcscd) socket file (default: "/run/pcscd/pcscd.comm")
|
||||||
--rpcaddr value HTTP-RPC server listening interface (default: "localhost")
|
--http.addr value HTTP-RPC server listening interface (default: "localhost")
|
||||||
--rpcvhosts value Comma separated list of virtual hostnames from which to accept requests (server enforced). Accepts '*' wildcard. (default: "localhost")
|
--http.vhosts value Comma separated list of virtual hostnames from which to accept requests (server enforced). Accepts '*' wildcard. (default: "localhost")
|
||||||
--ipcdisable Disable the IPC-RPC server
|
--ipcdisable Disable the IPC-RPC server
|
||||||
--ipcpath Filename for IPC socket/pipe within the datadir (explicit paths escape it)
|
--ipcpath Filename for IPC socket/pipe within the datadir (explicit paths escape it)
|
||||||
--rpc Enable the HTTP-RPC server
|
--http Enable the HTTP-RPC server
|
||||||
--rpcport value HTTP-RPC server listening port (default: 8550)
|
--http.port value HTTP-RPC server listening port (default: 8550)
|
||||||
--signersecret value A file containing the (encrypted) master seed to encrypt Clef data, e.g. keystore credentials and ruleset hash
|
--signersecret value A file containing the (encrypted) master seed to encrypt Clef data, e.g. keystore credentials and ruleset hash
|
||||||
--4bytedb-custom value File used for writing new 4byte-identifiers submitted via API (default: "./4byte-custom.json")
|
--4bytedb-custom value File used for writing new 4byte-identifiers submitted via API (default: "./4byte-custom.json")
|
||||||
--auditlog value File used to emit audit logs. Set to "" to disable (default: "audit.log")
|
--auditlog value File used to emit audit logs. Set to "" to disable (default: "audit.log")
|
||||||
@@ -46,6 +46,7 @@ GLOBAL OPTIONS:
|
|||||||
--stdio-ui Use STDIN/STDOUT as a channel for an external UI. This means that an STDIN/STDOUT is used for RPC-communication with a e.g. a graphical user interface, and can be used when Clef is started by an external process.
|
--stdio-ui Use STDIN/STDOUT as a channel for an external UI. This means that an STDIN/STDOUT is used for RPC-communication with a e.g. a graphical user interface, and can be used when Clef is started by an external process.
|
||||||
--stdio-ui-test Mechanism to test interface between Clef and UI. Requires 'stdio-ui'.
|
--stdio-ui-test Mechanism to test interface between Clef and UI. Requires 'stdio-ui'.
|
||||||
--advanced If enabled, issues warnings instead of rejections for suspicious requests. Default off
|
--advanced If enabled, issues warnings instead of rejections for suspicious requests. Default off
|
||||||
|
--suppress-bootwarn If set, does not show the warning during boot
|
||||||
--help, -h show help
|
--help, -h show help
|
||||||
--version, -v print the version
|
--version, -v print the version
|
||||||
```
|
```
|
||||||
@@ -112,11 +113,11 @@ Some snags and todos
|
|||||||
|
|
||||||
### External API
|
### External API
|
||||||
|
|
||||||
Clef listens to HTTP requests on `rpcaddr`:`rpcport` (or to IPC on `ipcpath`), with the same JSON-RPC standard as Geth. The messages are expected to be [JSON-RPC 2.0 standard](https://www.jsonrpc.org/specification).
|
Clef listens to HTTP requests on `http.addr`:`http.port` (or to IPC on `ipcpath`), with the same JSON-RPC standard as Geth. The messages are expected to be [JSON-RPC 2.0 standard](https://www.jsonrpc.org/specification).
|
||||||
|
|
||||||
Some of these call can require user interaction. Clients must be aware that responses may be delayed significantly or may never be received if a users decides to ignore the confirmation request.
|
Some of these calls can require user interaction. Clients must be aware that responses may be delayed significantly or may never be received if a user decides to ignore the confirmation request.
|
||||||
|
|
||||||
The External API is **untrusted**: it does not accept credentials over this API, nor does it expect that requests have any authority.
|
The External API is **untrusted**: it does not accept credentials, nor does it expect that requests have any authority.
|
||||||
|
|
||||||
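A hedged Go sketch of a client posting one JSON-RPC request to the external API, assuming Clef was started with `--http` and listens on the default `localhost:8550` (the accounts returned will obviously differ per installation):

```go
package main

import (
	"bytes"
	"fmt"
	"io/ioutil"
	"net/http"
)

func main() {
	// account_list takes no parameters; the response may be delayed until the
	// user approves (or denies) the listing in the UI.
	payload := []byte(`{"jsonrpc":"2.0","id":1,"method":"account_list","params":[]}`)

	resp, err := http.Post("http://localhost:8550", "application/json", bytes.NewReader(payload))
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()

	body, _ := ioutil.ReadAll(resp.Body)
	fmt.Println(string(body))
}
```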
### Internal UI API
|
### Internal UI API
|
||||||
|
|
||||||
@@ -145,13 +146,11 @@ See the [external API changelog](extapi_changelog.md) for information about chan
|
|||||||
|
|
||||||
All hex encoded values must be prefixed with `0x`.
|
All hex encoded values must be prefixed with `0x`.
|
||||||
|
|
||||||
## Methods
|
|
||||||
|
|
||||||
### account_new
|
### account_new
|
||||||
|
|
||||||
#### Create new password protected account
|
#### Create new password protected account
|
||||||
|
|
||||||
The signer will generate a new private key, encrypts it according to [web3 keystore spec](https://github.com/ethereum/wiki/wiki/Web3-Secret-Storage-Definition) and stores it in the keystore directory.
|
The signer will generate a new private key, encrypt it according to [web3 keystore spec](https://github.com/ethereum/wiki/wiki/Web3-Secret-Storage-Definition) and store it in the keystore directory.
|
||||||
The client is responsible for creating a backup of the keystore. If the keystore is lost there is no method of retrieving lost accounts.
|
The client is responsible for creating a backup of the keystore. If the keystore is lost there is no method of retrieving lost accounts.
|
||||||
|
|
||||||
#### Arguments
|
#### Arguments
|
||||||
@@ -160,7 +159,6 @@ None
|
|||||||
|
|
||||||
#### Result
|
#### Result
|
||||||
- address [string]: account address that is derived from the generated key
|
- address [string]: account address that is derived from the generated key
|
||||||
- url [string]: location of the keyfile
|
|
||||||
|
|
||||||
#### Sample call
|
#### Sample call
|
||||||
```json
|
```json
|
||||||
@@ -172,14 +170,11 @@ None
|
|||||||
}
|
}
|
||||||
```
|
```
|
||||||
Response
|
Response
|
||||||
```
|
```json
|
||||||
{
|
{
|
||||||
"id": 0,
|
"id": 0,
|
||||||
"jsonrpc": "2.0",
|
"jsonrpc": "2.0",
|
||||||
"result": {
|
"result": "0xbea9183f8f4f03d427f6bcea17388bdff1cab133"
|
||||||
"address": "0xbea9183f8f4f03d427f6bcea17388bdff1cab133",
|
|
||||||
"url": "keystore:///my/keystore/UTC--2017-08-24T08-40-15.419655028Z--bea9183f8f4f03d427f6bcea17388bdff1cab133"
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
@@ -195,8 +190,6 @@ None
|
|||||||
#### Result
|
#### Result
|
||||||
- array with account records:
|
- array with account records:
|
||||||
- account.address [string]: account address that is derived from the generated key
|
- account.address [string]: account address that is derived from the generated key
|
||||||
- account.type [string]: type of the
|
|
||||||
- account.url [string]: location of the account
|
|
||||||
|
|
||||||
#### Sample call
|
#### Sample call
|
||||||
```json
|
```json
|
||||||
@@ -207,21 +200,13 @@ None
|
|||||||
}
|
}
|
||||||
```
|
```
|
||||||
Response
|
Response
|
||||||
```
|
```json
|
||||||
{
|
{
|
||||||
"id": 1,
|
"id": 1,
|
||||||
"jsonrpc": "2.0",
|
"jsonrpc": "2.0",
|
||||||
"result": [
|
"result": [
|
||||||
{
|
"0xafb2f771f58513609765698f65d3f2f0224a956f",
|
||||||
"address": "0xafb2f771f58513609765698f65d3f2f0224a956f",
|
"0xbea9183f8f4f03d427f6bcea17388bdff1cab133"
|
||||||
"type": "account",
|
|
||||||
"url": "keystore:///tmp/keystore/UTC--2017-08-24T07-26-47.162109726Z--afb2f771f58513609765698f65d3f2f0224a956f"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"address": "0xbea9183f8f4f03d427f6bcea17388bdff1cab133",
|
|
||||||
"type": "account",
|
|
||||||
"url": "keystore:///tmp/keystore/UTC--2017-08-24T08-40-15.419655028Z--bea9183f8f4f03d427f6bcea17388bdff1cab133"
|
|
||||||
}
|
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
```
|
```
|
||||||
@@ -229,10 +214,10 @@ Response
|
|||||||
### account_signTransaction
|
### account_signTransaction
|
||||||
|
|
||||||
#### Sign transactions
|
#### Sign transactions
|
||||||
Signs a transactions and responds with the signed transaction in RLP encoded form.
|
Signs a transaction and responds with the signed transaction in RLP-encoded and JSON forms.
|
||||||
|
|
||||||
#### Arguments
|
#### Arguments
|
||||||
2. transaction object:
|
1. transaction object:
|
||||||
- `from` [address]: account to send the transaction from
|
- `from` [address]: account to send the transaction from
|
||||||
- `to` [address]: receiver account. If omitted or `0x`, will cause contract creation.
|
- `to` [address]: receiver account. If omitted or `0x`, will cause contract creation.
|
||||||
- `gas` [number]: maximum amount of gas to burn
|
- `gas` [number]: maximum amount of gas to burn
|
||||||
@@ -240,12 +225,13 @@ Response
|
|||||||
- `value` [number:optional]: amount of Wei to send with the transaction
|
- `value` [number:optional]: amount of Wei to send with the transaction
|
||||||
- `data` [data:optional]: input data
|
- `data` [data:optional]: input data
|
||||||
- `nonce` [number]: account nonce
|
- `nonce` [number]: account nonce
|
||||||
3. method signature [string:optional]
|
1. method signature [string:optional]
|
||||||
- The method signature, if present, is to aid decoding the calldata. Should consist of `methodname(paramtype,...)`, e.g. `transfer(uint256,address)`. The signer may use this data to parse the supplied calldata, and show the user. The data, however, is considered totally untrusted, and reliability is not expected.
|
- The method signature, if present, is to aid decoding the calldata. Should consist of `methodname(paramtype,...)`, e.g. `transfer(uint256,address)`. The signer may use this data to parse the supplied calldata, and show the user. The data, however, is considered totally untrusted, and reliability is not expected. A short sketch of how such a signature maps to a 4-byte selector follows this list.
|
||||||
|
|
||||||
|
|
||||||
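As a small aside on the method-signature argument, the 4-byte selector that identifies a method at the start of the calldata is the first four bytes of the keccak256 hash of the canonical signature string. A minimal sketch using go-ethereum's `crypto` package:

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	// The sample call below passes "safeSend(address)" together with calldata
	// that begins with the matching selector.
	sig := "safeSend(address)"
	selector := crypto.Keccak256([]byte(sig))[:4]
	fmt.Printf("%s -> 0x%x\n", sig, selector)
}
```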
#### Result
|
#### Result
|
||||||
- signed transaction in RLP encoded form [data]
|
- raw [data]: signed transaction in RLP encoded form
|
||||||
|
- tx [json]: signed transaction in JSON form
|
||||||
|
|
||||||
#### Sample call
|
#### Sample call
|
||||||
```json
|
```json
|
||||||
@@ -270,11 +256,22 @@ Response
|
|||||||
|
|
||||||
```json
|
```json
|
||||||
{
|
{
|
||||||
"id": 2,
|
|
||||||
"jsonrpc": "2.0",
|
"jsonrpc": "2.0",
|
||||||
"error": {
|
"id": 2,
|
||||||
"code": -32000,
|
"result": {
|
||||||
"message": "Request denied"
|
"raw": "0xf88380018203339407a565b7ed7d7a678680a4c162885bedbb695fe080a44401a6e4000000000000000000000000000000000000000000000000000000000000001226a0223a7c9bcf5531c99be5ea7082183816eb20cfe0bbc322e97cc5c7f71ab8b20ea02aadee6b34b45bb15bc42d9c09de4a6754e7000908da72d48cc7704971491663",
|
||||||
|
"tx": {
|
||||||
|
"nonce": "0x0",
|
||||||
|
"gasPrice": "0x1234",
|
||||||
|
"gas": "0x55555",
|
||||||
|
"to": "0x07a565b7ed7d7a678680a4c162885bedbb695fe0",
|
||||||
|
"value": "0x1234",
|
||||||
|
"input": "0xabcd",
|
||||||
|
"v": "0x26",
|
||||||
|
"r": "0x223a7c9bcf5531c99be5ea7082183816eb20cfe0bbc322e97cc5c7f71ab8b20e",
|
||||||
|
"s": "0x2aadee6b34b45bb15bc42d9c09de4a6754e7000908da72d48cc7704971491663",
|
||||||
|
"hash": "0xeba2df809e7a612a0a0d444ccfa5c839624bdc00dd29e3340d46df3870f8a30e"
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
```
|
```
|
||||||
@@ -326,7 +323,7 @@ Response
|
|||||||
|
|
||||||
Bash example:
|
Bash example:
|
||||||
```bash
|
```bash
|
||||||
#curl -H "Content-Type: application/json" -X POST --data '{"jsonrpc":"2.0","method":"account_signTransaction","params":[{"from":"0x694267f14675d7e1b9494fd8d72fefe1755710fa","gas":"0x333","gasPrice":"0x1","nonce":"0x0","to":"0x07a565b7ed7d7a678680a4c162885bedbb695fe0", "value":"0x0", "data":"0x4401a6e40000000000000000000000000000000000000000000000000000000000000012"},"safeSend(address)"],"id":67}' http://localhost:8550/
|
> curl -H "Content-Type: application/json" -X POST --data '{"jsonrpc":"2.0","method":"account_signTransaction","params":[{"from":"0x694267f14675d7e1b9494fd8d72fefe1755710fa","gas":"0x333","gasPrice":"0x1","nonce":"0x0","to":"0x07a565b7ed7d7a678680a4c162885bedbb695fe0", "value":"0x0", "data":"0x4401a6e40000000000000000000000000000000000000000000000000000000000000012"},"safeSend(address)"],"id":67}' http://localhost:8550/
|
||||||
|
|
||||||
{"jsonrpc":"2.0","id":67,"result":{"raw":"0xf88380018203339407a565b7ed7d7a678680a4c162885bedbb695fe080a44401a6e4000000000000000000000000000000000000000000000000000000000000001226a0223a7c9bcf5531c99be5ea7082183816eb20cfe0bbc322e97cc5c7f71ab8b20ea02aadee6b34b45bb15bc42d9c09de4a6754e7000908da72d48cc7704971491663","tx":{"nonce":"0x0","gasPrice":"0x1","gas":"0x333","to":"0x07a565b7ed7d7a678680a4c162885bedbb695fe0","value":"0x0","input":"0x4401a6e40000000000000000000000000000000000000000000000000000000000000012","v":"0x26","r":"0x223a7c9bcf5531c99be5ea7082183816eb20cfe0bbc322e97cc5c7f71ab8b20e","s":"0x2aadee6b34b45bb15bc42d9c09de4a6754e7000908da72d48cc7704971491663","hash":"0xeba2df809e7a612a0a0d444ccfa5c839624bdc00dd29e3340d46df3870f8a30e"}}}
|
{"jsonrpc":"2.0","id":67,"result":{"raw":"0xf88380018203339407a565b7ed7d7a678680a4c162885bedbb695fe080a44401a6e4000000000000000000000000000000000000000000000000000000000000001226a0223a7c9bcf5531c99be5ea7082183816eb20cfe0bbc322e97cc5c7f71ab8b20ea02aadee6b34b45bb15bc42d9c09de4a6754e7000908da72d48cc7704971491663","tx":{"nonce":"0x0","gasPrice":"0x1","gas":"0x333","to":"0x07a565b7ed7d7a678680a4c162885bedbb695fe0","value":"0x0","input":"0x4401a6e40000000000000000000000000000000000000000000000000000000000000012","v":"0x26","r":"0x223a7c9bcf5531c99be5ea7082183816eb20cfe0bbc322e97cc5c7f71ab8b20e","s":"0x2aadee6b34b45bb15bc42d9c09de4a6754e7000908da72d48cc7704971491663","hash":"0xeba2df809e7a612a0a0d444ccfa5c839624bdc00dd29e3340d46df3870f8a30e"}}}
|
||||||
```
|
```
|
||||||
@@ -373,7 +370,7 @@ Response
|
|||||||
### account_signTypedData
|
### account_signTypedData
|
||||||
|
|
||||||
#### Sign data
|
#### Sign data
|
||||||
Signs a chunk of structured data conformant to [EIP712]([EIP-712](https://github.com/ethereum/EIPs/blob/master/EIPS/eip-712.md)) and returns the calculated signature.
|
Signs a chunk of structured data conformant to [EIP-712](https://github.com/ethereum/EIPs/blob/master/EIPS/eip-712.md) and returns the calculated signature.
|
||||||
|
|
||||||
#### Arguments
|
#### Arguments
|
||||||
- account [address]: account to sign with
|
- account [address]: account to sign with
|
||||||
@@ -469,7 +466,7 @@ Response
|
|||||||
|
|
||||||
### account_ecRecover
|
### account_ecRecover
|
||||||
|
|
||||||
#### Sign data
|
#### Recover the signing address
|
||||||
|
|
||||||
Derive the address from the account that was used to sign data with content type `text/plain` and the signature.
|
Derive the address from the account that was used to sign data with content type `text/plain` and the signature.
|
||||||
|
|
||||||
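A hedged Go sketch of the same recovery done locally with go-ethereum's `crypto` package. The key and message here are generated on the spot; signatures returned over the API typically carry `v` as 27/28, so a signature taken from Clef would need 27 subtracted from its last byte first:

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	// The text/plain scheme hashes "\x19Ethereum Signed Message:\n" + len(msg) + msg.
	msg := []byte("hello clef")
	hash := crypto.Keccak256([]byte(fmt.Sprintf("\x19Ethereum Signed Message:\n%d%s", len(msg), msg)))

	// Sign with a throwaway key so there is something to recover from.
	key, _ := crypto.GenerateKey()
	sig, _ := crypto.Sign(hash, key)

	// Recover the public key and derive the address, as account_ecRecover does.
	pub, err := crypto.SigToPub(hash, sig)
	if err != nil {
		fmt.Println("recover failed:", err)
		return
	}
	fmt.Println("recovered:", crypto.PubkeyToAddress(*pub) == crypto.PubkeyToAddress(key.PublicKey))
}
```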
@@ -487,7 +484,6 @@ Derive the address from the account that was used to sign data with content type
|
|||||||
"jsonrpc": "2.0",
|
"jsonrpc": "2.0",
|
||||||
"method": "account_ecRecover",
|
"method": "account_ecRecover",
|
||||||
"params": [
|
"params": [
|
||||||
"data/plain",
|
|
||||||
"0xaabbccdd",
|
"0xaabbccdd",
|
||||||
"0x5b6693f153b48ec1c706ba4169960386dbaa6903e249cc79a8e6ddc434451d417e1e57327872c7f538beeb323c300afa9999a3d4a5de6caf3be0d5ef832b67ef1c"
|
"0x5b6693f153b48ec1c706ba4169960386dbaa6903e249cc79a8e6ddc434451d417e1e57327872c7f538beeb323c300afa9999a3d4a5de6caf3be0d5ef832b67ef1c"
|
||||||
]
|
]
|
||||||
@@ -503,117 +499,36 @@ Response
|
|||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
### account_import
|
### account_version
|
||||||
|
|
||||||
#### Import account
|
#### Get external API version
|
||||||
Import a private key into the keystore. The imported key is expected to be encrypted according to the web3 keystore
|
|
||||||
format.
|
Get the version of the external API used by Clef.
|
||||||
|
|
||||||
#### Arguments
|
#### Arguments
|
||||||
- account [object]: key in [web3 keystore format](https://github.com/ethereum/wiki/wiki/Web3-Secret-Storage-Definition) (retrieved with account_export)
|
|
||||||
|
None
|
||||||
|
|
||||||
#### Result
|
#### Result
|
||||||
- imported key [object]:
|
|
||||||
- key.address [address]: address of the imported key
|
* external API version [string]
|
||||||
- key.type [string]: type of the account
|
|
||||||
- key.url [string]: key URL
|
|
||||||
|
|
||||||
#### Sample call
|
#### Sample call
|
||||||
```json
|
```json
|
||||||
{
|
{
|
||||||
"id": 6,
|
"id": 0,
|
||||||
"jsonrpc": "2.0",
|
"jsonrpc": "2.0",
|
||||||
"method": "account_import",
|
"method": "account_version",
|
||||||
"params": [
|
"params": []
|
||||||
{
|
|
||||||
"address": "c7412fc59930fd90099c917a50e5f11d0934b2f5",
|
|
||||||
"crypto": {
|
|
||||||
"cipher": "aes-128-ctr",
|
|
||||||
"cipherparams": {
|
|
||||||
"iv": "401c39a7c7af0388491c3d3ecb39f532"
|
|
||||||
},
|
|
||||||
"ciphertext": "eb045260b18dd35cd0e6d99ead52f8fa1e63a6b0af2d52a8de198e59ad783204",
|
|
||||||
"kdf": "scrypt",
|
|
||||||
"kdfparams": {
|
|
||||||
"dklen": 32,
|
|
||||||
"n": 262144,
|
|
||||||
"p": 1,
|
|
||||||
"r": 8,
|
|
||||||
"salt": "9a657e3618527c9b5580ded60c12092e5038922667b7b76b906496f021bb841a"
|
|
||||||
},
|
|
||||||
"mac": "880dc10bc06e9cec78eb9830aeb1e7a4a26b4c2c19615c94acb632992b952806"
|
|
||||||
},
|
|
||||||
"id": "09bccb61-b8d3-4e93-bf4f-205a8194f0b9",
|
|
||||||
"version": 3
|
|
||||||
}
|
|
||||||
]
|
|
||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
Response
|
Response
|
||||||
|
|
||||||
```json
|
```json
|
||||||
{
|
{
|
||||||
"id": 6,
|
"id": 0,
|
||||||
"jsonrpc": "2.0",
|
"jsonrpc": "2.0",
|
||||||
"result": {
|
"result": "6.0.0"
|
||||||
"address": "0xc7412fc59930fd90099c917a50e5f11d0934b2f5",
|
|
||||||
"type": "account",
|
|
||||||
"url": "keystore:///tmp/keystore/UTC--2017-08-24T11-00-42.032024108Z--c7412fc59930fd90099c917a50e5f11d0934b2f5"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
### account_export
|
|
||||||
|
|
||||||
#### Export account from keystore
|
|
||||||
Export a private key from the keystore. The exported private key is encrypted with the original password. When the
|
|
||||||
key is imported later this password is required.
|
|
||||||
|
|
||||||
#### Arguments
|
|
||||||
- account [address]: export private key that is associated with this account
|
|
||||||
|
|
||||||
#### Result
|
|
||||||
- exported key, see [web3 keystore format](https://github.com/ethereum/wiki/wiki/Web3-Secret-Storage-Definition) for
|
|
||||||
more information
|
|
||||||
|
|
||||||
#### Sample call
|
|
||||||
```json
|
|
||||||
{
|
|
||||||
"id": 5,
|
|
||||||
"jsonrpc": "2.0",
|
|
||||||
"method": "account_export",
|
|
||||||
"params": [
|
|
||||||
"0xc7412fc59930fd90099c917a50e5f11d0934b2f5"
|
|
||||||
]
|
|
||||||
}
|
|
||||||
```
|
|
||||||
Response
|
|
||||||
|
|
||||||
```json
|
|
||||||
{
|
|
||||||
"id": 5,
|
|
||||||
"jsonrpc": "2.0",
|
|
||||||
"result": {
|
|
||||||
"address": "c7412fc59930fd90099c917a50e5f11d0934b2f5",
|
|
||||||
"crypto": {
|
|
||||||
"cipher": "aes-128-ctr",
|
|
||||||
"cipherparams": {
|
|
||||||
"iv": "401c39a7c7af0388491c3d3ecb39f532"
|
|
||||||
},
|
|
||||||
"ciphertext": "eb045260b18dd35cd0e6d99ead52f8fa1e63a6b0af2d52a8de198e59ad783204",
|
|
||||||
"kdf": "scrypt",
|
|
||||||
"kdfparams": {
|
|
||||||
"dklen": 32,
|
|
||||||
"n": 262144,
|
|
||||||
"p": 1,
|
|
||||||
"r": 8,
|
|
||||||
"salt": "9a657e3618527c9b5580ded60c12092e5038922667b7b76b906496f021bb841a"
|
|
||||||
},
|
|
||||||
"mac": "880dc10bc06e9cec78eb9830aeb1e7a4a26b4c2c19615c94acb632992b952806"
|
|
||||||
},
|
|
||||||
"id": "09bccb61-b8d3-4e93-bf4f-205a8194f0b9",
|
|
||||||
"version": 3
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
@@ -625,7 +540,7 @@ By starting the signer with the switch `--stdio-ui-test`, the signer will invoke
|
|||||||
denials. This can be used during development to ensure that the API is (at least somewhat) correctly implemented.
|
denials. This can be used during development to ensure that the API is (at least somewhat) correctly implemented.
|
||||||
See `pythonsigner`, which can be invoked via `python3 pythonsigner.py test` to perform the 'denial-handshake-test'.
|
See `pythonsigner`, which can be invoked via `python3 pythonsigner.py test` to perform the 'denial-handshake-test'.
|
||||||
|
|
||||||
All methods in this API uses object-based parameters, so that there can be no mixups of parameters: each piece of data is accessed by key.
|
All methods in this API use object-based parameters, so that there can be no mixup of parameters: each piece of data is accessed by key.
|
||||||
|
|
||||||
See the [ui API changelog](intapi_changelog.md) for information about changes to this API.
|
See the [ui API changelog](intapi_changelog.md) for information about changes to this API.
|
||||||
|
|
||||||
@@ -784,12 +699,10 @@ Invoked when a request for account listing has been made.
|
|||||||
{
|
{
|
||||||
"accounts": [
|
"accounts": [
|
||||||
{
|
{
|
||||||
"type": "Account",
|
|
||||||
"url": "keystore:///home/bazonk/.ethereum/keystore/UTC--2017-11-20T14-44-54.089682944Z--123409812340981234098123409812deadbeef42",
|
"url": "keystore:///home/bazonk/.ethereum/keystore/UTC--2017-11-20T14-44-54.089682944Z--123409812340981234098123409812deadbeef42",
|
||||||
"address": "0x123409812340981234098123409812deadbeef42"
|
"address": "0x123409812340981234098123409812deadbeef42"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"type": "Account",
|
|
||||||
"url": "keystore:///home/bazonk/.ethereum/keystore/UTC--2017-11-23T21-59-03.199240693Z--cafebabedeadbeef34098123409812deadbeef42",
|
"url": "keystore:///home/bazonk/.ethereum/keystore/UTC--2017-11-23T21-59-03.199240693Z--cafebabedeadbeef34098123409812deadbeef42",
|
||||||
"address": "0xcafebabedeadbeef34098123409812deadbeef42"
|
"address": "0xcafebabedeadbeef34098123409812deadbeef42"
|
||||||
}
|
}
|
||||||
@@ -819,7 +732,13 @@ Invoked when a request for account listing has been made.
|
|||||||
{
|
{
|
||||||
"address": "0x123409812340981234098123409812deadbeef42",
|
"address": "0x123409812340981234098123409812deadbeef42",
|
||||||
"raw_data": "0x01020304",
|
"raw_data": "0x01020304",
|
||||||
"message": "\u0019Ethereum Signed Message:\n4\u0001\u0002\u0003\u0004",
|
"messages": [
|
||||||
|
{
|
||||||
|
"name": "message",
|
||||||
|
"value": "\u0019Ethereum Signed Message:\n4\u0001\u0002\u0003\u0004",
|
||||||
|
"type": "text/plain"
|
||||||
|
}
|
||||||
|
],
|
||||||
"hash": "0x7e3a4e7a9d1744bc5c675c25e1234ca8ed9162bd17f78b9085e48047c15ac310",
|
"hash": "0x7e3a4e7a9d1744bc5c675c25e1234ca8ed9162bd17f78b9085e48047c15ac310",
|
||||||
"meta": {
|
"meta": {
|
||||||
"remote": "signer binary",
|
"remote": "signer binary",
|
||||||
@@ -829,12 +748,34 @@ Invoked when a request for account listing has been made.
|
|||||||
}
|
}
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### ApproveNewAccount / `ui_approveNewAccount`
|
||||||
|
|
||||||
|
Invoked when a request for creating a new account has been made.
|
||||||
|
|
||||||
|
#### Sample call
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"jsonrpc": "2.0",
|
||||||
|
"id": 4,
|
||||||
|
"method": "ui_approveNewAccount",
|
||||||
|
"params": [
|
||||||
|
{
|
||||||
|
"meta": {
|
||||||
|
"remote": "signer binary",
|
||||||
|
"local": "main",
|
||||||
|
"scheme": "in-proc"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
### ShowInfo / `ui_showInfo`
|
### ShowInfo / `ui_showInfo`
|
||||||
|
|
||||||
The UI should show the info to the user. Does not expect response.
|
The UI should show the info (a single message) to the user. Does not expect response.
|
||||||
|
|
||||||
#### Sample call
|
#### Sample call
|
||||||
|
|
||||||
@@ -844,9 +785,7 @@ The UI should show the info to the user. Does not expect response.
|
|||||||
"id": 9,
|
"id": 9,
|
||||||
"method": "ui_showInfo",
|
"method": "ui_showInfo",
|
||||||
"params": [
|
"params": [
|
||||||
{
|
"Tests completed"
|
||||||
"text": "Tests completed"
|
|
||||||
}
|
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -854,18 +793,16 @@ The UI should show the info to the user. Does not expect response.
|
|||||||
|
|
||||||
### ShowError / `ui_showError`
|
### ShowError / `ui_showError`
|
||||||
|
|
||||||
The UI should show the info to the user. Does not expect response.
|
The UI should show the error (a single message) to the user. Does not expect response.
|
||||||
|
|
||||||
```json
|
```json
|
||||||
|
|
||||||
{
|
{
|
||||||
"jsonrpc": "2.0",
|
"jsonrpc": "2.0",
|
||||||
"id": 2,
|
"id": 2,
|
||||||
"method": "ShowError",
|
"method": "ui_showError",
|
||||||
"params": [
|
"params": [
|
||||||
{
|
"Something bad happened!"
|
||||||
"text": "Testing 'ShowError'"
|
|
||||||
}
|
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -879,9 +816,36 @@ When implementing rate-limited rules, this callback should be used.

TLDR; Use this method to keep track of signed transactions, instead of using the data in `ApproveTx`.

+Example call:
+```json
+
+{
+  "jsonrpc": "2.0",
+  "id": 1,
+  "method": "ui_onApprovedTx",
+  "params": [
+    {
+      "raw": "0xf88380018203339407a565b7ed7d7a678680a4c162885bedbb695fe080a44401a6e4000000000000000000000000000000000000000000000000000000000000001226a0223a7c9bcf5531c99be5ea7082183816eb20cfe0bbc322e97cc5c7f71ab8b20ea02aadee6b34b45bb15bc42d9c09de4a6754e7000908da72d48cc7704971491663",
+      "tx": {
+        "nonce": "0x0",
+        "gasPrice": "0x1",
+        "gas": "0x333",
+        "to": "0x07a565b7ed7d7a678680a4c162885bedbb695fe0",
+        "value": "0x0",
+        "input": "0x4401a6e40000000000000000000000000000000000000000000000000000000000000012",
+        "v": "0x26",
+        "r": "0x223a7c9bcf5531c99be5ea7082183816eb20cfe0bbc322e97cc5c7f71ab8b20e",
+        "s": "0x2aadee6b34b45bb15bc42d9c09de4a6754e7000908da72d48cc7704971491663",
+        "hash": "0xeba2df809e7a612a0a0d444ccfa5c839624bdc00dd29e3340d46df3870f8a30e"
+      }
+    }
+  ]
+}
+```

### OnSignerStartup / `ui_onSignerStartup`

-This method provide the UI with information about what API version the signer uses (both internal and external) aswell as build-info and external API,
+This method provides the UI with information about what API version the signer uses (both internal and external) as well as build-info and external API,
in k/v-form.

Example call:
@@ -905,6 +869,27 @@ Example call:

```

+### OnInputRequired / `ui_onInputRequired`
+
+Invoked when Clef requires user input (e.g. a password).
+
+Example call:
+```json
+
+{
+  "jsonrpc": "2.0",
+  "id": 1,
+  "method": "ui_onInputRequired",
+  "params": [
+    {
+      "title": "Account password",
+      "prompt": "Please enter the password for account 0x694267f14675d7e1b9494fd8d72fefe1755710fa",
+      "isPassword": true
+    }
+  ]
+}
+```

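For orientation (not part of the diff above): the UI answers `ui_onInputRequired` with a `UserInputResponse`. Judging from the `UserInputResponse` struct referenced in the changelog further down and the `resp.Text` usage in `readMasterKey`, the reply carries the entered text in a single `text` field. A minimal, assumed sketch of such a reply (the id simply mirrors the request) could look like:

```json
{
  "jsonrpc": "2.0",
  "id": 1,
  "result": {
    "text": "the-entered-password"
  }
}
```
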
### Rules for UI apis

@@ -3,7 +3,7 @@
These data types are defined in the channel between clef and the UI
### SignDataRequest

-SignDataRequest contains information about a pending request to sign some data. The data to be signed can be of various types, defined by content-type. Clef has done most of the work in canonicalizing and making sense of the data, and it's up to the UI to presentthe user with the contents of the `message`
+SignDataRequest contains information about a pending request to sign some data. The data to be signed can be of various types, defined by content-type. Clef has done most of the work in canonicalizing and making sense of the data, and it's up to the UI to present the user with the contents of the `message`

Example:
```json
@@ -34,7 +34,7 @@ There are two ways that this can be achieved: integrated via Qubes or integrated

#### 1. Qubes Integrated

-Qubes provdes a facility for inter-qubes communication via `qrexec`. A qube can request to make a cross-qube RPC request
+Qubes provides a facility for inter-qubes communication via `qrexec`. A qube can request to make a cross-qube RPC request
to another qube. The OS then asks the user if the call is permitted.



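As an aside (not part of this patch): whether a qube may invoke the service at all is decided by a qrexec policy file in dom0. Assuming the classic Qubes 4.0 policy format and purely illustrative VM names, a policy for the `qubes.Clefsign` service used below might look roughly like:

```text
# dom0:/etc/qubes-rpc/policy/qubes.Clefsign  (illustrative sketch)
work     debian-work   ask
$anyvm   $anyvm        deny
```

With `ask`, the OS prompts the user before every cross-qube call, which is the behaviour described above.
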
@@ -48,7 +48,7 @@ This is how [Split GPG](https://www.qubes-os.org/doc/split-gpg/) is implemented.



-On the `target` qubes, we need to define the rpc service.
+On the `target` qubes, we need to define the RPC service.

[qubes.Clefsign](qubes/qubes.Clefsign):

@@ -135,11 +135,11 @@ $ cat newaccnt.json
$ cat newaccnt.json| qrexec-client-vm debian-work qubes.Clefsign
```

-This should pop up first a dialog to allow the IPC call:
+A dialog should pop up first to allow the IPC call:



-Followed by a GTK-dialog to approve the operation
+Followed by a GTK-dialog to approve the operation:



@@ -169,7 +169,7 @@ However, it comes with a couple of drawbacks:
- The `Origin` header must be forwarded
- Information about the remote ip must be added as a `X-Forwarded-For`. However, Clef cannot always trust an `XFF` header,
since malicious clients may lie about `XFF` in order to fool the http server into believing it comes from another address.
-- Even with a policy in place to allow rpc-calls between `caller` and `target`, there will be several popups:
+- Even with a policy in place to allow RPC calls between `caller` and `target`, there will be several popups:
  - One qubes-specific where the user specifies the `target` vm
  - One clef-specific to approve the transaction

@@ -177,7 +177,7 @@ However, it comes with a couple of drawbacks:
#### 2. Network integrated

The second way to set up Clef on a qubes system is to allow networking, and have Clef listen to a port which is accessible
-form other qubes.
+from other qubes.



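For concreteness (not shown in the patch): given the flag renames in `cmd/clef/main.go` further down (`--http.port`, plus the `utils.HTTPEnabledFlag`/`utils.HTTPListenAddrFlag` wiring), a network-reachable Clef would presumably be started along these lines, where the address and port are placeholders and the exact flag spellings are an assumption:

```text
$ clef --http --http.addr 10.137.0.10 --http.port 8550
```
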
@@ -186,13 +186,13 @@ form other qubes.

## USBArmory

-The [USB armory](https://inversepath.com/usbarmory) is an open source hardware design with an 800 Mhz ARM processor. It is a pocket-size
+The [USB armory](https://inversepath.com/usbarmory) is an open source hardware design with an 800 MHz ARM processor. It is a pocket-size
computer. When inserted into a laptop, it identifies itself as a USB network interface, basically adding another network
to your computer. Over this new network interface, you can SSH into the device.

Running Clef off a USB armory means that you can use the armory as a very versatile offline computer, which only
ever connects to a local network between your computer and the device itself.

-Needless to say, the while this model should be fairly secure against remote attacks, an attacker with physical access
+Needless to say, while this model should be fairly secure against remote attacks, an attacker with physical access
to the USB Armory would trivially be able to extract the contents of the device filesystem.

@@ -10,6 +10,17 @@ TL;DR: Given a version number MAJOR.MINOR.PATCH, increment the:

Additional labels for pre-release and build metadata are available as extensions to the MAJOR.MINOR.PATCH format.

+### 7.0.1
+
+Added `clef_New` to the internal API callable from a UI.
+
+> `New` creates a new password protected Account. The private key is protected with
+> the given password. Users are responsible to backup the private key that is stored
+> in the keystore location that was specified when this API was created.
+> This method is the same as New on the external API, the difference being that
+> this implementation does not ask for confirmation, since it's initiated by
+> the user

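For illustration only (the changelog does not spell this out): a UI could invoke the new method over its channel to Clef roughly as below. The request id and the empty parameter list are assumptions; the result would be the address of the freshly generated account.

```json
{
  "jsonrpc": "2.0",
  "id": 6,
  "method": "clef_New",
  "params": []
}
```
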
### 7.0.0

- The `message` field was renamed to `messages` in all data signing request methods to better reflect that it's a list, not a value.
@@ -150,7 +161,7 @@ UserInputResponse struct {
#### 1.2.0

* Add `OnStartup` method, to provide the UI with information about what API version
-the signer uses (both internal and external) aswell as build-info and external api.
+the signer uses (both internal and external) as well as build-info and external api.

Example call:
```json
211 cmd/clef/main.go
@@ -32,6 +32,7 @@ import (
|
|||||||
"os/user"
|
"os/user"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"runtime"
|
"runtime"
|
||||||
|
"sort"
|
||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
@@ -40,10 +41,10 @@ import (
|
|||||||
"github.com/ethereum/go-ethereum/cmd/utils"
|
"github.com/ethereum/go-ethereum/cmd/utils"
|
||||||
"github.com/ethereum/go-ethereum/common"
|
"github.com/ethereum/go-ethereum/common"
|
||||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||||
"github.com/ethereum/go-ethereum/console"
|
|
||||||
"github.com/ethereum/go-ethereum/core/types"
|
"github.com/ethereum/go-ethereum/core/types"
|
||||||
"github.com/ethereum/go-ethereum/crypto"
|
"github.com/ethereum/go-ethereum/crypto"
|
||||||
"github.com/ethereum/go-ethereum/internal/ethapi"
|
"github.com/ethereum/go-ethereum/internal/ethapi"
|
||||||
|
"github.com/ethereum/go-ethereum/internal/flags"
|
||||||
"github.com/ethereum/go-ethereum/log"
|
"github.com/ethereum/go-ethereum/log"
|
||||||
"github.com/ethereum/go-ethereum/node"
|
"github.com/ethereum/go-ethereum/node"
|
||||||
"github.com/ethereum/go-ethereum/params"
|
"github.com/ethereum/go-ethereum/params"
|
||||||
@@ -53,6 +54,7 @@ import (
|
|||||||
"github.com/ethereum/go-ethereum/signer/fourbyte"
|
"github.com/ethereum/go-ethereum/signer/fourbyte"
|
||||||
"github.com/ethereum/go-ethereum/signer/rules"
|
"github.com/ethereum/go-ethereum/signer/rules"
|
||||||
"github.com/ethereum/go-ethereum/signer/storage"
|
"github.com/ethereum/go-ethereum/signer/storage"
|
||||||
|
|
||||||
colorable "github.com/mattn/go-colorable"
|
colorable "github.com/mattn/go-colorable"
|
||||||
"github.com/mattn/go-isatty"
|
"github.com/mattn/go-isatty"
|
||||||
"gopkg.in/urfave/cli.v1"
|
"gopkg.in/urfave/cli.v1"
|
||||||
@@ -82,6 +84,10 @@ var (
|
|||||||
Name: "advanced",
|
Name: "advanced",
|
||||||
Usage: "If enabled, issues warnings instead of rejections for suspicious requests. Default off",
|
Usage: "If enabled, issues warnings instead of rejections for suspicious requests. Default off",
|
||||||
}
|
}
|
||||||
|
acceptFlag = cli.BoolFlag{
|
||||||
|
Name: "suppress-bootwarn",
|
||||||
|
Usage: "If set, does not show the warning during boot",
|
||||||
|
}
|
||||||
keystoreFlag = cli.StringFlag{
|
keystoreFlag = cli.StringFlag{
|
||||||
Name: "keystore",
|
Name: "keystore",
|
||||||
Value: filepath.Join(node.DefaultDataDir(), "keystore"),
|
Value: filepath.Join(node.DefaultDataDir(), "keystore"),
|
||||||
@@ -98,10 +104,15 @@ var (
|
|||||||
Usage: "Chain id to use for signing (1=mainnet, 3=Ropsten, 4=Rinkeby, 5=Goerli)",
|
Usage: "Chain id to use for signing (1=mainnet, 3=Ropsten, 4=Rinkeby, 5=Goerli)",
|
||||||
}
|
}
|
||||||
rpcPortFlag = cli.IntFlag{
|
rpcPortFlag = cli.IntFlag{
|
||||||
Name: "rpcport",
|
Name: "http.port",
|
||||||
Usage: "HTTP-RPC server listening port",
|
Usage: "HTTP-RPC server listening port",
|
||||||
Value: node.DefaultHTTPPort + 5,
|
Value: node.DefaultHTTPPort + 5,
|
||||||
}
|
}
|
||||||
|
legacyRPCPortFlag = cli.IntFlag{
|
||||||
|
Name: "rpcport",
|
||||||
|
Usage: "HTTP-RPC server listening port (Deprecated, please use --http.port).",
|
||||||
|
Value: node.DefaultHTTPPort + 5,
|
||||||
|
}
|
||||||
signerSecretFlag = cli.StringFlag{
|
signerSecretFlag = cli.StringFlag{
|
||||||
Name: "signersecret",
|
Name: "signersecret",
|
||||||
Usage: "A file containing the (encrypted) master seed to encrypt Clef data, e.g. keystore credentials and ruleset hash",
|
Usage: "A file containing the (encrypted) master seed to encrypt Clef data, e.g. keystore credentials and ruleset hash",
|
||||||
@@ -187,6 +198,22 @@ The setpw command stores a password for a given address (keyfile).
|
|||||||
Description: `
|
Description: `
|
||||||
The delpw command removes a password for a given address (keyfile).
|
The delpw command removes a password for a given address (keyfile).
|
||||||
`}
|
`}
|
||||||
|
newAccountCommand = cli.Command{
|
||||||
|
Action: utils.MigrateFlags(newAccount),
|
||||||
|
Name: "newaccount",
|
||||||
|
Usage: "Create a new account",
|
||||||
|
ArgsUsage: "",
|
||||||
|
Flags: []cli.Flag{
|
||||||
|
logLevelFlag,
|
||||||
|
keystoreFlag,
|
||||||
|
utils.LightKDFFlag,
|
||||||
|
acceptFlag,
|
||||||
|
},
|
||||||
|
Description: `
|
||||||
|
The newaccount command creates a new keystore-backed account. It is a convenience-method
|
||||||
|
which can be used in lieu of an external UI.`,
|
||||||
|
}
|
||||||
|
|
||||||
gendocCommand = cli.Command{
|
gendocCommand = cli.Command{
|
||||||
Action: GenDoc,
|
Action: GenDoc,
|
||||||
Name: "gendoc",
|
Name: "gendoc",
|
||||||
@@ -196,6 +223,42 @@ The gendoc generates example structures of the json-rpc communication types.
|
|||||||
`}
|
`}
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// AppHelpFlagGroups is the application flags, grouped by functionality.
|
||||||
|
var AppHelpFlagGroups = []flags.FlagGroup{
|
||||||
|
{
|
||||||
|
Name: "FLAGS",
|
||||||
|
Flags: []cli.Flag{
|
||||||
|
logLevelFlag,
|
||||||
|
keystoreFlag,
|
||||||
|
configdirFlag,
|
||||||
|
chainIdFlag,
|
||||||
|
utils.LightKDFFlag,
|
||||||
|
utils.NoUSBFlag,
|
||||||
|
utils.SmartCardDaemonPathFlag,
|
||||||
|
utils.HTTPListenAddrFlag,
|
||||||
|
utils.HTTPVirtualHostsFlag,
|
||||||
|
utils.IPCDisabledFlag,
|
||||||
|
utils.IPCPathFlag,
|
||||||
|
utils.HTTPEnabledFlag,
|
||||||
|
rpcPortFlag,
|
||||||
|
signerSecretFlag,
|
||||||
|
customDBFlag,
|
||||||
|
auditLogFlag,
|
||||||
|
ruleFlag,
|
||||||
|
stdiouiFlag,
|
||||||
|
testFlag,
|
||||||
|
advancedMode,
|
||||||
|
acceptFlag,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "ALIASED (deprecated)",
|
||||||
|
Flags: []cli.Flag{
|
||||||
|
legacyRPCPortFlag,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
app.Name = "Clef"
|
app.Name = "Clef"
|
||||||
app.Usage = "Manage Ethereum account operations"
|
app.Usage = "Manage Ethereum account operations"
|
||||||
@@ -207,11 +270,11 @@ func init() {
|
|||||||
utils.LightKDFFlag,
|
utils.LightKDFFlag,
|
||||||
utils.NoUSBFlag,
|
utils.NoUSBFlag,
|
||||||
utils.SmartCardDaemonPathFlag,
|
utils.SmartCardDaemonPathFlag,
|
||||||
utils.RPCListenAddrFlag,
|
utils.HTTPListenAddrFlag,
|
||||||
utils.RPCVirtualHostsFlag,
|
utils.HTTPVirtualHostsFlag,
|
||||||
utils.IPCDisabledFlag,
|
utils.IPCDisabledFlag,
|
||||||
utils.IPCPathFlag,
|
utils.IPCPathFlag,
|
||||||
utils.RPCEnabledFlag,
|
utils.HTTPEnabledFlag,
|
||||||
rpcPortFlag,
|
rpcPortFlag,
|
||||||
signerSecretFlag,
|
signerSecretFlag,
|
||||||
customDBFlag,
|
customDBFlag,
|
||||||
@@ -220,9 +283,51 @@ func init() {
|
|||||||
stdiouiFlag,
|
stdiouiFlag,
|
||||||
testFlag,
|
testFlag,
|
||||||
advancedMode,
|
advancedMode,
|
||||||
|
acceptFlag,
|
||||||
|
legacyRPCPortFlag,
|
||||||
}
|
}
|
||||||
app.Action = signer
|
app.Action = signer
|
||||||
app.Commands = []cli.Command{initCommand, attestCommand, setCredentialCommand, delCredentialCommand, gendocCommand}
|
app.Commands = []cli.Command{initCommand,
|
||||||
|
attestCommand,
|
||||||
|
setCredentialCommand,
|
||||||
|
delCredentialCommand,
|
||||||
|
newAccountCommand,
|
||||||
|
gendocCommand}
|
||||||
|
cli.CommandHelpTemplate = flags.CommandHelpTemplate
|
||||||
|
// Override the default app help template
|
||||||
|
cli.AppHelpTemplate = flags.ClefAppHelpTemplate
|
||||||
|
|
||||||
|
// Override the default app help printer, but only for the global app help
|
||||||
|
originalHelpPrinter := cli.HelpPrinter
|
||||||
|
cli.HelpPrinter = func(w io.Writer, tmpl string, data interface{}) {
|
||||||
|
if tmpl == flags.ClefAppHelpTemplate {
|
||||||
|
// Render out custom usage screen
|
||||||
|
originalHelpPrinter(w, tmpl, flags.HelpData{App: data, FlagGroups: AppHelpFlagGroups})
|
||||||
|
} else if tmpl == flags.CommandHelpTemplate {
|
||||||
|
// Iterate over all command specific flags and categorize them
|
||||||
|
categorized := make(map[string][]cli.Flag)
|
||||||
|
for _, flag := range data.(cli.Command).Flags {
|
||||||
|
if _, ok := categorized[flag.String()]; !ok {
|
||||||
|
categorized[flags.FlagCategory(flag, AppHelpFlagGroups)] = append(categorized[flags.FlagCategory(flag, AppHelpFlagGroups)], flag)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// sort to get a stable ordering
|
||||||
|
sorted := make([]flags.FlagGroup, 0, len(categorized))
|
||||||
|
for cat, flgs := range categorized {
|
||||||
|
sorted = append(sorted, flags.FlagGroup{Name: cat, Flags: flgs})
|
||||||
|
}
|
||||||
|
sort.Sort(flags.ByCategory(sorted))
|
||||||
|
|
||||||
|
// add sorted array to data and render with default printer
|
||||||
|
originalHelpPrinter(w, tmpl, map[string]interface{}{
|
||||||
|
"cmd": data,
|
||||||
|
"categorizedFlags": sorted,
|
||||||
|
})
|
||||||
|
} else {
|
||||||
|
originalHelpPrinter(w, tmpl, data)
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func main() {
|
func main() {
|
||||||
@@ -262,7 +367,7 @@ func initializeSecrets(c *cli.Context) error {
|
|||||||
text := "The master seed of clef will be locked with a password.\nPlease specify a password. Do not forget this password!"
|
text := "The master seed of clef will be locked with a password.\nPlease specify a password. Do not forget this password!"
|
||||||
var password string
|
var password string
|
||||||
for {
|
for {
|
||||||
password = getPassPhrase(text, true)
|
password = utils.GetPassPhrase(text, true)
|
||||||
if err := core.ValidatePasswordFormat(password); err != nil {
|
if err := core.ValidatePasswordFormat(password); err != nil {
|
||||||
fmt.Printf("invalid password: %v\n", err)
|
fmt.Printf("invalid password: %v\n", err)
|
||||||
} else {
|
} else {
|
||||||
@@ -335,7 +440,7 @@ func setCredential(ctx *cli.Context) error {
|
|||||||
utils.Fatalf("Invalid address specified: %s", addr)
|
utils.Fatalf("Invalid address specified: %s", addr)
|
||||||
}
|
}
|
||||||
address := common.HexToAddress(addr)
|
address := common.HexToAddress(addr)
|
||||||
password := getPassPhrase("Please enter a password to store for this address:", true)
|
password := utils.GetPassPhrase("Please enter a password to store for this address:", true)
|
||||||
fmt.Println()
|
fmt.Println()
|
||||||
|
|
||||||
stretchedKey, err := readMasterKey(ctx, nil)
|
stretchedKey, err := readMasterKey(ctx, nil)
|
||||||
@@ -381,14 +486,41 @@ func removeCredential(ctx *cli.Context) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func newAccount(c *cli.Context) error {
|
||||||
|
if err := initialize(c); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
// The newaccount is meant for users using the CLI, since 'real' external
|
||||||
|
// UIs can use the UI-api instead. So we'll just use the native CLI UI here.
|
||||||
|
var (
|
||||||
|
ui = core.NewCommandlineUI()
|
||||||
|
pwStorage storage.Storage = &storage.NoStorage{}
|
||||||
|
ksLoc = c.GlobalString(keystoreFlag.Name)
|
||||||
|
lightKdf = c.GlobalBool(utils.LightKDFFlag.Name)
|
||||||
|
)
|
||||||
|
log.Info("Starting clef", "keystore", ksLoc, "light-kdf", lightKdf)
|
||||||
|
am := core.StartClefAccountManager(ksLoc, true, lightKdf, "")
|
||||||
|
// This gives is us access to the external API
|
||||||
|
apiImpl := core.NewSignerAPI(am, 0, true, ui, nil, false, pwStorage)
|
||||||
|
// This gives us access to the internal API
|
||||||
|
internalApi := core.NewUIServerAPI(apiImpl)
|
||||||
|
addr, err := internalApi.New(context.Background())
|
||||||
|
if err == nil {
|
||||||
|
fmt.Printf("Generated account %v\n", addr.String())
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
func initialize(c *cli.Context) error {
|
func initialize(c *cli.Context) error {
|
||||||
// Set up the logger to print everything
|
// Set up the logger to print everything
|
||||||
logOutput := os.Stdout
|
logOutput := os.Stdout
|
||||||
if c.GlobalBool(stdiouiFlag.Name) {
|
if c.GlobalBool(stdiouiFlag.Name) {
|
||||||
logOutput = os.Stderr
|
logOutput = os.Stderr
|
||||||
// If using the stdioui, we can't do the 'confirm'-flow
|
// If using the stdioui, we can't do the 'confirm'-flow
|
||||||
fmt.Fprint(logOutput, legalWarning)
|
if !c.GlobalBool(acceptFlag.Name) {
|
||||||
} else {
|
fmt.Fprint(logOutput, legalWarning)
|
||||||
|
}
|
||||||
|
} else if !c.GlobalBool(acceptFlag.Name) {
|
||||||
if !confirm(legalWarning) {
|
if !confirm(legalWarning) {
|
||||||
return fmt.Errorf("aborted by user")
|
return fmt.Errorf("aborted by user")
|
||||||
}
|
}
|
||||||
@@ -456,7 +588,6 @@ func signer(c *cli.Context) error {
|
|||||||
api core.ExternalAPI
|
api core.ExternalAPI
|
||||||
pwStorage storage.Storage = &storage.NoStorage{}
|
pwStorage storage.Storage = &storage.NoStorage{}
|
||||||
)
|
)
|
||||||
|
|
||||||
configDir := c.GlobalString(configdirFlag.Name)
|
configDir := c.GlobalString(configdirFlag.Name)
|
||||||
if stretchedKey, err := readMasterKey(c, ui); err != nil {
|
if stretchedKey, err := readMasterKey(c, ui); err != nil {
|
||||||
log.Warn("Failed to open master, rules disabled", "err", err)
|
log.Warn("Failed to open master, rules disabled", "err", err)
|
||||||
@@ -534,22 +665,39 @@ func signer(c *cli.Context) error {
|
|||||||
Service: api,
|
Service: api,
|
||||||
Version: "1.0"},
|
Version: "1.0"},
|
||||||
}
|
}
|
||||||
if c.GlobalBool(utils.RPCEnabledFlag.Name) {
|
if c.GlobalBool(utils.HTTPEnabledFlag.Name) {
|
||||||
vhosts := splitAndTrim(c.GlobalString(utils.RPCVirtualHostsFlag.Name))
|
vhosts := splitAndTrim(c.GlobalString(utils.HTTPVirtualHostsFlag.Name))
|
||||||
cors := splitAndTrim(c.GlobalString(utils.RPCCORSDomainFlag.Name))
|
cors := splitAndTrim(c.GlobalString(utils.HTTPCORSDomainFlag.Name))
|
||||||
|
|
||||||
|
srv := rpc.NewServer()
|
||||||
|
err := node.RegisterApisFromWhitelist(rpcAPI, []string{"account"}, srv, false)
|
||||||
|
if err != nil {
|
||||||
|
utils.Fatalf("Could not register API: %w", err)
|
||||||
|
}
|
||||||
|
handler := node.NewHTTPHandlerStack(srv, cors, vhosts)
|
||||||
|
|
||||||
|
// set port
|
||||||
|
port := c.Int(rpcPortFlag.Name)
|
||||||
|
if c.GlobalIsSet(legacyRPCPortFlag.Name) {
|
||||||
|
if !c.GlobalIsSet(rpcPortFlag.Name) {
|
||||||
|
port = c.Int(legacyRPCPortFlag.Name)
|
||||||
|
}
|
||||||
|
log.Warn("The flag --rpcport is deprecated and will be removed in the future, please use --http.port")
|
||||||
|
}
|
||||||
|
|
||||||
// start http server
|
// start http server
|
||||||
httpEndpoint := fmt.Sprintf("%s:%d", c.GlobalString(utils.RPCListenAddrFlag.Name), c.Int(rpcPortFlag.Name))
|
httpEndpoint := fmt.Sprintf("%s:%d", c.GlobalString(utils.HTTPListenAddrFlag.Name), port)
|
||||||
listener, _, err := rpc.StartHTTPEndpoint(httpEndpoint, rpcAPI, []string{"account"}, cors, vhosts, rpc.DefaultHTTPTimeouts)
|
httpServer, addr, err := node.StartHTTPEndpoint(httpEndpoint, rpc.DefaultHTTPTimeouts, handler)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
utils.Fatalf("Could not start RPC api: %v", err)
|
utils.Fatalf("Could not start RPC api: %v", err)
|
||||||
}
|
}
|
||||||
extapiURL = fmt.Sprintf("http://%s", httpEndpoint)
|
extapiURL = fmt.Sprintf("http://%v/", addr)
|
||||||
log.Info("HTTP endpoint opened", "url", extapiURL)
|
log.Info("HTTP endpoint opened", "url", extapiURL)
|
||||||
|
|
||||||
defer func() {
|
defer func() {
|
||||||
listener.Close()
|
// Don't bother imposing a timeout here.
|
||||||
log.Info("HTTP endpoint closed", "url", httpEndpoint)
|
httpServer.Shutdown(context.Background())
|
||||||
|
log.Info("HTTP endpoint closed", "url", extapiURL)
|
||||||
}()
|
}()
|
||||||
}
|
}
|
||||||
if !c.GlobalBool(utils.IPCDisabledFlag.Name) {
|
if !c.GlobalBool(utils.IPCDisabledFlag.Name) {
|
||||||
@@ -659,7 +807,7 @@ func readMasterKey(ctx *cli.Context, ui core.UIClientAPI) ([]byte, error) {
|
|||||||
}
|
}
|
||||||
password = resp.Text
|
password = resp.Text
|
||||||
} else {
|
} else {
|
||||||
password = getPassPhrase("Decrypt master seed of clef", false)
|
password = utils.GetPassPhrase("Decrypt master seed of clef", false)
|
||||||
}
|
}
|
||||||
masterSeed, err := decryptSeed(cipherKey, password)
|
masterSeed, err := decryptSeed(cipherKey, password)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -854,27 +1002,6 @@ func testExternalUI(api *core.SignerAPI) {
|
|||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// getPassPhrase retrieves the password associated with clef, either fetched
|
|
||||||
// from a list of preloaded passphrases, or requested interactively from the user.
|
|
||||||
// TODO: there are many `getPassPhrase` functions, it will be better to abstract them into one.
|
|
||||||
func getPassPhrase(prompt string, confirmation bool) string {
|
|
||||||
fmt.Println(prompt)
|
|
||||||
password, err := console.Stdin.PromptPassword("Password: ")
|
|
||||||
if err != nil {
|
|
||||||
utils.Fatalf("Failed to read password: %v", err)
|
|
||||||
}
|
|
||||||
if confirmation {
|
|
||||||
confirm, err := console.Stdin.PromptPassword("Repeat password: ")
|
|
||||||
if err != nil {
|
|
||||||
utils.Fatalf("Failed to read password confirmation: %v", err)
|
|
||||||
}
|
|
||||||
if password != confirm {
|
|
||||||
utils.Fatalf("Passwords do not match")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return password
|
|
||||||
}
|
|
||||||
|
|
||||||
type encryptedSeedStorage struct {
|
type encryptedSeedStorage struct {
|
||||||
Description string `json:"description"`
|
Description string `json:"description"`
|
||||||
Version int `json:"version"`
|
Version int `json:"version"`
|
||||||
@@ -925,7 +1052,7 @@ func GenDoc(ctx *cli.Context) {
|
|||||||
if data, err := json.MarshalIndent(v, "", " "); err == nil {
|
if data, err := json.MarshalIndent(v, "", " "); err == nil {
|
||||||
output = append(output, fmt.Sprintf("### %s\n\n%s\n\nExample:\n```json\n%s\n```", name, desc, data))
|
output = append(output, fmt.Sprintf("### %s\n\n%s\n\nExample:\n```json\n%s\n```", name, desc, data))
|
||||||
} else {
|
} else {
|
||||||
log.Error("Error generating output", err)
|
log.Error("Error generating output", "err", err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
)
|
)
|
||||||
|
|||||||
@@ -1,6 +1,6 @@
## Initializing Clef

-First thing's first, Clef needs to store some data itself. Since that data might be sensitive (passwords, signing rules, accounts), Clef's entire storage is encrypted. To support encrypting data, the first step is to initialize Clef with a random master seed, itself too encrypted with your chosen password:
+First things first, Clef needs to store some data itself. Since that data might be sensitive (passwords, signing rules, accounts), Clef's entire storage is encrypted. To support encrypting data, the first step is to initialize Clef with a random master seed, itself too encrypted with your chosen password:

```text
$ clef init
@@ -20,14 +20,13 @@ import (
	"time"

	"github.com/ethereum/go-ethereum/log"
-	"github.com/ethereum/go-ethereum/p2p/discover"
	"github.com/ethereum/go-ethereum/p2p/enode"
)

type crawler struct {
	input  nodeSet
	output nodeSet
-	disc   *discover.UDPv4
+	disc   resolver
	iters     []enode.Iterator
	inputIter enode.Iterator
	ch        chan *enode.Node
@@ -37,7 +36,11 @@ type crawler struct {
	revalidateInterval time.Duration
}

-func newCrawler(input nodeSet, disc *discover.UDPv4, iters ...enode.Iterator) *crawler {
+type resolver interface {
+	RequestENR(*enode.Node) (*enode.Node, error)
+}
+
+func newCrawler(input nodeSet, disc resolver, iters ...enode.Iterator) *crawler {
	c := &crawler{
		input:  input,
		output: make(nodeSet, len(input)),
@@ -63,6 +66,7 @@ func (c *crawler) run(timeout time.Duration) nodeSet {
		doneCh    = make(chan enode.Iterator, len(c.iters))
		liveIters = len(c.iters)
	)
+	defer timeoutTimer.Stop()
	for _, it := range c.iters {
		go c.runIterator(doneCh, it)
	}
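Not part of the patch, but to illustrate the point of narrowing `disc` from `*discover.UDPv4` to the small `resolver` interface above: anything that can answer `RequestENR` can now drive the crawler, which also makes it trivial to fake in tests. A hypothetical stand-in could be:

```go
// fakeResolver is a hypothetical test double satisfying the resolver interface.
type fakeResolver struct{}

// RequestENR pretends the remote node answered with its record unchanged.
func (fakeResolver) RequestENR(n *enode.Node) (*enode.Node, error) {
	return n, nil
}

// A crawler can then be exercised without any networking, e.g.:
//   c := newCrawler(nodeSet{}, fakeResolver{})
```
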
@@ -19,11 +19,14 @@ package main
|
|||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"net"
|
"net"
|
||||||
|
"os"
|
||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"github.com/ethereum/go-ethereum/cmd/devp2p/internal/v4test"
|
||||||
"github.com/ethereum/go-ethereum/common"
|
"github.com/ethereum/go-ethereum/common"
|
||||||
"github.com/ethereum/go-ethereum/crypto"
|
"github.com/ethereum/go-ethereum/crypto"
|
||||||
|
"github.com/ethereum/go-ethereum/internal/utesting"
|
||||||
"github.com/ethereum/go-ethereum/p2p/discover"
|
"github.com/ethereum/go-ethereum/p2p/discover"
|
||||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||||
"github.com/ethereum/go-ethereum/params"
|
"github.com/ethereum/go-ethereum/params"
|
||||||
@@ -40,6 +43,7 @@ var (
|
|||||||
discv4ResolveCommand,
|
discv4ResolveCommand,
|
||||||
discv4ResolveJSONCommand,
|
discv4ResolveJSONCommand,
|
||||||
discv4CrawlCommand,
|
discv4CrawlCommand,
|
||||||
|
discv4TestCommand,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
discv4PingCommand = cli.Command{
|
discv4PingCommand = cli.Command{
|
||||||
@@ -74,6 +78,12 @@ var (
|
|||||||
Action: discv4Crawl,
|
Action: discv4Crawl,
|
||||||
Flags: []cli.Flag{bootnodesFlag, crawlTimeoutFlag},
|
Flags: []cli.Flag{bootnodesFlag, crawlTimeoutFlag},
|
||||||
}
|
}
|
||||||
|
discv4TestCommand = cli.Command{
|
||||||
|
Name: "test",
|
||||||
|
Usage: "Runs tests against a node",
|
||||||
|
Action: discv4Test,
|
||||||
|
Flags: []cli.Flag{remoteEnodeFlag, testPatternFlag, testListen1Flag, testListen2Flag},
|
||||||
|
}
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
@@ -81,11 +91,42 @@ var (
|
|||||||
Name: "bootnodes",
|
Name: "bootnodes",
|
||||||
Usage: "Comma separated nodes used for bootstrapping",
|
Usage: "Comma separated nodes used for bootstrapping",
|
||||||
}
|
}
|
||||||
|
nodekeyFlag = cli.StringFlag{
|
||||||
|
Name: "nodekey",
|
||||||
|
Usage: "Hex-encoded node key",
|
||||||
|
}
|
||||||
|
nodedbFlag = cli.StringFlag{
|
||||||
|
Name: "nodedb",
|
||||||
|
Usage: "Nodes database location",
|
||||||
|
}
|
||||||
|
listenAddrFlag = cli.StringFlag{
|
||||||
|
Name: "addr",
|
||||||
|
Usage: "Listening address",
|
||||||
|
}
|
||||||
crawlTimeoutFlag = cli.DurationFlag{
|
crawlTimeoutFlag = cli.DurationFlag{
|
||||||
Name: "timeout",
|
Name: "timeout",
|
||||||
Usage: "Time limit for the crawl.",
|
Usage: "Time limit for the crawl.",
|
||||||
Value: 30 * time.Minute,
|
Value: 30 * time.Minute,
|
||||||
}
|
}
|
||||||
|
remoteEnodeFlag = cli.StringFlag{
|
||||||
|
Name: "remote",
|
||||||
|
Usage: "Enode of the remote node under test",
|
||||||
|
EnvVar: "REMOTE_ENODE",
|
||||||
|
}
|
||||||
|
testPatternFlag = cli.StringFlag{
|
||||||
|
Name: "run",
|
||||||
|
Usage: "Pattern of test suite(s) to run",
|
||||||
|
}
|
||||||
|
testListen1Flag = cli.StringFlag{
|
||||||
|
Name: "listen1",
|
||||||
|
Usage: "IP address of the first tester",
|
||||||
|
Value: v4test.Listen1,
|
||||||
|
}
|
||||||
|
testListen2Flag = cli.StringFlag{
|
||||||
|
Name: "listen2",
|
||||||
|
Usage: "IP address of the second tester",
|
||||||
|
Value: v4test.Listen2,
|
||||||
|
}
|
||||||
)
|
)
|
||||||
|
|
||||||
func discv4Ping(ctx *cli.Context) error {
|
func discv4Ping(ctx *cli.Context) error {
|
||||||
@@ -172,6 +213,84 @@ func discv4Crawl(ctx *cli.Context) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func discv4Test(ctx *cli.Context) error {
|
||||||
|
// Configure test package globals.
|
||||||
|
if !ctx.IsSet(remoteEnodeFlag.Name) {
|
||||||
|
return fmt.Errorf("Missing -%v", remoteEnodeFlag.Name)
|
||||||
|
}
|
||||||
|
v4test.Remote = ctx.String(remoteEnodeFlag.Name)
|
||||||
|
v4test.Listen1 = ctx.String(testListen1Flag.Name)
|
||||||
|
v4test.Listen2 = ctx.String(testListen2Flag.Name)
|
||||||
|
|
||||||
|
// Filter and run test cases.
|
||||||
|
tests := v4test.AllTests
|
||||||
|
if ctx.IsSet(testPatternFlag.Name) {
|
||||||
|
tests = utesting.MatchTests(tests, ctx.String(testPatternFlag.Name))
|
||||||
|
}
|
||||||
|
results := utesting.RunTests(tests, os.Stdout)
|
||||||
|
if fails := utesting.CountFailures(results); fails > 0 {
|
||||||
|
return fmt.Errorf("%v/%v tests passed.", len(tests)-fails, len(tests))
|
||||||
|
}
|
||||||
|
fmt.Printf("%v/%v passed\n", len(tests), len(tests))
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// startV4 starts an ephemeral discovery V4 node.
|
||||||
|
func startV4(ctx *cli.Context) *discover.UDPv4 {
|
||||||
|
ln, config := makeDiscoveryConfig(ctx)
|
||||||
|
socket := listen(ln, ctx.String(listenAddrFlag.Name))
|
||||||
|
disc, err := discover.ListenV4(socket, ln, config)
|
||||||
|
if err != nil {
|
||||||
|
exit(err)
|
||||||
|
}
|
||||||
|
return disc
|
||||||
|
}
|
||||||
|
|
||||||
|
func makeDiscoveryConfig(ctx *cli.Context) (*enode.LocalNode, discover.Config) {
|
||||||
|
var cfg discover.Config
|
||||||
|
|
||||||
|
if ctx.IsSet(nodekeyFlag.Name) {
|
||||||
|
key, err := crypto.HexToECDSA(ctx.String(nodekeyFlag.Name))
|
||||||
|
if err != nil {
|
||||||
|
exit(fmt.Errorf("-%s: %v", nodekeyFlag.Name, err))
|
||||||
|
}
|
||||||
|
cfg.PrivateKey = key
|
||||||
|
} else {
|
||||||
|
cfg.PrivateKey, _ = crypto.GenerateKey()
|
||||||
|
}
|
||||||
|
|
||||||
|
if commandHasFlag(ctx, bootnodesFlag) {
|
||||||
|
bn, err := parseBootnodes(ctx)
|
||||||
|
if err != nil {
|
||||||
|
exit(err)
|
||||||
|
}
|
||||||
|
cfg.Bootnodes = bn
|
||||||
|
}
|
||||||
|
|
||||||
|
dbpath := ctx.String(nodedbFlag.Name)
|
||||||
|
db, err := enode.OpenDB(dbpath)
|
||||||
|
if err != nil {
|
||||||
|
exit(err)
|
||||||
|
}
|
||||||
|
ln := enode.NewLocalNode(db, cfg.PrivateKey)
|
||||||
|
return ln, cfg
|
||||||
|
}
|
||||||
|
|
||||||
|
func listen(ln *enode.LocalNode, addr string) *net.UDPConn {
|
||||||
|
if addr == "" {
|
||||||
|
addr = "0.0.0.0:0"
|
||||||
|
}
|
||||||
|
socket, err := net.ListenPacket("udp4", addr)
|
||||||
|
if err != nil {
|
||||||
|
exit(err)
|
||||||
|
}
|
||||||
|
usocket := socket.(*net.UDPConn)
|
||||||
|
uaddr := socket.LocalAddr().(*net.UDPAddr)
|
||||||
|
ln.SetFallbackIP(net.IP{127, 0, 0, 1})
|
||||||
|
ln.SetFallbackUDP(uaddr.Port)
|
||||||
|
return usocket
|
||||||
|
}
|
||||||
|
|
||||||
func parseBootnodes(ctx *cli.Context) ([]*enode.Node, error) {
|
func parseBootnodes(ctx *cli.Context) ([]*enode.Node, error) {
|
||||||
s := params.RinkebyBootnodes
|
s := params.RinkebyBootnodes
|
||||||
if ctx.IsSet(bootnodesFlag.Name) {
|
if ctx.IsSet(bootnodesFlag.Name) {
|
||||||
@@ -187,40 +306,3 @@ func parseBootnodes(ctx *cli.Context) ([]*enode.Node, error) {
|
|||||||
}
|
}
|
||||||
return nodes, nil
|
return nodes, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// startV4 starts an ephemeral discovery V4 node.
|
|
||||||
func startV4(ctx *cli.Context) *discover.UDPv4 {
|
|
||||||
socket, ln, cfg, err := listen()
|
|
||||||
if err != nil {
|
|
||||||
exit(err)
|
|
||||||
}
|
|
||||||
if commandHasFlag(ctx, bootnodesFlag) {
|
|
||||||
bn, err := parseBootnodes(ctx)
|
|
||||||
if err != nil {
|
|
||||||
exit(err)
|
|
||||||
}
|
|
||||||
cfg.Bootnodes = bn
|
|
||||||
}
|
|
||||||
disc, err := discover.ListenV4(socket, ln, cfg)
|
|
||||||
if err != nil {
|
|
||||||
exit(err)
|
|
||||||
}
|
|
||||||
return disc
|
|
||||||
}
|
|
||||||
|
|
||||||
func listen() (*net.UDPConn, *enode.LocalNode, discover.Config, error) {
|
|
||||||
var cfg discover.Config
|
|
||||||
cfg.PrivateKey, _ = crypto.GenerateKey()
|
|
||||||
db, _ := enode.OpenDB("")
|
|
||||||
ln := enode.NewLocalNode(db, cfg.PrivateKey)
|
|
||||||
|
|
||||||
socket, err := net.ListenUDP("udp4", &net.UDPAddr{IP: net.IP{0, 0, 0, 0}})
|
|
||||||
if err != nil {
|
|
||||||
db.Close()
|
|
||||||
return nil, nil, cfg, err
|
|
||||||
}
|
|
||||||
addr := socket.LocalAddr().(*net.UDPAddr)
|
|
||||||
ln.SetFallbackIP(net.IP{127, 0, 0, 1})
|
|
||||||
ln.SetFallbackUDP(addr.Port)
|
|
||||||
return socket, ln, cfg, nil
|
|
||||||
}
|
|
||||||
|
|||||||
123 cmd/devp2p/discv5cmd.go Normal file
@@ -0,0 +1,123 @@
|
|||||||
|
// Copyright 2019 The go-ethereum Authors
|
||||||
|
// This file is part of go-ethereum.
|
||||||
|
//
|
||||||
|
// go-ethereum is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
//
|
||||||
|
// go-ethereum is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU General Public License for more details.
|
||||||
|
//
|
||||||
|
// You should have received a copy of the GNU General Public License
|
||||||
|
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/ethereum/go-ethereum/common"
|
||||||
|
"github.com/ethereum/go-ethereum/p2p/discover"
|
||||||
|
"gopkg.in/urfave/cli.v1"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
discv5Command = cli.Command{
|
||||||
|
Name: "discv5",
|
||||||
|
Usage: "Node Discovery v5 tools",
|
||||||
|
Subcommands: []cli.Command{
|
||||||
|
discv5PingCommand,
|
||||||
|
discv5ResolveCommand,
|
||||||
|
discv5CrawlCommand,
|
||||||
|
discv5ListenCommand,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
discv5PingCommand = cli.Command{
|
||||||
|
Name: "ping",
|
||||||
|
Usage: "Sends ping to a node",
|
||||||
|
Action: discv5Ping,
|
||||||
|
}
|
||||||
|
discv5ResolveCommand = cli.Command{
|
||||||
|
Name: "resolve",
|
||||||
|
Usage: "Finds a node in the DHT",
|
||||||
|
Action: discv5Resolve,
|
||||||
|
Flags: []cli.Flag{bootnodesFlag},
|
||||||
|
}
|
||||||
|
discv5CrawlCommand = cli.Command{
|
||||||
|
Name: "crawl",
|
||||||
|
Usage: "Updates a nodes.json file with random nodes found in the DHT",
|
||||||
|
Action: discv5Crawl,
|
||||||
|
Flags: []cli.Flag{bootnodesFlag, crawlTimeoutFlag},
|
||||||
|
}
|
||||||
|
discv5ListenCommand = cli.Command{
|
||||||
|
Name: "listen",
|
||||||
|
Usage: "Runs a node",
|
||||||
|
Action: discv5Listen,
|
||||||
|
Flags: []cli.Flag{
|
||||||
|
bootnodesFlag,
|
||||||
|
nodekeyFlag,
|
||||||
|
nodedbFlag,
|
||||||
|
listenAddrFlag,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
)
|
||||||
|
|
||||||
|
func discv5Ping(ctx *cli.Context) error {
|
||||||
|
n := getNodeArg(ctx)
|
||||||
|
disc := startV5(ctx)
|
||||||
|
defer disc.Close()
|
||||||
|
|
||||||
|
fmt.Println(disc.Ping(n))
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func discv5Resolve(ctx *cli.Context) error {
|
||||||
|
n := getNodeArg(ctx)
|
||||||
|
disc := startV5(ctx)
|
||||||
|
defer disc.Close()
|
||||||
|
|
||||||
|
fmt.Println(disc.Resolve(n))
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func discv5Crawl(ctx *cli.Context) error {
|
||||||
|
if ctx.NArg() < 1 {
|
||||||
|
return fmt.Errorf("need nodes file as argument")
|
||||||
|
}
|
||||||
|
nodesFile := ctx.Args().First()
|
||||||
|
var inputSet nodeSet
|
||||||
|
if common.FileExist(nodesFile) {
|
||||||
|
inputSet = loadNodesJSON(nodesFile)
|
||||||
|
}
|
||||||
|
|
||||||
|
disc := startV5(ctx)
|
||||||
|
defer disc.Close()
|
||||||
|
c := newCrawler(inputSet, disc, disc.RandomNodes())
|
||||||
|
c.revalidateInterval = 10 * time.Minute
|
||||||
|
output := c.run(ctx.Duration(crawlTimeoutFlag.Name))
|
||||||
|
writeNodesJSON(nodesFile, output)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func discv5Listen(ctx *cli.Context) error {
|
||||||
|
disc := startV5(ctx)
|
||||||
|
defer disc.Close()
|
||||||
|
|
||||||
|
fmt.Println(disc.Self())
|
||||||
|
select {}
|
||||||
|
}
|
||||||
|
|
||||||
|
// startV5 starts an ephemeral discovery v5 node.
|
||||||
|
func startV5(ctx *cli.Context) *discover.UDPv5 {
|
||||||
|
ln, config := makeDiscoveryConfig(ctx)
|
||||||
|
socket := listen(ln, ctx.String(listenAddrFlag.Name))
|
||||||
|
disc, err := discover.ListenV5(socket, ln, config)
|
||||||
|
if err != nil {
|
||||||
|
exit(err)
|
||||||
|
}
|
||||||
|
return disc
|
||||||
|
}
|
||||||
@@ -130,9 +130,9 @@ func (c *cloudflareClient) uploadRecords(name string, records map[string]string)
	if !exists {
		// Entry is unknown, push a new one to Cloudflare.
		log.Info(fmt.Sprintf("Creating %s = %q", path, val))
-		ttl := 1
+		ttl := rootTTL
		if path != name {
-			ttl = 2147483647 // Max TTL permitted by Cloudflare
+			ttl = treeNodeTTL // Max TTL permitted by Cloudflare
		}
		_, err = c.CreateDNSRecord(c.zoneID, cloudflare.DNSRecord{Type: "TXT", Name: path, Content: val, TTL: ttl})
	} else if old.Content != val {
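The hard-coded TTLs give way to named constants that the new Route53 deployer shares. Their definitions are not part of this excerpt; judging purely from the literals being replaced, they are presumably along these lines:

```go
const (
	rootTTL     = 1          // the signed enrtree root changes on every update, so keep its TTL minimal
	treeNodeTTL = 2147483647 // branch/leaf records are immutable; max TTL permitted by Cloudflare
)
```
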
|||||||
322 cmd/devp2p/dns_route53.go Normal file
@@ -0,0 +1,322 @@
|
|||||||
|
// Copyright 2019 The go-ethereum Authors
|
||||||
|
// This file is part of go-ethereum.
|
||||||
|
//
|
||||||
|
// go-ethereum is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
//
|
||||||
|
// go-ethereum is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU General Public License for more details.
|
||||||
|
//
|
||||||
|
// You should have received a copy of the GNU General Public License
|
||||||
|
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"sort"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/aws/aws-sdk-go/aws"
|
||||||
|
"github.com/aws/aws-sdk-go/aws/credentials"
|
||||||
|
"github.com/aws/aws-sdk-go/aws/session"
|
||||||
|
"github.com/aws/aws-sdk-go/service/route53"
|
||||||
|
"github.com/ethereum/go-ethereum/log"
|
||||||
|
"github.com/ethereum/go-ethereum/p2p/dnsdisc"
|
||||||
|
"gopkg.in/urfave/cli.v1"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
// Route53 limits change sets to 32k of 'RDATA size'. Change sets are also limited to
|
||||||
|
// 1000 items. UPSERTs count double.
|
||||||
|
// https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/DNSLimitations.html#limits-api-requests-changeresourcerecordsets
|
||||||
|
route53ChangeSizeLimit = 32000
|
||||||
|
route53ChangeCountLimit = 1000
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
route53AccessKeyFlag = cli.StringFlag{
|
||||||
|
Name: "access-key-id",
|
||||||
|
Usage: "AWS Access Key ID",
|
||||||
|
EnvVar: "AWS_ACCESS_KEY_ID",
|
||||||
|
}
|
||||||
|
route53AccessSecretFlag = cli.StringFlag{
|
||||||
|
Name: "access-key-secret",
|
||||||
|
Usage: "AWS Access Key Secret",
|
||||||
|
EnvVar: "AWS_SECRET_ACCESS_KEY",
|
||||||
|
}
|
||||||
|
route53ZoneIDFlag = cli.StringFlag{
|
||||||
|
Name: "zone-id",
|
||||||
|
Usage: "Route53 Zone ID",
|
||||||
|
}
|
||||||
|
)
|
||||||
|
|
||||||
|
type route53Client struct {
|
||||||
|
api *route53.Route53
|
||||||
|
zoneID string
|
||||||
|
}
|
||||||
|
|
||||||
|
type recordSet struct {
|
||||||
|
values []string
|
||||||
|
ttl int64
|
||||||
|
}
|
||||||
|
|
||||||
|
// newRoute53Client sets up a Route53 API client from command line flags.
|
||||||
|
func newRoute53Client(ctx *cli.Context) *route53Client {
|
||||||
|
akey := ctx.String(route53AccessKeyFlag.Name)
|
||||||
|
asec := ctx.String(route53AccessSecretFlag.Name)
|
||||||
|
if akey == "" || asec == "" {
|
||||||
|
exit(fmt.Errorf("need Route53 Access Key ID and secret proceed"))
|
||||||
|
}
|
||||||
|
config := &aws.Config{Credentials: credentials.NewStaticCredentials(akey, asec, "")}
|
||||||
|
session, err := session.NewSession(config)
|
||||||
|
if err != nil {
|
||||||
|
exit(fmt.Errorf("can't create AWS session: %v", err))
|
||||||
|
}
|
||||||
|
return &route53Client{
|
||||||
|
api: route53.New(session),
|
||||||
|
zoneID: ctx.String(route53ZoneIDFlag.Name),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// deploy uploads the given tree to Route53.
|
||||||
|
func (c *route53Client) deploy(name string, t *dnsdisc.Tree) error {
|
||||||
|
if err := c.checkZone(name); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Compute DNS changes.
|
||||||
|
existing, err := c.collectRecords(name)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
log.Info(fmt.Sprintf("Found %d TXT records", len(existing)))
|
||||||
|
|
||||||
|
records := t.ToTXT(name)
|
||||||
|
changes := c.computeChanges(name, records, existing)
|
||||||
|
if len(changes) == 0 {
|
||||||
|
log.Info("No DNS changes needed")
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Submit change batches.
|
||||||
|
batches := splitChanges(changes, route53ChangeSizeLimit, route53ChangeCountLimit)
|
||||||
|
for i, changes := range batches {
|
||||||
|
log.Info(fmt.Sprintf("Submitting %d changes to Route53", len(changes)))
|
||||||
|
batch := new(route53.ChangeBatch)
|
||||||
|
batch.SetChanges(changes)
|
||||||
|
batch.SetComment(fmt.Sprintf("enrtree update %d/%d of %s at seq %d", i+1, len(batches), name, t.Seq()))
|
||||||
|
req := &route53.ChangeResourceRecordSetsInput{HostedZoneId: &c.zoneID, ChangeBatch: batch}
|
||||||
|
resp, err := c.api.ChangeResourceRecordSets(req)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
log.Info(fmt.Sprintf("Waiting for change request %s", *resp.ChangeInfo.Id))
|
||||||
|
wreq := &route53.GetChangeInput{Id: resp.ChangeInfo.Id}
|
||||||
|
if err := c.api.WaitUntilResourceRecordSetsChanged(wreq); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// checkZone verifies zone information for the given domain.
|
||||||
|
func (c *route53Client) checkZone(name string) (err error) {
|
||||||
|
if c.zoneID == "" {
|
||||||
|
c.zoneID, err = c.findZoneID(name)
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// findZoneID searches for the Zone ID containing the given domain.
|
||||||
|
func (c *route53Client) findZoneID(name string) (string, error) {
|
||||||
|
log.Info(fmt.Sprintf("Finding Route53 Zone ID for %s", name))
|
||||||
|
var req route53.ListHostedZonesByNameInput
|
||||||
|
for {
|
||||||
|
resp, err := c.api.ListHostedZonesByName(&req)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
for _, zone := range resp.HostedZones {
|
||||||
|
if isSubdomain(name, *zone.Name) {
|
||||||
|
return *zone.Id, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if !*resp.IsTruncated {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
req.DNSName = resp.NextDNSName
|
||||||
|
req.HostedZoneId = resp.NextHostedZoneId
|
||||||
|
}
|
||||||
|
return "", errors.New("can't find zone ID for " + name)
|
||||||
|
}
|
||||||
|
|
||||||
|
// computeChanges creates DNS changes for the given record.
|
||||||
|
func (c *route53Client) computeChanges(name string, records map[string]string, existing map[string]recordSet) []*route53.Change {
|
||||||
|
// Convert all names to lowercase.
|
||||||
|
lrecords := make(map[string]string, len(records))
|
||||||
|
for name, r := range records {
|
||||||
|
lrecords[strings.ToLower(name)] = r
|
||||||
|
}
|
||||||
|
records = lrecords
|
||||||
|
|
||||||
|
var changes []*route53.Change
|
||||||
|
for path, val := range records {
|
||||||
|
ttl := int64(rootTTL)
|
||||||
|
if path != name {
|
||||||
|
ttl = int64(treeNodeTTL)
|
||||||
|
}
|
||||||
|
|
||||||
|
prevRecords, exists := existing[path]
|
||||||
|
prevValue := strings.Join(prevRecords.values, "")
|
||||||
|
if !exists {
|
||||||
|
// Entry is unknown, push a new one
|
||||||
|
log.Info(fmt.Sprintf("Creating %s = %q", path, val))
|
||||||
|
changes = append(changes, newTXTChange("CREATE", path, ttl, splitTXT(val)))
|
||||||
|
} else if prevValue != val || prevRecords.ttl != ttl {
|
||||||
|
// Entry already exists, only change its content.
|
||||||
|
log.Info(fmt.Sprintf("Updating %s from %q to %q", path, prevValue, val))
|
||||||
|
changes = append(changes, newTXTChange("UPSERT", path, ttl, splitTXT(val)))
|
||||||
|
} else {
|
||||||
|
log.Info(fmt.Sprintf("Skipping %s = %q", path, val))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Iterate over the old records and delete anything stale.
|
||||||
|
for path, set := range existing {
|
||||||
|
if _, ok := records[path]; ok {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
// Stale entry, nuke it.
|
||||||
|
log.Info(fmt.Sprintf("Deleting %s = %q", path, strings.Join(set.values, "")))
|
||||||
|
changes = append(changes, newTXTChange("DELETE", path, set.ttl, set.values...))
|
||||||
|
}
|
||||||
|
|
||||||
|
sortChanges(changes)
|
||||||
|
return changes
|
||||||
|
}
|
||||||
|
|
||||||
|
// sortChanges ensures DNS changes are in leaf-added -> root-changed -> leaf-deleted order.
|
||||||
|
func sortChanges(changes []*route53.Change) {
|
||||||
|
score := map[string]int{"CREATE": 1, "UPSERT": 2, "DELETE": 3}
|
||||||
|
sort.Slice(changes, func(i, j int) bool {
|
||||||
|
if *changes[i].Action == *changes[j].Action {
|
||||||
|
return *changes[i].ResourceRecordSet.Name < *changes[j].ResourceRecordSet.Name
|
||||||
|
}
|
||||||
|
return score[*changes[i].Action] < score[*changes[j].Action]
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// splitChanges splits up DNS changes such that each change batch
|
||||||
|
// is smaller than the given RDATA limit.
|
||||||
|
func splitChanges(changes []*route53.Change, sizeLimit, countLimit int) [][]*route53.Change {
|
||||||
|
var (
|
||||||
|
batches [][]*route53.Change
|
||||||
|
batchSize int
|
||||||
|
batchCount int
|
||||||
|
)
|
||||||
|
for _, ch := range changes {
|
||||||
|
// Start new batch if this change pushes the current one over the limit.
|
||||||
|
count := changeCount(ch)
|
||||||
|
size := changeSize(ch) * count
|
||||||
|
overSize := batchSize+size > sizeLimit
|
||||||
|
overCount := batchCount+count > countLimit
|
||||||
|
if len(batches) == 0 || overSize || overCount {
|
||||||
|
batches = append(batches, nil)
|
||||||
|
batchSize = 0
|
||||||
|
batchCount = 0
|
||||||
|
}
|
||||||
|
batches[len(batches)-1] = append(batches[len(batches)-1], ch)
|
||||||
|
batchSize += size
|
||||||
|
batchCount += count
|
||||||
|
}
|
||||||
|
return batches
|
||||||
|
}
|
||||||
|
|
||||||
|
// changeSize returns the RDATA size of a DNS change.
|
||||||
|
func changeSize(ch *route53.Change) int {
|
||||||
|
size := 0
|
||||||
|
for _, rr := range ch.ResourceRecordSet.ResourceRecords {
|
||||||
|
if rr.Value != nil {
|
||||||
|
size += len(*rr.Value)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return size
|
||||||
|
}
|
||||||
|
|
||||||
|
func changeCount(ch *route53.Change) int {
|
||||||
|
if *ch.Action == "UPSERT" {
|
||||||
|
return 2
|
||||||
|
}
|
||||||
|
return 1
|
||||||
|
}
|
||||||
|
|
||||||
|
// collectRecords collects all TXT records below the given name.
|
||||||
|
func (c *route53Client) collectRecords(name string) (map[string]recordSet, error) {
|
||||||
|
log.Info(fmt.Sprintf("Retrieving existing TXT records on %s (%s)", name, c.zoneID))
|
||||||
|
var req route53.ListResourceRecordSetsInput
|
||||||
|
req.SetHostedZoneId(c.zoneID)
|
||||||
|
existing := make(map[string]recordSet)
|
||||||
|
err := c.api.ListResourceRecordSetsPages(&req, func(resp *route53.ListResourceRecordSetsOutput, last bool) bool {
|
||||||
|
for _, set := range resp.ResourceRecordSets {
|
||||||
|
if !isSubdomain(*set.Name, name) || *set.Type != "TXT" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
s := recordSet{ttl: *set.TTL}
|
||||||
|
for _, rec := range set.ResourceRecords {
|
||||||
|
s.values = append(s.values, *rec.Value)
|
||||||
|
}
|
||||||
|
name := strings.TrimSuffix(*set.Name, ".")
|
||||||
|
existing[name] = s
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
})
|
||||||
|
return existing, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// newTXTChange creates a change to a TXT record.
|
||||||
|
func newTXTChange(action, name string, ttl int64, values ...string) *route53.Change {
|
||||||
|
var c route53.Change
|
||||||
|
var r route53.ResourceRecordSet
|
||||||
|
var rrs []*route53.ResourceRecord
|
||||||
|
for _, val := range values {
|
||||||
|
rr := new(route53.ResourceRecord)
|
||||||
|
rr.SetValue(val)
|
||||||
|
rrs = append(rrs, rr)
|
||||||
|
}
|
||||||
|
r.SetType("TXT")
|
||||||
|
r.SetName(name)
|
||||||
|
r.SetTTL(ttl)
|
||||||
|
r.SetResourceRecords(rrs)
|
||||||
|
c.SetAction(action)
|
||||||
|
c.SetResourceRecordSet(&r)
|
||||||
|
return &c
|
||||||
|
}
|
||||||
|
|
||||||
|
// isSubdomain returns true if name is a subdomain of domain.
|
||||||
|
func isSubdomain(name, domain string) bool {
|
||||||
|
domain = strings.TrimSuffix(domain, ".")
|
||||||
|
name = strings.TrimSuffix(name, ".")
|
||||||
|
return strings.HasSuffix("."+name, "."+domain)
|
||||||
|
}
|
||||||
|
|
||||||
|
// splitTXT splits value into a list of quoted 255-character strings.
|
||||||
|
func splitTXT(value string) string {
|
||||||
|
var result strings.Builder
|
||||||
|
for len(value) > 0 {
|
||||||
|
rlen := len(value)
|
||||||
|
if rlen > 253 {
|
||||||
|
rlen = 253
|
||||||
|
}
|
||||||
|
result.WriteString(strconv.Quote(value[:rlen]))
|
||||||
|
value = value[rlen:]
|
||||||
|
}
|
||||||
|
return result.String()
|
||||||
|
}
|
||||||
166 cmd/devp2p/dns_route53_test.go Normal file
@@ -0,0 +1,166 @@
// Copyright 2020 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.

package main

import (
    "reflect"
    "testing"

    "github.com/aws/aws-sdk-go/service/route53"
)

// This test checks that computeChanges/splitChanges create DNS changes in
// leaf-added -> root-changed -> leaf-deleted order.
func TestRoute53ChangeSort(t *testing.T) {
    testTree0 := map[string]recordSet{
        "2kfjogvxdqtxxugbh7gs7naaai.n": {ttl: 3333, values: []string{
            `"enr:-HW4QO1ml1DdXLeZLsUxewnthhUy8eROqkDyoMTyavfks9JlYQIlMFEUoM78PovJDPQrAkrb3LRJ-""vtrymDguKCOIAWAgmlkgnY0iXNlY3AyNTZrMaEDffaGfJzgGhUif1JqFruZlYmA31HzathLSWxfbq_QoQ4"`,
        }},
        "fdxn3sn67na5dka4j2gok7bvqi.n": {ttl: treeNodeTTL, values: []string{`"enrtree-branch:"`}},
        "n": {ttl: rootTTL, values: []string{`"enrtree-root:v1 e=2KFJOGVXDQTXXUGBH7GS7NAAAI l=FDXN3SN67NA5DKA4J2GOK7BVQI seq=0 sig=v_-J_q_9ICQg5ztExFvLQhDBGMb0lZPJLhe3ts9LAcgqhOhtT3YFJsl8BWNDSwGtamUdR-9xl88_w-X42SVpjwE"`}},
    }

    testTree1 := map[string]string{
        "n":                            "enrtree-root:v1 e=JWXYDBPXYWG6FX3GMDIBFA6CJ4 l=C7HRFPF3BLGF3YR4DY5KX3SMBE seq=1 sig=o908WmNp7LibOfPsr4btQwatZJ5URBr2ZAuxvK4UWHlsB9sUOTJQaGAlLPVAhM__XJesCHxLISo94z5Z2a463gA",
        "C7HRFPF3BLGF3YR4DY5KX3SMBE.n": "enrtree://AM5FCQLWIZX2QFPNJAP7VUERCCRNGRHWZG3YYHIUV7BVDQ5FDPRT2@morenodes.example.org",
        "JWXYDBPXYWG6FX3GMDIBFA6CJ4.n": "enrtree-branch:2XS2367YHAXJFGLZHVAWLQD4ZY,H4FHT4B454P6UXFD7JCYQ5PWDY,MHTDO6TMUBRIA2XWG5LUDACK24",
        "2XS2367YHAXJFGLZHVAWLQD4ZY.n": "enr:-HW4QOFzoVLaFJnNhbgMoDXPnOvcdVuj7pDpqRvh6BRDO68aVi5ZcjB3vzQRZH2IcLBGHzo8uUN3snqmgTiE56CH3AMBgmlkgnY0iXNlY3AyNTZrMaECC2_24YYkYHEgdzxlSNKQEnHhuNAbNlMlWJxrJxbAFvA",
        "H4FHT4B454P6UXFD7JCYQ5PWDY.n": "enr:-HW4QAggRauloj2SDLtIHN1XBkvhFZ1vtf1raYQp9TBW2RD5EEawDzbtSmlXUfnaHcvwOizhVYLtr7e6vw7NAf6mTuoCgmlkgnY0iXNlY3AyNTZrMaECjrXI8TLNXU0f8cthpAMxEshUyQlK-AM0PW2wfrnacNI",
        "MHTDO6TMUBRIA2XWG5LUDACK24.n": "enr:-HW4QLAYqmrwllBEnzWWs7I5Ev2IAs7x_dZlbYdRdMUx5EyKHDXp7AV5CkuPGUPdvbv1_Ms1CPfhcGCvSElSosZmyoqAgmlkgnY0iXNlY3AyNTZrMaECriawHKWdDRk2xeZkrOXBQ0dfMFLHY4eENZwdufn1S1o",
    }

    wantChanges := []*route53.Change{
        {
            Action: sp("CREATE"),
            ResourceRecordSet: &route53.ResourceRecordSet{
                Name: sp("2xs2367yhaxjfglzhvawlqd4zy.n"),
                ResourceRecords: []*route53.ResourceRecord{{
                    Value: sp(`"enr:-HW4QOFzoVLaFJnNhbgMoDXPnOvcdVuj7pDpqRvh6BRDO68aVi5ZcjB3vzQRZH2IcLBGHzo8uUN3snqmgTiE56CH3AMBgmlkgnY0iXNlY3AyNTZrMaECC2_24YYkYHEgdzxlSNKQEnHhuNAbNlMlWJxrJxbAFvA"`),
                }},
                TTL:  ip(treeNodeTTL),
                Type: sp("TXT"),
            },
        },
        {
            Action: sp("CREATE"),
            ResourceRecordSet: &route53.ResourceRecordSet{
                Name: sp("c7hrfpf3blgf3yr4dy5kx3smbe.n"),
                ResourceRecords: []*route53.ResourceRecord{{
                    Value: sp(`"enrtree://AM5FCQLWIZX2QFPNJAP7VUERCCRNGRHWZG3YYHIUV7BVDQ5FDPRT2@morenodes.example.org"`),
                }},
                TTL:  ip(treeNodeTTL),
                Type: sp("TXT"),
            },
        },
        {
            Action: sp("CREATE"),
            ResourceRecordSet: &route53.ResourceRecordSet{
                Name: sp("h4fht4b454p6uxfd7jcyq5pwdy.n"),
                ResourceRecords: []*route53.ResourceRecord{{
                    Value: sp(`"enr:-HW4QAggRauloj2SDLtIHN1XBkvhFZ1vtf1raYQp9TBW2RD5EEawDzbtSmlXUfnaHcvwOizhVYLtr7e6vw7NAf6mTuoCgmlkgnY0iXNlY3AyNTZrMaECjrXI8TLNXU0f8cthpAMxEshUyQlK-AM0PW2wfrnacNI"`),
                }},
                TTL:  ip(treeNodeTTL),
                Type: sp("TXT"),
            },
        },
        {
            Action: sp("CREATE"),
            ResourceRecordSet: &route53.ResourceRecordSet{
                Name: sp("jwxydbpxywg6fx3gmdibfa6cj4.n"),
                ResourceRecords: []*route53.ResourceRecord{{
                    Value: sp(`"enrtree-branch:2XS2367YHAXJFGLZHVAWLQD4ZY,H4FHT4B454P6UXFD7JCYQ5PWDY,MHTDO6TMUBRIA2XWG5LUDACK24"`),
                }},
                TTL:  ip(treeNodeTTL),
                Type: sp("TXT"),
            },
        },
        {
            Action: sp("CREATE"),
            ResourceRecordSet: &route53.ResourceRecordSet{
                Name: sp("mhtdo6tmubria2xwg5ludack24.n"),
                ResourceRecords: []*route53.ResourceRecord{{
                    Value: sp(`"enr:-HW4QLAYqmrwllBEnzWWs7I5Ev2IAs7x_dZlbYdRdMUx5EyKHDXp7AV5CkuPGUPdvbv1_Ms1CPfhcGCvSElSosZmyoqAgmlkgnY0iXNlY3AyNTZrMaECriawHKWdDRk2xeZkrOXBQ0dfMFLHY4eENZwdufn1S1o"`),
                }},
                TTL:  ip(treeNodeTTL),
                Type: sp("TXT"),
            },
        },
        {
            Action: sp("UPSERT"),
            ResourceRecordSet: &route53.ResourceRecordSet{
                Name: sp("n"),
                ResourceRecords: []*route53.ResourceRecord{{
                    Value: sp(`"enrtree-root:v1 e=JWXYDBPXYWG6FX3GMDIBFA6CJ4 l=C7HRFPF3BLGF3YR4DY5KX3SMBE seq=1 sig=o908WmNp7LibOfPsr4btQwatZJ5URBr2ZAuxvK4UWHlsB9sUOTJQaGAlLPVAhM__XJesCHxLISo94z5Z2a463gA"`),
                }},
                TTL:  ip(rootTTL),
                Type: sp("TXT"),
            },
        },
        {
            Action: sp("DELETE"),
            ResourceRecordSet: &route53.ResourceRecordSet{
                Name: sp("2kfjogvxdqtxxugbh7gs7naaai.n"),
                ResourceRecords: []*route53.ResourceRecord{
                    {Value: sp(`"enr:-HW4QO1ml1DdXLeZLsUxewnthhUy8eROqkDyoMTyavfks9JlYQIlMFEUoM78PovJDPQrAkrb3LRJ-""vtrymDguKCOIAWAgmlkgnY0iXNlY3AyNTZrMaEDffaGfJzgGhUif1JqFruZlYmA31HzathLSWxfbq_QoQ4"`)},
                },
                TTL:  ip(3333),
                Type: sp("TXT"),
            },
        },
        {
            Action: sp("DELETE"),
            ResourceRecordSet: &route53.ResourceRecordSet{
                Name: sp("fdxn3sn67na5dka4j2gok7bvqi.n"),
                ResourceRecords: []*route53.ResourceRecord{{
                    Value: sp(`"enrtree-branch:"`),
                }},
                TTL:  ip(treeNodeTTL),
                Type: sp("TXT"),
            },
        },
    }

    var client route53Client
    changes := client.computeChanges("n", testTree1, testTree0)
    if !reflect.DeepEqual(changes, wantChanges) {
        t.Fatalf("wrong changes (got %d, want %d)", len(changes), len(wantChanges))
    }

    // Check splitting according to size.
    wantSplit := [][]*route53.Change{
        wantChanges[:4],
        wantChanges[4:6],
        wantChanges[6:],
    }
    split := splitChanges(changes, 600, 4000)
    if !reflect.DeepEqual(split, wantSplit) {
        t.Fatalf("wrong split batches: got %d, want %d", len(split), len(wantSplit))
    }

    // Check splitting according to count.
    wantSplit = [][]*route53.Change{
        wantChanges[:5],
        wantChanges[5:],
    }
    split = splitChanges(changes, 10000, 6)
    if !reflect.DeepEqual(split, wantSplit) {
        t.Fatalf("wrong split batches: got %d, want %d", len(split), len(wantSplit))
    }
}

func sp(s string) *string { return &s }
func ip(i int64) *int64   { return &i }
@@ -27,7 +27,7 @@ import (
 	"github.com/ethereum/go-ethereum/accounts/keystore"
 	"github.com/ethereum/go-ethereum/common"
-	"github.com/ethereum/go-ethereum/console"
+	"github.com/ethereum/go-ethereum/console/prompt"
 	"github.com/ethereum/go-ethereum/p2p/dnsdisc"
 	"github.com/ethereum/go-ethereum/p2p/enode"
 	cli "gopkg.in/urfave/cli.v1"
@@ -42,6 +42,7 @@ var (
 			dnsSignCommand,
 			dnsTXTCommand,
 			dnsCloudflareCommand,
+			dnsRoute53Command,
 		},
 	}
 	dnsSyncCommand = cli.Command{
@@ -66,11 +67,18 @@ var (
 	}
 	dnsCloudflareCommand = cli.Command{
 		Name:      "to-cloudflare",
-		Usage:     "Deploy DNS TXT records to cloudflare",
+		Usage:     "Deploy DNS TXT records to CloudFlare",
 		ArgsUsage: "<tree-directory>",
 		Action:    dnsToCloudflare,
 		Flags:     []cli.Flag{cloudflareTokenFlag, cloudflareZoneIDFlag},
 	}
+	dnsRoute53Command = cli.Command{
+		Name:      "to-route53",
+		Usage:     "Deploy DNS TXT records to Amazon Route53",
+		ArgsUsage: "<tree-directory>",
+		Action:    dnsToRoute53,
+		Flags:     []cli.Flag{route53AccessKeyFlag, route53AccessSecretFlag, route53ZoneIDFlag},
+	}
 )
 
 var (
@@ -88,6 +96,11 @@ var (
 	}
 )
 
+const (
+	rootTTL     = 30 * 60              // 30 min
+	treeNodeTTL = 4 * 7 * 24 * 60 * 60 // 4 weeks
+)
+
 // dnsSync performs dnsSyncCommand.
 func dnsSync(ctx *cli.Context) error {
 	var (
@@ -194,13 +207,26 @@ func dnsToCloudflare(ctx *cli.Context) error {
 	return client.deploy(domain, t)
 }
 
+// dnsToRoute53 performs dnsRoute53Command.
+func dnsToRoute53(ctx *cli.Context) error {
+	if ctx.NArg() < 1 {
+		return fmt.Errorf("need tree definition directory as argument")
+	}
+	domain, t, err := loadTreeDefinitionForExport(ctx.Args().Get(0))
+	if err != nil {
+		return err
+	}
+	client := newRoute53Client(ctx)
+	return client.deploy(domain, t)
+}
+
 // loadSigningKey loads a private key in Ethereum keystore format.
 func loadSigningKey(keyfile string) *ecdsa.PrivateKey {
 	keyjson, err := ioutil.ReadFile(keyfile)
 	if err != nil {
 		exit(fmt.Errorf("failed to read the keyfile at '%s': %v", keyfile, err))
 	}
-	password, _ := console.Stdin.PromptPassword("Please enter the password for '" + keyfile + "': ")
+	password, _ := prompt.Stdin.PromptPassword("Please enter the password for '" + keyfile + "': ")
 	key, err := keystore.DecryptKey(keyjson, password)
 	if err != nil {
 		exit(fmt.Errorf("error decrypting key: %v", err))
@@ -214,8 +240,7 @@ func dnsClient(ctx *cli.Context) *dnsdisc.Client {
 	if commandHasFlag(ctx, dnsTimeoutFlag) {
 		cfg.Timeout = ctx.Duration(dnsTimeoutFlag.Name)
 	}
-	c, _ := dnsdisc.NewClient(cfg) // cannot fail because no URLs given
-	return c
+	return dnsdisc.NewClient(cfg)
 }
 
 // There are two file formats for DNS node trees on disk:
|||||||
467
cmd/devp2p/internal/v4test/discv4tests.go
Normal file
467
cmd/devp2p/internal/v4test/discv4tests.go
Normal file
@@ -0,0 +1,467 @@
|
|||||||
|
// Copyright 2020 The go-ethereum Authors
|
||||||
|
// This file is part of go-ethereum.
|
||||||
|
//
|
||||||
|
// go-ethereum is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
//
|
||||||
|
// go-ethereum is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU General Public License for more details.
|
||||||
|
//
|
||||||
|
// You should have received a copy of the GNU General Public License
|
||||||
|
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
package v4test
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"crypto/rand"
|
||||||
|
"fmt"
|
||||||
|
"net"
|
||||||
|
"reflect"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/ethereum/go-ethereum/crypto"
|
||||||
|
"github.com/ethereum/go-ethereum/internal/utesting"
|
||||||
|
"github.com/ethereum/go-ethereum/p2p/discover/v4wire"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
expiration = 20 * time.Second
|
||||||
|
wrongPacket = 66
|
||||||
|
macSize = 256 / 8
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
// Remote node under test
|
||||||
|
Remote string
|
||||||
|
// IP where the first tester is listening, port will be assigned
|
||||||
|
Listen1 string = "127.0.0.1"
|
||||||
|
// IP where the second tester is listening, port will be assigned
|
||||||
|
// Before running the test, you may have to `sudo ifconfig lo0 add 127.0.0.2` (on MacOS at least)
|
||||||
|
Listen2 string = "127.0.0.2"
|
||||||
|
)
|
||||||
|
|
||||||
|
type pingWithJunk struct {
|
||||||
|
Version uint
|
||||||
|
From, To v4wire.Endpoint
|
||||||
|
Expiration uint64
|
||||||
|
JunkData1 uint
|
||||||
|
JunkData2 []byte
|
||||||
|
}
|
||||||
|
|
||||||
|
func (req *pingWithJunk) Name() string { return "PING/v4" }
|
||||||
|
func (req *pingWithJunk) Kind() byte { return v4wire.PingPacket }
|
||||||
|
|
||||||
|
type pingWrongType struct {
|
||||||
|
Version uint
|
||||||
|
From, To v4wire.Endpoint
|
||||||
|
Expiration uint64
|
||||||
|
}
|
||||||
|
|
||||||
|
func (req *pingWrongType) Name() string { return "WRONG/v4" }
|
||||||
|
func (req *pingWrongType) Kind() byte { return wrongPacket }
|
||||||
|
|
||||||
|
func futureExpiration() uint64 {
|
||||||
|
return uint64(time.Now().Add(expiration).Unix())
|
||||||
|
}
|
||||||
|
|
||||||
|
// This test just sends a PING packet and expects a response.
|
||||||
|
func BasicPing(t *utesting.T) {
|
||||||
|
te := newTestEnv(Remote, Listen1, Listen2)
|
||||||
|
defer te.close()
|
||||||
|
|
||||||
|
pingHash := te.send(te.l1, &v4wire.Ping{
|
||||||
|
Version: 4,
|
||||||
|
From: te.localEndpoint(te.l1),
|
||||||
|
To: te.remoteEndpoint(),
|
||||||
|
Expiration: futureExpiration(),
|
||||||
|
})
|
||||||
|
|
||||||
|
reply, _, _ := te.read(te.l1)
|
||||||
|
if err := te.checkPong(reply, pingHash); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// checkPong verifies that reply is a valid PONG matching the given ping hash.
|
||||||
|
func (te *testenv) checkPong(reply v4wire.Packet, pingHash []byte) error {
|
||||||
|
if reply == nil || reply.Kind() != v4wire.PongPacket {
|
||||||
|
return fmt.Errorf("expected PONG reply, got %v", reply)
|
||||||
|
}
|
||||||
|
pong := reply.(*v4wire.Pong)
|
||||||
|
if !bytes.Equal(pong.ReplyTok, pingHash) {
|
||||||
|
return fmt.Errorf("PONG reply token mismatch: got %x, want %x", pong.ReplyTok, pingHash)
|
||||||
|
}
|
||||||
|
wantEndpoint := te.localEndpoint(te.l1)
|
||||||
|
if !reflect.DeepEqual(pong.To, wantEndpoint) {
|
||||||
|
return fmt.Errorf("PONG 'to' endpoint mismatch: got %+v, want %+v", pong.To, wantEndpoint)
|
||||||
|
}
|
||||||
|
if v4wire.Expired(pong.Expiration) {
|
||||||
|
return fmt.Errorf("PONG is expired (%v)", pong.Expiration)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// This test sends a PING packet with wrong 'to' field and expects a PONG response.
|
||||||
|
func PingWrongTo(t *utesting.T) {
|
||||||
|
te := newTestEnv(Remote, Listen1, Listen2)
|
||||||
|
defer te.close()
|
||||||
|
|
||||||
|
wrongEndpoint := v4wire.Endpoint{IP: net.ParseIP("192.0.2.0")}
|
||||||
|
pingHash := te.send(te.l1, &v4wire.Ping{
|
||||||
|
Version: 4,
|
||||||
|
From: te.localEndpoint(te.l1),
|
||||||
|
To: wrongEndpoint,
|
||||||
|
Expiration: futureExpiration(),
|
||||||
|
})
|
||||||
|
|
||||||
|
reply, _, _ := te.read(te.l1)
|
||||||
|
if err := te.checkPong(reply, pingHash); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// This test sends a PING packet with wrong 'from' field and expects a PONG response.
|
||||||
|
func PingWrongFrom(t *utesting.T) {
|
||||||
|
te := newTestEnv(Remote, Listen1, Listen2)
|
||||||
|
defer te.close()
|
||||||
|
|
||||||
|
wrongEndpoint := v4wire.Endpoint{IP: net.ParseIP("192.0.2.0")}
|
||||||
|
pingHash := te.send(te.l1, &v4wire.Ping{
|
||||||
|
Version: 4,
|
||||||
|
From: wrongEndpoint,
|
||||||
|
To: te.remoteEndpoint(),
|
||||||
|
Expiration: futureExpiration(),
|
||||||
|
})
|
||||||
|
|
||||||
|
reply, _, _ := te.read(te.l1)
|
||||||
|
if err := te.checkPong(reply, pingHash); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// This test sends a PING packet with additional data at the end and expects a PONG
|
||||||
|
// response. The remote node should respond because EIP-8 mandates ignoring additional
|
||||||
|
// trailing data.
|
||||||
|
func PingExtraData(t *utesting.T) {
|
||||||
|
te := newTestEnv(Remote, Listen1, Listen2)
|
||||||
|
defer te.close()
|
||||||
|
|
||||||
|
pingHash := te.send(te.l1, &pingWithJunk{
|
||||||
|
Version: 4,
|
||||||
|
From: te.localEndpoint(te.l1),
|
||||||
|
To: te.remoteEndpoint(),
|
||||||
|
Expiration: futureExpiration(),
|
||||||
|
JunkData1: 42,
|
||||||
|
JunkData2: []byte{9, 8, 7, 6, 5, 4, 3, 2, 1},
|
||||||
|
})
|
||||||
|
|
||||||
|
reply, _, _ := te.read(te.l1)
|
||||||
|
if err := te.checkPong(reply, pingHash); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// This test sends a PING packet with additional data and wrong 'from' field
|
||||||
|
// and expects a PONG response.
|
||||||
|
func PingExtraDataWrongFrom(t *utesting.T) {
|
||||||
|
te := newTestEnv(Remote, Listen1, Listen2)
|
||||||
|
defer te.close()
|
||||||
|
|
||||||
|
wrongEndpoint := v4wire.Endpoint{IP: net.ParseIP("192.0.2.0")}
|
||||||
|
req := pingWithJunk{
|
||||||
|
Version: 4,
|
||||||
|
From: wrongEndpoint,
|
||||||
|
To: te.remoteEndpoint(),
|
||||||
|
Expiration: futureExpiration(),
|
||||||
|
JunkData1: 42,
|
||||||
|
JunkData2: []byte{9, 8, 7, 6, 5, 4, 3, 2, 1},
|
||||||
|
}
|
||||||
|
pingHash := te.send(te.l1, &req)
|
||||||
|
reply, _, _ := te.read(te.l1)
|
||||||
|
if err := te.checkPong(reply, pingHash); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// This test sends a PING packet with an expiration in the past.
|
||||||
|
// The remote node should not respond.
|
||||||
|
func PingPastExpiration(t *utesting.T) {
|
||||||
|
te := newTestEnv(Remote, Listen1, Listen2)
|
||||||
|
defer te.close()
|
||||||
|
|
||||||
|
te.send(te.l1, &v4wire.Ping{
|
||||||
|
Version: 4,
|
||||||
|
From: te.localEndpoint(te.l1),
|
||||||
|
To: te.remoteEndpoint(),
|
||||||
|
Expiration: -futureExpiration(),
|
||||||
|
})
|
||||||
|
|
||||||
|
reply, _, _ := te.read(te.l1)
|
||||||
|
if reply != nil {
|
||||||
|
t.Fatal("Expected no reply, got", reply)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// This test sends an invalid packet. The remote node should not respond.
|
||||||
|
func WrongPacketType(t *utesting.T) {
|
||||||
|
te := newTestEnv(Remote, Listen1, Listen2)
|
||||||
|
defer te.close()
|
||||||
|
|
||||||
|
te.send(te.l1, &pingWrongType{
|
||||||
|
Version: 4,
|
||||||
|
From: te.localEndpoint(te.l1),
|
||||||
|
To: te.remoteEndpoint(),
|
||||||
|
Expiration: futureExpiration(),
|
||||||
|
})
|
||||||
|
|
||||||
|
reply, _, _ := te.read(te.l1)
|
||||||
|
if reply != nil {
|
||||||
|
t.Fatal("Expected no reply, got", reply)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// This test verifies that the default behaviour of ignoring 'from' fields is unaffected by
|
||||||
|
// the bonding process. After bonding, it pings the target with a different from endpoint.
|
||||||
|
func BondThenPingWithWrongFrom(t *utesting.T) {
|
||||||
|
te := newTestEnv(Remote, Listen1, Listen2)
|
||||||
|
defer te.close()
|
||||||
|
bond(t, te)
|
||||||
|
|
||||||
|
wrongEndpoint := v4wire.Endpoint{IP: net.ParseIP("192.0.2.0")}
|
||||||
|
pingHash := te.send(te.l1, &v4wire.Ping{
|
||||||
|
Version: 4,
|
||||||
|
From: wrongEndpoint,
|
||||||
|
To: te.remoteEndpoint(),
|
||||||
|
Expiration: futureExpiration(),
|
||||||
|
})
|
||||||
|
|
||||||
|
reply, _, _ := te.read(te.l1)
|
||||||
|
if err := te.checkPong(reply, pingHash); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// This test just sends FINDNODE. The remote node should not reply
|
||||||
|
// because the endpoint proof has not completed.
|
||||||
|
func FindnodeWithoutEndpointProof(t *utesting.T) {
|
||||||
|
te := newTestEnv(Remote, Listen1, Listen2)
|
||||||
|
defer te.close()
|
||||||
|
|
||||||
|
req := v4wire.Findnode{Expiration: futureExpiration()}
|
||||||
|
rand.Read(req.Target[:])
|
||||||
|
te.send(te.l1, &req)
|
||||||
|
|
||||||
|
reply, _, _ := te.read(te.l1)
|
||||||
|
if reply != nil {
|
||||||
|
t.Fatal("Expected no response, got", reply)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// BasicFindnode sends a FINDNODE request after performing the endpoint
|
||||||
|
// proof. The remote node should respond.
|
||||||
|
func BasicFindnode(t *utesting.T) {
|
||||||
|
te := newTestEnv(Remote, Listen1, Listen2)
|
||||||
|
defer te.close()
|
||||||
|
bond(t, te)
|
||||||
|
|
||||||
|
findnode := v4wire.Findnode{Expiration: futureExpiration()}
|
||||||
|
rand.Read(findnode.Target[:])
|
||||||
|
te.send(te.l1, &findnode)
|
||||||
|
|
||||||
|
reply, _, err := te.read(te.l1)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal("read find nodes", err)
|
||||||
|
}
|
||||||
|
if reply.Kind() != v4wire.NeighborsPacket {
|
||||||
|
t.Fatal("Expected neighbors, got", reply.Name())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// This test sends an unsolicited NEIGHBORS packet after the endpoint proof, then sends
|
||||||
|
// FINDNODE to read the remote table. The remote node should not return the node contained
|
||||||
|
// in the unsolicited NEIGHBORS packet.
|
||||||
|
func UnsolicitedNeighbors(t *utesting.T) {
|
||||||
|
te := newTestEnv(Remote, Listen1, Listen2)
|
||||||
|
defer te.close()
|
||||||
|
bond(t, te)
|
||||||
|
|
||||||
|
// Send unsolicited NEIGHBORS response.
|
||||||
|
fakeKey, _ := crypto.GenerateKey()
|
||||||
|
encFakeKey := v4wire.EncodePubkey(&fakeKey.PublicKey)
|
||||||
|
neighbors := v4wire.Neighbors{
|
||||||
|
Expiration: futureExpiration(),
|
||||||
|
Nodes: []v4wire.Node{{
|
||||||
|
ID: encFakeKey,
|
||||||
|
IP: net.IP{1, 2, 3, 4},
|
||||||
|
UDP: 30303,
|
||||||
|
TCP: 30303,
|
||||||
|
}},
|
||||||
|
}
|
||||||
|
te.send(te.l1, &neighbors)
|
||||||
|
|
||||||
|
// Check if the remote node included the fake node.
|
||||||
|
te.send(te.l1, &v4wire.Findnode{
|
||||||
|
Expiration: futureExpiration(),
|
||||||
|
Target: encFakeKey,
|
||||||
|
})
|
||||||
|
|
||||||
|
reply, _, err := te.read(te.l1)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal("read find nodes", err)
|
||||||
|
}
|
||||||
|
if reply.Kind() != v4wire.NeighborsPacket {
|
||||||
|
t.Fatal("Expected neighbors, got", reply.Name())
|
||||||
|
}
|
||||||
|
nodes := reply.(*v4wire.Neighbors).Nodes
|
||||||
|
if contains(nodes, encFakeKey) {
|
||||||
|
t.Fatal("neighbors response contains node from earlier unsolicited neighbors response")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// This test sends FINDNODE with an expiration timestamp in the past.
|
||||||
|
// The remote node should not respond.
|
||||||
|
func FindnodePastExpiration(t *utesting.T) {
|
||||||
|
te := newTestEnv(Remote, Listen1, Listen2)
|
||||||
|
defer te.close()
|
||||||
|
bond(t, te)
|
||||||
|
|
||||||
|
findnode := v4wire.Findnode{Expiration: -futureExpiration()}
|
||||||
|
rand.Read(findnode.Target[:])
|
||||||
|
te.send(te.l1, &findnode)
|
||||||
|
|
||||||
|
for {
|
||||||
|
reply, _, _ := te.read(te.l1)
|
||||||
|
if reply == nil {
|
||||||
|
return
|
||||||
|
} else if reply.Kind() == v4wire.NeighborsPacket {
|
||||||
|
t.Fatal("Unexpected NEIGHBORS response for expired FINDNODE request")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// bond performs the endpoint proof with the remote node.
|
||||||
|
func bond(t *utesting.T, te *testenv) {
|
||||||
|
te.send(te.l1, &v4wire.Ping{
|
||||||
|
Version: 4,
|
||||||
|
From: te.localEndpoint(te.l1),
|
||||||
|
To: te.remoteEndpoint(),
|
||||||
|
Expiration: futureExpiration(),
|
||||||
|
})
|
||||||
|
|
||||||
|
var gotPing, gotPong bool
|
||||||
|
for !gotPing || !gotPong {
|
||||||
|
req, hash, err := te.read(te.l1)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
switch req.(type) {
|
||||||
|
case *v4wire.Ping:
|
||||||
|
te.send(te.l1, &v4wire.Pong{
|
||||||
|
To: te.remoteEndpoint(),
|
||||||
|
ReplyTok: hash,
|
||||||
|
Expiration: futureExpiration(),
|
||||||
|
})
|
||||||
|
gotPing = true
|
||||||
|
case *v4wire.Pong:
|
||||||
|
// TODO: maybe verify pong data here
|
||||||
|
gotPong = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// This test attempts to perform a traffic amplification attack against a
|
||||||
|
// 'victim' endpoint using FINDNODE. In this attack scenario, the attacker
|
||||||
|
// attempts to complete the endpoint proof non-interactively by sending a PONG
|
||||||
|
// with mismatching reply token from the 'victim' endpoint. The attack works if
|
||||||
|
// the remote node does not verify the PONG reply token field correctly. The
|
||||||
|
// attacker could then perform traffic amplification by sending many FINDNODE
|
||||||
|
// requests to the discovery node, which would reply to the 'victim' address.
|
||||||
|
func FindnodeAmplificationInvalidPongHash(t *utesting.T) {
|
||||||
|
te := newTestEnv(Remote, Listen1, Listen2)
|
||||||
|
defer te.close()
|
||||||
|
|
||||||
|
// Send PING to start endpoint verification.
|
||||||
|
te.send(te.l1, &v4wire.Ping{
|
||||||
|
Version: 4,
|
||||||
|
From: te.localEndpoint(te.l1),
|
||||||
|
To: te.remoteEndpoint(),
|
||||||
|
Expiration: futureExpiration(),
|
||||||
|
})
|
||||||
|
|
||||||
|
var gotPing, gotPong bool
|
||||||
|
for !gotPing || !gotPong {
|
||||||
|
req, _, err := te.read(te.l1)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
switch req.(type) {
|
||||||
|
case *v4wire.Ping:
|
||||||
|
// Send PONG from this node ID, but with invalid ReplyTok.
|
||||||
|
te.send(te.l1, &v4wire.Pong{
|
||||||
|
To: te.remoteEndpoint(),
|
||||||
|
ReplyTok: make([]byte, macSize),
|
||||||
|
Expiration: futureExpiration(),
|
||||||
|
})
|
||||||
|
gotPing = true
|
||||||
|
case *v4wire.Pong:
|
||||||
|
gotPong = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Now send FINDNODE. The remote node should not respond because our
|
||||||
|
// PONG did not reference the PING hash.
|
||||||
|
findnode := v4wire.Findnode{Expiration: futureExpiration()}
|
||||||
|
rand.Read(findnode.Target[:])
|
||||||
|
te.send(te.l1, &findnode)
|
||||||
|
|
||||||
|
// If we receive a NEIGHBORS response, the attack worked and the test fails.
|
||||||
|
reply, _, _ := te.read(te.l1)
|
||||||
|
if reply != nil && reply.Kind() == v4wire.NeighborsPacket {
|
||||||
|
t.Error("Got neighbors")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// This test attempts to perform a traffic amplification attack using FINDNODE.
|
||||||
|
// The attack works if the remote node does not verify the IP address of FINDNODE
|
||||||
|
// against the endpoint verification proof done by PING/PONG.
|
||||||
|
func FindnodeAmplificationWrongIP(t *utesting.T) {
|
||||||
|
te := newTestEnv(Remote, Listen1, Listen2)
|
||||||
|
defer te.close()
|
||||||
|
|
||||||
|
// Do the endpoint proof from the l1 IP.
|
||||||
|
bond(t, te)
|
||||||
|
|
||||||
|
// Now send FINDNODE from the same node ID, but different IP address.
|
||||||
|
// The remote node should not respond.
|
||||||
|
findnode := v4wire.Findnode{Expiration: futureExpiration()}
|
||||||
|
rand.Read(findnode.Target[:])
|
||||||
|
te.send(te.l2, &findnode)
|
||||||
|
|
||||||
|
// If we receive a NEIGHBORS response, the attack worked and the test fails.
|
||||||
|
reply, _, _ := te.read(te.l2)
|
||||||
|
if reply != nil {
|
||||||
|
t.Error("Got NEIGHORS response for FINDNODE from wrong IP")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
var AllTests = []utesting.Test{
|
||||||
|
{Name: "Ping/Basic", Fn: BasicPing},
|
||||||
|
{Name: "Ping/WrongTo", Fn: PingWrongTo},
|
||||||
|
{Name: "Ping/WrongFrom", Fn: PingWrongFrom},
|
||||||
|
{Name: "Ping/ExtraData", Fn: PingExtraData},
|
||||||
|
{Name: "Ping/ExtraDataWrongFrom", Fn: PingExtraDataWrongFrom},
|
||||||
|
{Name: "Ping/PastExpiration", Fn: PingPastExpiration},
|
||||||
|
{Name: "Ping/WrongPacketType", Fn: WrongPacketType},
|
||||||
|
{Name: "Ping/BondThenPingWithWrongFrom", Fn: BondThenPingWithWrongFrom},
|
||||||
|
{Name: "Findnode/WithoutEndpointProof", Fn: FindnodeWithoutEndpointProof},
|
||||||
|
{Name: "Findnode/BasicFindnode", Fn: BasicFindnode},
|
||||||
|
{Name: "Findnode/UnsolicitedNeighbors", Fn: UnsolicitedNeighbors},
|
||||||
|
{Name: "Findnode/PastExpiration", Fn: FindnodePastExpiration},
|
||||||
|
{Name: "Amplification/InvalidPongHash", Fn: FindnodeAmplificationInvalidPongHash},
|
||||||
|
{Name: "Amplification/WrongIP", Fn: FindnodeAmplificationWrongIP},
|
||||||
|
}
|
||||||
cmd/devp2p/internal/v4test/framework.go  (new file, 123 lines)
@@ -0,0 +1,123 @@
// Copyright 2020 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.

package v4test

import (
    "crypto/ecdsa"
    "fmt"
    "net"
    "time"

    "github.com/ethereum/go-ethereum/crypto"
    "github.com/ethereum/go-ethereum/p2p/discover/v4wire"
    "github.com/ethereum/go-ethereum/p2p/enode"
)

const waitTime = 300 * time.Millisecond

type testenv struct {
    l1, l2     net.PacketConn
    key        *ecdsa.PrivateKey
    remote     *enode.Node
    remoteAddr *net.UDPAddr
}

func newTestEnv(remote string, listen1, listen2 string) *testenv {
    l1, err := net.ListenPacket("udp", fmt.Sprintf("%v:0", listen1))
    if err != nil {
        panic(err)
    }
    l2, err := net.ListenPacket("udp", fmt.Sprintf("%v:0", listen2))
    if err != nil {
        panic(err)
    }
    key, err := crypto.GenerateKey()
    if err != nil {
        panic(err)
    }
    node, err := enode.Parse(enode.ValidSchemes, remote)
    if err != nil {
        panic(err)
    }
    if node.IP() == nil || node.UDP() == 0 {
        var ip net.IP
        var tcpPort, udpPort int
        if ip = node.IP(); ip == nil {
            ip = net.ParseIP("127.0.0.1")
        }
        if tcpPort = node.TCP(); tcpPort == 0 {
            tcpPort = 30303
        }
        if udpPort = node.UDP(); udpPort == 0 {
            udpPort = 30303
        }
        node = enode.NewV4(node.Pubkey(), ip, tcpPort, udpPort)
    }
    addr := &net.UDPAddr{IP: node.IP(), Port: node.UDP()}
    return &testenv{l1, l2, key, node, addr}
}

func (te *testenv) close() {
    te.l1.Close()
    te.l2.Close()
}

func (te *testenv) send(c net.PacketConn, req v4wire.Packet) []byte {
    packet, hash, err := v4wire.Encode(te.key, req)
    if err != nil {
        panic(fmt.Errorf("can't encode %v packet: %v", req.Name(), err))
    }
    if _, err := c.WriteTo(packet, te.remoteAddr); err != nil {
        panic(fmt.Errorf("can't send %v: %v", req.Name(), err))
    }
    return hash
}

func (te *testenv) read(c net.PacketConn) (v4wire.Packet, []byte, error) {
    buf := make([]byte, 2048)
    if err := c.SetReadDeadline(time.Now().Add(waitTime)); err != nil {
        return nil, nil, err
    }
    n, _, err := c.ReadFrom(buf)
    if err != nil {
        return nil, nil, err
    }
    p, _, hash, err := v4wire.Decode(buf[:n])
    return p, hash, err
}

func (te *testenv) localEndpoint(c net.PacketConn) v4wire.Endpoint {
    addr := c.LocalAddr().(*net.UDPAddr)
    return v4wire.Endpoint{
        IP:  addr.IP.To4(),
        UDP: uint16(addr.Port),
        TCP: 0,
    }
}

func (te *testenv) remoteEndpoint() v4wire.Endpoint {
    return v4wire.NewEndpoint(te.remoteAddr, 0)
}

func contains(ns []v4wire.Node, key v4wire.Pubkey) bool {
    for _, n := range ns {
        if n.ID == key {
            return true
        }
    }
    return false
}
cmd/devp2p/keycmd.go  (new file, 105 lines)
@@ -0,0 +1,105 @@
// Copyright 2020 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.

package main

import (
    "fmt"
    "net"

    "github.com/ethereum/go-ethereum/crypto"
    "github.com/ethereum/go-ethereum/p2p/enode"
    "gopkg.in/urfave/cli.v1"
)

var (
    keyCommand = cli.Command{
        Name:  "key",
        Usage: "Operations on node keys",
        Subcommands: []cli.Command{
            keyGenerateCommand,
            keyToNodeCommand,
        },
    }
    keyGenerateCommand = cli.Command{
        Name:      "generate",
        Usage:     "Generates node key files",
        ArgsUsage: "keyfile",
        Action:    genkey,
    }
    keyToNodeCommand = cli.Command{
        Name:      "to-enode",
        Usage:     "Creates an enode URL from a node key file",
        ArgsUsage: "keyfile",
        Action:    keyToURL,
        Flags:     []cli.Flag{hostFlag, tcpPortFlag, udpPortFlag},
    }
)

var (
    hostFlag = cli.StringFlag{
        Name:  "ip",
        Usage: "IP address of the node",
        Value: "127.0.0.1",
    }
    tcpPortFlag = cli.IntFlag{
        Name:  "tcp",
        Usage: "TCP port of the node",
        Value: 30303,
    }
    udpPortFlag = cli.IntFlag{
        Name:  "udp",
        Usage: "UDP port of the node",
        Value: 30303,
    }
)

func genkey(ctx *cli.Context) error {
    if ctx.NArg() != 1 {
        return fmt.Errorf("need key file as argument")
    }
    file := ctx.Args().Get(0)

    key, err := crypto.GenerateKey()
    if err != nil {
        return fmt.Errorf("could not generate key: %v", err)
    }
    return crypto.SaveECDSA(file, key)
}

func keyToURL(ctx *cli.Context) error {
    if ctx.NArg() != 1 {
        return fmt.Errorf("need key file as argument")
    }

    var (
        file = ctx.Args().Get(0)
        host = ctx.String(hostFlag.Name)
        tcp  = ctx.Int(tcpPortFlag.Name)
        udp  = ctx.Int(udpPortFlag.Name)
    )
    key, err := crypto.LoadECDSA(file)
    if err != nil {
        return err
    }
    ip := net.ParseIP(host)
    if ip == nil {
        return fmt.Errorf("invalid IP address %q", host)
    }
    node := enode.NewV4(&key.PublicKey, ip, tcp, udp)
    fmt.Println(node.URLv4())
    return nil
}
@@ -45,7 +45,7 @@ func init() {
 	// Set up the CLI app.
 	app.Flags = append(app.Flags, debug.Flags...)
 	app.Before = func(ctx *cli.Context) error {
-		return debug.Setup(ctx, "")
+		return debug.Setup(ctx)
 	}
 	app.After = func(ctx *cli.Context) error {
 		debug.Exit()
@@ -58,7 +58,9 @@ func init() {
 	// Add subcommands.
 	app.Commands = []cli.Command{
 		enrdumpCommand,
+		keyCommand,
 		discv4Command,
+		discv5Command,
 		dnsCommand,
 		nodesetCommand,
 	}

@@ -164,7 +164,7 @@ func ethFilter(args []string) (nodeFilter, error) {
 	case "goerli":
 		filter = forkid.NewStaticFilter(params.GoerliChainConfig, params.GoerliGenesisHash)
 	case "ropsten":
-		filter = forkid.NewStaticFilter(params.TestnetChainConfig, params.TestnetGenesisHash)
+		filter = forkid.NewStaticFilter(params.RopstenChainConfig, params.RopstenGenesisHash)
 	default:
 		return nil, fmt.Errorf("unknown network %q", args[0])
 	}

@@ -51,7 +51,7 @@ Change the password of a keyfile.`,
 	}
 
 	// Decrypt key with passphrase.
-	passphrase := getPassphrase(ctx)
+	passphrase := getPassphrase(ctx, false)
 	key, err := keystore.DecryptKey(keyjson, passphrase)
 	if err != nil {
 		utils.Fatalf("Error decrypting key: %v", err)
@@ -67,7 +67,7 @@ Change the password of a keyfile.`,
 		}
 		newPhrase = strings.TrimRight(string(content), "\r\n")
 	} else {
-		newPhrase = promptPassphrase(true)
+		newPhrase = utils.GetPassPhrase("", true)
 	}
 
 	// Encrypt the key with the new passphrase.

@@ -52,6 +52,10 @@ If you want to encrypt an existing private key, it can be specified by setting
 			Name:  "privatekey",
 			Usage: "file containing a raw private key to encrypt",
 		},
+		cli.BoolFlag{
+			Name:  "lightkdf",
+			Usage: "use less secure scrypt parameters",
+		},
 	},
 	Action: func(ctx *cli.Context) error {
 		// Check if keyfile path given and make sure it doesn't already exist.
@@ -90,8 +94,12 @@ If you want to encrypt an existing private key, it can be specified by setting
 		}
 
 		// Encrypt key with passphrase.
-		passphrase := promptPassphrase(true)
-		keyjson, err := keystore.EncryptKey(key, passphrase, keystore.StandardScryptN, keystore.StandardScryptP)
+		passphrase := getPassphrase(ctx, true)
+		scryptN, scryptP := keystore.StandardScryptN, keystore.StandardScryptP
+		if ctx.Bool("lightkdf") {
+			scryptN, scryptP = keystore.LightScryptN, keystore.LightScryptP
+		}
+		keyjson, err := keystore.EncryptKey(key, passphrase, scryptN, scryptP)
 		if err != nil {
 			utils.Fatalf("Error encrypting key: %v", err)
 		}

@@ -60,7 +60,7 @@ make sure to use this feature with great caution!`,
 	}
 
 	// Decrypt key with passphrase.
-	passphrase := getPassphrase(ctx)
+	passphrase := getPassphrase(ctx, false)
 	key, err := keystore.DecryptKey(keyjson, passphrase)
 	if err != nil {
 		utils.Fatalf("Error decrypting key: %v", err)

@@ -20,7 +20,7 @@ import (
 	"fmt"
 	"os"
 
-	"github.com/ethereum/go-ethereum/cmd/utils"
+	"github.com/ethereum/go-ethereum/internal/flags"
 	"gopkg.in/urfave/cli.v1"
 )
 
@@ -35,7 +35,7 @@ var gitDate = ""
 var app *cli.App
 
 func init() {
-	app = utils.NewApp(gitCommit, gitDate, "an Ethereum key manager")
+	app = flags.NewApp(gitCommit, gitDate, "an Ethereum key manager")
 	app.Commands = []cli.Command{
 		commandGenerate,
 		commandInspect,
@@ -43,6 +43,7 @@ func init() {
 		commandSignMessage,
 		commandVerifyMessage,
 	}
+	cli.CommandHelpTemplate = flags.OriginCommandHelpTemplate
 }
 
 // Commonly used command line flags.

@@ -62,7 +62,7 @@ To sign a message contained in a file, use the --msgfile flag.
 	}
 
 	// Decrypt key with passphrase.
-	passphrase := getPassphrase(ctx)
+	passphrase := getPassphrase(ctx, false)
 	key, err := keystore.DecryptKey(keyjson, passphrase)
 	if err != nil {
 		utils.Fatalf("Error decrypting key: %v", err)

@@ -34,7 +34,7 @@ func TestMessageSignVerify(t *testing.T) {
 	message := "test message"
 
 	// Create the key.
-	generate := runEthkey(t, "generate", keyfile)
+	generate := runEthkey(t, "generate", "--lightkdf", keyfile)
 	generate.Expect(`
 !! Unsupported terminal, password will be echoed.
 Password: {{.InputLine "foobar"}}

@@ -23,36 +23,14 @@ import (
 	"strings"
 
 	"github.com/ethereum/go-ethereum/cmd/utils"
-	"github.com/ethereum/go-ethereum/console"
 	"github.com/ethereum/go-ethereum/crypto"
 	"gopkg.in/urfave/cli.v1"
 )
 
-// promptPassphrase prompts the user for a passphrase. Set confirmation to true
-// to require the user to confirm the passphrase.
-func promptPassphrase(confirmation bool) string {
-	passphrase, err := console.Stdin.PromptPassword("Password: ")
-	if err != nil {
-		utils.Fatalf("Failed to read password: %v", err)
-	}
-
-	if confirmation {
-		confirm, err := console.Stdin.PromptPassword("Repeat password: ")
-		if err != nil {
-			utils.Fatalf("Failed to read password confirmation: %v", err)
-		}
-		if passphrase != confirm {
-			utils.Fatalf("Passwords do not match")
-		}
-	}
-
-	return passphrase
-}
-
 // getPassphrase obtains a passphrase given by the user. It first checks the
 // --passfile command line flag and ultimately prompts the user for a
 // passphrase.
-func getPassphrase(ctx *cli.Context) string {
+func getPassphrase(ctx *cli.Context, confirmation bool) string {
 	// Look for the --passwordfile flag.
 	passphraseFile := ctx.String(passphraseFlag.Name)
 	if passphraseFile != "" {
@@ -65,7 +43,7 @@ func getPassphrase(ctx *cli.Context) string {
 	}
 
 	// Otherwise prompt the user for the passphrase.
-	return promptPassphrase(false)
+	return utils.GetPassPhrase("", confirmation)
 }
 
 // signHash is a helper function that calculates a hash for the given message
cmd/evm/README.md  (new file, 268 lines)
@@ -0,0 +1,268 @@
## EVM state transition tool

The `evm t8n` tool is a stateless state transition utility. It is a utility
which can

1. Take a prestate, including
   - Accounts,
   - Block context information,
   - Previous block hashes (*optional)
2. Apply a set of transactions,
3. Apply a mining-reward (*optional),
4. And generate a post-state, including
   - State root, transaction root, receipt root,
   - Information about rejected transactions,
   - Optionally: a full or partial post-state dump

## Specification

The idea is to specify the behaviour of this binary very _strictly_, so that other
node implementors can build replicas based on their own state-machines, and the
state generators can swap between a `geth`-based implementation and a `parityvm`-based
implementation.

### Command line params

Command line params that have to be supported are

```
--trace                  Output full trace logs to files <txhash>.jsonl
--trace.nomemory         Disable full memory dump in traces
--trace.nostack          Disable stack output in traces
--output.alloc alloc     Determines where to put the alloc of the post-state.
                         `stdout` - into the stdout output
                         `stderr` - into the stderr output
--output.result result   Determines where to put the result (stateroot, txroot etc) of the post-state.
                         `stdout` - into the stdout output
                         `stderr` - into the stderr output
--state.fork value       Name of ruleset to use.
--state.chainid value    ChainID to use (default: 1)
--state.reward value     Mining reward. Set to -1 to disable (default: 0)
```

### Error codes and output

All logging should happen against the `stderr`.
There are a few (not many) errors that can occur, those are defined below.

#### EVM-based errors (`2` to `9`)

- Other EVM error. Exit code `2`
- Failed configuration: when a non-supported or invalid fork was specified. Exit code `3`.
- Block history is not supplied, but needed for a `BLOCKHASH` operation. If `BLOCKHASH`
  is invoked targeting a block which history has not been provided for, the program will
  exit with code `4`.

#### IO errors (`10`-`20`)

- Invalid input json: the supplied data could not be marshalled.
  The program will exit with code `10`
- IO problems: failure to load or save files, the program will exit with code `11`
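As a rough, non-normative sketch of how a harness might act on these exit codes, the Go program below shells out to `evm t8n` and classifies the result. It assumes an `evm` binary on the `PATH` and the `./testdata/1` inputs used in the examples that follow; the messages and ranges are only an illustration of the table above.

```go
// exitcode.go: run `evm t8n` and map its exit status to the error categories
// documented above. Illustrative only; assumes `evm` is on PATH.
package main

import (
	"fmt"
	"os/exec"
)

func main() {
	cmd := exec.Command("evm", "t8n",
		"--input.alloc=./testdata/1/alloc.json",
		"--input.txs=./testdata/1/txs.json",
		"--input.env=./testdata/1/env.json")
	err := cmd.Run()

	code := 0
	if exitErr, ok := err.(*exec.ExitError); ok {
		code = exitErr.ExitCode()
	} else if err != nil {
		fmt.Println("could not run evm:", err)
		return
	}

	switch {
	case code == 0:
		fmt.Println("state transition succeeded")
	case code >= 2 && code <= 9:
		fmt.Println("EVM-side failure (e.g. bad fork name, missing block history), exit code", code)
	case code >= 10 && code <= 20:
		fmt.Println("IO failure (e.g. unreadable input json), exit code", code)
	default:
		fmt.Println("unexpected exit code", code)
	}
}
```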
|
## Examples
|
||||||
|
### Basic usage
|
||||||
|
|
||||||
|
Invoking it with the provided example files
|
||||||
|
```
|
||||||
|
./evm t8n --input.alloc=./testdata/1/alloc.json --input.txs=./testdata/1/txs.json --input.env=./testdata/1/env.json
|
||||||
|
```
|
||||||
|
Two resulting files:
|
||||||
|
|
||||||
|
`alloc.json`:
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"0x8a8eafb1cf62bfbeb1741769dae1a9dd47996192": {
|
||||||
|
"balance": "0xfeed1a9d",
|
||||||
|
"nonce": "0x1"
|
||||||
|
},
|
||||||
|
"0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": {
|
||||||
|
"balance": "0x5ffd4878be161d74",
|
||||||
|
"nonce": "0xac"
|
||||||
|
},
|
||||||
|
"0xc94f5374fce5edbc8e2a8697c15331677e6ebf0b": {
|
||||||
|
"balance": "0xa410"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
`result.json`:
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"stateRoot": "0x84208a19bc2b46ada7445180c1db162be5b39b9abc8c0a54b05d32943eae4e13",
|
||||||
|
"txRoot": "0xc4761fd7b87ff2364c7c60b6c5c8d02e522e815328aaea3f20e3b7b7ef52c42d",
|
||||||
|
"receiptRoot": "0x056b23fbba480696b65fe5a59b8f2148a1299103c4f57df839233af2cf4ca2d2",
|
||||||
|
"logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
|
||||||
|
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
|
||||||
|
"receipts": [
|
||||||
|
{
|
||||||
|
"root": "0x",
|
||||||
|
"status": "0x1",
|
||||||
|
"cumulativeGasUsed": "0x5208",
|
||||||
|
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
|
||||||
|
"logs": null,
|
||||||
|
"transactionHash": "0x0557bacce3375c98d806609b8d5043072f0b6a8bae45ae5a67a00d3a1a18d673",
|
||||||
|
"contractAddress": "0x0000000000000000000000000000000000000000",
|
||||||
|
"gasUsed": "0x5208",
|
||||||
|
"blockHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
|
||||||
|
"transactionIndex": "0x0"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"rejected": [
|
||||||
|
1
|
||||||
|
]
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
We can make them spit out the data to e.g. `stdout` like this:
|
||||||
|
```
|
||||||
|
./evm t8n --input.alloc=./testdata/1/alloc.json --input.txs=./testdata/1/txs.json --input.env=./testdata/1/env.json --output.result=stdout --output.alloc=stdout
|
||||||
|
```
|
||||||
|
Output:
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"alloc": {
|
||||||
|
"0x8a8eafb1cf62bfbeb1741769dae1a9dd47996192": {
|
||||||
|
"balance": "0xfeed1a9d",
|
||||||
|
"nonce": "0x1"
|
||||||
|
},
|
||||||
|
"0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": {
|
||||||
|
"balance": "0x5ffd4878be161d74",
|
||||||
|
"nonce": "0xac"
|
||||||
|
},
|
||||||
|
"0xc94f5374fce5edbc8e2a8697c15331677e6ebf0b": {
|
||||||
|
"balance": "0xa410"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"result": {
|
||||||
|
"stateRoot": "0x84208a19bc2b46ada7445180c1db162be5b39b9abc8c0a54b05d32943eae4e13",
|
||||||
|
"txRoot": "0xc4761fd7b87ff2364c7c60b6c5c8d02e522e815328aaea3f20e3b7b7ef52c42d",
|
||||||
|
"receiptRoot": "0x056b23fbba480696b65fe5a59b8f2148a1299103c4f57df839233af2cf4ca2d2",
|
||||||
|
"logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
|
||||||
|
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
|
||||||
|
"receipts": [
|
||||||
|
{
|
||||||
|
"root": "0x",
|
||||||
|
"status": "0x1",
|
||||||
|
"cumulativeGasUsed": "0x5208",
|
||||||
|
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
|
||||||
|
"logs": null,
|
||||||
|
"transactionHash": "0x0557bacce3375c98d806609b8d5043072f0b6a8bae45ae5a67a00d3a1a18d673",
|
||||||
|
"contractAddress": "0x0000000000000000000000000000000000000000",
|
||||||
|
"gasUsed": "0x5208",
|
||||||
|
"blockHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
|
||||||
|
"transactionIndex": "0x0"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"rejected": [
|
||||||
|
1
|
||||||
|
]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```

## About Ommers

Mining rewards and ommer rewards might need to be added. This is how those are applied:

- `block_reward` is the block mining reward for the miner (`0xaa`), of a block at height `N`.
- For each ommer (mined by `0xbb`), with blocknumber `N-delta`
  - (where `delta` is the difference between the current block and the ommer)
  - The account `0xbb` (ommer miner) is awarded `(8 - delta) / 8 * block_reward`
  - The account `0xaa` (block miner) is awarded `block_reward / 32` (a sketch of this arithmetic follows below)
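
As a minimal, self-contained sketch of these rules (plain integers for readability; the tool itself works with `big.Int`), the following is not part of the tool. The reward value `0x80` and the ommer deltas `1` and `2` are taken from the example further down:

```go
package main

import "fmt"

// ommerReward returns the ommer miner's reward: (8 - delta) / 8 * block_reward.
func ommerReward(blockReward, delta int64) int64 {
	return (8 - delta) * blockReward / 8
}

// minerReward returns the block miner's total: block_reward plus block_reward/32
// for each included ommer.
func minerReward(blockReward, ommers int64) int64 {
	return blockReward + ommers*(blockReward/32)
}

func main() {
	fmt.Printf("%#x\n", ommerReward(0x80, 1)) // 0x70
	fmt.Printf("%#x\n", ommerReward(0x80, 2)) // 0x60
	fmt.Printf("%#x\n", minerReward(0x80, 2)) // 0x88
}
```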

To make `state_t8n` apply these, the following inputs are required:

- `state.reward`
  - For ethash, it is `5000000000000000000` `wei`,
  - If this is not defined, mining rewards are not applied,
  - A value of `0` is valid, and causes accounts to be 'touched'.
- For each ommer, the tool needs to be given an `address` and a `delta`. This is done via the `env`.

Note: the tool does not verify that the normal uncle rules apply; it will, for example, accept two uncles at the same height, and does not check the uncle distance. This means that the tool also allows a negative uncle reward (distance > 8).

Example:
`./testdata/5/env.json`:
```json
{
  "currentCoinbase": "0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
  "currentDifficulty": "0x20000",
  "currentGasLimit": "0x750a163df65e8a",
  "currentNumber": "1",
  "currentTimestamp": "1000",
  "ommers": [
    {"delta": 1, "address": "0xbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" },
    {"delta": 2, "address": "0xcccccccccccccccccccccccccccccccccccccccc" }
  ]
}
```

When applying this, using a reward of `0x80`:
Output:
```json
{
  "alloc": {
    "0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa": {
      "balance": "0x88"
    },
    "0xbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb": {
      "balance": "0x70"
    },
    "0xcccccccccccccccccccccccccccccccccccccccc": {
      "balance": "0x60"
    }
  }
}
```
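
To check the numbers: with `block_reward = 0x80` (128), the `delta = 1` ommer receives `(8 - 1) / 8 * 128 = 112 = 0x70`, the `delta = 2` ommer receives `(8 - 2) / 8 * 128 = 96 = 0x60`, and the block miner receives `128 + 2 * 128/32 = 136 = 0x88`, matching the balances above.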

### Future EIPs

It is also possible to experiment with future EIPs that are not yet defined in a hard fork.
For example, putting EIP-1344 (the `CHAINID` opcode) into Frontier:
```
./evm t8n --state.fork=Frontier+1344 --input.alloc=./testdata/1/alloc.json --input.txs=./testdata/1/txs.json --input.env=./testdata/1/env.json
```
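
The `<forkname>(+ExtraEip)` string is resolved via `tests.GetChainConfig` into a base chain configuration plus a list of extra EIPs that get enabled on the EVM (see `t8ntool/transition.go` in this change). The snippet below only illustrates the shape of the syntax; it is not the tool's own parser:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// Split a fork spec such as "Frontier+1344" into the base fork name
	// and the extra-EIP suffixes.
	parts := strings.Split("Frontier+1344", "+")
	fork, extraEips := parts[0], parts[1:]
	fmt.Println("base fork: ", fork)      // Frontier
	fmt.Println("extra EIPs:", extraEips) // [1344]
}
```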

### Block history

The `BLOCKHASH` opcode requires blockhashes to be provided by the caller, inside the `env` (the `blockHashes` field, a map of block number to hash).
If a required blockhash is not provided, the exit code should be `4`.
Example where blockhashes are provided:
```
./evm t8n --input.alloc=./testdata/3/alloc.json --input.txs=./testdata/3/txs.json --input.env=./testdata/3/env.json --trace
```
```
cat trace-0.jsonl | grep BLOCKHASH -C2
```
```
{"pc":0,"op":96,"gas":"0x5f58ef8","gasCost":"0x3","memory":"0x","memSize":0,"stack":[],"returnStack":[],"depth":1,"refund":0,"opName":"PUSH1","error":""}
{"pc":2,"op":64,"gas":"0x5f58ef5","gasCost":"0x14","memory":"0x","memSize":0,"stack":["0x1"],"returnStack":[],"depth":1,"refund":0,"opName":"BLOCKHASH","error":""}
{"pc":3,"op":0,"gas":"0x5f58ee1","gasCost":"0x0","memory":"0x","memSize":0,"stack":["0xdac58aa524e50956d0c0bae7f3f8bb9d35381365d07804dd5b48a5a297c06af4"],"returnStack":[],"depth":1,"refund":0,"opName":"STOP","error":""}
{"output":"","gasUsed":"0x17","time":155861}
```
In this trace, `BLOCKHASH` pops the block number `0x1` from the stack and pushes the corresponding hash.

In this example, the caller has not provided the required blockhash:
```
./evm t8n --input.alloc=./testdata/4/alloc.json --input.txs=./testdata/4/txs.json --input.env=./testdata/4/env.json --trace
```
```
ERROR(4): getHash(3) invoked, blockhash for that block not provided
```
Error code: 4
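
For reference, here is a self-contained sketch of the lookup behaviour that produces this error; it mirrors the `getHash` closure in `t8ntool/execution.go` from this change, and the map only contains the block-1 hash seen in the trace above:

```go
package main

import "fmt"

func main() {
	// Blockhashes supplied by the caller via the env.
	blockHashes := map[uint64]string{
		1: "0xdac58aa524e50956d0c0bae7f3f8bb9d35381365d07804dd5b48a5a297c06af4",
	}
	var hashError error
	getHash := func(num uint64) string {
		h, ok := blockHashes[num]
		if !ok {
			// Record the problem; the tool reports it later with exit code 4.
			hashError = fmt.Errorf("getHash(%d) invoked, blockhash for that block not provided", num)
		}
		return h
	}

	getHash(1) // present: fine
	getHash(3) // missing: records the error
	if hashError != nil {
		fmt.Println("ERROR(4):", hashError)
	}
}
```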

### Chaining

Another thing that can be done is to chain invocations:
```
./evm t8n --input.alloc=./testdata/1/alloc.json --input.txs=./testdata/1/txs.json --input.env=./testdata/1/env.json --output.alloc=stdout | ./evm t8n --input.alloc=stdin --input.env=./testdata/1/env.json --input.txs=./testdata/1/txs.json
INFO [06-29|11:52:04.934] rejected tx index=1 hash="0557ba…18d673" from=0x8A8eAFb1cf62BfBeb1741769DAE1a9dd47996192 error="nonce too low"
INFO [06-29|11:52:04.936] rejected tx index=0 hash="0557ba…18d673" from=0x8A8eAFb1cf62BfBeb1741769DAE1a9dd47996192 error="nonce too low"
INFO [06-29|11:52:04.936] rejected tx index=1 hash="0557ba…18d673" from=0x8A8eAFb1cf62BfBeb1741769DAE1a9dd47996192 error="nonce too low"
```
What happened here is that we first applied two identical transactions, so the second one was rejected.
Then, taking the poststate alloc as the input for the next state, we tried again to include
the same two transactions: this time, both failed due to too low nonce.

In order to meaningfully chain invocations, one would need to provide a meaningful new `env`, otherwise the
actual blocknumber (exposed to the EVM) would not increase. A sketch of how one might prepare such an env follows below.
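
A hedged sketch of preparing that follow-up `env`: read the previous `env.json`, bump `currentNumber` and `currentTimestamp`, and write the result to a new file. The field names match the env format shown earlier; the output file name `env2.json` and the timestamp step are arbitrary, illustrative choices:

```go
package main

import (
	"encoding/json"
	"io/ioutil"
	"strconv"
)

func main() {
	raw, err := ioutil.ReadFile("./testdata/1/env.json")
	if err != nil {
		panic(err)
	}
	var env map[string]interface{}
	if err := json.Unmarshal(raw, &env); err != nil {
		panic(err)
	}
	// In this env file, currentNumber and currentTimestamp are decimal strings.
	num, _ := strconv.Atoi(env["currentNumber"].(string))
	ts, _ := strconv.Atoi(env["currentTimestamp"].(string))
	env["currentNumber"] = strconv.Itoa(num + 1)
	env["currentTimestamp"] = strconv.Itoa(ts + 12)

	out, _ := json.MarshalIndent(env, "", "  ")
	if err := ioutil.WriteFile("env2.json", out, 0644); err != nil {
		panic(err)
	}
}
```

The second invocation would then use `--input.env=env2.json` instead of reusing the original env.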
@@ -34,17 +34,22 @@ var disasmCommand = cli.Command{
 }
 
 func disasmCmd(ctx *cli.Context) error {
-	if len(ctx.Args().First()) == 0 {
-		return errors.New("filename required")
+	var in string
+	switch {
+	case len(ctx.Args().First()) > 0:
+		fn := ctx.Args().First()
+		input, err := ioutil.ReadFile(fn)
+		if err != nil {
+			return err
+		}
+		in = string(input)
+	case ctx.GlobalIsSet(InputFlag.Name):
+		in = ctx.GlobalString(InputFlag.Name)
+	default:
+		return errors.New("Missing filename or --input value")
 	}
 
-	fn := ctx.Args().First()
-	in, err := ioutil.ReadFile(fn)
-	if err != nil {
-		return err
-	}
-
-	code := strings.TrimSpace(string(in))
+	code := strings.TrimSpace(in)
 	fmt.Printf("%v\n", code)
 	return asm.PrintDisassembled(code)
 }
255  cmd/evm/internal/t8ntool/execution.go  Normal file
@@ -0,0 +1,255 @@
|
|||||||
|
// Copyright 2020 The go-ethereum Authors
|
||||||
|
// This file is part of the go-ethereum library.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU Lesser General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU Lesser General Public License for more details.
|
||||||
|
//
|
||||||
|
// You should have received a copy of the GNU Lesser General Public License
|
||||||
|
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
package t8ntool
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"math/big"
|
||||||
|
"os"
|
||||||
|
|
||||||
|
"github.com/ethereum/go-ethereum/common"
|
||||||
|
"github.com/ethereum/go-ethereum/common/math"
|
||||||
|
"github.com/ethereum/go-ethereum/consensus/misc"
|
||||||
|
"github.com/ethereum/go-ethereum/core"
|
||||||
|
"github.com/ethereum/go-ethereum/core/rawdb"
|
||||||
|
"github.com/ethereum/go-ethereum/core/state"
|
||||||
|
"github.com/ethereum/go-ethereum/core/types"
|
||||||
|
"github.com/ethereum/go-ethereum/core/vm"
|
||||||
|
"github.com/ethereum/go-ethereum/crypto"
|
||||||
|
"github.com/ethereum/go-ethereum/ethdb"
|
||||||
|
"github.com/ethereum/go-ethereum/log"
|
||||||
|
"github.com/ethereum/go-ethereum/params"
|
||||||
|
"github.com/ethereum/go-ethereum/rlp"
|
||||||
|
"golang.org/x/crypto/sha3"
|
||||||
|
)
|
||||||
|
|
||||||
|
type Prestate struct {
|
||||||
|
Env stEnv `json:"env"`
|
||||||
|
Pre core.GenesisAlloc `json:"pre"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExecutionResult contains the execution status after running a state test, any
|
||||||
|
// error that might have occurred and a dump of the final state if requested.
|
||||||
|
type ExecutionResult struct {
|
||||||
|
StateRoot common.Hash `json:"stateRoot"`
|
||||||
|
TxRoot common.Hash `json:"txRoot"`
|
||||||
|
ReceiptRoot common.Hash `json:"receiptRoot"`
|
||||||
|
LogsHash common.Hash `json:"logsHash"`
|
||||||
|
Bloom types.Bloom `json:"logsBloom" gencodec:"required"`
|
||||||
|
Receipts types.Receipts `json:"receipts"`
|
||||||
|
Rejected []int `json:"rejected,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type ommer struct {
|
||||||
|
Delta uint64 `json:"delta"`
|
||||||
|
Address common.Address `json:"address"`
|
||||||
|
}
|
||||||
|
|
||||||
|
//go:generate gencodec -type stEnv -field-override stEnvMarshaling -out gen_stenv.go
|
||||||
|
type stEnv struct {
|
||||||
|
Coinbase common.Address `json:"currentCoinbase" gencodec:"required"`
|
||||||
|
Difficulty *big.Int `json:"currentDifficulty" gencodec:"required"`
|
||||||
|
GasLimit uint64 `json:"currentGasLimit" gencodec:"required"`
|
||||||
|
Number uint64 `json:"currentNumber" gencodec:"required"`
|
||||||
|
Timestamp uint64 `json:"currentTimestamp" gencodec:"required"`
|
||||||
|
BlockHashes map[math.HexOrDecimal64]common.Hash `json:"blockHashes,omitempty"`
|
||||||
|
Ommers []ommer `json:"ommers,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type stEnvMarshaling struct {
|
||||||
|
Coinbase common.UnprefixedAddress
|
||||||
|
Difficulty *math.HexOrDecimal256
|
||||||
|
GasLimit math.HexOrDecimal64
|
||||||
|
Number math.HexOrDecimal64
|
||||||
|
Timestamp math.HexOrDecimal64
|
||||||
|
}
|
||||||
|
|
||||||
|
// Apply applies a set of transactions to a pre-state
|
||||||
|
func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig,
|
||||||
|
txs types.Transactions, miningReward int64,
|
||||||
|
getTracerFn func(txIndex int) (tracer vm.Tracer, err error)) (*state.StateDB, *ExecutionResult, error) {
|
||||||
|
|
||||||
|
// Capture errors for BLOCKHASH operation, if we haven't been supplied the
|
||||||
|
// required blockhashes
|
||||||
|
var hashError error
|
||||||
|
getHash := func(num uint64) common.Hash {
|
||||||
|
if pre.Env.BlockHashes == nil {
|
||||||
|
hashError = fmt.Errorf("getHash(%d) invoked, no blockhashes provided", num)
|
||||||
|
return common.Hash{}
|
||||||
|
}
|
||||||
|
h, ok := pre.Env.BlockHashes[math.HexOrDecimal64(num)]
|
||||||
|
if !ok {
|
||||||
|
hashError = fmt.Errorf("getHash(%d) invoked, blockhash for that block not provided", num)
|
||||||
|
}
|
||||||
|
return h
|
||||||
|
}
|
||||||
|
var (
|
||||||
|
statedb = MakePreState(rawdb.NewMemoryDatabase(), pre.Pre)
|
||||||
|
signer = types.MakeSigner(chainConfig, new(big.Int).SetUint64(pre.Env.Number))
|
||||||
|
gaspool = new(core.GasPool)
|
||||||
|
blockHash = common.Hash{0x13, 0x37}
|
||||||
|
rejectedTxs []int
|
||||||
|
includedTxs types.Transactions
|
||||||
|
gasUsed = uint64(0)
|
||||||
|
receipts = make(types.Receipts, 0)
|
||||||
|
txIndex = 0
|
||||||
|
)
|
||||||
|
gaspool.AddGas(pre.Env.GasLimit)
|
||||||
|
vmContext := vm.Context{
|
||||||
|
CanTransfer: core.CanTransfer,
|
||||||
|
Transfer: core.Transfer,
|
||||||
|
Coinbase: pre.Env.Coinbase,
|
||||||
|
BlockNumber: new(big.Int).SetUint64(pre.Env.Number),
|
||||||
|
Time: new(big.Int).SetUint64(pre.Env.Timestamp),
|
||||||
|
Difficulty: pre.Env.Difficulty,
|
||||||
|
GasLimit: pre.Env.GasLimit,
|
||||||
|
GetHash: getHash,
|
||||||
|
// GasPrice and Origin needs to be set per transaction
|
||||||
|
}
|
||||||
|
// If DAO is supported/enabled, we need to handle it here. In geth 'proper', it's
|
||||||
|
// done in StateProcessor.Process(block, ...), right before transactions are applied.
|
||||||
|
if chainConfig.DAOForkSupport &&
|
||||||
|
chainConfig.DAOForkBlock != nil &&
|
||||||
|
chainConfig.DAOForkBlock.Cmp(new(big.Int).SetUint64(pre.Env.Number)) == 0 {
|
||||||
|
misc.ApplyDAOHardFork(statedb)
|
||||||
|
}
|
||||||
|
|
||||||
|
for i, tx := range txs {
|
||||||
|
msg, err := tx.AsMessage(signer)
|
||||||
|
if err != nil {
|
||||||
|
log.Info("rejected tx", "index", i, "hash", tx.Hash(), "error", err)
|
||||||
|
rejectedTxs = append(rejectedTxs, i)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
tracer, err := getTracerFn(txIndex)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
vmConfig.Tracer = tracer
|
||||||
|
vmConfig.Debug = (tracer != nil)
|
||||||
|
statedb.Prepare(tx.Hash(), blockHash, txIndex)
|
||||||
|
vmContext.GasPrice = msg.GasPrice()
|
||||||
|
vmContext.Origin = msg.From()
|
||||||
|
|
||||||
|
evm := vm.NewEVM(vmContext, statedb, chainConfig, vmConfig)
|
||||||
|
snapshot := statedb.Snapshot()
|
||||||
|
// (ret []byte, usedGas uint64, failed bool, err error)
|
||||||
|
msgResult, err := core.ApplyMessage(evm, msg, gaspool)
|
||||||
|
if err != nil {
|
||||||
|
statedb.RevertToSnapshot(snapshot)
|
||||||
|
log.Info("rejected tx", "index", i, "hash", tx.Hash(), "from", msg.From(), "error", err)
|
||||||
|
rejectedTxs = append(rejectedTxs, i)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
includedTxs = append(includedTxs, tx)
|
||||||
|
if hashError != nil {
|
||||||
|
return nil, nil, NewError(ErrorMissingBlockhash, hashError)
|
||||||
|
}
|
||||||
|
gasUsed += msgResult.UsedGas
|
||||||
|
// Create a new receipt for the transaction, storing the intermediate root and gas used by the tx
|
||||||
|
{
|
||||||
|
var root []byte
|
||||||
|
if chainConfig.IsByzantium(vmContext.BlockNumber) {
|
||||||
|
statedb.Finalise(true)
|
||||||
|
} else {
|
||||||
|
root = statedb.IntermediateRoot(chainConfig.IsEIP158(vmContext.BlockNumber)).Bytes()
|
||||||
|
}
|
||||||
|
|
||||||
|
receipt := types.NewReceipt(root, msgResult.Failed(), gasUsed)
|
||||||
|
receipt.TxHash = tx.Hash()
|
||||||
|
receipt.GasUsed = msgResult.UsedGas
|
||||||
|
// if the transaction created a contract, store the creation address in the receipt.
|
||||||
|
if msg.To() == nil {
|
||||||
|
receipt.ContractAddress = crypto.CreateAddress(evm.Context.Origin, tx.Nonce())
|
||||||
|
}
|
||||||
|
// Set the receipt logs and create a bloom for filtering
|
||||||
|
receipt.Logs = statedb.GetLogs(tx.Hash())
|
||||||
|
receipt.Bloom = types.CreateBloom(types.Receipts{receipt})
|
||||||
|
// These three are non-consensus fields
|
||||||
|
//receipt.BlockHash
|
||||||
|
//receipt.BlockNumber =
|
||||||
|
receipt.TransactionIndex = uint(txIndex)
|
||||||
|
receipts = append(receipts, receipt)
|
||||||
|
}
|
||||||
|
txIndex++
|
||||||
|
}
|
||||||
|
statedb.IntermediateRoot(chainConfig.IsEIP158(vmContext.BlockNumber))
|
||||||
|
// Add mining reward?
|
||||||
|
if miningReward > 0 {
|
||||||
|
// Add mining reward. The mining reward may be `0`, which only makes a difference in the cases
|
||||||
|
// where
|
||||||
|
// - the coinbase suicided, or
|
||||||
|
// - there are only 'bad' transactions, which aren't executed. In those cases,
|
||||||
|
// the coinbase gets no txfee, so isn't created, and thus needs to be touched
|
||||||
|
var (
|
||||||
|
blockReward = big.NewInt(miningReward)
|
||||||
|
minerReward = new(big.Int).Set(blockReward)
|
||||||
|
perOmmer = new(big.Int).Div(blockReward, big.NewInt(32))
|
||||||
|
)
|
||||||
|
for _, ommer := range pre.Env.Ommers {
|
||||||
|
// Add 1/32th for each ommer included
|
||||||
|
minerReward.Add(minerReward, perOmmer)
|
||||||
|
// Add (8-delta)/8
|
||||||
|
reward := big.NewInt(8)
|
||||||
|
reward.Sub(reward, big.NewInt(0).SetUint64(ommer.Delta))
|
||||||
|
reward.Mul(reward, blockReward)
|
||||||
|
reward.Div(reward, big.NewInt(8))
|
||||||
|
statedb.AddBalance(ommer.Address, reward)
|
||||||
|
}
|
||||||
|
statedb.AddBalance(pre.Env.Coinbase, minerReward)
|
||||||
|
}
|
||||||
|
// Commit block
|
||||||
|
root, err := statedb.Commit(chainConfig.IsEIP158(vmContext.BlockNumber))
|
||||||
|
if err != nil {
|
||||||
|
fmt.Fprintf(os.Stderr, "Could not commit state: %v", err)
|
||||||
|
return nil, nil, NewError(ErrorEVM, fmt.Errorf("could not commit state: %v", err))
|
||||||
|
}
|
||||||
|
execRs := &ExecutionResult{
|
||||||
|
StateRoot: root,
|
||||||
|
TxRoot: types.DeriveSha(includedTxs),
|
||||||
|
ReceiptRoot: types.DeriveSha(receipts),
|
||||||
|
Bloom: types.CreateBloom(receipts),
|
||||||
|
LogsHash: rlpHash(statedb.Logs()),
|
||||||
|
Receipts: receipts,
|
||||||
|
Rejected: rejectedTxs,
|
||||||
|
}
|
||||||
|
return statedb, execRs, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func MakePreState(db ethdb.Database, accounts core.GenesisAlloc) *state.StateDB {
|
||||||
|
sdb := state.NewDatabase(db)
|
||||||
|
statedb, _ := state.New(common.Hash{}, sdb, nil)
|
||||||
|
for addr, a := range accounts {
|
||||||
|
statedb.SetCode(addr, a.Code)
|
||||||
|
statedb.SetNonce(addr, a.Nonce)
|
||||||
|
statedb.SetBalance(addr, a.Balance)
|
||||||
|
for k, v := range a.Storage {
|
||||||
|
statedb.SetState(addr, k, v)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// Commit and re-open to start with a clean state.
|
||||||
|
root, _ := statedb.Commit(false)
|
||||||
|
statedb, _ = state.New(root, sdb, nil)
|
||||||
|
return statedb
|
||||||
|
}
|
||||||
|
|
||||||
|
func rlpHash(x interface{}) (h common.Hash) {
|
||||||
|
hw := sha3.NewLegacyKeccak256()
|
||||||
|
rlp.Encode(hw, x)
|
||||||
|
hw.Sum(h[:0])
|
||||||
|
return h
|
||||||
|
}
|
||||||
103  cmd/evm/internal/t8ntool/flags.go  Normal file
@@ -0,0 +1,103 @@
|
|||||||
|
// Copyright 2020 The go-ethereum Authors
|
||||||
|
// This file is part of the go-ethereum library.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU Lesser General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU Lesser General Public License for more details.
|
||||||
|
//
|
||||||
|
// You should have received a copy of the GNU Lesser General Public License
|
||||||
|
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
package t8ntool
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/ethereum/go-ethereum/core/vm"
|
||||||
|
"github.com/ethereum/go-ethereum/tests"
|
||||||
|
"gopkg.in/urfave/cli.v1"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
TraceFlag = cli.BoolFlag{
|
||||||
|
Name: "trace",
|
||||||
|
Usage: "Output full trace logs to files <txhash>.jsonl",
|
||||||
|
}
|
||||||
|
TraceDisableMemoryFlag = cli.BoolFlag{
|
||||||
|
Name: "trace.nomemory",
|
||||||
|
Usage: "Disable full memory dump in traces",
|
||||||
|
}
|
||||||
|
TraceDisableStackFlag = cli.BoolFlag{
|
||||||
|
Name: "trace.nostack",
|
||||||
|
Usage: "Disable stack output in traces",
|
||||||
|
}
|
||||||
|
TraceDisableReturnDataFlag = cli.BoolFlag{
|
||||||
|
Name: "trace.noreturndata",
|
||||||
|
Usage: "Disable return data output in traces",
|
||||||
|
}
|
||||||
|
OutputAllocFlag = cli.StringFlag{
|
||||||
|
Name: "output.alloc",
|
||||||
|
Usage: "Determines where to put the `alloc` of the post-state.\n" +
|
||||||
|
"\t`stdout` - into the stdout output\n" +
|
||||||
|
"\t`stderr` - into the stderr output\n" +
|
||||||
|
"\t<file> - into the file <file> ",
|
||||||
|
Value: "alloc.json",
|
||||||
|
}
|
||||||
|
OutputResultFlag = cli.StringFlag{
|
||||||
|
Name: "output.result",
|
||||||
|
Usage: "Determines where to put the `result` (stateroot, txroot etc) of the post-state.\n" +
|
||||||
|
"\t`stdout` - into the stdout output\n" +
|
||||||
|
"\t`stderr` - into the stderr output\n" +
|
||||||
|
"\t<file> - into the file <file> ",
|
||||||
|
Value: "result.json",
|
||||||
|
}
|
||||||
|
InputAllocFlag = cli.StringFlag{
|
||||||
|
Name: "input.alloc",
|
||||||
|
Usage: "`stdin` or file name of where to find the prestate alloc to use.",
|
||||||
|
Value: "alloc.json",
|
||||||
|
}
|
||||||
|
InputEnvFlag = cli.StringFlag{
|
||||||
|
Name: "input.env",
|
||||||
|
Usage: "`stdin` or file name of where to find the prestate env to use.",
|
||||||
|
Value: "env.json",
|
||||||
|
}
|
||||||
|
InputTxsFlag = cli.StringFlag{
|
||||||
|
Name: "input.txs",
|
||||||
|
Usage: "`stdin` or file name of where to find the transactions to apply.",
|
||||||
|
Value: "txs.json",
|
||||||
|
}
|
||||||
|
RewardFlag = cli.Int64Flag{
|
||||||
|
Name: "state.reward",
|
||||||
|
Usage: "Mining reward. Set to -1 to disable",
|
||||||
|
Value: 0,
|
||||||
|
}
|
||||||
|
ChainIDFlag = cli.Int64Flag{
|
||||||
|
Name: "state.chainid",
|
||||||
|
Usage: "ChainID to use",
|
||||||
|
Value: 1,
|
||||||
|
}
|
||||||
|
ForknameFlag = cli.StringFlag{
|
||||||
|
Name: "state.fork",
|
||||||
|
Usage: fmt.Sprintf("Name of ruleset to use."+
|
||||||
|
"\n\tAvailable forknames:"+
|
||||||
|
"\n\t %v"+
|
||||||
|
"\n\tAvailable extra eips:"+
|
||||||
|
"\n\t %v"+
|
||||||
|
"\n\tSyntax <forkname>(+ExtraEip)",
|
||||||
|
strings.Join(tests.AvailableForks(), "\n\t "),
|
||||||
|
strings.Join(vm.ActivateableEips(), ", ")),
|
||||||
|
Value: "Istanbul",
|
||||||
|
}
|
||||||
|
VerbosityFlag = cli.IntFlag{
|
||||||
|
Name: "verbosity",
|
||||||
|
Usage: "sets the verbosity level",
|
||||||
|
Value: 3,
|
||||||
|
}
|
||||||
|
)
|
||||||
80  cmd/evm/internal/t8ntool/gen_stenv.go  Normal file
@@ -0,0 +1,80 @@
|
|||||||
|
// Code generated by github.com/fjl/gencodec. DO NOT EDIT.
|
||||||
|
|
||||||
|
package t8ntool
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"errors"
|
||||||
|
"math/big"
|
||||||
|
|
||||||
|
"github.com/ethereum/go-ethereum/common"
|
||||||
|
"github.com/ethereum/go-ethereum/common/math"
|
||||||
|
)
|
||||||
|
|
||||||
|
var _ = (*stEnvMarshaling)(nil)
|
||||||
|
|
||||||
|
// MarshalJSON marshals as JSON.
|
||||||
|
func (s stEnv) MarshalJSON() ([]byte, error) {
|
||||||
|
type stEnv struct {
|
||||||
|
Coinbase common.UnprefixedAddress `json:"currentCoinbase" gencodec:"required"`
|
||||||
|
Difficulty *math.HexOrDecimal256 `json:"currentDifficulty" gencodec:"required"`
|
||||||
|
GasLimit math.HexOrDecimal64 `json:"currentGasLimit" gencodec:"required"`
|
||||||
|
Number math.HexOrDecimal64 `json:"currentNumber" gencodec:"required"`
|
||||||
|
Timestamp math.HexOrDecimal64 `json:"currentTimestamp" gencodec:"required"`
|
||||||
|
BlockHashes map[math.HexOrDecimal64]common.Hash `json:"blockHashes,omitempty"`
|
||||||
|
Ommers []ommer `json:"ommers,omitempty"`
|
||||||
|
}
|
||||||
|
var enc stEnv
|
||||||
|
enc.Coinbase = common.UnprefixedAddress(s.Coinbase)
|
||||||
|
enc.Difficulty = (*math.HexOrDecimal256)(s.Difficulty)
|
||||||
|
enc.GasLimit = math.HexOrDecimal64(s.GasLimit)
|
||||||
|
enc.Number = math.HexOrDecimal64(s.Number)
|
||||||
|
enc.Timestamp = math.HexOrDecimal64(s.Timestamp)
|
||||||
|
enc.BlockHashes = s.BlockHashes
|
||||||
|
enc.Ommers = s.Ommers
|
||||||
|
return json.Marshal(&enc)
|
||||||
|
}
|
||||||
|
|
||||||
|
// UnmarshalJSON unmarshals from JSON.
|
||||||
|
func (s *stEnv) UnmarshalJSON(input []byte) error {
|
||||||
|
type stEnv struct {
|
||||||
|
Coinbase *common.UnprefixedAddress `json:"currentCoinbase" gencodec:"required"`
|
||||||
|
Difficulty *math.HexOrDecimal256 `json:"currentDifficulty" gencodec:"required"`
|
||||||
|
GasLimit *math.HexOrDecimal64 `json:"currentGasLimit" gencodec:"required"`
|
||||||
|
Number *math.HexOrDecimal64 `json:"currentNumber" gencodec:"required"`
|
||||||
|
Timestamp *math.HexOrDecimal64 `json:"currentTimestamp" gencodec:"required"`
|
||||||
|
BlockHashes map[math.HexOrDecimal64]common.Hash `json:"blockHashes,omitempty"`
|
||||||
|
Ommers []ommer `json:"ommers,omitempty"`
|
||||||
|
}
|
||||||
|
var dec stEnv
|
||||||
|
if err := json.Unmarshal(input, &dec); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if dec.Coinbase == nil {
|
||||||
|
return errors.New("missing required field 'currentCoinbase' for stEnv")
|
||||||
|
}
|
||||||
|
s.Coinbase = common.Address(*dec.Coinbase)
|
||||||
|
if dec.Difficulty == nil {
|
||||||
|
return errors.New("missing required field 'currentDifficulty' for stEnv")
|
||||||
|
}
|
||||||
|
s.Difficulty = (*big.Int)(dec.Difficulty)
|
||||||
|
if dec.GasLimit == nil {
|
||||||
|
return errors.New("missing required field 'currentGasLimit' for stEnv")
|
||||||
|
}
|
||||||
|
s.GasLimit = uint64(*dec.GasLimit)
|
||||||
|
if dec.Number == nil {
|
||||||
|
return errors.New("missing required field 'currentNumber' for stEnv")
|
||||||
|
}
|
||||||
|
s.Number = uint64(*dec.Number)
|
||||||
|
if dec.Timestamp == nil {
|
||||||
|
return errors.New("missing required field 'currentTimestamp' for stEnv")
|
||||||
|
}
|
||||||
|
s.Timestamp = uint64(*dec.Timestamp)
|
||||||
|
if dec.BlockHashes != nil {
|
||||||
|
s.BlockHashes = dec.BlockHashes
|
||||||
|
}
|
||||||
|
if dec.Ommers != nil {
|
||||||
|
s.Ommers = dec.Ommers
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
277  cmd/evm/internal/t8ntool/transition.go  Normal file
@@ -0,0 +1,277 @@
|
|||||||
|
// Copyright 2020 The go-ethereum Authors
|
||||||
|
// This file is part of go-ethereum.
|
||||||
|
//
|
||||||
|
// go-ethereum is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
//
|
||||||
|
// go-ethereum is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU General Public License for more details.
|
||||||
|
//
|
||||||
|
// You should have received a copy of the GNU General Public License
|
||||||
|
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
package t8ntool
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"io/ioutil"
|
||||||
|
"math/big"
|
||||||
|
"os"
|
||||||
|
|
||||||
|
"github.com/ethereum/go-ethereum/common"
|
||||||
|
"github.com/ethereum/go-ethereum/core"
|
||||||
|
"github.com/ethereum/go-ethereum/core/state"
|
||||||
|
"github.com/ethereum/go-ethereum/core/types"
|
||||||
|
"github.com/ethereum/go-ethereum/core/vm"
|
||||||
|
"github.com/ethereum/go-ethereum/log"
|
||||||
|
"github.com/ethereum/go-ethereum/params"
|
||||||
|
"github.com/ethereum/go-ethereum/tests"
|
||||||
|
"gopkg.in/urfave/cli.v1"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
ErrorEVM = 2
|
||||||
|
ErrorVMConfig = 3
|
||||||
|
ErrorMissingBlockhash = 4
|
||||||
|
|
||||||
|
ErrorJson = 10
|
||||||
|
ErrorIO = 11
|
||||||
|
|
||||||
|
stdinSelector = "stdin"
|
||||||
|
)
|
||||||
|
|
||||||
|
type NumberedError struct {
|
||||||
|
errorCode int
|
||||||
|
err error
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewError(errorCode int, err error) *NumberedError {
|
||||||
|
return &NumberedError{errorCode, err}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (n *NumberedError) Error() string {
|
||||||
|
return fmt.Sprintf("ERROR(%d): %v", n.errorCode, n.err.Error())
|
||||||
|
}
|
||||||
|
|
||||||
|
func (n *NumberedError) Code() int {
|
||||||
|
return n.errorCode
|
||||||
|
}
|
||||||
|
|
||||||
|
type input struct {
|
||||||
|
Alloc core.GenesisAlloc `json:"alloc,omitempty"`
|
||||||
|
Env *stEnv `json:"env,omitempty"`
|
||||||
|
Txs types.Transactions `json:"txs,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func Main(ctx *cli.Context) error {
|
||||||
|
// Configure the go-ethereum logger
|
||||||
|
glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, log.TerminalFormat(false)))
|
||||||
|
glogger.Verbosity(log.Lvl(ctx.Int(VerbosityFlag.Name)))
|
||||||
|
log.Root().SetHandler(glogger)
|
||||||
|
|
||||||
|
var (
|
||||||
|
err error
|
||||||
|
tracer vm.Tracer
|
||||||
|
)
|
||||||
|
var getTracer func(txIndex int) (vm.Tracer, error)
|
||||||
|
|
||||||
|
if ctx.Bool(TraceFlag.Name) {
|
||||||
|
// Configure the EVM logger
|
||||||
|
logConfig := &vm.LogConfig{
|
||||||
|
DisableStack: ctx.Bool(TraceDisableStackFlag.Name),
|
||||||
|
DisableMemory: ctx.Bool(TraceDisableMemoryFlag.Name),
|
||||||
|
DisableReturnData: ctx.Bool(TraceDisableReturnDataFlag.Name),
|
||||||
|
Debug: true,
|
||||||
|
}
|
||||||
|
var prevFile *os.File
|
||||||
|
// This one closes the last file
|
||||||
|
defer func() {
|
||||||
|
if prevFile != nil {
|
||||||
|
prevFile.Close()
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
getTracer = func(txIndex int) (vm.Tracer, error) {
|
||||||
|
if prevFile != nil {
|
||||||
|
prevFile.Close()
|
||||||
|
}
|
||||||
|
traceFile, err := os.Create(fmt.Sprintf("trace-%d.jsonl", txIndex))
|
||||||
|
if err != nil {
|
||||||
|
return nil, NewError(ErrorIO, fmt.Errorf("failed creating trace-file: %v", err))
|
||||||
|
}
|
||||||
|
prevFile = traceFile
|
||||||
|
return vm.NewJSONLogger(logConfig, traceFile), nil
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
getTracer = func(txIndex int) (tracer vm.Tracer, err error) {
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// We need to load three things: alloc, env and transactions. May be either in
|
||||||
|
// stdin input or in files.
|
||||||
|
// Check if anything needs to be read from stdin
|
||||||
|
var (
|
||||||
|
prestate Prestate
|
||||||
|
txs types.Transactions // txs to apply
|
||||||
|
allocStr = ctx.String(InputAllocFlag.Name)
|
||||||
|
|
||||||
|
envStr = ctx.String(InputEnvFlag.Name)
|
||||||
|
txStr = ctx.String(InputTxsFlag.Name)
|
||||||
|
inputData = &input{}
|
||||||
|
)
|
||||||
|
|
||||||
|
if allocStr == stdinSelector || envStr == stdinSelector || txStr == stdinSelector {
|
||||||
|
decoder := json.NewDecoder(os.Stdin)
|
||||||
|
decoder.Decode(inputData)
|
||||||
|
}
|
||||||
|
if allocStr != stdinSelector {
|
||||||
|
inFile, err := os.Open(allocStr)
|
||||||
|
if err != nil {
|
||||||
|
return NewError(ErrorIO, fmt.Errorf("failed reading alloc file: %v", err))
|
||||||
|
}
|
||||||
|
defer inFile.Close()
|
||||||
|
decoder := json.NewDecoder(inFile)
|
||||||
|
if err := decoder.Decode(&inputData.Alloc); err != nil {
|
||||||
|
return NewError(ErrorJson, fmt.Errorf("Failed unmarshaling alloc-file: %v", err))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if envStr != stdinSelector {
|
||||||
|
inFile, err := os.Open(envStr)
|
||||||
|
if err != nil {
|
||||||
|
return NewError(ErrorIO, fmt.Errorf("failed reading env file: %v", err))
|
||||||
|
}
|
||||||
|
defer inFile.Close()
|
||||||
|
decoder := json.NewDecoder(inFile)
|
||||||
|
var env stEnv
|
||||||
|
if err := decoder.Decode(&env); err != nil {
|
||||||
|
return NewError(ErrorJson, fmt.Errorf("Failed unmarshaling env-file: %v", err))
|
||||||
|
}
|
||||||
|
inputData.Env = &env
|
||||||
|
}
|
||||||
|
|
||||||
|
if txStr != stdinSelector {
|
||||||
|
inFile, err := os.Open(txStr)
|
||||||
|
if err != nil {
|
||||||
|
return NewError(ErrorIO, fmt.Errorf("failed reading txs file: %v", err))
|
||||||
|
}
|
||||||
|
defer inFile.Close()
|
||||||
|
decoder := json.NewDecoder(inFile)
|
||||||
|
var txs types.Transactions
|
||||||
|
if err := decoder.Decode(&txs); err != nil {
|
||||||
|
return NewError(ErrorJson, fmt.Errorf("Failed unmarshaling txs-file: %v", err))
|
||||||
|
}
|
||||||
|
inputData.Txs = txs
|
||||||
|
}
|
||||||
|
|
||||||
|
prestate.Pre = inputData.Alloc
|
||||||
|
prestate.Env = *inputData.Env
|
||||||
|
txs = inputData.Txs
|
||||||
|
|
||||||
|
// Iterate over all the tests, run them and aggregate the results
|
||||||
|
vmConfig := vm.Config{
|
||||||
|
Tracer: tracer,
|
||||||
|
Debug: (tracer != nil),
|
||||||
|
}
|
||||||
|
// Construct the chainconfig
|
||||||
|
var chainConfig *params.ChainConfig
|
||||||
|
if cConf, extraEips, err := tests.GetChainConfig(ctx.String(ForknameFlag.Name)); err != nil {
|
||||||
|
return NewError(ErrorVMConfig, fmt.Errorf("Failed constructing chain configuration: %v", err))
|
||||||
|
} else {
|
||||||
|
chainConfig = cConf
|
||||||
|
vmConfig.ExtraEips = extraEips
|
||||||
|
}
|
||||||
|
// Set the chain id
|
||||||
|
chainConfig.ChainID = big.NewInt(ctx.Int64(ChainIDFlag.Name))
|
||||||
|
|
||||||
|
// Run the test and aggregate the result
|
||||||
|
state, result, err := prestate.Apply(vmConfig, chainConfig, txs, ctx.Int64(RewardFlag.Name), getTracer)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
// Dump the execution result
|
||||||
|
//postAlloc := state.DumpGenesisFormat(false, false, false)
|
||||||
|
collector := make(Alloc)
|
||||||
|
state.DumpToCollector(collector, false, false, false, nil, -1)
|
||||||
|
return dispatchOutput(ctx, result, collector)
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
type Alloc map[common.Address]core.GenesisAccount
|
||||||
|
|
||||||
|
func (g Alloc) OnRoot(common.Hash) {}
|
||||||
|
|
||||||
|
func (g Alloc) OnAccount(addr common.Address, dumpAccount state.DumpAccount) {
|
||||||
|
balance, _ := new(big.Int).SetString(dumpAccount.Balance, 10)
|
||||||
|
var storage map[common.Hash]common.Hash
|
||||||
|
if dumpAccount.Storage != nil {
|
||||||
|
storage = make(map[common.Hash]common.Hash)
|
||||||
|
for k, v := range dumpAccount.Storage {
|
||||||
|
storage[k] = common.HexToHash(v)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
genesisAccount := core.GenesisAccount{
|
||||||
|
Code: common.FromHex(dumpAccount.Code),
|
||||||
|
Storage: storage,
|
||||||
|
Balance: balance,
|
||||||
|
Nonce: dumpAccount.Nonce,
|
||||||
|
}
|
||||||
|
g[addr] = genesisAccount
|
||||||
|
}
|
||||||
|
|
||||||
|
// saveFile marshals the object to the given file
|
||||||
|
func saveFile(filename string, data interface{}) error {
|
||||||
|
b, err := json.MarshalIndent(data, "", " ")
|
||||||
|
if err != nil {
|
||||||
|
return NewError(ErrorJson, fmt.Errorf("failed marshalling output: %v", err))
|
||||||
|
}
|
||||||
|
if err = ioutil.WriteFile(filename, b, 0644); err != nil {
|
||||||
|
return NewError(ErrorIO, fmt.Errorf("failed writing output: %v", err))
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// dispatchOutput writes the output data to either stderr or stdout, or to the specified
|
||||||
|
// files
|
||||||
|
func dispatchOutput(ctx *cli.Context, result *ExecutionResult, alloc Alloc) error {
|
||||||
|
stdOutObject := make(map[string]interface{})
|
||||||
|
stdErrObject := make(map[string]interface{})
|
||||||
|
dispatch := func(fName, name string, obj interface{}) error {
|
||||||
|
switch fName {
|
||||||
|
case "stdout":
|
||||||
|
stdOutObject[name] = obj
|
||||||
|
case "stderr":
|
||||||
|
stdErrObject[name] = obj
|
||||||
|
default: // save to file
|
||||||
|
if err := saveFile(fName, obj); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
if err := dispatch(ctx.String(OutputAllocFlag.Name), "alloc", alloc); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err := dispatch(ctx.String(OutputResultFlag.Name), "result", result); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if len(stdOutObject) > 0 {
|
||||||
|
b, err := json.MarshalIndent(stdOutObject, "", " ")
|
||||||
|
if err != nil {
|
||||||
|
return NewError(ErrorJson, fmt.Errorf("failed marshalling output: %v", err))
|
||||||
|
}
|
||||||
|
os.Stdout.Write(b)
|
||||||
|
}
|
||||||
|
if len(stdErrObject) > 0 {
|
||||||
|
b, err := json.MarshalIndent(stdErrObject, "", " ")
|
||||||
|
if err != nil {
|
||||||
|
return NewError(ErrorJson, fmt.Errorf("failed marshalling output: %v", err))
|
||||||
|
}
|
||||||
|
os.Stderr.Write(b)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
@@ -22,7 +22,9 @@ import (
|
|||||||
"math/big"
|
"math/big"
|
||||||
"os"
|
"os"
|
||||||
|
|
||||||
|
"github.com/ethereum/go-ethereum/cmd/evm/internal/t8ntool"
|
||||||
"github.com/ethereum/go-ethereum/cmd/utils"
|
"github.com/ethereum/go-ethereum/cmd/utils"
|
||||||
|
"github.com/ethereum/go-ethereum/internal/flags"
|
||||||
"gopkg.in/urfave/cli.v1"
|
"gopkg.in/urfave/cli.v1"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -30,7 +32,7 @@ var gitCommit = "" // Git SHA1 commit hash of the release (set via linker flags)
|
|||||||
var gitDate = ""
|
var gitDate = ""
|
||||||
|
|
||||||
var (
|
var (
|
||||||
app = utils.NewApp(gitCommit, gitDate, "the evm command line interface")
|
app = flags.NewApp(gitCommit, gitDate, "the evm command line interface")
|
||||||
|
|
||||||
DebugFlag = cli.BoolFlag{
|
DebugFlag = cli.BoolFlag{
|
||||||
Name: "debug",
|
Name: "debug",
|
||||||
@@ -87,6 +89,10 @@ var (
|
|||||||
Name: "verbosity",
|
Name: "verbosity",
|
||||||
Usage: "sets the verbosity level",
|
Usage: "sets the verbosity level",
|
||||||
}
|
}
|
||||||
|
BenchFlag = cli.BoolFlag{
|
||||||
|
Name: "bench",
|
||||||
|
Usage: "benchmark the execution",
|
||||||
|
}
|
||||||
CreateFlag = cli.BoolFlag{
|
CreateFlag = cli.BoolFlag{
|
||||||
Name: "create",
|
Name: "create",
|
||||||
Usage: "indicates the action should be create rather than call",
|
Usage: "indicates the action should be create rather than call",
|
||||||
@@ -115,6 +121,14 @@ var (
|
|||||||
Name: "nostack",
|
Name: "nostack",
|
||||||
Usage: "disable stack output",
|
Usage: "disable stack output",
|
||||||
}
|
}
|
||||||
|
DisableStorageFlag = cli.BoolFlag{
|
||||||
|
Name: "nostorage",
|
||||||
|
Usage: "disable storage output",
|
||||||
|
}
|
||||||
|
DisableReturnDataFlag = cli.BoolFlag{
|
||||||
|
Name: "noreturndata",
|
||||||
|
Usage: "disable return data output",
|
||||||
|
}
|
||||||
EVMInterpreterFlag = cli.StringFlag{
|
EVMInterpreterFlag = cli.StringFlag{
|
||||||
Name: "vm.evm",
|
Name: "vm.evm",
|
||||||
Usage: "External EVM configuration (default = built-in interpreter)",
|
Usage: "External EVM configuration (default = built-in interpreter)",
|
||||||
@@ -122,8 +136,31 @@ var (
|
|||||||
}
|
}
|
||||||
)
|
)
|
||||||
|
|
||||||
|
var stateTransitionCommand = cli.Command{
|
||||||
|
Name: "transition",
|
||||||
|
Aliases: []string{"t8n"},
|
||||||
|
Usage: "executes a full state transition",
|
||||||
|
Action: t8ntool.Main,
|
||||||
|
Flags: []cli.Flag{
|
||||||
|
t8ntool.TraceFlag,
|
||||||
|
t8ntool.TraceDisableMemoryFlag,
|
||||||
|
t8ntool.TraceDisableStackFlag,
|
||||||
|
t8ntool.TraceDisableReturnDataFlag,
|
||||||
|
t8ntool.OutputAllocFlag,
|
||||||
|
t8ntool.OutputResultFlag,
|
||||||
|
t8ntool.InputAllocFlag,
|
||||||
|
t8ntool.InputEnvFlag,
|
||||||
|
t8ntool.InputTxsFlag,
|
||||||
|
t8ntool.ForknameFlag,
|
||||||
|
t8ntool.ChainIDFlag,
|
||||||
|
t8ntool.RewardFlag,
|
||||||
|
t8ntool.VerbosityFlag,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
app.Flags = []cli.Flag{
|
app.Flags = []cli.Flag{
|
||||||
|
BenchFlag,
|
||||||
CreateFlag,
|
CreateFlag,
|
||||||
DebugFlag,
|
DebugFlag,
|
||||||
VerbosityFlag,
|
VerbosityFlag,
|
||||||
@@ -144,6 +181,8 @@ func init() {
|
|||||||
ReceiverFlag,
|
ReceiverFlag,
|
||||||
DisableMemoryFlag,
|
DisableMemoryFlag,
|
||||||
DisableStackFlag,
|
DisableStackFlag,
|
||||||
|
DisableStorageFlag,
|
||||||
|
DisableReturnDataFlag,
|
||||||
EVMInterpreterFlag,
|
EVMInterpreterFlag,
|
||||||
}
|
}
|
||||||
app.Commands = []cli.Command{
|
app.Commands = []cli.Command{
|
||||||
@@ -151,12 +190,18 @@ func init() {
|
|||||||
disasmCommand,
|
disasmCommand,
|
||||||
runCommand,
|
runCommand,
|
||||||
stateTestCommand,
|
stateTestCommand,
|
||||||
|
stateTransitionCommand,
|
||||||
}
|
}
|
||||||
|
cli.CommandHelpTemplate = flags.OriginCommandHelpTemplate
|
||||||
}
|
}
|
||||||
|
|
||||||
func main() {
|
func main() {
|
||||||
if err := app.Run(os.Args); err != nil {
|
if err := app.Run(os.Args); err != nil {
|
||||||
|
code := 1
|
||||||
|
if ec, ok := err.(*t8ntool.NumberedError); ok {
|
||||||
|
code = ec.Code()
|
||||||
|
}
|
||||||
fmt.Fprintln(os.Stderr, err)
|
fmt.Fprintln(os.Stderr, err)
|
||||||
os.Exit(1)
|
os.Exit(code)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
23  cmd/evm/poststate.json  Normal file
@@ -0,0 +1,23 @@
|
|||||||
|
{
|
||||||
|
"root": "f4157bb27bcb1d1a63001434a249a80948f2e9fe1f53d551244c1dae826b5b23",
|
||||||
|
"accounts": {
|
||||||
|
"0x8a8eafb1cf62bfbeb1741769dae1a9dd47996192": {
|
||||||
|
"balance": "4276951709",
|
||||||
|
"nonce": 1,
|
||||||
|
"root": "56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
|
||||||
|
"codeHash": "c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
|
||||||
|
},
|
||||||
|
"0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": {
|
||||||
|
"balance": "6916764286133345652",
|
||||||
|
"nonce": 172,
|
||||||
|
"root": "56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
|
||||||
|
"codeHash": "c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
|
||||||
|
},
|
||||||
|
"0xc94f5374fce5edbc8e2a8697c15331677e6ebf0b": {
|
||||||
|
"balance": "42500",
|
||||||
|
"nonce": 0,
|
||||||
|
"root": "56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
|
||||||
|
"codeHash": "c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -25,6 +25,7 @@ import (
|
|||||||
"os"
|
"os"
|
||||||
goruntime "runtime"
|
goruntime "runtime"
|
||||||
"runtime/pprof"
|
"runtime/pprof"
|
||||||
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum/cmd/evm/internal/compiler"
|
"github.com/ethereum/go-ethereum/cmd/evm/internal/compiler"
|
||||||
@@ -69,14 +70,49 @@ func readGenesis(genesisPath string) *core.Genesis {
|
|||||||
return genesis
|
return genesis
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type execStats struct {
|
||||||
|
time time.Duration // The execution time.
|
||||||
|
allocs int64 // The number of heap allocations during execution.
|
||||||
|
bytesAllocated int64 // The cumulative number of bytes allocated during execution.
|
||||||
|
}
|
||||||
|
|
||||||
|
func timedExec(bench bool, execFunc func() ([]byte, uint64, error)) (output []byte, gasLeft uint64, stats execStats, err error) {
|
||||||
|
if bench {
|
||||||
|
result := testing.Benchmark(func(b *testing.B) {
|
||||||
|
for i := 0; i < b.N; i++ {
|
||||||
|
output, gasLeft, err = execFunc()
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
// Get the average execution time from the benchmarking result.
|
||||||
|
// There are other useful stats here that could be reported.
|
||||||
|
stats.time = time.Duration(result.NsPerOp())
|
||||||
|
stats.allocs = result.AllocsPerOp()
|
||||||
|
stats.bytesAllocated = result.AllocedBytesPerOp()
|
||||||
|
} else {
|
||||||
|
var memStatsBefore, memStatsAfter goruntime.MemStats
|
||||||
|
goruntime.ReadMemStats(&memStatsBefore)
|
||||||
|
startTime := time.Now()
|
||||||
|
output, gasLeft, err = execFunc()
|
||||||
|
stats.time = time.Since(startTime)
|
||||||
|
goruntime.ReadMemStats(&memStatsAfter)
|
||||||
|
stats.allocs = int64(memStatsAfter.Mallocs - memStatsBefore.Mallocs)
|
||||||
|
stats.bytesAllocated = int64(memStatsAfter.TotalAlloc - memStatsBefore.TotalAlloc)
|
||||||
|
}
|
||||||
|
|
||||||
|
return output, gasLeft, stats, err
|
||||||
|
}
|
||||||
|
|
||||||
func runCmd(ctx *cli.Context) error {
|
func runCmd(ctx *cli.Context) error {
|
||||||
glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, log.TerminalFormat(false)))
|
glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, log.TerminalFormat(false)))
|
||||||
glogger.Verbosity(log.Lvl(ctx.GlobalInt(VerbosityFlag.Name)))
|
glogger.Verbosity(log.Lvl(ctx.GlobalInt(VerbosityFlag.Name)))
|
||||||
log.Root().SetHandler(glogger)
|
log.Root().SetHandler(glogger)
|
||||||
logconfig := &vm.LogConfig{
|
logconfig := &vm.LogConfig{
|
||||||
DisableMemory: ctx.GlobalBool(DisableMemoryFlag.Name),
|
DisableMemory: ctx.GlobalBool(DisableMemoryFlag.Name),
|
||||||
DisableStack: ctx.GlobalBool(DisableStackFlag.Name),
|
DisableStack: ctx.GlobalBool(DisableStackFlag.Name),
|
||||||
Debug: ctx.GlobalBool(DebugFlag.Name),
|
DisableStorage: ctx.GlobalBool(DisableStorageFlag.Name),
|
||||||
|
DisableReturnData: ctx.GlobalBool(DisableReturnDataFlag.Name),
|
||||||
|
Debug: ctx.GlobalBool(DebugFlag.Name),
|
||||||
}
|
}
|
||||||
|
|
||||||
var (
|
var (
|
||||||
@@ -101,10 +137,10 @@ func runCmd(ctx *cli.Context) error {
|
|||||||
genesisConfig = gen
|
genesisConfig = gen
|
||||||
db := rawdb.NewMemoryDatabase()
|
db := rawdb.NewMemoryDatabase()
|
||||||
genesis := gen.ToBlock(db)
|
genesis := gen.ToBlock(db)
|
||||||
statedb, _ = state.New(genesis.Root(), state.NewDatabase(db))
|
statedb, _ = state.New(genesis.Root(), state.NewDatabase(db), nil)
|
||||||
chainConfig = gen.Config
|
chainConfig = gen.Config
|
||||||
} else {
|
} else {
|
||||||
statedb, _ = state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()))
|
statedb, _ = state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
|
||||||
genesisConfig = new(core.Genesis)
|
genesisConfig = new(core.Genesis)
|
||||||
}
|
}
|
||||||
if ctx.GlobalString(SenderFlag.Name) != "" {
|
if ctx.GlobalString(SenderFlag.Name) != "" {
|
||||||
@@ -116,11 +152,7 @@ func runCmd(ctx *cli.Context) error {
|
|||||||
receiver = common.HexToAddress(ctx.GlobalString(ReceiverFlag.Name))
|
receiver = common.HexToAddress(ctx.GlobalString(ReceiverFlag.Name))
|
||||||
}
|
}
|
||||||
|
|
||||||
var (
|
var code []byte
|
||||||
code []byte
|
|
||||||
ret []byte
|
|
||||||
err error
|
|
||||||
)
|
|
||||||
codeFileFlag := ctx.GlobalString(CodeFileFlag.Name)
|
codeFileFlag := ctx.GlobalString(CodeFileFlag.Name)
|
||||||
codeFlag := ctx.GlobalString(CodeFlag.Name)
|
codeFlag := ctx.GlobalString(CodeFlag.Name)
|
||||||
|
|
||||||
@@ -203,10 +235,10 @@ func runCmd(ctx *cli.Context) error {
|
|||||||
} else {
|
} else {
|
||||||
runtimeConfig.ChainConfig = params.AllEthashProtocolChanges
|
runtimeConfig.ChainConfig = params.AllEthashProtocolChanges
|
||||||
}
|
}
|
||||||
tstart := time.Now()
|
|
||||||
var leftOverGas uint64
|
|
||||||
var hexInput []byte
|
var hexInput []byte
|
||||||
if inputFileFlag := ctx.GlobalString(InputFileFlag.Name); inputFileFlag != "" {
|
if inputFileFlag := ctx.GlobalString(InputFileFlag.Name); inputFileFlag != "" {
|
||||||
|
var err error
|
||||||
if hexInput, err = ioutil.ReadFile(inputFileFlag); err != nil {
|
if hexInput, err = ioutil.ReadFile(inputFileFlag); err != nil {
|
||||||
fmt.Printf("could not load input from file: %v\n", err)
|
fmt.Printf("could not load input from file: %v\n", err)
|
||||||
os.Exit(1)
|
os.Exit(1)
|
||||||
@@ -215,16 +247,25 @@ func runCmd(ctx *cli.Context) error {
|
|||||||
hexInput = []byte(ctx.GlobalString(InputFlag.Name))
|
hexInput = []byte(ctx.GlobalString(InputFlag.Name))
|
||||||
}
|
}
|
||||||
input := common.FromHex(string(bytes.TrimSpace(hexInput)))
|
input := common.FromHex(string(bytes.TrimSpace(hexInput)))
|
||||||
|
|
||||||
|
var execFunc func() ([]byte, uint64, error)
|
||||||
if ctx.GlobalBool(CreateFlag.Name) {
|
if ctx.GlobalBool(CreateFlag.Name) {
|
||||||
input = append(code, input...)
|
input = append(code, input...)
|
||||||
ret, _, leftOverGas, err = runtime.Create(input, &runtimeConfig)
|
execFunc = func() ([]byte, uint64, error) {
|
||||||
|
output, _, gasLeft, err := runtime.Create(input, &runtimeConfig)
|
||||||
|
return output, gasLeft, err
|
||||||
|
}
|
||||||
} else {
|
} else {
|
||||||
if len(code) > 0 {
|
if len(code) > 0 {
|
||||||
statedb.SetCode(receiver, code)
|
statedb.SetCode(receiver, code)
|
||||||
}
|
}
|
||||||
ret, leftOverGas, err = runtime.Call(receiver, input, &runtimeConfig)
|
execFunc = func() ([]byte, uint64, error) {
|
||||||
|
return runtime.Call(receiver, input, &runtimeConfig)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
execTime := time.Since(tstart)
|
|
||||||
|
bench := ctx.GlobalBool(BenchFlag.Name)
|
||||||
|
output, leftOverGas, stats, err := timedExec(bench, execFunc)
|
||||||
|
|
||||||
if ctx.GlobalBool(DumpFlag.Name) {
|
if ctx.GlobalBool(DumpFlag.Name) {
|
||||||
statedb.Commit(true)
|
statedb.Commit(true)
|
||||||
@@ -254,20 +295,15 @@ func runCmd(ctx *cli.Context) error {
|
|||||||
vm.WriteLogs(os.Stderr, statedb.Logs())
|
vm.WriteLogs(os.Stderr, statedb.Logs())
|
||||||
}
|
}
|
||||||
|
|
||||||
if ctx.GlobalBool(StatDumpFlag.Name) {
|
if bench || ctx.GlobalBool(StatDumpFlag.Name) {
|
||||||
var mem goruntime.MemStats
|
fmt.Fprintf(os.Stderr, `EVM gas used: %d
|
||||||
goruntime.ReadMemStats(&mem)
|
execution time: %v
|
||||||
fmt.Fprintf(os.Stderr, `evm execution time: %v
|
allocations: %d
|
||||||
heap objects: %d
|
allocated bytes: %d
|
||||||
allocations: %d
|
`, initialGas-leftOverGas, stats.time, stats.allocs, stats.bytesAllocated)
|
||||||
total allocations: %d
|
|
||||||
GC calls: %d
|
|
||||||
Gas used: %d
|
|
||||||
|
|
||||||
`, execTime, mem.HeapObjects, mem.Alloc, mem.TotalAlloc, mem.NumGC, initialGas-leftOverGas)
|
|
||||||
}
|
}
|
||||||
if tracer == nil {
|
if tracer == nil {
|
||||||
fmt.Printf("0x%x\n", ret)
|
fmt.Printf("0x%x\n", output)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
fmt.Printf(" error: %v\n", err)
|
fmt.Printf(" error: %v\n", err)
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -59,8 +59,10 @@ func stateTestCmd(ctx *cli.Context) error {
|
|||||||
|
|
||||||
// Configure the EVM logger
|
// Configure the EVM logger
|
||||||
config := &vm.LogConfig{
|
config := &vm.LogConfig{
|
||||||
DisableMemory: ctx.GlobalBool(DisableMemoryFlag.Name),
|
DisableMemory: ctx.GlobalBool(DisableMemoryFlag.Name),
|
||||||
DisableStack: ctx.GlobalBool(DisableStackFlag.Name),
|
DisableStack: ctx.GlobalBool(DisableStackFlag.Name),
|
||||||
|
DisableStorage: ctx.GlobalBool(DisableStorageFlag.Name),
|
||||||
|
DisableReturnData: ctx.GlobalBool(DisableReturnDataFlag.Name),
|
||||||
}
|
}
|
||||||
var (
|
var (
|
||||||
tracer vm.Tracer
|
tracer vm.Tracer
|
||||||
@@ -96,7 +98,7 @@ func stateTestCmd(ctx *cli.Context) error {
|
|||||||
for _, st := range test.Subtests() {
|
for _, st := range test.Subtests() {
|
||||||
// Run the test and aggregate the result
|
// Run the test and aggregate the result
|
||||||
result := &StatetestResult{Name: key, Fork: st.Fork, Pass: true}
|
result := &StatetestResult{Name: key, Fork: st.Fork, Pass: true}
|
||||||
state, err := test.Run(st, cfg)
|
_, state, err := test.Run(st, cfg, false)
|
||||||
// print state root for evmlab tracing
|
// print state root for evmlab tracing
|
||||||
if ctx.GlobalBool(MachineFlag.Name) && state != nil {
|
if ctx.GlobalBool(MachineFlag.Name) && state != nil {
|
||||||
fmt.Fprintf(os.Stderr, "{\"stateRoot\": \"%x\"}\n", state.IntermediateRoot(false))
|
fmt.Fprintf(os.Stderr, "{\"stateRoot\": \"%x\"}\n", state.IntermediateRoot(false))
|
||||||
|
|||||||
12  cmd/evm/testdata/1/alloc.json  vendored  Normal file
@@ -0,0 +1,12 @@
|
|||||||
|
{
|
||||||
|
"a94f5374fce5edbc8e2a8697c15331677e6ebf0b": {
|
||||||
|
"balance": "0x5ffd4878be161d74",
|
||||||
|
"code": "0x",
|
||||||
|
"nonce": "0xac",
|
||||||
|
"storage": {}
|
||||||
|
},
|
||||||
|
"0x8a8eafb1cf62bfbeb1741769dae1a9dd47996192":{
|
||||||
|
"balance": "0xfeedbead",
|
||||||
|
"nonce" : "0x00"
|
||||||
|
}
|
||||||
|
}
|
||||||
7  cmd/evm/testdata/1/env.json  vendored  Normal file
@@ -0,0 +1,7 @@
|
|||||||
|
{
|
||||||
|
"currentCoinbase": "0xc94f5374fce5edbc8e2a8697c15331677e6ebf0b",
|
||||||
|
"currentDifficulty": "0x20000",
|
||||||
|
"currentGasLimit": "0x750a163df65e8a",
|
||||||
|
"currentNumber": "1",
|
||||||
|
"currentTimestamp": "1000"
|
||||||
|
}
|
||||||
26  cmd/evm/testdata/1/txs.json  vendored  Normal file
@@ -0,0 +1,26 @@
|
|||||||
|
[
|
||||||
|
{
|
||||||
|
"gas": "0x5208",
|
||||||
|
"gasPrice": "0x2",
|
||||||
|
"hash": "0x0557bacce3375c98d806609b8d5043072f0b6a8bae45ae5a67a00d3a1a18d673",
|
||||||
|
"input": "0x",
|
||||||
|
"nonce": "0x0",
|
||||||
|
"r": "0x9500e8ba27d3c33ca7764e107410f44cbd8c19794bde214d694683a7aa998cdb",
|
||||||
|
"s": "0x7235ae07e4bd6e0206d102b1f8979d6adab280466b6a82d2208ee08951f1f600",
|
||||||
|
"to": "0x8a8eafb1cf62bfbeb1741769dae1a9dd47996192",
|
||||||
|
"v": "0x1b",
|
||||||
|
"value": "0x1"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"gas": "0x5208",
|
||||||
|
"gasPrice": "0x2",
|
||||||
|
"hash": "0x0557bacce3375c98d806609b8d5043072f0b6a8bae45ae5a67a00d3a1a18d673",
|
||||||
|
"input": "0x",
|
||||||
|
"nonce": "0x0",
|
||||||
|
"r": "0x9500e8ba27d3c33ca7764e107410f44cbd8c19794bde214d694683a7aa998cdb",
|
||||||
|
"s": "0x7235ae07e4bd6e0206d102b1f8979d6adab280466b6a82d2208ee08951f1f600",
|
||||||
|
"to": "0x8a8eafb1cf62bfbeb1741769dae1a9dd47996192",
|
||||||
|
"v": "0x1b",
|
||||||
|
"value": "0x1"
|
||||||
|
}
|
||||||
|
]
|
||||||
16  cmd/evm/testdata/2/alloc.json  vendored  Normal file
@@ -0,0 +1,16 @@
|
|||||||
|
{
|
||||||
|
"0x095e7baea6a6c7c4c2dfeb977efac326af552d87" : {
|
||||||
|
"balance" : "0x0de0b6b3a7640000",
|
||||||
|
"code" : "0x6001600053600160006001f0ff00",
|
||||||
|
"nonce" : "0x00",
|
||||||
|
"storage" : {
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b" : {
|
||||||
|
"balance" : "0x0de0b6b3a7640000",
|
||||||
|
"code" : "0x",
|
||||||
|
"nonce" : "0x00",
|
||||||
|
"storage" : {
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
7  cmd/evm/testdata/2/env.json  vendored  Normal file
@@ -0,0 +1,7 @@
|
|||||||
|
{
|
||||||
|
"currentCoinbase" : "0x2adc25665018aa1fe0e6bc666dac8fc2697ff9ba",
|
||||||
|
"currentDifficulty" : "0x020000",
|
||||||
|
"currentGasLimit" : "0x3b9aca00",
|
||||||
|
"currentNumber" : "0x01",
|
||||||
|
"currentTimestamp" : "0x03e8"
|
||||||
|
}
|
||||||
1  cmd/evm/testdata/2/readme.md  vendored  Normal file
@@ -0,0 +1 @@
|
|||||||
|
These files exemplify a selfdestruct to the `0`-address.
|
||||||
14  cmd/evm/testdata/2/txs.json  vendored  Normal file
@@ -0,0 +1,14 @@
|
|||||||
|
[
|
||||||
|
{
|
||||||
|
"input" : "0x",
|
||||||
|
"gas" : "0x5f5e100",
|
||||||
|
"gasPrice" : "0x1",
|
||||||
|
"nonce" : "0x0",
|
||||||
|
"to" : "0x095e7baea6a6c7c4c2dfeb977efac326af552d87",
|
||||||
|
"value" : "0x186a0",
|
||||||
|
"v" : "0x1b",
|
||||||
|
"r" : "0x88544c93a564b4c28d2ffac2074a0c55fdd4658fe0d215596ed2e32e3ef7f56b",
|
||||||
|
"s" : "0x7fb4075d54190f825d7c47bb820284757b34fd6293904a93cddb1d3aa961ac28",
|
||||||
|
"hash" : "0x72fadbef39cd251a437eea619cfeda752271a5faaaa2147df012e112159ffb81"
|
||||||
|
}
|
||||||
|
]
|
||||||
16  cmd/evm/testdata/3/alloc.json  vendored  Normal file
@@ -0,0 +1,16 @@
|
|||||||
|
{
|
||||||
|
"0x095e7baea6a6c7c4c2dfeb977efac326af552d87" : {
|
||||||
|
"balance" : "0x0de0b6b3a7640000",
|
||||||
|
"code" : "0x600140",
|
||||||
|
"nonce" : "0x00",
|
||||||
|
"storage" : {
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b" : {
|
||||||
|
"balance" : "0x0de0b6b3a7640000",
|
||||||
|
"code" : "0x",
|
||||||
|
"nonce" : "0x00",
|
||||||
|
"storage" : {
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
Some files were not shown because too many files have changed in this diff.