Compare commits
611 Commits
SHA1 | Author | Date |
---|---|---|---|
5f54075760 | |||
57bca0af8c | |||
a5c0bbb4f4 | |||
0544a43c13 | |||
20797348ca | |||
88f2839da4 | |||
b007412db1 | |||
da41a7258d | |||
8d32c4b990 | |||
12dab53495 | |||
70fbc87379 | |||
6c6247a690 | |||
589b603a9b | |||
9123eceb0f | |||
1d39912a9b | |||
69c1f2c2a7 | |||
52ad848b2e | |||
4065695350 | |||
969474f60a | |||
62ffec1be3 | |||
57fd2da0fe | |||
aa9432b816 | |||
7a0019c63b | |||
96dad6b6f6 | |||
5cf75a30c1 | |||
2f849ade82 | |||
a00f4a12a9 | |||
42628ba7ed | |||
ccf8083537 | |||
c4712bf96b | |||
2b4c7e9b37 | |||
03daf601c1 | |||
eb07dbb079 | |||
1a4e68721a | |||
806430a252 | |||
55599ee95d | |||
59336283c0 | |||
203440e813 | |||
c3f238dd53 | |||
bc0666fb27 | |||
0662384d29 | |||
b4e05adcc7 | |||
efc9209158 | |||
ec28a58cc1 | |||
4dedde7beb | |||
fdb34b7a7c | |||
07d4a02257 | |||
3e89b80ccb | |||
017b9f7eac | |||
566d5c0777 | |||
6198c53e28 | |||
a9e4a90d57 | |||
59a852e418 | |||
dd7a715d73 | |||
722bac84fa | |||
23bca0f374 | |||
367c329b88 | |||
2ef3815af4 | |||
4dd0727c39 | |||
8f6990dc7d | |||
c335821479 | |||
952482d5e4 | |||
5c83a4e5dd | |||
1bf508b449 | |||
05ade19302 | |||
ec96216d16 | |||
397c6cde1e | |||
302c17c36a | |||
924065e19d | |||
48641d7308 | |||
5d4267911a | |||
92580d69d3 | |||
84be009154 | |||
02aeb3d766 | |||
370dca4491 | |||
f08cd94fb7 | |||
216e584899 | |||
18a7d31338 | |||
938cf4528a | |||
81ad8f665d | |||
90e5744d6f | |||
3f40b22dac | |||
fd869dc839 | |||
bd0dbfa2a8 | |||
56152b31ac | |||
023769d9d4 | |||
b06e20bc7c | |||
3a5a5599dd | |||
83d1657444 | |||
9d06026c19 | |||
5c2f1e0014 | |||
a139041d40 | |||
1c2378b926 | |||
ae71da1b03 | |||
7a59a9380e | |||
762f3a48a0 | |||
b47285f1cf | |||
6f69cdd109 | |||
b8caba9709 | |||
9d48dbf5c2 | |||
85a1eda59e | |||
72e70bcec2 | |||
5866626b08 | |||
c495bca4ad | |||
413cc5b0c8 | |||
d2533d0efb | |||
3e0113fff4 | |||
9c42a41ed8 | |||
2fe07c203e | |||
6882943e39 | |||
b98aa3b4f1 | |||
6cd6b921ac | |||
908faf8cd7 | |||
b9731767af | |||
36a10875c8 | |||
f7ca03ae87 | |||
c15d76a40f | |||
5369a5c54d | |||
9d187f0238 | |||
c095c87e11 | |||
73d4a57d47 | |||
5f8888e116 | |||
9dbb8ef4aa | |||
52f4d6dd78 | |||
e4aa882ec5 | |||
38b1e8ee20 | |||
81d4cafb32 | |||
1afca33eac | |||
95461e8b22 | |||
0ed8b838a9 | |||
9becba5540 | |||
3511904aad | |||
b0d41e386e | |||
91c3362315 | |||
14852810b4 | |||
542d51895f | |||
68651a2329 | |||
5258785c81 | |||
1a5425779b | |||
a28390542c | |||
eeb53bc143 | |||
e21aa0fda3 | |||
9f1007e554 | |||
4b939c23e4 | |||
7138de7b55 | |||
b4cf57a581 | |||
da58afcea0 | |||
ce823c9f84 | |||
5e1581c2c3 | |||
50df2b78be | |||
c786f75389 | |||
7f9d94fe9a | |||
cf7aba36c8 | |||
3857cdc267 | |||
48648bc2f8 | |||
fe070ab5c3 | |||
8c33ac10bf | |||
3b79bac05b | |||
afc2039f22 | |||
13db4af345 | |||
64ee3e92ea | |||
afa3c72c40 | |||
1d7d7f57d0 | |||
fb5f25eeee | |||
c6069a627c | |||
1f2176dedc | |||
7bb2a489b2 | |||
e9971d356b | |||
5129ef22c2 | |||
3654aeaa4f | |||
f258a21a63 | |||
fd777bb210 | |||
3da1bf8ca1 | |||
bbea4b2b53 | |||
32516c768e | |||
1a32bdf92c | |||
2499b1b139 | |||
e7610eadfe | |||
732f5468d3 | |||
bbfe0b8d04 | |||
46e5583993 | |||
bf62acf033 | |||
586198ccea | |||
d95962cd5d | |||
79d5e5593f | |||
b5874273ce | |||
8092106abc | |||
eab2201f80 | |||
e85b68ef53 | |||
6e613cf3de | |||
afb8154eab | |||
1d06e41f04 | |||
43dd8e62fc | |||
80c6dfc19f | |||
d927c67f9d | |||
20fe928914 | |||
54aeb8e4c0 | |||
73067fd24f | |||
e37f7be97e | |||
b33a5294ea | |||
be12392fba | |||
8f35e3086c | |||
e323ed5a9a | |||
6bb61ee9ef | |||
0f7fbb85d6 | |||
62dc530773 | |||
e4c9fd29a3 | |||
de37e088f2 | |||
f0ac925fa7 | |||
0981d2e566 | |||
f14047dae5 | |||
b0056f5bd0 | |||
5dea0f2aa4 | |||
989fb4472a | |||
9ff9d04a69 | |||
edc3e0efeb | |||
f9569f3cd8 | |||
a3a2c6b0d9 | |||
35801f938e | |||
cc3ca63dbf | |||
049797d40a | |||
41ef34ae40 | |||
b169a309f9 | |||
7f40ae7876 | |||
327dcd3622 | |||
ffc12f63ec | |||
80be5e5463 | |||
7abf968d6f | |||
6eb38e02a8 | |||
51a86f61be | |||
b5cf603895 | |||
1e0c336d29 | |||
9e095251b7 | |||
da3b9f831e | |||
7b258c9681 | |||
8c78449a9e | |||
005665867d | |||
4bb3c89d44 | |||
bedf6f40af | |||
b4f2e4de8f | |||
72ed186f46 | |||
7b95cca56c | |||
e2b3a23663 | |||
0f184d3b14 | |||
6810674de9 | |||
8a79836044 | |||
f5091e5711 | |||
0dbf55d478 | |||
2ab5c11261 | |||
e39ca5e597 | |||
c7b0abf86b | |||
c8b7ea1c2e | |||
3c6b9c5d72 | |||
c5b8569707 | |||
b0190189a3 | |||
87f5b4123c | |||
b64525694b | |||
448abb61eb | |||
4013e23312 | |||
5aa3eac22d | |||
bb57f1f1e5 | |||
463014126f | |||
bce5d837b5 | |||
43c8a1914c | |||
ba62215d9e | |||
984c25ac40 | |||
a3128f9099 | |||
924098c6e5 | |||
96ddf27a48 | |||
54ce3887d8 | |||
b81a9cd829 | |||
cef06358ff | |||
9b97f98334 | |||
836314c055 | |||
e401536c97 | |||
cb8bbe7081 | |||
f47adc9ea8 | |||
5d895db2fd | |||
86f6568f66 | |||
3ee86a57f3 | |||
b31cc71ff6 | |||
febfea7af2 | |||
d1eb4006e2 | |||
03ec3fed2b | |||
3d88ecd61a | |||
09b347fec9 | |||
d7f2462e8f | |||
4fe30bf5ad | |||
4732ee89cb | |||
7ace023981 | |||
0914d4e0d2 | |||
9619a61024 | |||
bfdc0fa362 | |||
9f7cd75682 | |||
0131bd6ff9 | |||
dd8a62683e | |||
07e8c177e7 | |||
8d434f6a6f | |||
6dafec0666 | |||
3e6d7c169b | |||
0095531a58 | |||
ca376ead88 | |||
6d6a5a9337 | |||
ea5f2da39a | |||
479aa61f11 | |||
0af1ab0c86 | |||
65738c1eb3 | |||
0e7d019e0e | |||
eaa4f8a5f9 | |||
0900aae412 | |||
ba2201981a | |||
eea996e4e1 | |||
2ab7fe8982 | |||
5d2c947060 | |||
e5c19b6f98 | |||
dec8bba9d4 | |||
7f7abfe4d1 | |||
0bb194c956 | |||
e9295163aa | |||
1db4ecdc0b | |||
fdb3bd287e | |||
a91e682234 | |||
41b7745529 | |||
a5fcaa55ac | |||
0ed4d76c79 | |||
4b5e797288 | |||
2e83c82f80 | |||
a5330fe0c5 | |||
8d8034fe59 | |||
fd0e7b1c67 | |||
e9382c6e9f | |||
c599b78f62 | |||
ad44475231 | |||
35767dfd0c | |||
345332906c | |||
cefeb58598 | |||
b45cc0c9e8 | |||
3680cd5926 | |||
d3beff7e20 | |||
40a3856af9 | |||
89860f4197 | |||
88b1db7288 | |||
7a045af05b | |||
36243c7ed8 | |||
1ae0411d41 | |||
d54e3539d4 | |||
5df0b240ae | |||
605c2b261f | |||
4bc60e3aa8 | |||
eb9abbd3f2 | |||
41d361565b | |||
edba5e9854 | |||
c0a1f1c907 | |||
0510164145 | |||
629b5837e9 | |||
c2d93ded35 | |||
8d126a4981 | |||
f4c49bc0f0 | |||
d347656280 | |||
94903d572b | |||
f86c4177d5 | |||
7e9e3a134b | |||
7514e8a24d | |||
a31835c8b4 | |||
d78ad226c2 | |||
a660685746 | |||
2ab2a9f131 | |||
860e697b00 | |||
f3c9585f2e | |||
229bf51f0d | |||
2ee885958b | |||
2b4a5f2677 | |||
d6a6180366 | |||
9feec51e2d | |||
673007d7ae | |||
d558a595ad | |||
a0d783094e | |||
3c8656347f | |||
b9ff44bd64 | |||
cb5235eb07 | |||
a92d8a2654 | |||
dc17fa6b18 | |||
019dca9ba2 | |||
c197d805f7 | |||
5705ad004e | |||
a989cf5bad | |||
6c6c7b2af3 | |||
5c93462b5e | |||
701d60c889 | |||
9be07de539 | |||
885c13c2c9 | |||
5bbd7fb390 | |||
79b11121a7 | |||
72af509abe | |||
382c9266e6 | |||
f46adfac28 | |||
514b1587db | |||
66a7ef57e6 | |||
ecca2c3c1b | |||
7a7f6a4f29 | |||
c8e70186a6 | |||
794741b8b2 | |||
48705f8aea | |||
10b3f97c9d | |||
5596b664c4 | |||
42a5b54bf5 | |||
10181b57a9 | |||
ac193e36ce | |||
d6681ed360 | |||
5ba9225fe3 | |||
fc87bc5f52 | |||
c1740e4540 | |||
e3db1236de | |||
02b4d074f6 | |||
2dcb22afec | |||
69c8be7c86 | |||
55e5926f34 | |||
f30179d62e | |||
c4d21bc8e5 | |||
160add8570 | |||
564c8f3ae6 | |||
451ffdb62b | |||
6ff2c02991 | |||
f585f9eee8 | |||
4ea4d2dc34 | |||
1e67378df8 | |||
cc313e78b7 | |||
b0ca1b67ce | |||
03d00361f5 | |||
f90a193f92 | |||
8e14bb1448 | |||
cd6c861dc5 | |||
c91f7beb53 | |||
2bacf36d80 | |||
32d8d42274 | |||
da7d57e07c | |||
8cab3ab435 | |||
8f567dc8a2 | |||
504278e839 | |||
e7408b5552 | |||
1901521ed0 | |||
23b51a68cb | |||
dc92779c0a | |||
d70536b5d4 | |||
07635e43e2 | |||
64a3a3d23c | |||
777540628e | |||
a4df80f47f | |||
bc2a5578c0 | |||
ebf41d16a0 | |||
9d0c51fb0f | |||
08f27428b4 | |||
27a5622e99 | |||
8596fc5974 | |||
ad16aeb0a2 | |||
b872961ec8 | |||
54b1de67e2 | |||
68955ed2eb | |||
ff9a868232 | |||
20b818d206 | |||
63246e2542 | |||
3c48a25762 | |||
286ec5df40 | |||
4ee92f2d19 | |||
f7e39a7724 | |||
79cdbcfe64 | |||
79bf69b556 | |||
28aea46ac0 | |||
fc5f8a3dda | |||
3cc476c8ab | |||
2fd5ba6bd4 | |||
b4b27ebaea | |||
8c037dc487 | |||
3e14837c1c | |||
58f7f977e7 | |||
afdfdebd87 | |||
e311bb520a | |||
1ab3e30698 | |||
314246da78 | |||
bf1e263128 | |||
7e57fee355 | |||
a4da8416ee | |||
998abb9107 | |||
059c767adf | |||
d4f11d9b4f | |||
104375f398 | |||
1bbd400899 | |||
f9fb70d2ee | |||
1335a6cc8c | |||
b70a73cd3e | |||
0b978f91b6 | |||
64d199edf2 | |||
4e0fea4d30 | |||
9bd6068fef | |||
76069eef38 | |||
3df7142b3e | |||
3d123bcde6 | |||
3040243042 | |||
9facf6423d | |||
2403656373 | |||
ef0edc6e32 | |||
133de3d806 | |||
f8d8b56b28 | |||
d8aaa3a215 | |||
6131dd55c5 | |||
02656f9f61 | |||
967e097faa | |||
02aa86e659 | |||
7bbdf3e268 | |||
6ca59d98f8 | |||
833eeb9f23 | |||
2b422b1a47 | |||
73c5aba21f | |||
6a56b15019 | |||
5d9ac49c7e | |||
db568a61e2 | |||
17ce0a37de | |||
26b2d6e1aa | |||
fff6e03a79 | |||
d375193797 | |||
374c49e0ac | |||
10ce8b0e3c | |||
9a7e99f75d | |||
6f8c7b0def | |||
1c45f2f42e | |||
e063d538b8 | |||
43437806fb | |||
8f06b7980d | |||
971079822e | |||
f42bd73ce5 | |||
f5925b0459 | |||
8edaaa227d | |||
bd74882d83 | |||
67439c1dba | |||
f59a49d591 | |||
2b50367fe9 | |||
46cf0a616b | |||
85454e7678 | |||
80de4dc72c | |||
8c2cf3c66c | |||
37e9fcacca | |||
b36f54c684 | |||
3991745c5f | |||
6dd2803b8e | |||
fc0c6c175c | |||
7c74e166b0 | |||
f7848c2aa5 | |||
19866075ac | |||
faafeef79e | |||
cd82b89fde | |||
3780d0b6f7 | |||
ff89a3ddce | |||
aee70ae30b | |||
392151e251 | |||
5b742fb82b | |||
b159cdd8dd | |||
524ca544b2 | |||
1059927f9c | |||
fca6e515d6 | |||
ca436f4b90 | |||
350bb6d9ca | |||
5e805aa865 | |||
455fcc8309 | |||
0cc9b8791e | |||
8b84bd283f | |||
4a260dc1f2 | |||
4371367cd1 | |||
bc0e6a5e68 | |||
60c858a529 | |||
e9b850805e | |||
53f3460ab5 | |||
bdf98b4fcd | |||
c259e6874e | |||
13cda8d9b6 | |||
4f9789b28d | |||
ee748d1451 | |||
0732ad4e47 | |||
3d32690b54 | |||
a602ee90f2 | |||
fc78ce61c0 | |||
ffebf00114 | |||
99da85c895 | |||
f4841ff43d | |||
3a678a15c9 | |||
3e0dbe0eaa | |||
1802682f65 | |||
4d2249773a | |||
8a8fc5f8ef | |||
671ba3791d | |||
9488e7fd5f | |||
23c6fcdbe8 | |||
cf5d4b5541 | |||
b95c2b58f0 | |||
8e9197f2a1 | |||
c65f10a17b | |||
a56f3dc0d9 | |||
47359301a2 | |||
688ee6d1e5 | |||
ad8d519eb5 | |||
9e80d9bee1 | |||
0ff35e170d | |||
8d6a5a3581 | |||
33a24bb731 | |||
2d47c6bfde | |||
d68ad36bb9 | |||
df74a43396 | |||
01cb9948f1 | |||
bf468a81ec | |||
8c313eed26 | |||
c4d28aee9b | |||
f4b5f67ee0 |
@@ -1,4 +1,7 @@
**/.git
.git
!.git/HEAD
!.git/refs/heads
**/*_test.go

build/_workspace
9 .github/CODEOWNERS vendored Normal file
@@ -0,0 +1,9 @@
# Lines starting with '#' are comments.
# Each line is a file pattern followed by one or more owners.

accounts/usbwallet @karalabe
consensus @karalabe
core/ @karalabe @holiman
eth/ @karalabe
mobile/ @karalabe
p2p/ @fjl @zsfelfoldi
2 .github/CONTRIBUTING.md vendored
@@ -2,7 +2,7 @@

Before you do a feature request please check and make sure that it isn't possible
through some other means. The JavaScript enabled console is a powerful feature
in the right hands. Please check our [Bitchin' tricks](https://github.com/ethereum/go-ethereum/wiki/bitchin-tricks) wiki page for more info
in the right hands. Please check our [Wiki page](https://github.com/ethereum/go-ethereum/wiki) for more info
and help.

## Contributing
6 .github/ISSUE_TEMPLATE.md vendored
@@ -1,3 +1,9 @@
Hi there,

please note that this is an issue tracker reserved for bug reports and feature requests.

For general questions please use the gitter channel or the Ethereum stack exchange at https://ethereum.stackexchange.com.

#### System information

Geth version: `geth version`
9 .gitignore vendored
@@ -30,3 +30,12 @@ build/_vendor/pkg
# travis
profile.tmp
profile.cov

# IdeaIDE
.idea

# dashboard
/dashboard/assets/flow-typed
/dashboard/assets/node_modules
/dashboard/assets/stats.json
/dashboard/assets/bundle.js
14 .mailmap
@@ -65,7 +65,8 @@ Enrique Fynn <enriquefynn@gmail.com>

Vincent G <caktux@gmail.com>

RJ Catalano <rj@erisindustries.com>
RJ Catalano <catalanor0220@gmail.com>
RJ Catalano <catalanor0220@gmail.com> <rj@erisindustries.com>

Nchinda Nchinda <nchinda2@gmail.com>

@@ -109,3 +110,14 @@ Frank Wang <eternnoir@gmail.com>
Gary Rong <garyrong0905@gmail.com>

Guillaume Nicolas <guin56@gmail.com>

Sorin Neacsu <sorin.neacsu@gmail.com>
Sorin Neacsu <sorin.neacsu@gmail.com> <sorin@users.noreply.github.com>

Valentin Wüstholz <wuestholz@gmail.com>
Valentin Wüstholz <wuestholz@gmail.com> <wuestholz@users.noreply.github.com>

Armin Braun <me@obrown.io>

Ernesto del Toro <ernesto.deltoro@gmail.com>
Ernesto del Toro <ernesto.deltoro@gmail.com> <ernestodeltoro@users.noreply.github.com>
81 .travis.yml
@@ -6,9 +6,19 @@ matrix:
|
||||
- os: linux
|
||||
dist: trusty
|
||||
sudo: required
|
||||
go: 1.7.6
|
||||
go: 1.7.x
|
||||
script:
|
||||
- sudo modprobe fuse
|
||||
- sudo chmod 666 /dev/fuse
|
||||
- sudo chown root:$USER /etc/fuse.conf
|
||||
- go run build/ci.go install
|
||||
- go run build/ci.go test -coverage
|
||||
|
||||
- os: linux
|
||||
dist: trusty
|
||||
sudo: required
|
||||
go: 1.8.x
|
||||
script:
|
||||
- sudo -E apt-get -yq --no-install-suggests --no-install-recommends --force-yes install fuse
|
||||
- sudo modprobe fuse
|
||||
- sudo chmod 666 /dev/fuse
|
||||
- sudo chown root:$USER /etc/fuse.conf
|
||||
@@ -19,33 +29,45 @@ matrix:
|
||||
- os: linux
|
||||
dist: trusty
|
||||
sudo: required
|
||||
go: 1.8.3
|
||||
go: 1.9.x
|
||||
script:
|
||||
- sudo -E apt-get -yq --no-install-suggests --no-install-recommends --force-yes install fuse
|
||||
- sudo modprobe fuse
|
||||
- sudo chmod 666 /dev/fuse
|
||||
- sudo chown root:$USER /etc/fuse.conf
|
||||
- go run build/ci.go install
|
||||
- go run build/ci.go test -coverage -misspell
|
||||
- go run build/ci.go test -coverage
|
||||
|
||||
- os: osx
|
||||
go: 1.8.3
|
||||
sudo: required
|
||||
go: 1.9.x
|
||||
script:
|
||||
- unset -f cd # workaround for https://github.com/travis-ci/travis-ci/issues/8703
|
||||
- brew update
|
||||
- brew install caskroom/cask/brew-cask
|
||||
- brew cask install osxfuse
|
||||
- go run build/ci.go install
|
||||
- go run build/ci.go test -coverage -misspell
|
||||
- go run build/ci.go test -coverage
|
||||
|
||||
# This builder only tests code linters on latest version of Go
|
||||
- os: linux
|
||||
dist: trusty
|
||||
go: 1.9.x
|
||||
env:
|
||||
- lint
|
||||
git:
|
||||
submodules: false # avoid cloning ethereum/tests
|
||||
script:
|
||||
- go run build/ci.go lint
|
||||
|
||||
# This builder does the Ubuntu PPA and Linux Azure uploads
|
||||
- os: linux
|
||||
dist: trusty
|
||||
sudo: required
|
||||
go: 1.8.3
|
||||
go: 1.9.x
|
||||
env:
|
||||
- ubuntu-ppa
|
||||
- azure-linux
|
||||
git:
|
||||
submodules: false # avoid cloning ethereum/tests
|
||||
addons:
|
||||
apt:
|
||||
packages:
|
||||
@@ -66,24 +88,25 @@ matrix:
|
||||
- sudo -E apt-get -yq --no-install-suggests --no-install-recommends --force-yes install gcc-arm-linux-gnueabi libc6-dev-armel-cross gcc-arm-linux-gnueabihf libc6-dev-armhf-cross gcc-aarch64-linux-gnu libc6-dev-arm64-cross
|
||||
- sudo ln -s /usr/include/asm-generic /usr/include/asm
|
||||
|
||||
- GOARM=5 CC=arm-linux-gnueabi-gcc go run build/ci.go install -arch arm
|
||||
- GOARM=5 go run build/ci.go install -arch arm -cc arm-linux-gnueabi-gcc
|
||||
- GOARM=5 go run build/ci.go archive -arch arm -type tar -signer LINUX_SIGNING_KEY -upload gethstore/builds
|
||||
- GOARM=6 CC=arm-linux-gnueabi-gcc go run build/ci.go install -arch arm
|
||||
- GOARM=6 go run build/ci.go install -arch arm -cc arm-linux-gnueabi-gcc
|
||||
- GOARM=6 go run build/ci.go archive -arch arm -type tar -signer LINUX_SIGNING_KEY -upload gethstore/builds
|
||||
- GOARM=7 CC=arm-linux-gnueabihf-gcc go run build/ci.go install -arch arm
|
||||
- GOARM=7 go run build/ci.go install -arch arm -cc arm-linux-gnueabihf-gcc
|
||||
- GOARM=7 go run build/ci.go archive -arch arm -type tar -signer LINUX_SIGNING_KEY -upload gethstore/builds
|
||||
- CC=aarch64-linux-gnu-gcc go run build/ci.go install -arch arm64
|
||||
- go run build/ci.go install -arch arm64 -cc aarch64-linux-gnu-gcc
|
||||
- go run build/ci.go archive -arch arm64 -type tar -signer LINUX_SIGNING_KEY -upload gethstore/builds
|
||||
|
||||
# This builder does the Linux Azure MIPS xgo uploads
|
||||
- os: linux
|
||||
dist: trusty
|
||||
sudo: required
|
||||
services:
|
||||
- docker
|
||||
go: 1.8.3
|
||||
go: 1.9.x
|
||||
env:
|
||||
- azure-linux-mips
|
||||
git:
|
||||
submodules: false # avoid cloning ethereum/tests
|
||||
script:
|
||||
- go run build/ci.go xgo --alltools -- --targets=linux/mips --ldflags '-extldflags "-static"' -v
|
||||
- for bin in build/bin/*-linux-mips; do mv -f "${bin}" "${bin/-linux-mips/}"; done
|
||||
@@ -120,17 +143,19 @@ matrix:
|
||||
env:
|
||||
- azure-android
|
||||
- maven-android
|
||||
git:
|
||||
submodules: false # avoid cloning ethereum/tests
|
||||
before_install:
|
||||
- curl https://storage.googleapis.com/golang/go1.8.3.linux-amd64.tar.gz | tar -xz
|
||||
- curl https://storage.googleapis.com/golang/go1.9.2.linux-amd64.tar.gz | tar -xz
|
||||
- export PATH=`pwd`/go/bin:$PATH
|
||||
- export GOROOT=`pwd`/go
|
||||
- export GOPATH=$HOME/go
|
||||
script:
|
||||
# Build the Android archive and upload it to Maven Central and Azure
|
||||
- curl https://dl.google.com/android/repository/android-ndk-r14b-linux-x86_64.zip -o android-ndk-r14b.zip
|
||||
- unzip -q android-ndk-r14b.zip && rm android-ndk-r14b.zip
|
||||
- mv android-ndk-r14b $HOME
|
||||
- export ANDROID_NDK=$HOME/android-ndk-r14b
|
||||
- curl https://dl.google.com/android/repository/android-ndk-r15c-linux-x86_64.zip -o android-ndk-r15c.zip
|
||||
- unzip -q android-ndk-r15c.zip && rm android-ndk-r15c.zip
|
||||
- mv android-ndk-r15c $HOME
|
||||
- export ANDROID_NDK=$HOME/android-ndk-r15c
|
||||
|
||||
- mkdir -p $GOPATH/src/github.com/ethereum
|
||||
- ln -s `pwd` $GOPATH/src/github.com/ethereum
|
||||
@@ -138,11 +163,13 @@ matrix:
|
||||
|
||||
# This builder does the OSX Azure, iOS CocoaPods and iOS Azure uploads
|
||||
- os: osx
|
||||
go: 1.8.3
|
||||
go: 1.9.x
|
||||
env:
|
||||
- azure-osx
|
||||
- azure-ios
|
||||
- cocoapods-ios
|
||||
git:
|
||||
submodules: false # avoid cloning ethereum/tests
|
||||
script:
|
||||
- go run build/ci.go install
|
||||
- go run build/ci.go archive -type tar -signer OSX_SIGNING_KEY -upload gethstore/builds
|
||||
@@ -158,24 +185,22 @@ matrix:
|
||||
- xctool -version
|
||||
- xcrun simctl list
|
||||
|
||||
# Workaround for https://github.com/golang/go/issues/23749
|
||||
- export CGO_CFLAGS_ALLOW='-fmodules|-fblocks|-fobjc-arc'
|
||||
- go run build/ci.go xcode -signer IOS_SIGNING_KEY -deploy trunk -upload gethstore/builds
|
||||
|
||||
# This builder does the Azure archive purges to avoid accumulating junk
|
||||
- os: linux
|
||||
dist: trusty
|
||||
sudo: required
|
||||
go: 1.8.3
|
||||
go: 1.9.x
|
||||
env:
|
||||
- azure-purge
|
||||
git:
|
||||
submodules: false # avoid cloning ethereum/tests
|
||||
script:
|
||||
- go run build/ci.go purge -store gethstore/builds -days 14
|
||||
|
||||
install:
|
||||
- go get golang.org/x/tools/cmd/cover
|
||||
script:
|
||||
- go run build/ci.go install
|
||||
- go run build/ci.go test -coverage
|
||||
|
||||
notifications:
|
||||
webhooks:
|
||||
urls:
|
||||
|
92 AUTHORS
@@ -1,85 +1,173 @@
|
||||
# This is the official list of go-ethereum authors for copyright purposes.
|
||||
|
||||
Afri Schoedon <5chdn@users.noreply.github.com>
|
||||
Agustin Armellini Fischer <armellini13@gmail.com>
|
||||
Airead <fgh1987168@gmail.com>
|
||||
Alan Chen <alanchchen@users.noreply.github.com>
|
||||
Alejandro Isaza <alejandro.isaza@gmail.com>
|
||||
Ales Katona <ales@coinbase.com>
|
||||
Alex Leverington <alex@ethdev.com>
|
||||
Alex Wu <wuyiding@gmail.com>
|
||||
Alexandre Van de Sande <alex.vandesande@ethdev.com>
|
||||
Ali Hajimirza <Ali92hm@users.noreply.github.com>
|
||||
Anton Evangelatov <anton.evangelatov@gmail.com>
|
||||
Arba Sasmoyo <arba.sasmoyo@gmail.com>
|
||||
Armani Ferrante <armaniferrante@berkeley.edu>
|
||||
Armin Braun <me@obrown.io>
|
||||
Aron Fischer <github@aron.guru>
|
||||
Bas van Kervel <bas@ethdev.com>
|
||||
Benjamin Brent <benjamin@benjaminbrent.com>
|
||||
Benoit Verkindt <benoit.verkindt@gmail.com>
|
||||
Bo <bohende@gmail.com>
|
||||
Bo Ye <boy.e.computer.1982@outlook.com>
|
||||
Bob Glickstein <bobg@users.noreply.github.com>
|
||||
Brian Schroeder <bts@gmail.com>
|
||||
Casey Detrio <cdetrio@gmail.com>
|
||||
Chase Wright <mysticryuujin@gmail.com>
|
||||
Christoph Jentzsch <jentzsch.software@gmail.com>
|
||||
Daniel A. Nagy <nagy.da@gmail.com>
|
||||
Daniel Sloof <goapsychadelic@gmail.com>
|
||||
Darrel Herbst <dherbst@gmail.com>
|
||||
Dave Appleton <calistralabs@gmail.com>
|
||||
Diego Siqueira <DiSiqueira@users.noreply.github.com>
|
||||
Dmitry Shulyak <yashulyak@gmail.com>
|
||||
Egon Elbre <egonelbre@gmail.com>
|
||||
Elias Naur <elias.naur@gmail.com>
|
||||
Elliot Shepherd <elliot@identitii.com>
|
||||
Enrique Fynn <enriquefynn@gmail.com>
|
||||
Ernesto del Toro <ernesto.deltoro@gmail.com>
|
||||
Ethan Buchman <ethan@coinculture.info>
|
||||
Eugene Valeyev <evgen.povt@gmail.com>
|
||||
Evangelos Pappas <epappas@evalonlabs.com>
|
||||
Evgeny Danilenko <6655321@bk.ru>
|
||||
Fabian Vogelsteller <fabian@frozeman.de>
|
||||
Fabio Barone <fabio.barone.co@gmail.com>
|
||||
Fabio Berger <fabioberger1991@gmail.com>
|
||||
FaceHo <facehoshi@gmail.com>
|
||||
Felix Lange <fjl@twurst.com>
|
||||
Fiisio <liangcszzu@163.com>
|
||||
Frank Wang <eternnoir@gmail.com>
|
||||
Furkan KAMACI <furkankamaci@gmail.com>
|
||||
Gary Rong <garyrong0905@gmail.com>
|
||||
George Ornbo <george@shapeshed.com>
|
||||
Gregg Dourgarian <greggd@tempworks.com>
|
||||
Guillaume Ballet <gballet@gmail.com>
|
||||
Guillaume Nicolas <guin56@gmail.com>
|
||||
Gustav Simonsson <gustav.simonsson@gmail.com>
|
||||
Hao Bryan Cheng <haobcheng@gmail.com>
|
||||
Henning Diedrich <hd@eonblast.com>
|
||||
Isidoro Ghezzi <isidoro.ghezzi@icloud.com>
|
||||
Ivan Daniluk <ivan.daniluk@gmail.com>
|
||||
Jae Kwon <jkwon.work@gmail.com>
|
||||
Jamie Pitts <james.pitts@gmail.com>
|
||||
Janoš Guljaš <janos@users.noreply.github.com>
|
||||
Jason Carver <jacarver@linkedin.com>
|
||||
Jay Guo <guojiannan1101@gmail.com>
|
||||
Jeff R. Allen <jra@nella.org>
|
||||
Jeffrey Wilcke <jeffrey@ethereum.org>
|
||||
Jens Agerberg <github@agerberg.me>
|
||||
Jia Chenhui <jiachenhui1989@gmail.com>
|
||||
Jim McDonald <Jim@mcdee.net>
|
||||
Joel Burget <joelburget@gmail.com>
|
||||
Jonathan Brown <jbrown@bluedroplet.com>
|
||||
Joseph Chow <ethereum@outlook.com>
|
||||
Justin Clark-Casey <justincc@justincc.org>
|
||||
Justin Drake <drakefjustin@gmail.com>
|
||||
Kenji Siu <kenji@isuntv.com>
|
||||
Kobi Gurkan <kobigurk@gmail.com>
|
||||
Konrad Feldmeier <konrad@brainbot.com>
|
||||
Kurkó Mihály <kurkomisi@users.noreply.github.com>
|
||||
Kyuntae Ethan Kim <ethan.kyuntae.kim@gmail.com>
|
||||
Lefteris Karapetsas <lefteris@refu.co>
|
||||
Leif Jurvetson <leijurv@gmail.com>
|
||||
Leo Shklovskii <leo@thermopylae.net>
|
||||
Lewis Marshall <lewis@lmars.net>
|
||||
Lio李欧 <lionello@users.noreply.github.com>
|
||||
Louis Holbrook <dev@holbrook.no>
|
||||
Luca Zeug <luclu@users.noreply.github.com>
|
||||
Magicking <s@6120.eu>
|
||||
Maran Hidskes <maran.hidskes@gmail.com>
|
||||
Marek Kotewicz <marek.kotewicz@gmail.com>
|
||||
Mark <markya0616@gmail.com>
|
||||
Martin Holst Swende <martin@swende.se>
|
||||
Matthew Di Ferrante <mattdf@users.noreply.github.com>
|
||||
Matthew Wampler-Doty <matthew.wampler.doty@gmail.com>
|
||||
Maximilian Meister <mmeister@suse.de>
|
||||
Micah Zoltu <micah@zoltu.net>
|
||||
Michael Ruminer <michael.ruminer+github@gmail.com>
|
||||
Miguel Mota <miguelmota2@gmail.com>
|
||||
Miya Chen <miyatlchen@gmail.com>
|
||||
Nchinda Nchinda <nchinda2@gmail.com>
|
||||
Nick Dodson <silentcicero@outlook.com>
|
||||
Nick Johnson <arachnid@notdot.net>
|
||||
Nicolas Guillaume <gunicolas@sqli.com>
|
||||
Noman <noman@noman.land>
|
||||
Oli Bye <olibye@users.noreply.github.com>
|
||||
Paul Litvak <litvakpol@012.net.il>
|
||||
Paulo L F Casaretto <pcasaretto@gmail.com>
|
||||
Paweł Bylica <chfast@gmail.com>
|
||||
Peter Pratscher <pratscher@gmail.com>
|
||||
Petr Mikusek <petr@mikusek.info>
|
||||
Péter Szilágyi <peterke@gmail.com>
|
||||
RJ Catalano <rj@erisindustries.com>
|
||||
RJ Catalano <catalanor0220@gmail.com>
|
||||
Ramesh Nair <ram@hiddentao.com>
|
||||
Ricardo Catalinas Jiménez <r@untroubled.be>
|
||||
Ricardo Domingos <ricardohsd@gmail.com>
|
||||
Richard Hart <richardhart92@gmail.com>
|
||||
Rob <robert@rojotek.com>
|
||||
Robert Zaremba <robert.zaremba@scale-it.pl>
|
||||
Russ Cox <rsc@golang.org>
|
||||
Rémy Roy <remyroy@remyroy.com>
|
||||
S. Matthew English <s-matthew-english@users.noreply.github.com>
|
||||
Shintaro Kaneko <kaneshin0120@gmail.com>
|
||||
Sorin Neacsu <sorin.neacsu@gmail.com>
|
||||
Stein Dekker <dekker.stein@gmail.com>
|
||||
Steve Waldman <swaldman@mchange.com>
|
||||
Steven Roose <stevenroose@gmail.com>
|
||||
Taylor Gerring <taylor.gerring@gmail.com>
|
||||
Thomas Bocek <tom@tomp2p.net>
|
||||
Ti Zhou <tizhou1986@gmail.com>
|
||||
Tosh Camille <tochecamille@gmail.com>
|
||||
Valentin Wüstholz <wuestholz@users.noreply.github.com>
|
||||
Valentin Wüstholz <wuestholz@gmail.com>
|
||||
Victor Farazdagi <simple.square@gmail.com>
|
||||
Victor Tran <vu.tran54@gmail.com>
|
||||
Viktor Trón <viktor.tron@gmail.com>
|
||||
Ville Sundell <github@solarius.fi>
|
||||
Vincent G <caktux@gmail.com>
|
||||
Vitalik Buterin <v@buterin.com>
|
||||
Vitaly V <vvelikodny@gmail.com>
|
||||
Vivek Anand <vivekanand1101@users.noreply.github.com>
|
||||
Vlad Gluhovsky <gluk256@users.noreply.github.com>
|
||||
Yohann Léon <sybiload@gmail.com>
|
||||
Yoichi Hirai <i@yoichihirai.com>
|
||||
Yondon Fu <yondon.fu@gmail.com>
|
||||
Zach <zach.ramsay@gmail.com>
|
||||
Zahoor Mohamed <zahoor@zahoor.in>
|
||||
Zoe Nolan <github@zoenolan.org>
|
||||
Zsolt Felföldi <zsfelfoldi@gmail.com>
|
||||
am2rican5 <am2rican5@gmail.com>
|
||||
ayeowch <ayeowch@gmail.com>
|
||||
b00ris <b00ris@mail.ru>
|
||||
bailantaotao <Edwin@maicoin.com>
|
||||
baizhenxuan <nkbai@163.com>
|
||||
bloonfield <bloonfield@163.com>
|
||||
changhong <changhong.yu@shanbay.com>
|
||||
evgk <evgeniy.kamyshev@gmail.com>
|
||||
ferhat elmas <elmas.ferhat@gmail.com>
|
||||
holisticode <holistic.computing@gmail.com>
|
||||
jtakalai <juuso.takalainen@streamr.com>
|
||||
ken10100147 <sunhongping@kanjian.com>
|
||||
ligi <ligi@ligi.de>
|
||||
mark.lin <mark@maicoin.com>
|
||||
necaremus <necaremus@gmail.com>
|
||||
njupt-moon <1015041018@njupt.edu.cn>
|
||||
nkbai <nkbai@163.com>
|
||||
rhaps107 <dod-source@yandex.ru>
|
||||
slumber1122 <slumber1122@gmail.com>
|
||||
sunxiaojun2014 <sunxiaojun-xy@360.cn>
|
||||
terasum <terasum@163.com>
|
||||
tsarpaul <Litvakpol@012.net.il>
|
||||
xiekeyang <xiekeyang@users.noreply.github.com>
|
||||
yoza <yoza.is12s@gmail.com>
|
||||
ΞTHΞЯSPHΞЯΞ <{viktor.tron,nagydani,zsfelfoldi}@gmail.com>
|
||||
Максим Чусовлянов <mchusovlianov@gmail.com>
|
||||
|
21 Dockerfile
@@ -1,15 +1,16 @@
FROM alpine:3.5
# Build Geth in a stock Go builder container
FROM golang:1.9-alpine as builder

RUN apk add --no-cache make gcc musl-dev linux-headers

ADD . /go-ethereum
RUN \
apk add --update git go make gcc musl-dev linux-headers && \
(cd go-ethereum && make geth) && \
cp go-ethereum/build/bin/geth /usr/local/bin/ && \
apk del git go make gcc musl-dev linux-headers && \
rm -rf /go-ethereum && rm -rf /var/cache/apk/*
RUN cd /go-ethereum && make geth

EXPOSE 8545
EXPOSE 30303
EXPOSE 30303/udp
# Pull Geth into a second stage deploy alpine container
FROM alpine:latest

RUN apk add --no-cache ca-certificates
COPY --from=builder /go-ethereum/build/bin/geth /usr/local/bin/

EXPOSE 8545 8546 30303 30303/udp 30304/udp
ENTRYPOINT ["geth"]
15 Dockerfile.alltools Normal file
@@ -0,0 +1,15 @@
# Build Geth in a stock Go builder container
FROM golang:1.9-alpine as builder

RUN apk add --no-cache make gcc musl-dev linux-headers

ADD . /go-ethereum
RUN cd /go-ethereum && make all

# Pull all binaries into a second stage deploy alpine container
FROM alpine:latest

RUN apk add --no-cache ca-certificates
COPY --from=builder /go-ethereum/build/bin/* /usr/local/bin/

EXPOSE 8545 8546 30303 30303/udp 30304/udp
13 Makefile
@@ -8,7 +8,7 @@
.PHONY: geth-darwin geth-darwin-386 geth-darwin-amd64
.PHONY: geth-windows geth-windows-386 geth-windows-amd64

GOBIN = build/bin
GOBIN = $(shell pwd)/build/bin
GO ?= latest

geth:
@@ -21,11 +21,6 @@ swarm:
@echo "Done building."
@echo "Run \"$(GOBIN)/swarm\" to launch swarm."

evm:
build/env.sh go run build/ci.go install ./cmd/evm
@echo "Done building."
@echo "Run \"$(GOBIN)/evm\" to start the evm."

all:
build/env.sh go run build/ci.go install

@@ -50,9 +45,13 @@ clean:

devtools:
env GOBIN= go get -u golang.org/x/tools/cmd/stringer
env GOBIN= go get -u github.com/jteeuwen/go-bindata/go-bindata
env GOBIN= go get -u github.com/kevinburke/go-bindata/go-bindata
env GOBIN= go get -u github.com/fjl/gencodec
env GOBIN= go get -u github.com/golang/protobuf/protoc-gen-go
env GOBIN= go install ./cmd/abigen
@type "npm" 2> /dev/null || echo 'Please install node.js and npm'
@type "solc" 2> /dev/null || echo 'Please install solc'
@type "protoc" 2> /dev/null || echo 'Please install protoc'

# Cross Compilation Targets (xgo)
39 README.md
@@ -35,11 +35,11 @@ The go-ethereum project comes with several wrappers/executables found in the `cm
| **`geth`** | Our main Ethereum CLI client. It is the entry point into the Ethereum network (main-, test- or private net), capable of running as a full node (default) archive node (retaining all historical state) or a light node (retrieving data live). It can be used by other processes as a gateway into the Ethereum network via JSON RPC endpoints exposed on top of HTTP, WebSocket and/or IPC transports. `geth --help` and the [CLI Wiki page](https://github.com/ethereum/go-ethereum/wiki/Command-Line-Options) for command line options. |
| `abigen` | Source code generator to convert Ethereum contract definitions into easy to use, compile-time type-safe Go packages. It operates on plain [Ethereum contract ABIs](https://github.com/ethereum/wiki/wiki/Ethereum-Contract-ABI) with expanded functionality if the contract bytecode is also available. However it also accepts Solidity source files, making development much more streamlined. Please see our [Native DApps](https://github.com/ethereum/go-ethereum/wiki/Native-DApps:-Go-bindings-to-Ethereum-contracts) wiki page for details. |
| `bootnode` | Stripped down version of our Ethereum client implementation that only takes part in the network node discovery protocol, but does not run any of the higher level application protocols. It can be used as a lightweight bootstrap node to aid in finding peers in private networks. |
| `disasm` | Bytecode disassembler to convert EVM (Ethereum Virtual Machine) bytecode into more user friendly assembly-like opcodes (e.g. `echo "6001" | disasm`). For details on the individual opcodes, please see pages 22-30 of the [Ethereum Yellow Paper](http://gavwood.com/paper.pdf). |
| `evm` | Developer utility version of the EVM (Ethereum Virtual Machine) that is capable of running bytecode snippets within a configurable environment and execution mode. Its purpose is to allow insolated, fine-grained debugging of EVM opcodes (e.g. `evm --code 60ff60ff --debug`). |
| `evm` | Developer utility version of the EVM (Ethereum Virtual Machine) that is capable of running bytecode snippets within a configurable environment and execution mode. Its purpose is to allow isolated, fine-grained debugging of EVM opcodes (e.g. `evm --code 60ff60ff --debug`). |
| `gethrpctest` | Developer utility tool to support our [ethereum/rpc-test](https://github.com/ethereum/rpc-tests) test suite which validates baseline conformity to the [Ethereum JSON RPC](https://github.com/ethereum/wiki/wiki/JSON-RPC) specs. Please see the [test suite's readme](https://github.com/ethereum/rpc-tests/blob/master/README.md) for details. |
| `rlpdump` | Developer utility tool to convert binary RLP ([Recursive Length Prefix](https://github.com/ethereum/wiki/wiki/RLP)) dumps (data encoding used by the Ethereum protocol both network as well as consensus wise) to user friendlier hierarchical representation (e.g. `rlpdump --hex CE0183FFFFFFC4C304050583616263`). |
| `swarm` | swarm daemon and tools. This is the entrypoint for the swarm network. `swarm --help` for command line options and subcommands. See https://swarm-guide.readthedocs.io for swarm documentation. |
| `puppeth` | a CLI wizard that aids in creating a new Ethereum network. |

## Running geth
@@ -56,16 +56,14 @@ the user doesn't care about years-old historical data, so we can fast-sync quick
state of the network. To do so:

```
$ geth --fast --cache=512 console
$ geth console
```

This command will:

* Start geth in fast sync mode (`--fast`), causing it to download more data in exchange for avoiding
processing the entire history of the Ethereum network, which is very CPU intensive.
* Bump the memory allowance of the database to 512MB (`--cache=512`), which can help significantly in
sync times especially for HDD users. This flag is optional and you can set it as high or as low as
you'd like, though we'd recommend the 512MB - 2GB range.
* Start geth in fast sync mode (default, can be changed with the `--syncmode` flag), causing it to
download more data in exchange for avoiding processing the entire history of the Ethereum network,
which is very CPU intensive.
* Start up Geth's built-in interactive [JavaScript console](https://github.com/ethereum/go-ethereum/wiki/JavaScript-Console),
(via the trailing `console` subcommand) through which you can invoke all official [`web3` methods](https://github.com/ethereum/wiki/wiki/JavaScript-API)
as well as Geth's own [management APIs](https://github.com/ethereum/go-ethereum/wiki/Management-APIs).
@@ -80,12 +78,11 @@ entire system. In other words, instead of attaching to the main network, you wan
network with your node, which is fully equivalent to the main network, but with play-Ether only.

```
$ geth --testnet --fast --cache=512 console
$ geth --testnet console
```

The `--fast`, `--cache` flags and `console` subcommand have the exact same meaning as above and they
are equally useful on the testnet too. Please see above for their explanations if you've skipped to
here.
The `console` subcommand have the exact same meaning as above and they are equally useful on the
testnet too. Please see above for their explanations if you've skipped to here.

Specifying the `--testnet` flag however will reconfigure your Geth instance a bit:

@@ -102,6 +99,14 @@ over between the main network and test network, you should make sure to always u
for play-money and real-money. Unless you manually move accounts, Geth will by default correctly
separate the two networks and will not make any accounts available between them.*

### Full node on the Rinkeby test network

The above test network is a cross client one based on the ethash proof-of-work consensus algorithm. As such, it has certain extra overhead and is more susceptible to reorganization attacks due to the network's low difficulty / security. Go Ethereum also supports connecting to a proof-of-authority based test network called [*Rinkeby*](https://www.rinkeby.io) (operated by members of the community). This network is lighter, more secure, but is only supported by go-ethereum.

```
$ geth --rinkeby console
```

### Configuration

As an alternative to passing the numerous flags to the `geth` binary, you can also pass a configuration file via:
@@ -116,7 +121,7 @@ To get an idea how the file should look like you can use the `dumpconfig` subcom
$ geth --your-favourite-flags dumpconfig
```

*Note: This works only with geth v1.6.0 and above*
*Note: This works only with geth v1.6.0 and above.*

#### Docker quick start

@@ -125,10 +130,12 @@ One of the quickest ways to get Ethereum up and running on your machine is by us
```
docker run -d --name ethereum-node -v /Users/alice/ethereum:/root \
-p 8545:8545 -p 30303:30303 \
ethereum/client-go --fast --cache=512
ethereum/client-go
```

This will start geth in fast sync mode with a DB memory allowance of 512MB just as the above command does. It will also create a persistent volume in your home directory for saving your blockchain as well as map the default ports. There is also an `alpine` tag available for a slim version of the image.
This will start geth in fast-sync mode with a DB memory allowance of 1GB just as the above command does. It will also create a persistent volume in your home directory for saving your blockchain as well as map the default ports. There is also an `alpine` tag available for a slim version of the image.

Do not forget `--rpcaddr 0.0.0.0`, if you want to access RPC from other containers and/or hosts. By default, `geth` binds to the local interface and RPC endpoints is not accessible from the outside.

### Programatically interfacing Geth nodes

@@ -264,7 +271,7 @@ instance for mining, run it with all your usual flags, extended by:
$ geth <usual-flags> --mine --minerthreads=1 --etherbase=0x0000000000000000000000000000000000000000
```

Which will start mining bocks and transactions on a single CPU thread, crediting all proceedings to
Which will start mining blocks and transactions on a single CPU thread, crediting all proceedings to
the account specified by `--etherbase`. You can further tune the mining by changing the default gas
limit blocks converge to (`--targetgaslimit`) and the price transactions are accepted at (`--gasprice`).
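The README sections above keep pointing at geth's JSON-RPC endpoints (HTTP on port 8545) and lead into the "Programatically interfacing Geth nodes" material. As a minimal illustration of that workflow (not part of this diff), here is a Go sketch that uses the repository's `ethclient` package to read the latest block header from a local node; the endpoint URL and the assumption that the HTTP-RPC server is enabled (for example via `--rpc`) are mine.

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/ethereum/go-ethereum/ethclient"
)

func main() {
	// Assumes a local node with the HTTP-RPC server enabled (e.g. `geth --rpc`,
	// or the Docker port mapping shown above); the URL is an assumption.
	client, err := ethclient.Dial("http://localhost:8545")
	if err != nil {
		log.Fatal(err)
	}
	// A nil block number asks for the latest header.
	header, err := client.HeaderByNumber(context.Background(), nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("latest block number:", header.Number)
}
```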
@@ -17,13 +17,10 @@
|
||||
package abi
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"reflect"
|
||||
"strings"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
)
|
||||
|
||||
// The ABI holds information about a contract's context and available
|
||||
@@ -54,136 +51,52 @@ func JSON(reader io.Reader) (ABI, error) {
|
||||
// methods string signature. (signature = baz(uint32,string32))
|
||||
func (abi ABI) Pack(name string, args ...interface{}) ([]byte, error) {
|
||||
// Fetch the ABI of the requested method
|
||||
var method Method
|
||||
|
||||
if name == "" {
|
||||
method = abi.Constructor
|
||||
} else {
|
||||
m, exist := abi.Methods[name]
|
||||
if !exist {
|
||||
return nil, fmt.Errorf("method '%s' not found", name)
|
||||
// constructor
|
||||
arguments, err := abi.Constructor.Inputs.Pack(args...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
method = m
|
||||
return arguments, nil
|
||||
|
||||
}
|
||||
arguments, err := method.pack(args...)
|
||||
method, exist := abi.Methods[name]
|
||||
if !exist {
|
||||
return nil, fmt.Errorf("method '%s' not found", name)
|
||||
}
|
||||
|
||||
arguments, err := method.Inputs.Pack(args...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// Pack up the method ID too if not a constructor and return
|
||||
if name == "" {
|
||||
return arguments, nil
|
||||
}
|
||||
return append(method.Id(), arguments...), nil
|
||||
}
|
||||
|
||||
// these variable are used to determine certain types during type assertion for
|
||||
// assignment.
|
||||
var (
|
||||
r_interSlice = reflect.TypeOf([]interface{}{})
|
||||
r_hash = reflect.TypeOf(common.Hash{})
|
||||
r_bytes = reflect.TypeOf([]byte{})
|
||||
r_byte = reflect.TypeOf(byte(0))
|
||||
)
|
||||
|
||||
// Unpack output in v according to the abi specification
|
||||
func (abi ABI) Unpack(v interface{}, name string, output []byte) error {
|
||||
var method = abi.Methods[name]
|
||||
|
||||
func (abi ABI) Unpack(v interface{}, name string, output []byte) (err error) {
|
||||
if len(output) == 0 {
|
||||
return fmt.Errorf("abi: unmarshalling empty output")
|
||||
}
|
||||
|
||||
// make sure the passed value is a pointer
|
||||
valueOf := reflect.ValueOf(v)
|
||||
if reflect.Ptr != valueOf.Kind() {
|
||||
return fmt.Errorf("abi: Unpack(non-pointer %T)", v)
|
||||
// since there can't be naming collisions with contracts and events,
|
||||
// we need to decide whether we're calling a method or an event
|
||||
if method, ok := abi.Methods[name]; ok {
|
||||
if len(output)%32 != 0 {
|
||||
return fmt.Errorf("abi: improperly formatted output")
|
||||
}
|
||||
return method.Outputs.Unpack(v, output)
|
||||
} else if event, ok := abi.Events[name]; ok {
|
||||
return event.Inputs.Unpack(v, output)
|
||||
}
|
||||
|
||||
var (
|
||||
value = valueOf.Elem()
|
||||
typ = value.Type()
|
||||
)
|
||||
|
||||
if len(method.Outputs) > 1 {
|
||||
switch value.Kind() {
|
||||
// struct will match named return values to the struct's field
|
||||
// names
|
||||
case reflect.Struct:
|
||||
for i := 0; i < len(method.Outputs); i++ {
|
||||
marshalledValue, err := toGoType(i, method.Outputs[i], output)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
reflectValue := reflect.ValueOf(marshalledValue)
|
||||
|
||||
for j := 0; j < typ.NumField(); j++ {
|
||||
field := typ.Field(j)
|
||||
// TODO read tags: `abi:"fieldName"`
|
||||
if field.Name == strings.ToUpper(method.Outputs[i].Name[:1])+method.Outputs[i].Name[1:] {
|
||||
if err := set(value.Field(j), reflectValue, method.Outputs[i]); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
case reflect.Slice:
|
||||
if !value.Type().AssignableTo(r_interSlice) {
|
||||
return fmt.Errorf("abi: cannot marshal tuple in to slice %T (only []interface{} is supported)", v)
|
||||
}
|
||||
|
||||
// if the slice already contains values, set those instead of the interface slice itself.
|
||||
if value.Len() > 0 {
|
||||
if len(method.Outputs) > value.Len() {
|
||||
return fmt.Errorf("abi: cannot marshal in to slices of unequal size (require: %v, got: %v)", len(method.Outputs), value.Len())
|
||||
}
|
||||
|
||||
for i := 0; i < len(method.Outputs); i++ {
|
||||
marshalledValue, err := toGoType(i, method.Outputs[i], output)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
reflectValue := reflect.ValueOf(marshalledValue)
|
||||
if err := set(value.Index(i).Elem(), reflectValue, method.Outputs[i]); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// create a new slice and start appending the unmarshalled
|
||||
// values to the new interface slice.
|
||||
z := reflect.MakeSlice(typ, 0, len(method.Outputs))
|
||||
for i := 0; i < len(method.Outputs); i++ {
|
||||
marshalledValue, err := toGoType(i, method.Outputs[i], output)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
z = reflect.Append(z, reflect.ValueOf(marshalledValue))
|
||||
}
|
||||
value.Set(z)
|
||||
default:
|
||||
return fmt.Errorf("abi: cannot unmarshal tuple in to %v", typ)
|
||||
}
|
||||
|
||||
} else {
|
||||
marshalledValue, err := toGoType(0, method.Outputs[0], output)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := set(value, reflect.ValueOf(marshalledValue), method.Outputs[0]); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
return fmt.Errorf("abi: could not locate named method or event")
|
||||
}
|
||||
|
||||
// UnmarshalJSON implements json.Unmarshaler interface
|
||||
func (abi *ABI) UnmarshalJSON(data []byte) error {
|
||||
var fields []struct {
|
||||
Type string
|
||||
Name string
|
||||
Constant bool
|
||||
Indexed bool
|
||||
Anonymous bool
|
||||
Inputs []Argument
|
||||
Outputs []Argument
|
||||
@@ -220,3 +133,14 @@ func (abi *ABI) UnmarshalJSON(data []byte) error {
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// MethodById looks up a method by the 4-byte id
|
||||
// returns nil if none found
|
||||
func (abi *ABI) MethodById(sigdata []byte) *Method {
|
||||
for _, method := range abi.Methods {
|
||||
if bytes.Equal(method.Id(), sigdata[:4]) {
|
||||
return &method
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
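The abi.go changes above rework `Pack` to delegate argument encoding to `method.Inputs.Pack`, let `Unpack` dispatch on either a method's outputs or an event's inputs, and add the `MethodById` lookup. The following is a minimal sketch (not part of the diff) of how calling code could exercise that surface; the `transfer` ABI definition and the hand-built return word are illustrative assumptions.

```go
package main

import (
	"fmt"
	"log"
	"math/big"
	"strings"

	"github.com/ethereum/go-ethereum/accounts/abi"
	"github.com/ethereum/go-ethereum/common"
)

// Illustrative ABI definition, not taken from this changeset.
const transferABI = `[{"type":"function","name":"transfer","constant":false,
 "inputs":[{"name":"to","type":"address"},{"name":"value","type":"uint256"}],
 "outputs":[{"name":"success","type":"bool"}]}]`

func main() {
	parsed, err := abi.JSON(strings.NewReader(transferABI))
	if err != nil {
		log.Fatal(err)
	}
	// Pack prepends the 4-byte method id and ABI-encodes the arguments.
	calldata, err := parsed.Pack("transfer", common.Address{}, big.NewInt(1))
	if err != nil {
		log.Fatal(err)
	}
	// MethodById recovers the method from the leading 4 bytes of the calldata.
	if m := parsed.MethodById(calldata[:4]); m != nil {
		fmt.Println("method:", m.Sig())
	}
	// Unpack decodes return data (or non-indexed event data) into a Go value.
	ret := common.LeftPadBytes([]byte{1}, 32) // one 32-byte word encoding `true`
	var success bool
	if err := parsed.Unpack(&success, "transfer", ret); err != nil {
		log.Fatal(err)
	}
	fmt.Println("success:", success)
}
```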
|
||||
|
@@ -18,36 +18,19 @@ package abi
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"log"
|
||||
"math/big"
|
||||
"reflect"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"reflect"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/crypto"
|
||||
)
|
||||
|
||||
// formatSilceOutput add padding to the value and adds a size
|
||||
func formatSliceOutput(v ...[]byte) []byte {
|
||||
off := common.LeftPadBytes(big.NewInt(int64(len(v))).Bytes(), 32)
|
||||
output := append(off, make([]byte, 0, len(v)*32)...)
|
||||
|
||||
for _, value := range v {
|
||||
output = append(output, common.LeftPadBytes(value, 32)...)
|
||||
}
|
||||
return output
|
||||
}
|
||||
|
||||
// quick helper padding
|
||||
func pad(input []byte, size int, left bool) []byte {
|
||||
if left {
|
||||
return common.LeftPadBytes(input, size)
|
||||
}
|
||||
return common.RightPadBytes(input, size)
|
||||
}
|
||||
|
||||
const jsondata = `
|
||||
[
|
||||
{ "type" : "function", "name" : "balance", "constant" : true },
|
||||
@@ -93,9 +76,24 @@ func TestReader(t *testing.T) {
|
||||
}
|
||||
|
||||
// deep equal fails for some reason
|
||||
t.Skip()
|
||||
if !reflect.DeepEqual(abi, exp) {
|
||||
t.Errorf("\nabi: %v\ndoes not match exp: %v", abi, exp)
|
||||
for name, expM := range exp.Methods {
|
||||
gotM, exist := abi.Methods[name]
|
||||
if !exist {
|
||||
t.Errorf("Missing expected method %v", name)
|
||||
}
|
||||
if !reflect.DeepEqual(gotM, expM) {
|
||||
t.Errorf("\nGot abi method: \n%v\ndoes not match expected method\n%v", gotM, expM)
|
||||
}
|
||||
}
|
||||
|
||||
for name, gotM := range abi.Methods {
|
||||
expM, exist := exp.Methods[name]
|
||||
if !exist {
|
||||
t.Errorf("Found extra method %v", name)
|
||||
}
|
||||
if !reflect.DeepEqual(gotM, expM) {
|
||||
t.Errorf("\nGot abi method: \n%v\ndoes not match expected method\n%v", gotM, expM)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -191,7 +189,7 @@ func TestMethodSignature(t *testing.T) {
|
||||
t.Errorf("expected ids to match %x != %x", m.Id(), idexp)
|
||||
}
|
||||
|
||||
uintt, _ := NewType("uint")
|
||||
uintt, _ := NewType("uint256")
|
||||
m = Method{"foo", false, []Argument{{"bar", uintt, false}}, nil}
|
||||
exp = "foo(uint256)"
|
||||
if m.Sig() != exp {
|
||||
@@ -367,6 +365,188 @@ func TestInputVariableInputLength(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestInputFixedArrayAndVariableInputLength(t *testing.T) {
|
||||
const definition = `[
|
||||
{ "type" : "function", "name" : "fixedArrStr", "constant" : true, "inputs" : [ { "name" : "str", "type" : "string" }, { "name" : "fixedArr", "type" : "uint256[2]" } ] },
|
||||
{ "type" : "function", "name" : "fixedArrBytes", "constant" : true, "inputs" : [ { "name" : "str", "type" : "bytes" }, { "name" : "fixedArr", "type" : "uint256[2]" } ] },
|
||||
{ "type" : "function", "name" : "mixedArrStr", "constant" : true, "inputs" : [ { "name" : "str", "type" : "string" }, { "name" : "fixedArr", "type": "uint256[2]" }, { "name" : "dynArr", "type": "uint256[]" } ] },
|
||||
{ "type" : "function", "name" : "doubleFixedArrStr", "constant" : true, "inputs" : [ { "name" : "str", "type" : "string" }, { "name" : "fixedArr1", "type": "uint256[2]" }, { "name" : "fixedArr2", "type": "uint256[3]" } ] },
|
||||
{ "type" : "function", "name" : "multipleMixedArrStr", "constant" : true, "inputs" : [ { "name" : "str", "type" : "string" }, { "name" : "fixedArr1", "type": "uint256[2]" }, { "name" : "dynArr", "type" : "uint256[]" }, { "name" : "fixedArr2", "type" : "uint256[3]" } ] }
|
||||
]`
|
||||
|
||||
abi, err := JSON(strings.NewReader(definition))
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
// test string, fixed array uint256[2]
|
||||
strin := "hello world"
|
||||
arrin := [2]*big.Int{big.NewInt(1), big.NewInt(2)}
|
||||
fixedArrStrPack, err := abi.Pack("fixedArrStr", strin, arrin)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
// generate expected output
|
||||
offset := make([]byte, 32)
|
||||
offset[31] = 96
|
||||
length := make([]byte, 32)
|
||||
length[31] = byte(len(strin))
|
||||
strvalue := common.RightPadBytes([]byte(strin), 32)
|
||||
arrinvalue1 := common.LeftPadBytes(arrin[0].Bytes(), 32)
|
||||
arrinvalue2 := common.LeftPadBytes(arrin[1].Bytes(), 32)
|
||||
exp := append(offset, arrinvalue1...)
|
||||
exp = append(exp, arrinvalue2...)
|
||||
exp = append(exp, append(length, strvalue...)...)
|
||||
|
||||
// ignore first 4 bytes of the output. This is the function identifier
|
||||
fixedArrStrPack = fixedArrStrPack[4:]
|
||||
if !bytes.Equal(fixedArrStrPack, exp) {
|
||||
t.Errorf("expected %x, got %x\n", exp, fixedArrStrPack)
|
||||
}
|
||||
|
||||
// test byte array, fixed array uint256[2]
|
||||
bytesin := []byte(strin)
|
||||
arrin = [2]*big.Int{big.NewInt(1), big.NewInt(2)}
|
||||
fixedArrBytesPack, err := abi.Pack("fixedArrBytes", bytesin, arrin)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
// generate expected output
|
||||
offset = make([]byte, 32)
|
||||
offset[31] = 96
|
||||
length = make([]byte, 32)
|
||||
length[31] = byte(len(strin))
|
||||
strvalue = common.RightPadBytes([]byte(strin), 32)
|
||||
arrinvalue1 = common.LeftPadBytes(arrin[0].Bytes(), 32)
|
||||
arrinvalue2 = common.LeftPadBytes(arrin[1].Bytes(), 32)
|
||||
exp = append(offset, arrinvalue1...)
|
||||
exp = append(exp, arrinvalue2...)
|
||||
exp = append(exp, append(length, strvalue...)...)
|
||||
|
||||
// ignore first 4 bytes of the output. This is the function identifier
|
||||
fixedArrBytesPack = fixedArrBytesPack[4:]
|
||||
if !bytes.Equal(fixedArrBytesPack, exp) {
|
||||
t.Errorf("expected %x, got %x\n", exp, fixedArrBytesPack)
|
||||
}
|
||||
|
||||
// test string, fixed array uint256[2], dynamic array uint256[]
|
||||
strin = "hello world"
|
||||
fixedarrin := [2]*big.Int{big.NewInt(1), big.NewInt(2)}
|
||||
dynarrin := []*big.Int{big.NewInt(1), big.NewInt(2), big.NewInt(3)}
|
||||
mixedArrStrPack, err := abi.Pack("mixedArrStr", strin, fixedarrin, dynarrin)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
// generate expected output
|
||||
stroffset := make([]byte, 32)
|
||||
stroffset[31] = 128
|
||||
strlength := make([]byte, 32)
|
||||
strlength[31] = byte(len(strin))
|
||||
strvalue = common.RightPadBytes([]byte(strin), 32)
|
||||
fixedarrinvalue1 := common.LeftPadBytes(fixedarrin[0].Bytes(), 32)
|
||||
fixedarrinvalue2 := common.LeftPadBytes(fixedarrin[1].Bytes(), 32)
|
||||
dynarroffset := make([]byte, 32)
|
||||
dynarroffset[31] = byte(160 + ((len(strin)/32)+1)*32)
|
||||
dynarrlength := make([]byte, 32)
|
||||
dynarrlength[31] = byte(len(dynarrin))
|
||||
dynarrinvalue1 := common.LeftPadBytes(dynarrin[0].Bytes(), 32)
|
||||
dynarrinvalue2 := common.LeftPadBytes(dynarrin[1].Bytes(), 32)
|
||||
dynarrinvalue3 := common.LeftPadBytes(dynarrin[2].Bytes(), 32)
|
||||
exp = append(stroffset, fixedarrinvalue1...)
|
||||
exp = append(exp, fixedarrinvalue2...)
|
||||
exp = append(exp, dynarroffset...)
|
||||
exp = append(exp, append(strlength, strvalue...)...)
|
||||
dynarrarg := append(dynarrlength, dynarrinvalue1...)
|
||||
dynarrarg = append(dynarrarg, dynarrinvalue2...)
|
||||
dynarrarg = append(dynarrarg, dynarrinvalue3...)
|
||||
exp = append(exp, dynarrarg...)
|
||||
|
||||
// ignore first 4 bytes of the output. This is the function identifier
|
||||
mixedArrStrPack = mixedArrStrPack[4:]
|
||||
if !bytes.Equal(mixedArrStrPack, exp) {
|
||||
t.Errorf("expected %x, got %x\n", exp, mixedArrStrPack)
|
||||
}
|
||||
|
||||
// test string, fixed array uint256[2], fixed array uint256[3]
|
||||
strin = "hello world"
|
||||
fixedarrin1 := [2]*big.Int{big.NewInt(1), big.NewInt(2)}
|
||||
fixedarrin2 := [3]*big.Int{big.NewInt(1), big.NewInt(2), big.NewInt(3)}
|
||||
doubleFixedArrStrPack, err := abi.Pack("doubleFixedArrStr", strin, fixedarrin1, fixedarrin2)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
// generate expected output
|
||||
stroffset = make([]byte, 32)
|
||||
stroffset[31] = 192
|
||||
strlength = make([]byte, 32)
|
||||
strlength[31] = byte(len(strin))
|
||||
strvalue = common.RightPadBytes([]byte(strin), 32)
|
||||
fixedarrin1value1 := common.LeftPadBytes(fixedarrin1[0].Bytes(), 32)
|
||||
fixedarrin1value2 := common.LeftPadBytes(fixedarrin1[1].Bytes(), 32)
|
||||
fixedarrin2value1 := common.LeftPadBytes(fixedarrin2[0].Bytes(), 32)
|
||||
fixedarrin2value2 := common.LeftPadBytes(fixedarrin2[1].Bytes(), 32)
|
||||
fixedarrin2value3 := common.LeftPadBytes(fixedarrin2[2].Bytes(), 32)
|
||||
exp = append(stroffset, fixedarrin1value1...)
|
||||
exp = append(exp, fixedarrin1value2...)
|
||||
exp = append(exp, fixedarrin2value1...)
|
||||
exp = append(exp, fixedarrin2value2...)
|
||||
exp = append(exp, fixedarrin2value3...)
|
||||
exp = append(exp, append(strlength, strvalue...)...)
|
||||
|
||||
// ignore first 4 bytes of the output. This is the function identifier
|
||||
doubleFixedArrStrPack = doubleFixedArrStrPack[4:]
|
||||
if !bytes.Equal(doubleFixedArrStrPack, exp) {
|
||||
t.Errorf("expected %x, got %x\n", exp, doubleFixedArrStrPack)
|
||||
}
|
||||
|
||||
// test string, fixed array uint256[2], dynamic array uint256[], fixed array uint256[3]
|
||||
strin = "hello world"
|
||||
fixedarrin1 = [2]*big.Int{big.NewInt(1), big.NewInt(2)}
|
||||
dynarrin = []*big.Int{big.NewInt(1), big.NewInt(2)}
|
||||
fixedarrin2 = [3]*big.Int{big.NewInt(1), big.NewInt(2), big.NewInt(3)}
|
||||
multipleMixedArrStrPack, err := abi.Pack("multipleMixedArrStr", strin, fixedarrin1, dynarrin, fixedarrin2)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
// generate expected output
|
||||
stroffset = make([]byte, 32)
|
||||
stroffset[31] = 224
|
||||
strlength = make([]byte, 32)
|
||||
strlength[31] = byte(len(strin))
|
||||
strvalue = common.RightPadBytes([]byte(strin), 32)
|
||||
fixedarrin1value1 = common.LeftPadBytes(fixedarrin1[0].Bytes(), 32)
|
||||
fixedarrin1value2 = common.LeftPadBytes(fixedarrin1[1].Bytes(), 32)
|
||||
dynarroffset = U256(big.NewInt(int64(256 + ((len(strin)/32)+1)*32)))
|
||||
dynarrlength = make([]byte, 32)
|
||||
dynarrlength[31] = byte(len(dynarrin))
|
||||
dynarrinvalue1 = common.LeftPadBytes(dynarrin[0].Bytes(), 32)
|
||||
dynarrinvalue2 = common.LeftPadBytes(dynarrin[1].Bytes(), 32)
|
||||
fixedarrin2value1 = common.LeftPadBytes(fixedarrin2[0].Bytes(), 32)
|
||||
fixedarrin2value2 = common.LeftPadBytes(fixedarrin2[1].Bytes(), 32)
|
||||
fixedarrin2value3 = common.LeftPadBytes(fixedarrin2[2].Bytes(), 32)
|
||||
exp = append(stroffset, fixedarrin1value1...)
|
||||
exp = append(exp, fixedarrin1value2...)
|
||||
exp = append(exp, dynarroffset...)
|
||||
exp = append(exp, fixedarrin2value1...)
|
||||
exp = append(exp, fixedarrin2value2...)
|
||||
exp = append(exp, fixedarrin2value3...)
|
||||
exp = append(exp, append(strlength, strvalue...)...)
|
||||
dynarrarg = append(dynarrlength, dynarrinvalue1...)
|
||||
dynarrarg = append(dynarrarg, dynarrinvalue2...)
|
||||
exp = append(exp, dynarrarg...)
|
||||
|
||||
// ignore first 4 bytes of the output. This is the function identifier
|
||||
multipleMixedArrStrPack = multipleMixedArrStrPack[4:]
|
||||
if !bytes.Equal(multipleMixedArrStrPack, exp) {
|
||||
t.Errorf("expected %x, got %x\n", exp, multipleMixedArrStrPack)
|
||||
}
|
||||
}
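The hand-built offsets in the two cases above (192, 224, and the 256 + ... expression) come straight from the head/tail layout of the ABI encoding; a small standalone sketch of the arithmetic, outside the test:

package main

import "fmt"

func main() {
	// doubleFixedArrStr(string, uint256[2], uint256[3]):
	// head = 1 word (string offset) + 2 words + 3 words = 6 words
	fmt.Println(6 * 32) // 192, the stroffset used above

	// multipleMixedArrStr(string, uint256[2], uint256[], uint256[3]):
	// head = 1 (string offset) + 2 + 1 (dynamic array offset) + 3 = 7 words
	fmt.Println(7 * 32) // 224

	// the dynamic array tail starts after the string tail, which is one length
	// word plus "hello world" padded to a single 32-byte word (64 bytes total)
	fmt.Println(7*32 + 64) // 288, the same value as 256 + ((11/32)+1)*32 above
}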
|
||||
|
||||
func TestDefaultFunctionParsing(t *testing.T) {
|
||||
const definition = `[{ "name" : "balance" }]`
|
||||
|
||||
@@ -437,3 +617,82 @@ func TestBareEvents(t *testing.T) {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TestUnpackEvent is based on this contract:
|
||||
// contract T {
|
||||
// event received(address sender, uint amount, bytes memo);
|
||||
// function receive(bytes memo) external payable {
|
||||
// received(msg.sender, msg.value, memo);
|
||||
// }
|
||||
// }
|
||||
// When receive("X") is called with sender 0x00... and value 1, it produces this tx receipt:
|
||||
// receipt{status=1 cgas=23949 bloom=00000000004000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000040200000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 logs=[log: b6818c8064f645cd82d99b59a1a267d6d61117ef [75fd880d39c1daf53b6547ab6cb59451fc6452d27caa90e5b6649dd8293b9eed] 000000000000000000000000376c47978271565f56deb45495afa69e59c16ab200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000000000158 9ae378b6d4409eada347a5dc0c180f186cb62dc68fcc0f043425eb917335aa28 0 95d429d309bb9d753954195fe2d69bd140b4ae731b9b5b605c34323de162cf00 0]}
|
||||
func TestUnpackEvent(t *testing.T) {
|
||||
const abiJSON = `[{"constant":false,"inputs":[{"name":"memo","type":"bytes"}],"name":"receive","outputs":[],"payable":true,"stateMutability":"payable","type":"function"},{"anonymous":false,"inputs":[{"indexed":false,"name":"sender","type":"address"},{"indexed":false,"name":"amount","type":"uint256"},{"indexed":false,"name":"memo","type":"bytes"}],"name":"received","type":"event"}]`
|
||||
abi, err := JSON(strings.NewReader(abiJSON))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
const hexdata = `000000000000000000000000376c47978271565f56deb45495afa69e59c16ab200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000000000158`
|
||||
data, err := hex.DecodeString(hexdata)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if len(data)%32 == 0 {
|
||||
t.Errorf("len(data) is %d, want a non-multiple of 32", len(data))
|
||||
}
|
||||
|
||||
type ReceivedEvent struct {
|
||||
Address common.Address
|
||||
Amount *big.Int
|
||||
Memo []byte
|
||||
}
|
||||
var ev ReceivedEvent
|
||||
|
||||
err = abi.Unpack(&ev, "received", data)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
} else {
|
||||
t.Logf("len(data): %d; received event: %+v", len(data), ev)
|
||||
}
|
||||
}
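To make the blob above easier to follow, here is a standalone sketch (plain Go, no geth imports) that splits the same hex payload into its 32-byte ABI words; the word roles noted in the comments are read off the event definition above:

package main

import (
	"encoding/hex"
	"fmt"
)

func main() {
	const hexdata = `000000000000000000000000376c47978271565f56deb45495afa69e59c16ab200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000000000158`

	data, err := hex.DecodeString(hexdata)
	if err != nil {
		panic(err)
	}
	// word 0: sender address, word 1: amount (1), word 2: byte offset of the
	// memo (0x60 = 96), word 3: memo length (1); the single memo byte 0x58 ("X")
	// is left unpadded, which is why len(data) is deliberately not a multiple of 32.
	for i := 0; i+32 <= len(data); i += 32 {
		fmt.Printf("word %d: %x\n", i/32, data[i:i+32])
	}
	fmt.Printf("tail:   %x (%d byte(s) past the last full word)\n", data[len(data)/32*32:], len(data)%32)
}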
|
||||
|
||||
func TestABI_MethodById(t *testing.T) {
|
||||
const abiJSON = `[
|
||||
{"type":"function","name":"receive","constant":false,"inputs":[{"name":"memo","type":"bytes"}],"outputs":[],"payable":true,"stateMutability":"payable"},
|
||||
{"type":"event","name":"received","anonymous":false,"inputs":[{"indexed":false,"name":"sender","type":"address"},{"indexed":false,"name":"amount","type":"uint256"},{"indexed":false,"name":"memo","type":"bytes"}]},
|
||||
{"type":"function","name":"fixedArrStr","constant":true,"inputs":[{"name":"str","type":"string"},{"name":"fixedArr","type":"uint256[2]"}]},
|
||||
{"type":"function","name":"fixedArrBytes","constant":true,"inputs":[{"name":"str","type":"bytes"},{"name":"fixedArr","type":"uint256[2]"}]},
|
||||
{"type":"function","name":"mixedArrStr","constant":true,"inputs":[{"name":"str","type":"string"},{"name":"fixedArr","type":"uint256[2]"},{"name":"dynArr","type":"uint256[]"}]},
|
||||
{"type":"function","name":"doubleFixedArrStr","constant":true,"inputs":[{"name":"str","type":"string"},{"name":"fixedArr1","type":"uint256[2]"},{"name":"fixedArr2","type":"uint256[3]"}]},
|
||||
{"type":"function","name":"multipleMixedArrStr","constant":true,"inputs":[{"name":"str","type":"string"},{"name":"fixedArr1","type":"uint256[2]"},{"name":"dynArr","type":"uint256[]"},{"name":"fixedArr2","type":"uint256[3]"}]},
|
||||
{"type":"function","name":"balance","constant":true},
|
||||
{"type":"function","name":"send","constant":false,"inputs":[{"name":"amount","type":"uint256"}]},
|
||||
{"type":"function","name":"test","constant":false,"inputs":[{"name":"number","type":"uint32"}]},
|
||||
{"type":"function","name":"string","constant":false,"inputs":[{"name":"inputs","type":"string"}]},
|
||||
{"type":"function","name":"bool","constant":false,"inputs":[{"name":"inputs","type":"bool"}]},
|
||||
{"type":"function","name":"address","constant":false,"inputs":[{"name":"inputs","type":"address"}]},
|
||||
{"type":"function","name":"uint64[2]","constant":false,"inputs":[{"name":"inputs","type":"uint64[2]"}]},
|
||||
{"type":"function","name":"uint64[]","constant":false,"inputs":[{"name":"inputs","type":"uint64[]"}]},
|
||||
{"type":"function","name":"foo","constant":false,"inputs":[{"name":"inputs","type":"uint32"}]},
|
||||
{"type":"function","name":"bar","constant":false,"inputs":[{"name":"inputs","type":"uint32"},{"name":"string","type":"uint16"}]},
|
||||
{"type":"function","name":"_slice","constant":false,"inputs":[{"name":"inputs","type":"uint32[2]"}]},
|
||||
{"type":"function","name":"__slice256","constant":false,"inputs":[{"name":"inputs","type":"uint256[2]"}]},
|
||||
{"type":"function","name":"sliceAddress","constant":false,"inputs":[{"name":"inputs","type":"address[]"}]},
|
||||
{"type":"function","name":"sliceMultiAddress","constant":false,"inputs":[{"name":"a","type":"address[]"},{"name":"b","type":"address[]"}]}
|
||||
]
|
||||
`
|
||||
abi, err := JSON(strings.NewReader(abiJSON))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
for name, m := range abi.Methods {
|
||||
a := fmt.Sprintf("%v", m)
|
||||
b := fmt.Sprintf("%v", abi.MethodById(m.Id()))
|
||||
if a != b {
|
||||
t.Errorf("Method %v (id %v) not 'findable' by id in ABI", name, common.ToHex(m.Id()))
|
||||
}
|
||||
}
|
||||
|
||||
}
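MethodById keys off the 4-byte selector that m.Id() returns; as a rough standalone illustration (Keccak-256 taken from golang.org/x/crypto/sha3 here, an assumption, rather than the repo's own crypto helpers), the selector is the first four bytes of the Keccak hash of the canonical signature:

package main

import (
	"fmt"

	"golang.org/x/crypto/sha3"
)

// selector returns the first four bytes of Keccak-256(signature); m.Id() in the
// loop above is that selector for the method's canonical signature.
func selector(signature string) []byte {
	h := sha3.NewLegacyKeccak256()
	h.Write([]byte(signature))
	return h.Sum(nil)[:4]
}

func main() {
	fmt.Printf("send(uint256)  -> %x\n", selector("send(uint256)"))
	fmt.Printf("receive(bytes) -> %x\n", selector("receive(bytes)"))
}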
|
||||
|
@@ -19,6 +19,8 @@ package abi
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Argument holds the name of the argument and the corresponding type.
|
||||
@@ -29,7 +31,10 @@ type Argument struct {
|
||||
Indexed bool // indexed is only used by events
|
||||
}
|
||||
|
||||
func (a *Argument) UnmarshalJSON(data []byte) error {
|
||||
type Arguments []Argument
|
||||
|
||||
// UnmarshalJSON implements json.Unmarshaler interface
|
||||
func (argument *Argument) UnmarshalJSON(data []byte) error {
|
||||
var extarg struct {
|
||||
Name string
|
||||
Type string
|
||||
@@ -40,12 +45,206 @@ func (a *Argument) UnmarshalJSON(data []byte) error {
|
||||
return fmt.Errorf("argument json err: %v", err)
|
||||
}
|
||||
|
||||
a.Type, err = NewType(extarg.Type)
|
||||
argument.Type, err = NewType(extarg.Type)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
a.Name = extarg.Name
|
||||
a.Indexed = extarg.Indexed
|
||||
argument.Name = extarg.Name
|
||||
argument.Indexed = extarg.Indexed
|
||||
|
||||
return nil
|
||||
}
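For reference, the raw JSON a single argument is decoded from looks like the snippet below; this standalone sketch stops at the intermediate struct and omits the NewType conversion performed above:

package main

import (
	"encoding/json"
	"fmt"
)

// extArg mirrors the intermediate struct UnmarshalJSON decodes into; the real
// code then turns the Type string into an abi.Type via NewType.
type extArg struct {
	Name    string
	Type    string
	Indexed bool
}

func main() {
	blob := []byte(`{"indexed":true,"name":"sender","type":"address"}`)

	var arg extArg
	if err := json.Unmarshal(blob, &arg); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", arg) // {Name:sender Type:address Indexed:true}
}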
|
||||
|
||||
// LengthNonIndexed returns the number of arguments when not counting 'indexed' ones. Only events
|
||||
// can ever have 'indexed' arguments; it should always be false for method input/output arguments
|
||||
func (arguments Arguments) LengthNonIndexed() int {
|
||||
out := 0
|
||||
for _, arg := range arguments {
|
||||
if !arg.Indexed {
|
||||
out++
|
||||
}
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
// isTuple returns true for non-atomic constructs, like (uint,uint) or uint[]
|
||||
func (arguments Arguments) isTuple() bool {
|
||||
return len(arguments) > 1
|
||||
}
|
||||
|
||||
// Unpack performs the operation hexdata -> Go format
|
||||
func (arguments Arguments) Unpack(v interface{}, data []byte) error {
|
||||
if arguments.isTuple() {
|
||||
return arguments.unpackTuple(v, data)
|
||||
}
|
||||
return arguments.unpackAtomic(v, data)
|
||||
}
|
||||
|
||||
func (arguments Arguments) unpackTuple(v interface{}, output []byte) error {
|
||||
// make sure the passed value is a pointer
|
||||
valueOf := reflect.ValueOf(v)
|
||||
if reflect.Ptr != valueOf.Kind() {
|
||||
return fmt.Errorf("abi: Unpack(non-pointer %T)", v)
|
||||
}
|
||||
|
||||
var (
|
||||
value = valueOf.Elem()
|
||||
typ = value.Type()
|
||||
kind = value.Kind()
|
||||
)
|
||||
|
||||
if err := requireUnpackKind(value, typ, kind, arguments); err != nil {
|
||||
return err
|
||||
}
|
||||
// If the output interface is a struct, make sure names don't collide
|
||||
if kind == reflect.Struct {
|
||||
exists := make(map[string]bool)
|
||||
for _, arg := range arguments {
|
||||
field := capitalise(arg.Name)
|
||||
if field == "" {
|
||||
return fmt.Errorf("abi: purely underscored output cannot unpack to struct")
|
||||
}
|
||||
if exists[field] {
|
||||
return fmt.Errorf("abi: multiple outputs mapping to the same struct field '%s'", field)
|
||||
}
|
||||
exists[field] = true
|
||||
}
|
||||
}
|
||||
// `i` counts the nonindexed arguments.
|
||||
// `j` counts the number of complex types.
|
||||
// both `i` and `j` are used to correctly compute `data` offset.
|
||||
|
||||
i, j := -1, 0
|
||||
for _, arg := range arguments {
|
||||
|
||||
if arg.Indexed {
|
||||
// can't read, continue
|
||||
continue
|
||||
}
|
||||
i++
|
||||
marshalledValue, err := toGoType((i+j)*32, arg.Type, output)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if arg.Type.T == ArrayTy {
|
||||
// the combined index ('i' + 'j') needs to advance by the full array size, thus
|
||||
// 'j' only grows by Size - 1 here because 'i' itself was already incremented
|
||||
j += arg.Type.Size - 1
|
||||
}
|
||||
|
||||
reflectValue := reflect.ValueOf(marshalledValue)
|
||||
|
||||
switch kind {
|
||||
case reflect.Struct:
|
||||
name := capitalise(arg.Name)
|
||||
for j := 0; j < typ.NumField(); j++ {
|
||||
// TODO read tags: `abi:"fieldName"`
|
||||
if typ.Field(j).Name == name {
|
||||
if err := set(value.Field(j), reflectValue, arg); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
case reflect.Slice, reflect.Array:
|
||||
if value.Len() < i {
|
||||
return fmt.Errorf("abi: insufficient number of arguments for unpack, want %d, got %d", len(arguments), value.Len())
|
||||
}
|
||||
v := value.Index(i)
|
||||
if err := requireAssignable(v, reflectValue); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := set(v.Elem(), reflectValue, arg); err != nil {
|
||||
return err
|
||||
}
|
||||
default:
|
||||
return fmt.Errorf("abi:[2] cannot unmarshal tuple in to %v", typ)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
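A toy walk-through of the (i+j)*32 bookkeeping above, for a hypothetical argument list (uint256, uint256[3], uint256); only the offset arithmetic is mirrored, nothing is actually decoded:

package main

import "fmt"

// arg is a tiny stand-in for abi.Argument: only the fixed-array size matters
// for the offset bookkeeping shown in unpackTuple above.
type arg struct {
	name      string
	arraySize int // 0 means "not a fixed-size array"
}

func main() {
	// hypothetical signature: (uint256 a, uint256[3] b, uint256 c)
	args := []arg{{"a", 0}, {"b", 3}, {"c", 0}}

	i, j := -1, 0
	for _, a := range args {
		i++
		fmt.Printf("%s head word starts at byte %d\n", a.name, (i+j)*32)
		if a.arraySize > 0 {
			// the combined index must advance by the array size; 'i' already
			// moved one word, so 'j' only grows by size-1
			j += a.arraySize - 1
		}
	}
	// prints offsets 0, 32 and 128: the fixed array occupies words 1..3 inline
}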
|
||||
|
||||
// unpackAtomic unpacks ( hexdata -> go ) a single value
|
||||
func (arguments Arguments) unpackAtomic(v interface{}, output []byte) error {
|
||||
// make sure the passed value is a pointer
|
||||
valueOf := reflect.ValueOf(v)
|
||||
if reflect.Ptr != valueOf.Kind() {
|
||||
return fmt.Errorf("abi: Unpack(non-pointer %T)", v)
|
||||
}
|
||||
arg := arguments[0]
|
||||
if arg.Indexed {
|
||||
return fmt.Errorf("abi: attempting to unpack indexed variable into element.")
|
||||
}
|
||||
|
||||
value := valueOf.Elem()
|
||||
|
||||
marshalledValue, err := toGoType(0, arg.Type, output)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return set(value, reflect.ValueOf(marshalledValue), arg)
|
||||
}
|
||||
|
||||
// Unpack performs the operation Go format -> Hexdata
|
||||
func (arguments Arguments) Pack(args ...interface{}) ([]byte, error) {
|
||||
// Make sure arguments match up and pack them
|
||||
abiArgs := arguments
|
||||
if len(args) != len(abiArgs) {
|
||||
return nil, fmt.Errorf("argument count mismatch: %d for %d", len(args), len(abiArgs))
|
||||
}
|
||||
|
||||
// variable input is the output appended at the end of packed
|
||||
// output. This is used for strings and bytes types input.
|
||||
var variableInput []byte
|
||||
|
||||
// input offset is the bytes offset for packed output
|
||||
inputOffset := 0
|
||||
for _, abiArg := range abiArgs {
|
||||
if abiArg.Type.T == ArrayTy {
|
||||
inputOffset += 32 * abiArg.Type.Size
|
||||
} else {
|
||||
inputOffset += 32
|
||||
}
|
||||
}
|
||||
|
||||
var ret []byte
|
||||
for i, a := range args {
|
||||
input := abiArgs[i]
|
||||
// pack the input
|
||||
packed, err := input.Type.pack(reflect.ValueOf(a))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// check for a slice type (string, bytes, slice)
|
||||
if input.Type.requiresLengthPrefix() {
|
||||
// calculate the offset
|
||||
offset := inputOffset + len(variableInput)
|
||||
// set the offset
|
||||
ret = append(ret, packNum(reflect.ValueOf(offset))...)
|
||||
// Append the packed output to the variable input. The variable input
|
||||
// will be appended at the end of the input.
|
||||
variableInput = append(variableInput, packed...)
|
||||
} else {
|
||||
// append the packed value to the input
|
||||
ret = append(ret, packed...)
|
||||
}
|
||||
}
|
||||
// append the variable input at the end of the packed input
|
||||
ret = append(ret, variableInput...)
|
||||
|
||||
return ret, nil
|
||||
}
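The head/tail layout Pack emits can be reproduced by hand for a simple case. A standalone sketch for a hypothetical method taking (uint256, string); word and rightPad32 are local stand-ins for packNum and common.RightPadBytes:

package main

import (
	"encoding/binary"
	"fmt"
)

// word returns a 32-byte big-endian word holding n, a local stand-in for packNum.
func word(n uint64) []byte {
	b := make([]byte, 32)
	binary.BigEndian.PutUint64(b[24:], n)
	return b
}

// rightPad32 zero-pads b to the next multiple of 32 bytes, like common.RightPadBytes.
func rightPad32(b []byte) []byte {
	padded := make([]byte, (len(b)+31)/32*32)
	copy(padded, b)
	return padded
}

func main() {
	// Packing hypothetical inputs (uint256 42, string "hi"): the head holds the
	// static value plus an offset to the tail, the tail holds length + data.
	str := "hi"
	head := append(word(42), word(64)...) // offset 64: the tail starts after two 32-byte head slots
	tail := append(word(uint64(len(str))), rightPad32([]byte(str))...)

	fmt.Printf("%x\n", append(head, tail...))
}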
|
||||
|
||||
// capitalise makes the first character of a string upper case, also removing any
|
||||
// prefixing underscores from the variable names.
|
||||
func capitalise(input string) string {
|
||||
for len(input) > 0 && input[0] == '_' {
|
||||
input = input[1:]
|
||||
}
|
||||
if len(input) == 0 {
|
||||
return ""
|
||||
}
|
||||
return strings.ToUpper(input[:1]) + input[1:]
|
||||
}
|
||||
|
@@ -52,12 +52,6 @@ type ContractCaller interface {
|
||||
CallContract(ctx context.Context, call ethereum.CallMsg, blockNumber *big.Int) ([]byte, error)
|
||||
}
|
||||
|
||||
// DeployBackend wraps the operations needed by WaitMined and WaitDeployed.
|
||||
type DeployBackend interface {
|
||||
TransactionReceipt(ctx context.Context, txHash common.Hash) (*types.Receipt, error)
|
||||
CodeAt(ctx context.Context, account common.Address, blockNumber *big.Int) ([]byte, error)
|
||||
}
|
||||
|
||||
// PendingContractCaller defines methods to perform contract calls on the pending state.
|
||||
// Call will try to discover this interface when access to the pending state is requested.
|
||||
// If the backend does not support the pending state, Call returns ErrNoPendingState.
|
||||
@@ -85,13 +79,34 @@ type ContractTransactor interface {
|
||||
// There is no guarantee that this is the true gas limit requirement as other
|
||||
// transactions may be added or removed by miners, but it should provide a basis
|
||||
// for setting a reasonable default.
|
||||
EstimateGas(ctx context.Context, call ethereum.CallMsg) (usedGas *big.Int, err error)
|
||||
EstimateGas(ctx context.Context, call ethereum.CallMsg) (gas uint64, err error)
|
||||
// SendTransaction injects the transaction into the pending pool for execution.
|
||||
SendTransaction(ctx context.Context, tx *types.Transaction) error
|
||||
}
|
||||
|
||||
// ContractFilterer defines the methods needed to access log events using one-off
|
||||
// queries or continuous event subscriptions.
|
||||
type ContractFilterer interface {
|
||||
// FilterLogs executes a log filter operation, blocking during execution and
|
||||
// returning all the results in one batch.
|
||||
//
|
||||
// TODO(karalabe): Deprecate when the subscription one can return past data too.
|
||||
FilterLogs(ctx context.Context, query ethereum.FilterQuery) ([]types.Log, error)
|
||||
|
||||
// SubscribeFilterLogs creates a background log filtering operation, returning
|
||||
// a subscription immediately, which can be used to stream the found events.
|
||||
SubscribeFilterLogs(ctx context.Context, query ethereum.FilterQuery, ch chan<- types.Log) (ethereum.Subscription, error)
|
||||
}
|
||||
|
||||
// DeployBackend wraps the operations needed by WaitMined and WaitDeployed.
|
||||
type DeployBackend interface {
|
||||
TransactionReceipt(ctx context.Context, txHash common.Hash) (*types.Receipt, error)
|
||||
CodeAt(ctx context.Context, account common.Address, blockNumber *big.Int) ([]byte, error)
|
||||
}
|
||||
|
||||
// ContractBackend defines the methods needed to work with contracts on a read-write basis.
|
||||
type ContractBackend interface {
|
||||
ContractCaller
|
||||
ContractTransactor
|
||||
ContractFilterer
|
||||
}
|
||||
|
@@ -22,6 +22,7 @@ import (
|
||||
"fmt"
|
||||
"math/big"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum"
|
||||
"github.com/ethereum/go-ethereum/accounts/abi/bind"
|
||||
@@ -29,18 +30,22 @@ import (
|
||||
"github.com/ethereum/go-ethereum/common/math"
|
||||
"github.com/ethereum/go-ethereum/consensus/ethash"
|
||||
"github.com/ethereum/go-ethereum/core"
|
||||
"github.com/ethereum/go-ethereum/core/bloombits"
|
||||
"github.com/ethereum/go-ethereum/core/state"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/core/vm"
|
||||
"github.com/ethereum/go-ethereum/eth/filters"
|
||||
"github.com/ethereum/go-ethereum/ethdb"
|
||||
"github.com/ethereum/go-ethereum/event"
|
||||
"github.com/ethereum/go-ethereum/params"
|
||||
"github.com/ethereum/go-ethereum/rpc"
|
||||
)
|
||||
|
||||
// This nil assignment ensures at compile time that SimulatedBackend implements bind.ContractBackend.
|
||||
var _ bind.ContractBackend = (*SimulatedBackend)(nil)
|
||||
|
||||
var errBlockNumberUnsupported = errors.New("SimulatedBackend cannot access blocks other than the latest block")
|
||||
var errGasEstimationFailed = errors.New("gas required exceeds allowance or always failing transaction")
|
||||
|
||||
// SimulatedBackend implements bind.ContractBackend, simulating a blockchain in
|
||||
// the background. Its main purpose is to allow easily testing contract bindings.
|
||||
@@ -52,6 +57,8 @@ type SimulatedBackend struct {
|
||||
pendingBlock *types.Block // Currently pending block that will be imported on request
|
||||
pendingState *state.StateDB // Currently pending state that will be the active one on request
|
||||
|
||||
events *filters.EventSystem // Event system for filtering log events live
|
||||
|
||||
config *params.ChainConfig
|
||||
}
|
||||
|
||||
@@ -59,10 +66,16 @@ type SimulatedBackend struct {
|
||||
// for testing purposes.
|
||||
func NewSimulatedBackend(alloc core.GenesisAlloc) *SimulatedBackend {
|
||||
database, _ := ethdb.NewMemDatabase()
|
||||
genesis := core.Genesis{Config: params.AllProtocolChanges, Alloc: alloc}
|
||||
genesis := core.Genesis{Config: params.AllEthashProtocolChanges, Alloc: alloc}
|
||||
genesis.MustCommit(database)
|
||||
blockchain, _ := core.NewBlockChain(database, genesis.Config, ethash.NewFaker(), new(event.TypeMux), vm.Config{})
|
||||
backend := &SimulatedBackend{database: database, blockchain: blockchain, config: genesis.Config}
|
||||
blockchain, _ := core.NewBlockChain(database, nil, genesis.Config, ethash.NewFaker(), vm.Config{})
|
||||
|
||||
backend := &SimulatedBackend{
|
||||
database: database,
|
||||
blockchain: blockchain,
|
||||
config: genesis.Config,
|
||||
events: filters.NewEventSystem(new(event.TypeMux), &filterBackend{database, blockchain}, false),
|
||||
}
|
||||
backend.rollback()
|
||||
return backend
|
||||
}
|
||||
@@ -88,9 +101,11 @@ func (b *SimulatedBackend) Rollback() {
|
||||
}
|
||||
|
||||
func (b *SimulatedBackend) rollback() {
|
||||
blocks, _ := core.GenerateChain(b.config, b.blockchain.CurrentBlock(), b.database, 1, func(int, *core.BlockGen) {})
|
||||
blocks, _ := core.GenerateChain(b.config, b.blockchain.CurrentBlock(), ethash.NewFaker(), b.database, 1, func(int, *core.BlockGen) {})
|
||||
statedb, _ := b.blockchain.State()
|
||||
|
||||
b.pendingBlock = blocks[0]
|
||||
b.pendingState, _ = state.New(b.pendingBlock.Root(), state.NewDatabase(b.database))
|
||||
b.pendingState, _ = state.New(b.pendingBlock.Root(), statedb.Database())
|
||||
}
|
||||
|
||||
// CodeAt returns the code associated with a certain account in the blockchain.
|
||||
@@ -144,7 +159,8 @@ func (b *SimulatedBackend) StorageAt(ctx context.Context, contract common.Addres
|
||||
|
||||
// TransactionReceipt returns the receipt of a transaction.
|
||||
func (b *SimulatedBackend) TransactionReceipt(ctx context.Context, txHash common.Hash) (*types.Receipt, error) {
|
||||
return core.GetReceipt(b.database, txHash), nil
|
||||
receipt, _, _, _ := core.GetReceipt(b.database, txHash)
|
||||
return receipt, nil
|
||||
}
|
||||
|
||||
// PendingCodeAt returns the code associated with an account in the pending state.
|
||||
@@ -167,7 +183,7 @@ func (b *SimulatedBackend) CallContract(ctx context.Context, call ethereum.CallM
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
rval, _, err := b.callContract(ctx, call, b.blockchain.CurrentBlock(), state)
|
||||
rval, _, _, err := b.callContract(ctx, call, b.blockchain.CurrentBlock(), state)
|
||||
return rval, err
|
||||
}
|
||||
|
||||
@@ -177,7 +193,7 @@ func (b *SimulatedBackend) PendingCallContract(ctx context.Context, call ethereu
|
||||
defer b.mu.Unlock()
|
||||
defer b.pendingState.RevertToSnapshot(b.pendingState.Snapshot())
|
||||
|
||||
rval, _, err := b.callContract(ctx, call, b.pendingBlock, b.pendingState)
|
||||
rval, _, _, err := b.callContract(ctx, call, b.pendingBlock, b.pendingState)
|
||||
return rval, err
|
||||
}
|
||||
|
||||
@@ -198,46 +214,63 @@ func (b *SimulatedBackend) SuggestGasPrice(ctx context.Context) (*big.Int, error
|
||||
|
||||
// EstimateGas executes the requested code against the currently pending block/state and
|
||||
// returns the used amount of gas.
|
||||
func (b *SimulatedBackend) EstimateGas(ctx context.Context, call ethereum.CallMsg) (*big.Int, error) {
|
||||
func (b *SimulatedBackend) EstimateGas(ctx context.Context, call ethereum.CallMsg) (uint64, error) {
|
||||
b.mu.Lock()
|
||||
defer b.mu.Unlock()
|
||||
|
||||
// Binary search the gas requirement, as it may be higher than the amount used
|
||||
var lo, hi uint64
|
||||
if call.Gas != nil {
|
||||
hi = call.Gas.Uint64()
|
||||
// Determine the lowest and highest possible gas limits to binary search in between
|
||||
var (
|
||||
lo uint64 = params.TxGas - 1
|
||||
hi uint64
|
||||
cap uint64
|
||||
)
|
||||
if call.Gas >= params.TxGas {
|
||||
hi = call.Gas
|
||||
} else {
|
||||
hi = b.pendingBlock.GasLimit().Uint64()
|
||||
hi = b.pendingBlock.GasLimit()
|
||||
}
|
||||
for lo+1 < hi {
|
||||
// Take a guess at the gas, and check transaction validity
|
||||
mid := (hi + lo) / 2
|
||||
call.Gas = new(big.Int).SetUint64(mid)
|
||||
cap = hi
|
||||
|
||||
// Create a helper to check if a gas allowance results in an executable transaction
|
||||
executable := func(gas uint64) bool {
|
||||
call.Gas = gas
|
||||
|
||||
snapshot := b.pendingState.Snapshot()
|
||||
_, gas, err := b.callContract(ctx, call, b.pendingBlock, b.pendingState)
|
||||
_, _, failed, err := b.callContract(ctx, call, b.pendingBlock, b.pendingState)
|
||||
b.pendingState.RevertToSnapshot(snapshot)
|
||||
|
||||
// If the transaction became invalid or used all the gas (failed), raise the gas limit
|
||||
if err != nil || gas.Cmp(call.Gas) == 0 {
|
||||
lo = mid
|
||||
continue
|
||||
if err != nil || failed {
|
||||
return false
|
||||
}
|
||||
// Otherwise assume the transaction succeeded, lower the gas limit
|
||||
hi = mid
|
||||
return true
|
||||
}
|
||||
return new(big.Int).SetUint64(hi), nil
|
||||
// Execute the binary search and hone in on an executable gas limit
|
||||
for lo+1 < hi {
|
||||
mid := (hi + lo) / 2
|
||||
if !executable(mid) {
|
||||
lo = mid
|
||||
} else {
|
||||
hi = mid
|
||||
}
|
||||
}
|
||||
// Reject the transaction as invalid if it still fails at the highest allowance
|
||||
if hi == cap {
|
||||
if !executable(hi) {
|
||||
return 0, errGasEstimationFailed
|
||||
}
|
||||
}
|
||||
return hi, nil
|
||||
}
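Stripped of the EVM plumbing, the estimation is a plain lower-bound binary search; a minimal standalone sketch where executable is a stand-in predicate instead of a real callContract run:

package main

import "fmt"

// estimate mirrors the search above: find the lowest gas in (lo, hi] for which
// the executable predicate holds, or report failure if even hi is not enough.
func estimate(lo, hi uint64, executable func(gas uint64) bool) (uint64, bool) {
	limit := hi
	for lo+1 < hi {
		mid := (hi + lo) / 2
		if !executable(mid) {
			lo = mid
		} else {
			hi = mid
		}
	}
	if hi == limit && !executable(hi) {
		return 0, false // still failing at the highest allowance
	}
	return hi, true
}

func main() {
	// pretend the call needs at least 53000 gas to succeed
	need := uint64(53000)
	gas, ok := estimate(21000-1, 8000000, func(g uint64) bool { return g >= need })
	fmt.Println(gas, ok) // 53000 true
}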
|
||||
|
||||
// callContract implemens common code between normal and pending contract calls.
|
||||
// callContract implements common code between normal and pending contract calls.
|
||||
// state is modified during execution, make sure to copy it if necessary.
|
||||
func (b *SimulatedBackend) callContract(ctx context.Context, call ethereum.CallMsg, block *types.Block, statedb *state.StateDB) ([]byte, *big.Int, error) {
|
||||
func (b *SimulatedBackend) callContract(ctx context.Context, call ethereum.CallMsg, block *types.Block, statedb *state.StateDB) ([]byte, uint64, bool, error) {
|
||||
// Ensure message is initialized properly.
|
||||
if call.GasPrice == nil {
|
||||
call.GasPrice = big.NewInt(1)
|
||||
}
|
||||
if call.Gas == nil || call.Gas.Sign() == 0 {
|
||||
call.Gas = big.NewInt(50000000)
|
||||
if call.Gas == 0 {
|
||||
call.Gas = 50000000
|
||||
}
|
||||
if call.Value == nil {
|
||||
call.Value = new(big.Int)
|
||||
@@ -252,9 +285,9 @@ func (b *SimulatedBackend) callContract(ctx context.Context, call ethereum.CallM
|
||||
// Create a new environment which holds all relevant information
|
||||
// about the transaction and calling mechanisms.
|
||||
vmenv := vm.NewEVM(evmContext, statedb, b.config, vm.Config{})
|
||||
gaspool := new(core.GasPool).AddGas(math.MaxBig256)
|
||||
ret, gasUsed, _, err := core.NewStateTransition(vmenv, msg, gaspool).TransitionDb()
|
||||
return ret, gasUsed, err
|
||||
gaspool := new(core.GasPool).AddGas(math.MaxUint64)
|
||||
|
||||
return core.NewStateTransition(vmenv, msg, gaspool).TransitionDb()
|
||||
}
|
||||
|
||||
// SendTransaction updates the pending block to include the given transaction.
|
||||
@@ -272,14 +305,96 @@ func (b *SimulatedBackend) SendTransaction(ctx context.Context, tx *types.Transa
|
||||
panic(fmt.Errorf("invalid transaction nonce: got %d, want %d", tx.Nonce(), nonce))
|
||||
}
|
||||
|
||||
blocks, _ := core.GenerateChain(b.config, b.blockchain.CurrentBlock(), b.database, 1, func(number int, block *core.BlockGen) {
|
||||
blocks, _ := core.GenerateChain(b.config, b.blockchain.CurrentBlock(), ethash.NewFaker(), b.database, 1, func(number int, block *core.BlockGen) {
|
||||
for _, tx := range b.pendingBlock.Transactions() {
|
||||
block.AddTx(tx)
|
||||
}
|
||||
block.AddTx(tx)
|
||||
})
|
||||
statedb, _ := b.blockchain.State()
|
||||
|
||||
b.pendingBlock = blocks[0]
|
||||
b.pendingState, _ = state.New(b.pendingBlock.Root(), state.NewDatabase(b.database))
|
||||
b.pendingState, _ = state.New(b.pendingBlock.Root(), statedb.Database())
|
||||
return nil
|
||||
}
|
||||
|
||||
// FilterLogs executes a log filter operation, blocking during execution and
|
||||
// returning all the results in one batch.
|
||||
//
|
||||
// TODO(karalabe): Deprecate when the subscription one can return past data too.
|
||||
func (b *SimulatedBackend) FilterLogs(ctx context.Context, query ethereum.FilterQuery) ([]types.Log, error) {
|
||||
// Initialize unset filter boundaries to run from genesis to chain head
|
||||
from := int64(0)
|
||||
if query.FromBlock != nil {
|
||||
from = query.FromBlock.Int64()
|
||||
}
|
||||
to := int64(-1)
|
||||
if query.ToBlock != nil {
|
||||
to = query.ToBlock.Int64()
|
||||
}
|
||||
// Construct and execute the filter
|
||||
filter := filters.New(&filterBackend{b.database, b.blockchain}, from, to, query.Addresses, query.Topics)
|
||||
|
||||
logs, err := filter.Logs(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
res := make([]types.Log, len(logs))
|
||||
for i, log := range logs {
|
||||
res[i] = *log
|
||||
}
|
||||
return res, nil
|
||||
}
|
||||
|
||||
// SubscribeFilterLogs creates a background log filtering operation, returning a
|
||||
// subscription immediately, which can be used to stream the found events.
|
||||
func (b *SimulatedBackend) SubscribeFilterLogs(ctx context.Context, query ethereum.FilterQuery, ch chan<- types.Log) (ethereum.Subscription, error) {
|
||||
// Subscribe to contract events
|
||||
sink := make(chan []*types.Log)
|
||||
|
||||
sub, err := b.events.SubscribeLogs(query, sink)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// Since we're getting logs in batches, we need to flatten them into a plain stream
|
||||
return event.NewSubscription(func(quit <-chan struct{}) error {
|
||||
defer sub.Unsubscribe()
|
||||
for {
|
||||
select {
|
||||
case logs := <-sink:
|
||||
for _, log := range logs {
|
||||
select {
|
||||
case ch <- *log:
|
||||
case err := <-sub.Err():
|
||||
return err
|
||||
case <-quit:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
case err := <-sub.Err():
|
||||
return err
|
||||
case <-quit:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}), nil
|
||||
}
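The batch-to-stream conversion in the subscription above is independent of the filtering machinery; a minimal standalone sketch of the same select loop over plain int batches:

package main

import "fmt"

// flatten re-sends every element of each incoming batch on out, one at a time,
// until quit is closed; the real code does the same with []*types.Log batches.
func flatten(sink <-chan []int, out chan<- int, quit <-chan struct{}) {
	for {
		select {
		case batch := <-sink:
			for _, v := range batch {
				select {
				case out <- v:
				case <-quit:
					return
				}
			}
		case <-quit:
			return
		}
	}
}

func main() {
	sink := make(chan []int, 1)
	out := make(chan int, 3)
	quit := make(chan struct{})

	sink <- []int{1, 2, 3}
	go flatten(sink, out, quit)

	for i := 0; i < 3; i++ {
		fmt.Println(<-out)
	}
	close(quit)
}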
|
||||
|
||||
// AdjustTime adds a time shift to the simulated clock.
|
||||
func (b *SimulatedBackend) AdjustTime(adjustment time.Duration) error {
|
||||
b.mu.Lock()
|
||||
defer b.mu.Unlock()
|
||||
blocks, _ := core.GenerateChain(b.config, b.blockchain.CurrentBlock(), ethash.NewFaker(), b.database, 1, func(number int, block *core.BlockGen) {
|
||||
for _, tx := range b.pendingBlock.Transactions() {
|
||||
block.AddTx(tx)
|
||||
}
|
||||
block.OffsetTime(int64(adjustment.Seconds()))
|
||||
})
|
||||
statedb, _ := b.blockchain.State()
|
||||
|
||||
b.pendingBlock = blocks[0]
|
||||
b.pendingState, _ = state.New(b.pendingBlock.Root(), statedb.Database())
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -293,6 +408,47 @@ func (m callmsg) Nonce() uint64 { return 0 }
|
||||
func (m callmsg) CheckNonce() bool { return false }
|
||||
func (m callmsg) To() *common.Address { return m.CallMsg.To }
|
||||
func (m callmsg) GasPrice() *big.Int { return m.CallMsg.GasPrice }
|
||||
func (m callmsg) Gas() *big.Int { return m.CallMsg.Gas }
|
||||
func (m callmsg) Gas() uint64 { return m.CallMsg.Gas }
|
||||
func (m callmsg) Value() *big.Int { return m.CallMsg.Value }
|
||||
func (m callmsg) Data() []byte { return m.CallMsg.Data }
|
||||
|
||||
// filterBackend implements filters.Backend to support filtering for logs without
|
||||
// taking bloom-bits acceleration structures into account.
|
||||
type filterBackend struct {
|
||||
db ethdb.Database
|
||||
bc *core.BlockChain
|
||||
}
|
||||
|
||||
func (fb *filterBackend) ChainDb() ethdb.Database { return fb.db }
|
||||
func (fb *filterBackend) EventMux() *event.TypeMux { panic("not supported") }
|
||||
|
||||
func (fb *filterBackend) HeaderByNumber(ctx context.Context, block rpc.BlockNumber) (*types.Header, error) {
|
||||
if block == rpc.LatestBlockNumber {
|
||||
return fb.bc.CurrentHeader(), nil
|
||||
}
|
||||
return fb.bc.GetHeaderByNumber(uint64(block.Int64())), nil
|
||||
}
|
||||
func (fb *filterBackend) GetReceipts(ctx context.Context, hash common.Hash) (types.Receipts, error) {
|
||||
return core.GetBlockReceipts(fb.db, hash, core.GetBlockNumber(fb.db, hash)), nil
|
||||
}
|
||||
|
||||
func (fb *filterBackend) SubscribeTxPreEvent(ch chan<- core.TxPreEvent) event.Subscription {
|
||||
return event.NewSubscription(func(quit <-chan struct{}) error {
|
||||
<-quit
|
||||
return nil
|
||||
})
|
||||
}
|
||||
func (fb *filterBackend) SubscribeChainEvent(ch chan<- core.ChainEvent) event.Subscription {
|
||||
return fb.bc.SubscribeChainEvent(ch)
|
||||
}
|
||||
func (fb *filterBackend) SubscribeRemovedLogsEvent(ch chan<- core.RemovedLogsEvent) event.Subscription {
|
||||
return fb.bc.SubscribeRemovedLogsEvent(ch)
|
||||
}
|
||||
func (fb *filterBackend) SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscription {
|
||||
return fb.bc.SubscribeLogsEvent(ch)
|
||||
}
|
||||
|
||||
func (fb *filterBackend) BloomStatus() (uint64, uint64) { return 4096, 0 }
|
||||
func (fb *filterBackend) ServiceFilter(ctx context.Context, ms *bloombits.MatcherSession) {
|
||||
panic("not supported")
|
||||
}
|
||||
|
@@ -27,6 +27,7 @@ import (
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/crypto"
|
||||
"github.com/ethereum/go-ethereum/event"
|
||||
)
|
||||
|
||||
// SignerFn is a signer function callback when a contract requires a method to
|
||||
@@ -50,11 +51,27 @@ type TransactOpts struct {
|
||||
|
||||
Value *big.Int // Funds to transfer along the transaction (nil = 0 = no funds)
|
||||
GasPrice *big.Int // Gas price to use for the transaction execution (nil = gas price oracle)
|
||||
GasLimit *big.Int // Gas limit to set for the transaction execution (nil = estimate + 10%)
|
||||
GasLimit uint64 // Gas limit to set for the transaction execution (0 = estimate)
|
||||
|
||||
Context context.Context // Network context to support cancellation and timeouts (nil = no timeout)
|
||||
}
|
||||
|
||||
// FilterOpts is the collection of options to fine tune filtering for events
|
||||
// within a bound contract.
|
||||
type FilterOpts struct {
|
||||
Start uint64 // Start of the queried range
|
||||
End *uint64 // End of the range (nil = latest)
|
||||
|
||||
Context context.Context // Network context to support cancellation and timeouts (nil = no timeout)
|
||||
}
|
||||
|
||||
// WatchOpts is the collection of options to fine tune subscribing for events
|
||||
// within a bound contract.
|
||||
type WatchOpts struct {
|
||||
Start *uint64 // Start of the queried range (nil = latest)
|
||||
Context context.Context // Network context to support cancellation and timeouts (nil = no timeout)
|
||||
}
|
||||
|
||||
// BoundContract is the base wrapper object that reflects a contract on the
|
||||
// Ethereum network. It contains a collection of methods that are used by the
|
||||
// higher level contract bindings to operate.
|
||||
@@ -63,16 +80,18 @@ type BoundContract struct {
|
||||
abi abi.ABI // Reflect based ABI to access the correct Ethereum methods
|
||||
caller ContractCaller // Read interface to interact with the blockchain
|
||||
transactor ContractTransactor // Write interface to interact with the blockchain
|
||||
filterer ContractFilterer // Event filtering to interact with the blockchain
|
||||
}
|
||||
|
||||
// NewBoundContract creates a low level contract interface through which calls
|
||||
// and transactions may be made.
|
||||
func NewBoundContract(address common.Address, abi abi.ABI, caller ContractCaller, transactor ContractTransactor) *BoundContract {
|
||||
func NewBoundContract(address common.Address, abi abi.ABI, caller ContractCaller, transactor ContractTransactor, filterer ContractFilterer) *BoundContract {
|
||||
return &BoundContract{
|
||||
address: address,
|
||||
abi: abi,
|
||||
caller: caller,
|
||||
transactor: transactor,
|
||||
filterer: filterer,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -80,7 +99,7 @@ func NewBoundContract(address common.Address, abi abi.ABI, caller ContractCaller
|
||||
// deployment address with a Go wrapper.
|
||||
func DeployContract(opts *TransactOpts, abi abi.ABI, bytecode []byte, backend ContractBackend, params ...interface{}) (common.Address, *types.Transaction, *BoundContract, error) {
|
||||
// Otherwise try to deploy the contract
|
||||
c := NewBoundContract(common.Address{}, abi, backend, backend)
|
||||
c := NewBoundContract(common.Address{}, abi, backend, backend, backend)
|
||||
|
||||
input, err := c.abi.Pack("", params...)
|
||||
if err != nil {
|
||||
@@ -189,7 +208,7 @@ func (c *BoundContract) transact(opts *TransactOpts, contract *common.Address, i
|
||||
}
|
||||
}
|
||||
gasLimit := opts.GasLimit
|
||||
if gasLimit == nil {
|
||||
if gasLimit == 0 {
|
||||
// Gas estimation cannot succeed without code for method invocations
|
||||
if contract != nil {
|
||||
if code, err := c.transactor.PendingCodeAt(ensureContext(opts.Context), c.address); err != nil {
|
||||
@@ -225,6 +244,104 @@ func (c *BoundContract) transact(opts *TransactOpts, contract *common.Address, i
|
||||
return signedTx, nil
|
||||
}
|
||||
|
||||
// FilterLogs filters contract logs for past blocks, returning the necessary
|
||||
// channels to construct a strongly typed bound iterator on top of them.
|
||||
func (c *BoundContract) FilterLogs(opts *FilterOpts, name string, query ...[]interface{}) (chan types.Log, event.Subscription, error) {
|
||||
// Don't crash on a lazy user
|
||||
if opts == nil {
|
||||
opts = new(FilterOpts)
|
||||
}
|
||||
// Append the event selector to the query parameters and construct the topic set
|
||||
query = append([][]interface{}{{c.abi.Events[name].Id()}}, query...)
|
||||
|
||||
topics, err := makeTopics(query...)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
// Start the background filtering
|
||||
logs := make(chan types.Log, 128)
|
||||
|
||||
config := ethereum.FilterQuery{
|
||||
Addresses: []common.Address{c.address},
|
||||
Topics: topics,
|
||||
FromBlock: new(big.Int).SetUint64(opts.Start),
|
||||
}
|
||||
if opts.End != nil {
|
||||
config.ToBlock = new(big.Int).SetUint64(*opts.End)
|
||||
}
|
||||
/* TODO(karalabe): Replace the rest of the method below with this when supported
|
||||
sub, err := c.filterer.SubscribeFilterLogs(ensureContext(opts.Context), config, logs)
|
||||
*/
|
||||
buff, err := c.filterer.FilterLogs(ensureContext(opts.Context), config)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
sub, err := event.NewSubscription(func(quit <-chan struct{}) error {
|
||||
for _, log := range buff {
|
||||
select {
|
||||
case logs <- log:
|
||||
case <-quit:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}), nil
|
||||
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
return logs, sub, nil
|
||||
}
|
||||
|
||||
// WatchLogs subscribes to contract logs for future blocks, returning a
|
||||
// subscription object that can be used to tear down the watcher.
|
||||
func (c *BoundContract) WatchLogs(opts *WatchOpts, name string, query ...[]interface{}) (chan types.Log, event.Subscription, error) {
|
||||
// Don't crash on a lazy user
|
||||
if opts == nil {
|
||||
opts = new(WatchOpts)
|
||||
}
|
||||
// Append the event selector to the query parameters and construct the topic set
|
||||
query = append([][]interface{}{{c.abi.Events[name].Id()}}, query...)
|
||||
|
||||
topics, err := makeTopics(query...)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
// Start the background filtering
|
||||
logs := make(chan types.Log, 128)
|
||||
|
||||
config := ethereum.FilterQuery{
|
||||
Addresses: []common.Address{c.address},
|
||||
Topics: topics,
|
||||
}
|
||||
if opts.Start != nil {
|
||||
config.FromBlock = new(big.Int).SetUint64(*opts.Start)
|
||||
}
|
||||
sub, err := c.filterer.SubscribeFilterLogs(ensureContext(opts.Context), config, logs)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
return logs, sub, nil
|
||||
}
|
||||
|
||||
// UnpackLog unpacks a retrieved log into the provided output structure.
|
||||
func (c *BoundContract) UnpackLog(out interface{}, event string, log types.Log) error {
|
||||
if len(log.Data) > 0 {
|
||||
if err := c.abi.Unpack(out, event, log.Data); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
var indexed abi.Arguments
|
||||
for _, arg := range c.abi.Events[event].Inputs {
|
||||
if arg.Indexed {
|
||||
indexed = append(indexed, arg)
|
||||
}
|
||||
}
|
||||
return parseTopics(out, indexed, log.Topics[1:])
|
||||
}
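A toy illustration of the split UnpackLog relies on: indexed event inputs are recovered from log.Topics (after the signature topic at index 0), the remaining inputs from log.Data:

package main

import "fmt"

// input is a toy stand-in for an abi.Argument of an event.
type input struct {
	Name    string
	Indexed bool
}

func main() {
	inputs := []input{
		{"sender", true},  // indexed  -> recovered from log.Topics[1]
		{"amount", false}, // plain    -> decoded from log.Data
		{"memo", false},   // dynamic  -> decoded from log.Data as well
	}

	topic := 1 // Topics[0] is the event signature hash
	for _, in := range inputs {
		if in.Indexed {
			fmt.Printf("%s <- log.Topics[%d]\n", in.Name, topic)
			topic++
		} else {
			fmt.Printf("%s <- log.Data\n", in.Name)
		}
	}
}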
|
||||
|
||||
// ensureContext is a helper method to ensure a context is not nil, even if the
|
||||
// user specified it as such.
|
||||
func ensureContext(ctx context.Context) context.Context {
|
||||
if ctx == nil {
|
||||
return context.TODO()
|
||||
|
@@ -63,10 +63,11 @@ func Bind(types []string, abis []string, bytecodes []string, pkg string, lang La
|
||||
return r
|
||||
}, abis[i])
|
||||
|
||||
// Extract the call and transact methods, and sort them alphabetically
|
||||
// Extract the call and transact methods; events; and sort them alphabetically
|
||||
var (
|
||||
calls = make(map[string]*tmplMethod)
|
||||
transacts = make(map[string]*tmplMethod)
|
||||
events = make(map[string]*tmplEvent)
|
||||
)
|
||||
for _, original := range evmABI.Methods {
|
||||
// Normalize the method for capital cases and non-anonymous inputs/outputs
|
||||
@@ -89,11 +90,33 @@ func Bind(types []string, abis []string, bytecodes []string, pkg string, lang La
|
||||
}
|
||||
// Append the methods to the call or transact lists
|
||||
if original.Const {
|
||||
calls[original.Name] = &tmplMethod{Original: original, Normalized: normalized, Structured: structured(original)}
|
||||
calls[original.Name] = &tmplMethod{Original: original, Normalized: normalized, Structured: structured(original.Outputs)}
|
||||
} else {
|
||||
transacts[original.Name] = &tmplMethod{Original: original, Normalized: normalized, Structured: structured(original)}
|
||||
transacts[original.Name] = &tmplMethod{Original: original, Normalized: normalized, Structured: structured(original.Outputs)}
|
||||
}
|
||||
}
|
||||
for _, original := range evmABI.Events {
|
||||
// Skip anonymous events as they don't support explicit filtering
|
||||
if original.Anonymous {
|
||||
continue
|
||||
}
|
||||
// Normalize the event for capital cases and non-anonymous outputs
|
||||
normalized := original
|
||||
normalized.Name = methodNormalizer[lang](original.Name)
|
||||
|
||||
normalized.Inputs = make([]abi.Argument, len(original.Inputs))
|
||||
copy(normalized.Inputs, original.Inputs)
|
||||
for j, input := range normalized.Inputs {
|
||||
// Indexed fields are input, non-indexed ones are outputs
|
||||
if input.Indexed {
|
||||
if input.Name == "" {
|
||||
normalized.Inputs[j].Name = fmt.Sprintf("arg%d", j)
|
||||
}
|
||||
}
|
||||
}
|
||||
// Append the event to the accumulator list
|
||||
events[original.Name] = &tmplEvent{Original: original, Normalized: normalized}
|
||||
}
|
||||
contracts[types[i]] = &tmplContract{
|
||||
Type: capitalise(types[i]),
|
||||
InputABI: strings.Replace(strippedABI, "\"", "\\\"", -1),
|
||||
@@ -101,6 +124,7 @@ func Bind(types []string, abis []string, bytecodes []string, pkg string, lang La
|
||||
Constructor: evmABI.Constructor,
|
||||
Calls: calls,
|
||||
Transacts: transacts,
|
||||
Events: events,
|
||||
}
|
||||
}
|
||||
// Generate the contract template data content and render it
|
||||
@@ -111,10 +135,11 @@ func Bind(types []string, abis []string, bytecodes []string, pkg string, lang La
|
||||
buffer := new(bytes.Buffer)
|
||||
|
||||
funcs := map[string]interface{}{
|
||||
"bindtype": bindType[lang],
|
||||
"namedtype": namedType[lang],
|
||||
"capitalise": capitalise,
|
||||
"decapitalise": decapitalise,
|
||||
"bindtype": bindType[lang],
|
||||
"bindtopictype": bindTopicType[lang],
|
||||
"namedtype": namedType[lang],
|
||||
"capitalise": capitalise,
|
||||
"decapitalise": decapitalise,
|
||||
}
|
||||
tmpl := template.Must(template.New("").Funcs(funcs).Parse(tmplSource[lang]))
|
||||
if err := tmpl.Execute(buffer, data); err != nil {
|
||||
@@ -122,18 +147,18 @@ func Bind(types []string, abis []string, bytecodes []string, pkg string, lang La
|
||||
}
|
||||
// For Go bindings pass the code through goimports to clean it up and double check
|
||||
if lang == LangGo {
|
||||
code, err := imports.Process("", buffer.Bytes(), nil)
|
||||
code, err := imports.Process(".", buffer.Bytes(), nil)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("%v\n%s", err, buffer)
|
||||
}
|
||||
return string(code), nil
|
||||
}
|
||||
// For all others just return as is for now
|
||||
return string(buffer.Bytes()), nil
|
||||
return buffer.String(), nil
|
||||
}
|
||||
|
||||
// bindType is a set of type binders that convert Solidity types to some supported
|
||||
// programming language.
|
||||
// programming language types.
|
||||
var bindType = map[Lang]func(kind abi.Type) string{
|
||||
LangGo: bindTypeGo,
|
||||
LangJava: bindTypeJava,
|
||||
@@ -254,6 +279,33 @@ func bindTypeJava(kind abi.Type) string {
|
||||
}
|
||||
}
|
||||
|
||||
// bindTopicType is a set of type binders that convert Solidity types to some
|
||||
// supported programming language topic types.
|
||||
var bindTopicType = map[Lang]func(kind abi.Type) string{
|
||||
LangGo: bindTopicTypeGo,
|
||||
LangJava: bindTopicTypeJava,
|
||||
}
|
||||
|
||||
// bindTopicTypeGo converts a Solidity topic type to a Go one. It is almost the same
|
||||
// functionality as for simple types, but dynamic types get converted to hashes.
|
||||
func bindTopicTypeGo(kind abi.Type) string {
|
||||
bound := bindTypeGo(kind)
|
||||
if bound == "string" || bound == "[]byte" {
|
||||
bound = "common.Hash"
|
||||
}
|
||||
return bound
|
||||
}
|
||||
|
||||
// bindTopicTypeJava converts a Solidity topic type to a Java one. It is almost the same
|
||||
// functionality as for simple types, but dynamic types get converted to hashes.
|
||||
func bindTopicTypeJava(kind abi.Type) string {
|
||||
bound := bindTypeJava(kind)
|
||||
if bound == "String" || bound == "Bytes" {
|
||||
bound = "Hash"
|
||||
}
|
||||
return bound
|
||||
}
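The reason dynamic types are bound as hashes is that the EVM stores only the Keccak-256 hash of an indexed string or bytes value in the topic, so the original value cannot be recovered from the log. A standalone illustration (Keccak taken from golang.org/x/crypto/sha3 here, which is an assumption, not the repo's crypto package):

package main

import (
	"fmt"

	"golang.org/x/crypto/sha3"
)

func main() {
	// What actually lands in the topic for an indexed string parameter is the
	// Keccak-256 hash of its bytes, hence common.Hash in the generated bindings.
	h := sha3.NewLegacyKeccak256()
	h.Write([]byte("hello world"))
	fmt.Printf("topic: %x\n", h.Sum(nil))
}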
|
||||
|
||||
// namedType is a set of functions that transform language specific types to
|
||||
// named versions that may be used inside method names.
|
||||
var namedType = map[Lang]func(string, abi.Type) string{
|
||||
@@ -304,8 +356,15 @@ var methodNormalizer = map[Lang]func(string) string{
|
||||
LangJava: decapitalise,
|
||||
}
|
||||
|
||||
// capitalise makes the first character of a string upper case.
|
||||
// capitalise makes the first character of a string upper case, also removing any
|
||||
// prefixing underscores from the variable names.
|
||||
func capitalise(input string) string {
|
||||
for len(input) > 0 && input[0] == '_' {
|
||||
input = input[1:]
|
||||
}
|
||||
if len(input) == 0 {
|
||||
return ""
|
||||
}
|
||||
return strings.ToUpper(input[:1]) + input[1:]
|
||||
}
|
||||
|
||||
@@ -314,16 +373,25 @@ func decapitalise(input string) string {
|
||||
return strings.ToLower(input[:1]) + input[1:]
|
||||
}
|
||||
|
||||
// structured checks whether a method has enough information to return a proper
|
||||
// Go struct ot if flat returns are needed.
|
||||
func structured(method abi.Method) bool {
|
||||
if len(method.Outputs) < 2 {
|
||||
// structured checks whether a list of ABI data types has enough information to
|
||||
// operate through a proper Go struct or if flat returns are needed.
|
||||
func structured(args abi.Arguments) bool {
|
||||
if len(args) < 2 {
|
||||
return false
|
||||
}
|
||||
for _, out := range method.Outputs {
|
||||
exists := make(map[string]bool)
|
||||
for _, out := range args {
|
||||
// If the name is anonymous, we can't organize into a struct
|
||||
if out.Name == "" {
|
||||
return false
|
||||
}
|
||||
// If the field name is empty when normalized or collides (var, Var, _var, _Var),
|
||||
// we can't organize into a struct
|
||||
field := capitalise(out.Name)
|
||||
if field == "" || exists[field] {
|
||||
return false
|
||||
}
|
||||
exists[field] = true
|
||||
}
|
||||
return true
|
||||
}
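Which output lists qualify can be checked mechanically; a standalone sketch with local stand-ins for abi.Argument and the helpers above, mirroring the collision cases (str/Str, anonymous outputs) exercised by the test ABI below:

package main

import (
	"fmt"
	"strings"
)

// argument is a local stand-in for abi.Argument; only the name matters here.
type argument struct{ Name string }

func capitalise(input string) string {
	for len(input) > 0 && input[0] == '_' {
		input = input[1:]
	}
	if len(input) == 0 {
		return ""
	}
	return strings.ToUpper(input[:1]) + input[1:]
}

func structured(args []argument) bool {
	if len(args) < 2 {
		return false
	}
	exists := make(map[string]bool)
	for _, out := range args {
		if out.Name == "" {
			return false
		}
		field := capitalise(out.Name)
		if field == "" || exists[field] {
			return false
		}
		exists[field] = true
	}
	return true
}

func main() {
	fmt.Println(structured([]argument{{"str1"}, {"str2"}})) // true:  named struct return
	fmt.Println(structured([]argument{{"str"}, {"Str"}}))   // false: field name collision
	fmt.Println(structured([]argument{{""}, {""}}))         // false: anonymous outputs
}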
|
||||
|
@@ -126,6 +126,7 @@ var bindTests = []struct {
|
||||
{"type":"function","name":"namedOutput","constant":true,"inputs":[],"outputs":[{"name":"str","type":"string"}]},
|
||||
{"type":"function","name":"anonOutput","constant":true,"inputs":[],"outputs":[{"name":"","type":"string"}]},
|
||||
{"type":"function","name":"namedOutputs","constant":true,"inputs":[],"outputs":[{"name":"str1","type":"string"},{"name":"str2","type":"string"}]},
|
||||
{"type":"function","name":"collidingOutputs","constant":true,"inputs":[],"outputs":[{"name":"str","type":"string"},{"name":"Str","type":"string"}]},
|
||||
{"type":"function","name":"anonOutputs","constant":true,"inputs":[],"outputs":[{"name":"","type":"string"},{"name":"","type":"string"}]},
|
||||
{"type":"function","name":"mixedOutputs","constant":true,"inputs":[],"outputs":[{"name":"","type":"string"},{"name":"str","type":"string"}]}
|
||||
]
|
||||
@@ -140,12 +141,71 @@ var bindTests = []struct {
|
||||
str1, err = b.NamedOutput(nil)
|
||||
str1, err = b.AnonOutput(nil)
|
||||
res, _ := b.NamedOutputs(nil)
|
||||
str1, str2, err = b.CollidingOutputs(nil)
|
||||
str1, str2, err = b.AnonOutputs(nil)
|
||||
str1, str2, err = b.MixedOutputs(nil)
|
||||
|
||||
fmt.Println(str1, str2, res.Str1, res.Str2, err)
|
||||
}`,
|
||||
},
|
||||
// Tests that named, anonymous and indexed events are handled correctly
|
||||
{
|
||||
`EventChecker`, ``, ``,
|
||||
`
|
||||
[
|
||||
{"type":"event","name":"empty","inputs":[]},
|
||||
{"type":"event","name":"indexed","inputs":[{"name":"addr","type":"address","indexed":true},{"name":"num","type":"int256","indexed":true}]},
|
||||
{"type":"event","name":"mixed","inputs":[{"name":"addr","type":"address","indexed":true},{"name":"num","type":"int256"}]},
|
||||
{"type":"event","name":"anonymous","anonymous":true,"inputs":[]},
|
||||
{"type":"event","name":"dynamic","inputs":[{"name":"idxStr","type":"string","indexed":true},{"name":"idxDat","type":"bytes","indexed":true},{"name":"str","type":"string"},{"name":"dat","type":"bytes"}]}
|
||||
]
|
||||
`,
|
||||
`if e, err := NewEventChecker(common.Address{}, nil); e == nil || err != nil {
|
||||
t.Fatalf("binding (%v) nil or error (%v) not nil", e, nil)
|
||||
} else if false { // Don't run, just compile and test types
|
||||
var (
|
||||
err error
|
||||
res bool
|
||||
str string
|
||||
dat []byte
|
||||
hash common.Hash
|
||||
)
|
||||
_, err = e.FilterEmpty(nil)
|
||||
_, err = e.FilterIndexed(nil, []common.Address{}, []*big.Int{})
|
||||
|
||||
mit, err := e.FilterMixed(nil, []common.Address{})
|
||||
|
||||
res = mit.Next() // Make sure the iterator has a Next method
|
||||
err = mit.Error() // Make sure the iterator has an Error method
|
||||
err = mit.Close() // Make sure the iterator has a Close method
|
||||
|
||||
fmt.Println(mit.Event.Raw.BlockHash) // Make sure the raw log is contained within the results
|
||||
fmt.Println(mit.Event.Num) // Make sure the unpacked non-indexed fields are present
|
||||
fmt.Println(mit.Event.Addr) // Make sure the reconstructed indexed fields are present
|
||||
|
||||
dit, err := e.FilterDynamic(nil, []string{}, [][]byte{})
|
||||
|
||||
str = dit.Event.Str // Make sure non-indexed strings retain their type
|
||||
dat = dit.Event.Dat // Make sure non-indexed bytes retain their type
|
||||
hash = dit.Event.IdxStr // Make sure indexed strings turn into hashes
|
||||
hash = dit.Event.IdxDat // Make sure indexed bytes turn into hashes
|
||||
|
||||
sink := make(chan *EventCheckerMixed)
|
||||
sub, err := e.WatchMixed(nil, sink, []common.Address{})
|
||||
defer sub.Unsubscribe()
|
||||
|
||||
event := <-sink
|
||||
fmt.Println(event.Raw.BlockHash) // Make sure the raw log is contained within the results
|
||||
fmt.Println(event.Num) // Make sure the unpacked non-indexed fields are present
|
||||
fmt.Println(event.Addr) // Make sure the reconstructed indexed fields are present
|
||||
|
||||
fmt.Println(res, str, dat, hash, err)
|
||||
}
|
||||
// Run a tiny reflection test to ensure disallowed methods don't appear
|
||||
if _, ok := reflect.TypeOf(&EventChecker{}).MethodByName("FilterAnonymous"); ok {
|
||||
t.Errorf("binding has disallowed method (FilterAnonymous)")
|
||||
}`,
|
||||
},
|
||||
// Test that contract interactions (deploy, transact and call) generate working code
|
||||
{
|
||||
`Interactor`,
|
||||
@@ -397,7 +457,6 @@ var bindTests = []struct {
|
||||
sim.Commit()
|
||||
|
||||
// Set the field with automatic estimation and check that it succeeds
|
||||
auth.GasLimit = nil
|
||||
if _, err := limiter.SetField(auth, "automatic"); err != nil {
|
||||
t.Fatalf("Failed to call automatically gased transaction: %v", err)
|
||||
}
|
||||
@@ -447,6 +506,237 @@ var bindTests = []struct {
|
||||
}
|
||||
`,
|
||||
},
|
||||
{
|
||||
`Underscorer`,
|
||||
`
|
||||
contract Underscorer {
|
||||
function UnderscoredOutput() constant returns (int _int, string _string) {
|
||||
return (314, "pi");
|
||||
}
|
||||
function LowerLowerCollision() constant returns (int _res, int res) {
|
||||
return (1, 2);
|
||||
}
|
||||
function LowerUpperCollision() constant returns (int _res, int Res) {
|
||||
return (1, 2);
|
||||
}
|
||||
function UpperLowerCollision() constant returns (int _Res, int res) {
|
||||
return (1, 2);
|
||||
}
|
||||
function UpperUpperCollision() constant returns (int _Res, int Res) {
|
||||
return (1, 2);
|
||||
}
|
||||
function PurelyUnderscoredOutput() constant returns (int _, int res) {
|
||||
return (1, 2);
|
||||
}
|
||||
function AllPurelyUnderscoredOutput() constant returns (int _, int __) {
|
||||
return (1, 2);
|
||||
}
|
||||
}
|
||||
`, `6060604052341561000f57600080fd5b6103498061001e6000396000f300606060405260043610610083576000357c0100000000000000000000000000000000000000000000000000000000900463ffffffff16806303a592131461008857806367e6633d146100b85780639df484851461014d578063af7486ab1461017d578063b564b34d146101ad578063e02ab24d146101dd578063e409ca451461020d575b600080fd5b341561009357600080fd5b61009b61023d565b604051808381526020018281526020019250505060405180910390f35b34156100c357600080fd5b6100cb610252565b6040518083815260200180602001828103825283818151815260200191508051906020019080838360005b838110156101115780820151818401526020810190506100f6565b50505050905090810190601f16801561013e5780820380516001836020036101000a031916815260200191505b50935050505060405180910390f35b341561015857600080fd5b6101606102a0565b604051808381526020018281526020019250505060405180910390f35b341561018857600080fd5b6101906102b5565b604051808381526020018281526020019250505060405180910390f35b34156101b857600080fd5b6101c06102ca565b604051808381526020018281526020019250505060405180910390f35b34156101e857600080fd5b6101f06102df565b604051808381526020018281526020019250505060405180910390f35b341561021857600080fd5b6102206102f4565b604051808381526020018281526020019250505060405180910390f35b60008060016002819150809050915091509091565b600061025c610309565b61013a8090506040805190810160405280600281526020017f7069000000000000000000000000000000000000000000000000000000000000815250915091509091565b60008060016002819150809050915091509091565b60008060016002819150809050915091509091565b60008060016002819150809050915091509091565b60008060016002819150809050915091509091565b60008060016002819150809050915091509091565b6020604051908101604052806000815250905600a165627a7a72305820c11dcfa136fc7d182ee4d34f0b12d988496228f7e2d02d2b5376d996ca1743d00029`,
|
||||
`[{"constant":true,"inputs":[],"name":"LowerUpperCollision","outputs":[{"name":"_res","type":"int256"},{"name":"Res","type":"int256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"UnderscoredOutput","outputs":[{"name":"_int","type":"int256"},{"name":"_string","type":"string"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"PurelyUnderscoredOutput","outputs":[{"name":"_","type":"int256"},{"name":"res","type":"int256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"UpperLowerCollision","outputs":[{"name":"_Res","type":"int256"},{"name":"res","type":"int256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"AllPurelyUnderscoredOutput","outputs":[{"name":"_","type":"int256"},{"name":"__","type":"int256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"UpperUpperCollision","outputs":[{"name":"_Res","type":"int256"},{"name":"Res","type":"int256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"LowerLowerCollision","outputs":[{"name":"_res","type":"int256"},{"name":"res","type":"int256"}],"payable":false,"stateMutability":"view","type":"function"}]`,
|
||||
`
|
||||
// Generate a new random account and a funded simulator
|
||||
key, _ := crypto.GenerateKey()
|
||||
auth := bind.NewKeyedTransactor(key)
|
||||
sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}})
|
||||
|
||||
// Deploy an underscorer tester contract and execute a structured call on it
|
||||
_, _, underscorer, err := DeployUnderscorer(auth, sim)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to deploy underscorer contract: %v", err)
|
||||
}
|
||||
sim.Commit()
|
||||
|
||||
// Verify that underscored return values correctly parse into structs
|
||||
if res, err := underscorer.UnderscoredOutput(nil); err != nil {
|
||||
t.Errorf("Failed to call constant function: %v", err)
|
||||
} else if res.Int.Cmp(big.NewInt(314)) != 0 || res.String != "pi" {
|
||||
t.Errorf("Invalid result, want: {314, \"pi\"}, got: %+v", res)
|
||||
}
|
||||
// Verify that underscored and non-underscored name collisions force tuple outputs
|
||||
var a, b *big.Int
|
||||
|
||||
a, b, _ = underscorer.LowerLowerCollision(nil)
|
||||
a, b, _ = underscorer.LowerUpperCollision(nil)
|
||||
a, b, _ = underscorer.UpperLowerCollision(nil)
|
||||
a, b, _ = underscorer.UpperUpperCollision(nil)
|
||||
a, b, _ = underscorer.PurelyUnderscoredOutput(nil)
|
||||
a, b, _ = underscorer.AllPurelyUnderscoredOutput(nil)
|
||||
|
||||
fmt.Println(a, b, err)
|
||||
`,
|
||||
},
|
||||
// Tests that logs can be successfully filtered and decoded.
|
||||
{
|
||||
`Eventer`,
|
||||
`
|
||||
contract Eventer {
|
||||
event SimpleEvent (
|
||||
address indexed Addr,
|
||||
bytes32 indexed Id,
|
||||
bool indexed Flag,
|
||||
uint Value
|
||||
);
|
||||
function raiseSimpleEvent(address addr, bytes32 id, bool flag, uint value) {
|
||||
SimpleEvent(addr, id, flag, value);
|
||||
}
|
||||
|
||||
event NodataEvent (
|
||||
uint indexed Number,
|
||||
int16 indexed Short,
|
||||
uint32 indexed Long
|
||||
);
|
||||
function raiseNodataEvent(uint number, int16 short, uint32 long) {
|
||||
NodataEvent(number, short, long);
|
||||
}
|
||||
|
||||
event DynamicEvent (
|
||||
string indexed IndexedString,
|
||||
bytes indexed IndexedBytes,
|
||||
string NonIndexedString,
|
||||
bytes NonIndexedBytes
|
||||
);
|
||||
function raiseDynamicEvent(string str, bytes blob) {
|
||||
DynamicEvent(str, blob, str, blob);
|
||||
}
|
||||
}
|
||||
`,
|
||||
`6060604052341561000f57600080fd5b61042c8061001e6000396000f300606060405260043610610057576000357c0100000000000000000000000000000000000000000000000000000000900463ffffffff168063528300ff1461005c578063630c31e2146100fc578063c7d116dd14610156575b600080fd5b341561006757600080fd5b6100fa600480803590602001908201803590602001908080601f0160208091040260200160405190810160405280939291908181526020018383808284378201915050505050509190803590602001908201803590602001908080601f01602080910402602001604051908101604052809392919081815260200183838082843782019150505050505091905050610194565b005b341561010757600080fd5b610154600480803573ffffffffffffffffffffffffffffffffffffffff16906020019091908035600019169060200190919080351515906020019091908035906020019091905050610367565b005b341561016157600080fd5b610192600480803590602001909190803560010b90602001909190803563ffffffff169060200190919050506103c3565b005b806040518082805190602001908083835b6020831015156101ca57805182526020820191506020810190506020830392506101a5565b6001836020036101000a0380198251168184511680821785525050505050509050019150506040518091039020826040518082805190602001908083835b60208310151561022d5780518252602082019150602081019050602083039250610208565b6001836020036101000a03801982511681845116808217855250505050505090500191505060405180910390207f3281fd4f5e152dd3385df49104a3f633706e21c9e80672e88d3bcddf33101f008484604051808060200180602001838103835285818151815260200191508051906020019080838360005b838110156102c15780820151818401526020810190506102a6565b50505050905090810190601f1680156102ee5780820380516001836020036101000a031916815260200191505b50838103825284818151815260200191508051906020019080838360005b8381101561032757808201518184015260208101905061030c565b50505050905090810190601f1680156103545780820380516001836020036101000a031916815260200191505b5094505050505060405180910390a35050565b81151583600019168573ffffffffffffffffffffffffffffffffffffffff167f1f097de4289df643bd9c11011cc61367aa12983405c021056e706eb5ba1250c8846040518082815260200191505060405180910390a450505050565b8063ffffffff168260010b847f3ca7f3a77e5e6e15e781850bc82e32adfa378a2a609370db24b4d0fae10da2c960405160405180910390a45050505600a165627a7a72305820d1f8a8bbddbc5bb29f285891d6ae1eef8420c52afdc05e1573f6114d8e1714710029`,
|
||||
`[{"constant":false,"inputs":[{"name":"str","type":"string"},{"name":"blob","type":"bytes"}],"name":"raiseDynamicEvent","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"addr","type":"address"},{"name":"id","type":"bytes32"},{"name":"flag","type":"bool"},{"name":"value","type":"uint256"}],"name":"raiseSimpleEvent","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"number","type":"uint256"},{"name":"short","type":"int16"},{"name":"long","type":"uint32"}],"name":"raiseNodataEvent","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},{"anonymous":false,"inputs":[{"indexed":true,"name":"Addr","type":"address"},{"indexed":true,"name":"Id","type":"bytes32"},{"indexed":true,"name":"Flag","type":"bool"},{"indexed":false,"name":"Value","type":"uint256"}],"name":"SimpleEvent","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"name":"Number","type":"uint256"},{"indexed":true,"name":"Short","type":"int16"},{"indexed":true,"name":"Long","type":"uint32"}],"name":"NodataEvent","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"name":"IndexedString","type":"string"},{"indexed":true,"name":"IndexedBytes","type":"bytes"},{"indexed":false,"name":"NonIndexedString","type":"string"},{"indexed":false,"name":"NonIndexedBytes","type":"bytes"}],"name":"DynamicEvent","type":"event"}]`,
|
||||
`
|
||||
// Generate a new random account and a funded simulator
|
||||
key, _ := crypto.GenerateKey()
|
||||
auth := bind.NewKeyedTransactor(key)
|
||||
sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}})
|
||||
|
||||
// Deploy an eventer contract
|
||||
_, _, eventer, err := DeployEventer(auth, sim)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to deploy eventer contract: %v", err)
|
||||
}
|
||||
sim.Commit()
|
||||
|
||||
// Inject a few events into the contract, gradually more in each block
|
||||
for i := 1; i <= 3; i++ {
|
||||
for j := 1; j <= i; j++ {
|
||||
if _, err := eventer.RaiseSimpleEvent(auth, common.Address{byte(j)}, [32]byte{byte(j)}, true, big.NewInt(int64(10*i+j))); err != nil {
|
||||
t.Fatalf("block %d, event %d: raise failed: %v", i, j, err)
|
||||
}
|
||||
}
|
||||
sim.Commit()
|
||||
}
|
||||
// Test filtering for certain events and ensure they can be found
|
||||
sit, err := eventer.FilterSimpleEvent(nil, []common.Address{common.Address{1}, common.Address{3}}, [][32]byte{{byte(1)}, {byte(2)}, {byte(3)}}, []bool{true})
|
||||
if err != nil {
|
||||
t.Fatalf("failed to filter for simple events: %v", err)
|
||||
}
|
||||
defer sit.Close()
|
||||
|
||||
sit.Next()
|
||||
if sit.Event.Value.Uint64() != 11 || !sit.Event.Flag {
|
||||
t.Errorf("simple log content mismatch: have %v, want {11, true}", sit.Event)
|
||||
}
|
||||
sit.Next()
|
||||
if sit.Event.Value.Uint64() != 21 || !sit.Event.Flag {
|
||||
t.Errorf("simple log content mismatch: have %v, want {21, true}", sit.Event)
|
||||
}
|
||||
sit.Next()
|
||||
if sit.Event.Value.Uint64() != 31 || !sit.Event.Flag {
|
||||
t.Errorf("simple log content mismatch: have %v, want {31, true}", sit.Event)
|
||||
}
|
||||
sit.Next()
|
||||
if sit.Event.Value.Uint64() != 33 || !sit.Event.Flag {
|
||||
t.Errorf("simple log content mismatch: have %v, want {33, true}", sit.Event)
|
||||
}
|
||||
|
||||
if sit.Next() {
|
||||
t.Errorf("unexpected simple event found: %+v", sit.Event)
|
||||
}
|
||||
if err = sit.Error(); err != nil {
|
||||
t.Fatalf("simple event iteration failed: %v", err)
|
||||
}
|
||||
// Test raising and filtering for an event with no data component
|
||||
if _, err := eventer.RaiseNodataEvent(auth, big.NewInt(314), 141, 271); err != nil {
|
||||
t.Fatalf("failed to raise nodata event: %v", err)
|
||||
}
|
||||
sim.Commit()
|
||||
|
||||
nit, err := eventer.FilterNodataEvent(nil, []*big.Int{big.NewInt(314)}, []int16{140, 141, 142}, []uint32{271})
|
||||
if err != nil {
|
||||
t.Fatalf("failed to filter for nodata events: %v", err)
|
||||
}
|
||||
defer nit.Close()
|
||||
|
||||
if !nit.Next() {
|
||||
t.Fatalf("nodata log not found: %v", nit.Error())
|
||||
}
|
||||
if nit.Event.Number.Uint64() != 314 {
|
||||
t.Errorf("nodata log content mismatch: have %v, want 314", nit.Event.Number)
|
||||
}
|
||||
if nit.Next() {
|
||||
t.Errorf("unexpected nodata event found: %+v", nit.Event)
|
||||
}
|
||||
if err = nit.Error(); err != nil {
|
||||
t.Fatalf("nodata event iteration failed: %v", err)
|
||||
}
|
||||
// Test raising and filtering for events with dynamic indexed components
|
||||
if _, err := eventer.RaiseDynamicEvent(auth, "Hello", []byte("World")); err != nil {
|
||||
t.Fatalf("failed to raise dynamic event: %v", err)
|
||||
}
|
||||
sim.Commit()
|
||||
|
||||
dit, err := eventer.FilterDynamicEvent(nil, []string{"Hi", "Hello", "Bye"}, [][]byte{[]byte("World")})
|
||||
if err != nil {
|
||||
t.Fatalf("failed to filter for dynamic events: %v", err)
|
||||
}
|
||||
defer dit.Close()
|
||||
|
||||
if !dit.Next() {
|
||||
t.Fatalf("dynamic log not found: %v", dit.Error())
|
||||
}
|
||||
if dit.Event.NonIndexedString != "Hello" || string(dit.Event.NonIndexedBytes) != "World" || dit.Event.IndexedString != common.HexToHash("0x06b3dfaec148fb1bb2b066f10ec285e7c9bf402ab32aa78a5d38e34566810cd2") || dit.Event.IndexedBytes != common.HexToHash("0xf2208c967df089f60420785795c0a9ba8896b0f6f1867fa7f1f12ad6f79c1a18") {
|
||||
t.Errorf("dynamic log content mismatch: have %v, want {'0x06b3dfaec148fb1bb2b066f10ec285e7c9bf402ab32aa78a5d38e34566810cd2, '0xf2208c967df089f60420785795c0a9ba8896b0f6f1867fa7f1f12ad6f79c1a18', 'Hello', 'World'}", dit.Event)
|
||||
}
|
||||
if dit.Next() {
|
||||
t.Errorf("unexpected dynamic event found: %+v", dit.Event)
|
||||
}
|
||||
if err = dit.Error(); err != nil {
|
||||
t.Fatalf("dynamic event iteration failed: %v", err)
|
||||
}
|
||||
// Test subscribing to an event and raising it afterwards
|
||||
ch := make(chan *EventerSimpleEvent, 16)
|
||||
sub, err := eventer.WatchSimpleEvent(nil, ch, nil, nil, nil)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to subscribe to simple events: %v", err)
|
||||
}
|
||||
if _, err := eventer.RaiseSimpleEvent(auth, common.Address{255}, [32]byte{255}, true, big.NewInt(255)); err != nil {
|
||||
t.Fatalf("failed to raise subscribed simple event: %v", err)
|
||||
}
|
||||
sim.Commit()
|
||||
|
||||
select {
|
||||
case event := <-ch:
|
||||
if event.Value.Uint64() != 255 {
|
||||
t.Errorf("simple log content mismatch: have %v, want 255", event)
|
||||
}
|
||||
case <-time.After(250 * time.Millisecond):
|
||||
t.Fatalf("subscribed simple event didn't arrive")
|
||||
}
|
||||
// Unsubscribe from the event and make sure we're not delivered more
|
||||
sub.Unsubscribe()
|
||||
|
||||
if _, err := eventer.RaiseSimpleEvent(auth, common.Address{254}, [32]byte{254}, true, big.NewInt(254)); err != nil {
|
||||
t.Fatalf("failed to raise subscribed simple event: %v", err)
|
||||
}
|
||||
sim.Commit()
|
||||
|
||||
select {
|
||||
case event := <-ch:
|
||||
t.Fatalf("unsubscribed simple event arrived: %v", event)
|
||||
case <-time.After(250 * time.Millisecond):
|
||||
}
|
||||
`,
|
||||
},
|
||||
}
|
||||
|
||||
// Tests that packages generated by the binder can be successfully compiled and
|
||||
@ -459,7 +749,7 @@ func TestBindings(t *testing.T) {
|
||||
}
|
||||
// Skip the test if the go-ethereum sources are symlinked (https://github.com/golang/go/issues/14845)
|
||||
linkTestCode := fmt.Sprintf("package linktest\nfunc CheckSymlinks(){\nfmt.Println(backends.NewSimulatedBackend(nil))\n}")
|
||||
linkTestDeps, err := imports.Process("", []byte(linkTestCode), nil)
|
||||
linkTestDeps, err := imports.Process(os.TempDir(), []byte(linkTestCode), nil)
|
||||
if err != nil {
|
||||
t.Fatalf("failed check for goimports symlink bug: %v", err)
|
||||
}
|
||||
@ -498,7 +788,7 @@ func TestBindings(t *testing.T) {
|
||||
}
|
||||
}
|
||||
// Test the entire package and report any failures
|
||||
cmd := exec.Command(gocmd, "test", "-v")
|
||||
cmd := exec.Command(gocmd, "test", "-v", "-count", "1")
|
||||
cmd.Dir = pkg
|
||||
if out, err := cmd.CombinedOutput(); err != nil {
|
||||
t.Fatalf("failed to run binding test: %v\n%s", err, out)
|
||||
|
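For orientation before the raw template below: the Eventer test above exercises the generated filtering API end to end. A minimal sketch of how ordinary application code would consume the same API, assuming the generated Eventer binding (FilterSimpleEvent, EventerSimpleEvent, and so on) from the fixtures above is in scope; everything else here is illustrative, not part of the change itself.

```go
// Sketch only: assumes the generated Eventer binding from the fixtures above
// is part of this package, plus an import of "fmt".
func dumpSimpleEvents(eventer *Eventer) error {
	// nil FilterOpts and nil index rules: scan from the genesis block and
	// match every indexed Addr, Id and Flag value.
	it, err := eventer.FilterSimpleEvent(nil, nil, nil, nil)
	if err != nil {
		return err
	}
	defer it.Close()

	// The iterator lazily unpacks each matching log into it.Event.
	for it.Next() {
		fmt.Println(it.Event.Addr, it.Event.Id, it.Event.Value)
	}
	return it.Error()
}
```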
@ -32,6 +32,7 @@ type tmplContract struct {
|
||||
Constructor abi.Method // Contract constructor for deploy parametrization
|
||||
Calls map[string]*tmplMethod // Contract calls that only read state data
|
||||
Transacts map[string]*tmplMethod // Contract calls that write state data
|
||||
Events map[string]*tmplEvent // Contract events accessors
|
||||
}
|
||||
|
||||
// tmplMethod is a wrapper around an abi.Method that contains a few preprocessed
|
||||
@ -39,7 +40,13 @@ type tmplContract struct {
|
||||
type tmplMethod struct {
|
||||
Original abi.Method // Original method as parsed by the abi package
|
||||
Normalized abi.Method // Normalized version of the parsed method (capitalized names, non-anonymous args/returns)
|
||||
Structured bool // Whether the returns should be accumulated into a contract
|
||||
Structured bool // Whether the returns should be accumulated into a struct
|
||||
}
|
||||
|
||||
// tmplEvent is a wrapper around an abi.Event that contains a few preprocessed and cached data fields.
|
||||
type tmplEvent struct {
|
||||
Original abi.Event // Original event as parsed by the abi package
|
||||
Normalized abi.Event // Normalized version of the parsed fields
|
||||
}
|
||||
|
||||
// tmplSource is the language to template mapping containing all the supported
|
||||
@ -52,8 +59,8 @@ var tmplSource = map[Lang]string{
|
||||
// tmplSourceGo is the Go source template used to generate the contract binding
|
||||
// based on.
|
||||
const tmplSourceGo = `
|
||||
// This file is an automatically generated Go binding. Do not modify as any
|
||||
// change will likely be lost upon the next re-generation!
|
||||
// Code generated - DO NOT EDIT.
|
||||
// This file is a generated binding and any manual changes will be lost.
|
||||
|
||||
package {{.Package}}
|
||||
|
||||
@ -75,7 +82,7 @@ package {{.Package}}
|
||||
if err != nil {
|
||||
return common.Address{}, nil, nil, err
|
||||
}
|
||||
return address, tx, &{{.Type}}{ {{.Type}}Caller: {{.Type}}Caller{contract: contract}, {{.Type}}Transactor: {{.Type}}Transactor{contract: contract} }, nil
|
||||
return address, tx, &{{.Type}}{ {{.Type}}Caller: {{.Type}}Caller{contract: contract}, {{.Type}}Transactor: {{.Type}}Transactor{contract: contract}, {{.Type}}Filterer: {{.Type}}Filterer{contract: contract} }, nil
|
||||
}
|
||||
{{end}}
|
||||
|
||||
@ -83,6 +90,7 @@ package {{.Package}}
|
||||
type {{.Type}} struct {
|
||||
{{.Type}}Caller // Read-only binding to the contract
|
||||
{{.Type}}Transactor // Write-only binding to the contract
|
||||
{{.Type}}Filterer // Log filterer for contract events
|
||||
}
|
||||
|
||||
// {{.Type}}Caller is an auto generated read-only Go binding around an Ethereum contract.
|
||||
@ -95,6 +103,11 @@ package {{.Package}}
|
||||
contract *bind.BoundContract // Generic contract wrapper for the low level calls
|
||||
}
|
||||
|
||||
// {{.Type}}Filterer is an auto generated log filtering Go binding around the events of an Ethereum contract.
|
||||
type {{.Type}}Filterer struct {
|
||||
contract *bind.BoundContract // Generic contract wrapper for the low level calls
|
||||
}
|
||||
|
||||
// {{.Type}}Session is an auto generated Go binding around an Ethereum contract,
|
||||
// with pre-set call and transact options.
|
||||
type {{.Type}}Session struct {
|
||||
@ -134,16 +147,16 @@ package {{.Package}}
|
||||
|
||||
// New{{.Type}} creates a new instance of {{.Type}}, bound to a specific deployed contract.
|
||||
func New{{.Type}}(address common.Address, backend bind.ContractBackend) (*{{.Type}}, error) {
|
||||
contract, err := bind{{.Type}}(address, backend, backend)
|
||||
contract, err := bind{{.Type}}(address, backend, backend, backend)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &{{.Type}}{ {{.Type}}Caller: {{.Type}}Caller{contract: contract}, {{.Type}}Transactor: {{.Type}}Transactor{contract: contract} }, nil
|
||||
return &{{.Type}}{ {{.Type}}Caller: {{.Type}}Caller{contract: contract}, {{.Type}}Transactor: {{.Type}}Transactor{contract: contract}, {{.Type}}Filterer: {{.Type}}Filterer{contract: contract} }, nil
|
||||
}
|
||||
|
||||
// New{{.Type}}Caller creates a new read-only instance of {{.Type}}, bound to a specific deployed contract.
|
||||
func New{{.Type}}Caller(address common.Address, caller bind.ContractCaller) (*{{.Type}}Caller, error) {
|
||||
contract, err := bind{{.Type}}(address, caller, nil)
|
||||
contract, err := bind{{.Type}}(address, caller, nil, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -152,20 +165,29 @@ package {{.Package}}
|
||||
|
||||
// New{{.Type}}Transactor creates a new write-only instance of {{.Type}}, bound to a specific deployed contract.
|
||||
func New{{.Type}}Transactor(address common.Address, transactor bind.ContractTransactor) (*{{.Type}}Transactor, error) {
|
||||
contract, err := bind{{.Type}}(address, nil, transactor)
|
||||
contract, err := bind{{.Type}}(address, nil, transactor, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &{{.Type}}Transactor{contract: contract}, nil
|
||||
}
|
||||
|
||||
// New{{.Type}}Filterer creates a new log filterer instance of {{.Type}}, bound to a specific deployed contract.
|
||||
func New{{.Type}}Filterer(address common.Address, filterer bind.ContractFilterer) (*{{.Type}}Filterer, error) {
|
||||
contract, err := bind{{.Type}}(address, nil, nil, filterer)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &{{.Type}}Filterer{contract: contract}, nil
|
||||
}
|
||||
|
||||
// bind{{.Type}} binds a generic wrapper to an already deployed contract.
|
||||
func bind{{.Type}}(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor) (*bind.BoundContract, error) {
|
||||
func bind{{.Type}}(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) {
|
||||
parsed, err := abi.JSON(strings.NewReader({{.Type}}ABI))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return bind.NewBoundContract(address, parsed, caller, transactor), nil
|
||||
return bind.NewBoundContract(address, parsed, caller, transactor, filterer), nil
|
||||
}
|
||||
|
||||
// Call invokes the (constant) contract method with params as input values and
|
||||
@ -263,6 +285,137 @@ package {{.Package}}
|
||||
return _{{$contract.Type}}.Contract.{{.Normalized.Name}}(&_{{$contract.Type}}.TransactOpts {{range $i, $_ := .Normalized.Inputs}}, {{.Name}}{{end}})
|
||||
}
|
||||
{{end}}
|
||||
|
||||
{{range .Events}}
|
||||
// {{$contract.Type}}{{.Normalized.Name}}Iterator is returned from Filter{{.Normalized.Name}} and is used to iterate over the raw logs and unpacked data for {{.Normalized.Name}} events raised by the {{$contract.Type}} contract.
|
||||
type {{$contract.Type}}{{.Normalized.Name}}Iterator struct {
|
||||
Event *{{$contract.Type}}{{.Normalized.Name}} // Event containing the contract specifics and raw log
|
||||
|
||||
contract *bind.BoundContract // Generic contract to use for unpacking event data
|
||||
event string // Event name to use for unpacking event data
|
||||
|
||||
logs chan types.Log // Log channel receiving the found contract events
|
||||
sub ethereum.Subscription // Subscription for errors, completion and termination
|
||||
done bool // Whether the subscription completed delivering logs
|
||||
fail error // Occurred error to stop iteration
|
||||
}
|
||||
// Next advances the iterator to the subsequent event, returning whether there
|
||||
// are any more events found. In case of a retrieval or parsing error, false is
|
||||
// returned and Error() can be queried for the exact failure.
|
||||
func (it *{{$contract.Type}}{{.Normalized.Name}}Iterator) Next() bool {
|
||||
// If the iterator failed, stop iterating
|
||||
if (it.fail != nil) {
|
||||
return false
|
||||
}
|
||||
// If the iterator completed, deliver directly whatever's available
|
||||
if (it.done) {
|
||||
select {
|
||||
case log := <-it.logs:
|
||||
it.Event = new({{$contract.Type}}{{.Normalized.Name}})
|
||||
if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil {
|
||||
it.fail = err
|
||||
return false
|
||||
}
|
||||
it.Event.Raw = log
|
||||
return true
|
||||
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
// Iterator still in progress, wait for either a data or an error event
|
||||
select {
|
||||
case log := <-it.logs:
|
||||
it.Event = new({{$contract.Type}}{{.Normalized.Name}})
|
||||
if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil {
|
||||
it.fail = err
|
||||
return false
|
||||
}
|
||||
it.Event.Raw = log
|
||||
return true
|
||||
|
||||
case err := <-it.sub.Err():
|
||||
it.done = true
|
||||
it.fail = err
|
||||
return it.Next()
|
||||
}
|
||||
}
|
||||
// Error returns any retrieval or parsing error that occurred during filtering.
|
||||
func (it *{{$contract.Type}}{{.Normalized.Name}}Iterator) Error() error {
|
||||
return it.fail
|
||||
}
|
||||
// Close terminates the iteration process, releasing any pending underlying
|
||||
// resources.
|
||||
func (it *{{$contract.Type}}{{.Normalized.Name}}Iterator) Close() error {
|
||||
it.sub.Unsubscribe()
|
||||
return nil
|
||||
}
|
||||
|
||||
// {{$contract.Type}}{{.Normalized.Name}} represents a {{.Normalized.Name}} event raised by the {{$contract.Type}} contract.
|
||||
type {{$contract.Type}}{{.Normalized.Name}} struct { {{range .Normalized.Inputs}}
|
||||
{{capitalise .Name}} {{if .Indexed}}{{bindtopictype .Type}}{{else}}{{bindtype .Type}}{{end}}; {{end}}
|
||||
Raw types.Log // Blockchain specific contextual infos
|
||||
}
|
||||
|
||||
// Filter{{.Normalized.Name}} is a free log retrieval operation binding the contract event 0x{{printf "%x" .Original.Id}}.
|
||||
//
|
||||
// Solidity: {{.Original.String}}
|
||||
func (_{{$contract.Type}} *{{$contract.Type}}Filterer) Filter{{.Normalized.Name}}(opts *bind.FilterOpts{{range .Normalized.Inputs}}{{if .Indexed}}, {{.Name}} []{{bindtype .Type}}{{end}}{{end}}) (*{{$contract.Type}}{{.Normalized.Name}}Iterator, error) {
|
||||
{{range .Normalized.Inputs}}
|
||||
{{if .Indexed}}var {{.Name}}Rule []interface{}
|
||||
for _, {{.Name}}Item := range {{.Name}} {
|
||||
{{.Name}}Rule = append({{.Name}}Rule, {{.Name}}Item)
|
||||
}{{end}}{{end}}
|
||||
|
||||
logs, sub, err := _{{$contract.Type}}.contract.FilterLogs(opts, "{{.Original.Name}}"{{range .Normalized.Inputs}}{{if .Indexed}}, {{.Name}}Rule{{end}}{{end}})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &{{$contract.Type}}{{.Normalized.Name}}Iterator{contract: _{{$contract.Type}}.contract, event: "{{.Original.Name}}", logs: logs, sub: sub}, nil
|
||||
}
|
||||
|
||||
// Watch{{.Normalized.Name}} is a free log subscription operation binding the contract event 0x{{printf "%x" .Original.Id}}.
|
||||
//
|
||||
// Solidity: {{.Original.String}}
|
||||
func (_{{$contract.Type}} *{{$contract.Type}}Filterer) Watch{{.Normalized.Name}}(opts *bind.WatchOpts, sink chan<- *{{$contract.Type}}{{.Normalized.Name}}{{range .Normalized.Inputs}}{{if .Indexed}}, {{.Name}} []{{bindtype .Type}}{{end}}{{end}}) (event.Subscription, error) {
|
||||
{{range .Normalized.Inputs}}
|
||||
{{if .Indexed}}var {{.Name}}Rule []interface{}
|
||||
for _, {{.Name}}Item := range {{.Name}} {
|
||||
{{.Name}}Rule = append({{.Name}}Rule, {{.Name}}Item)
|
||||
}{{end}}{{end}}
|
||||
|
||||
logs, sub, err := _{{$contract.Type}}.contract.WatchLogs(opts, "{{.Original.Name}}"{{range .Normalized.Inputs}}{{if .Indexed}}, {{.Name}}Rule{{end}}{{end}})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return event.NewSubscription(func(quit <-chan struct{}) error {
|
||||
defer sub.Unsubscribe()
|
||||
for {
|
||||
select {
|
||||
case log := <-logs:
|
||||
// New log arrived, parse the event and forward to the user
|
||||
event := new({{$contract.Type}}{{.Normalized.Name}})
|
||||
if err := _{{$contract.Type}}.contract.UnpackLog(event, "{{.Original.Name}}", log); err != nil {
|
||||
return err
|
||||
}
|
||||
event.Raw = log
|
||||
|
||||
select {
|
||||
case sink <- event:
|
||||
case err := <-sub.Err():
|
||||
return err
|
||||
case <-quit:
|
||||
return nil
|
||||
}
|
||||
case err := <-sub.Err():
|
||||
return err
|
||||
case <-quit:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}), nil
|
||||
}
|
||||
{{end}}
|
||||
{{end}}
|
||||
`
|
||||
|
||||
|
189
accounts/abi/bind/topics.go
Normal file
@ -0,0 +1,189 @@
|
||||
// Copyright 2018 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package bind
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"math/big"
|
||||
"reflect"
|
||||
|
||||
"github.com/ethereum/go-ethereum/accounts/abi"
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/crypto"
|
||||
)
|
||||
|
||||
// makeTopics converts a filter query argument list into a filter topic set.
|
||||
func makeTopics(query ...[]interface{}) ([][]common.Hash, error) {
|
||||
topics := make([][]common.Hash, len(query))
|
||||
for i, filter := range query {
|
||||
for _, rule := range filter {
|
||||
var topic common.Hash
|
||||
|
||||
// Try to generate the topic based on simple types
|
||||
switch rule := rule.(type) {
|
||||
case common.Hash:
|
||||
copy(topic[:], rule[:])
|
||||
case common.Address:
|
||||
copy(topic[common.HashLength-common.AddressLength:], rule[:])
|
||||
case *big.Int:
|
||||
blob := rule.Bytes()
|
||||
copy(topic[common.HashLength-len(blob):], blob)
|
||||
case bool:
|
||||
if rule {
|
||||
topic[common.HashLength-1] = 1
|
||||
}
|
||||
case int8:
|
||||
blob := big.NewInt(int64(rule)).Bytes()
|
||||
copy(topic[common.HashLength-len(blob):], blob)
|
||||
case int16:
|
||||
blob := big.NewInt(int64(rule)).Bytes()
|
||||
copy(topic[common.HashLength-len(blob):], blob)
|
||||
case int32:
|
||||
blob := big.NewInt(int64(rule)).Bytes()
|
||||
copy(topic[common.HashLength-len(blob):], blob)
|
||||
case int64:
|
||||
blob := big.NewInt(rule).Bytes()
|
||||
copy(topic[common.HashLength-len(blob):], blob)
|
||||
case uint8:
|
||||
blob := new(big.Int).SetUint64(uint64(rule)).Bytes()
|
||||
copy(topic[common.HashLength-len(blob):], blob)
|
||||
case uint16:
|
||||
blob := new(big.Int).SetUint64(uint64(rule)).Bytes()
|
||||
copy(topic[common.HashLength-len(blob):], blob)
|
||||
case uint32:
|
||||
blob := new(big.Int).SetUint64(uint64(rule)).Bytes()
|
||||
copy(topic[common.HashLength-len(blob):], blob)
|
||||
case uint64:
|
||||
blob := new(big.Int).SetUint64(rule).Bytes()
|
||||
copy(topic[common.HashLength-len(blob):], blob)
|
||||
case string:
|
||||
hash := crypto.Keccak256Hash([]byte(rule))
|
||||
copy(topic[:], hash[:])
|
||||
case []byte:
|
||||
hash := crypto.Keccak256Hash(rule)
|
||||
copy(topic[:], hash[:])
|
||||
|
||||
default:
|
||||
// Attempt to generate the topic from funky types
|
||||
val := reflect.ValueOf(rule)
|
||||
|
||||
switch {
|
||||
case val.Kind() == reflect.Array && reflect.TypeOf(rule).Elem().Kind() == reflect.Uint8:
|
||||
reflect.Copy(reflect.ValueOf(topic[common.HashLength-val.Len():]), val)
|
||||
|
||||
default:
|
||||
return nil, fmt.Errorf("unsupported indexed type: %T", rule)
|
||||
}
|
||||
}
|
||||
topics[i] = append(topics[i], topic)
|
||||
}
|
||||
}
|
||||
return topics, nil
|
||||
}
|
||||
|
||||
// Big batch of reflect types for topic reconstruction.
|
||||
var (
|
||||
reflectHash = reflect.TypeOf(common.Hash{})
|
||||
reflectAddress = reflect.TypeOf(common.Address{})
|
||||
reflectBigInt = reflect.TypeOf(new(big.Int))
|
||||
)
|
||||
|
||||
// parseTopics converts the indexed topic fields into actual log field values.
|
||||
//
|
||||
// Note, dynamic types cannot be reconstructed since they get mapped to Keccak256
|
||||
// hashes as the topic value!
|
||||
func parseTopics(out interface{}, fields abi.Arguments, topics []common.Hash) error {
|
||||
// Sanity check that the fields and topics match up
|
||||
if len(fields) != len(topics) {
|
||||
return errors.New("topic/field count mismatch")
|
||||
}
|
||||
// Iterate over all the fields and reconstruct them from topics
|
||||
for _, arg := range fields {
|
||||
if !arg.Indexed {
|
||||
return errors.New("non-indexed field in topic reconstruction")
|
||||
}
|
||||
field := reflect.ValueOf(out).Elem().FieldByName(capitalise(arg.Name))
|
||||
|
||||
// Try to parse the topic back into the fields based on primitive types
|
||||
switch field.Kind() {
|
||||
case reflect.Bool:
|
||||
if topics[0][common.HashLength-1] == 1 {
|
||||
field.Set(reflect.ValueOf(true))
|
||||
}
|
||||
case reflect.Int8:
|
||||
num := new(big.Int).SetBytes(topics[0][:])
|
||||
field.Set(reflect.ValueOf(int8(num.Int64())))
|
||||
|
||||
case reflect.Int16:
|
||||
num := new(big.Int).SetBytes(topics[0][:])
|
||||
field.Set(reflect.ValueOf(int16(num.Int64())))
|
||||
|
||||
case reflect.Int32:
|
||||
num := new(big.Int).SetBytes(topics[0][:])
|
||||
field.Set(reflect.ValueOf(int32(num.Int64())))
|
||||
|
||||
case reflect.Int64:
|
||||
num := new(big.Int).SetBytes(topics[0][:])
|
||||
field.Set(reflect.ValueOf(num.Int64()))
|
||||
|
||||
case reflect.Uint8:
|
||||
num := new(big.Int).SetBytes(topics[0][:])
|
||||
field.Set(reflect.ValueOf(uint8(num.Uint64())))
|
||||
|
||||
case reflect.Uint16:
|
||||
num := new(big.Int).SetBytes(topics[0][:])
|
||||
field.Set(reflect.ValueOf(uint16(num.Uint64())))
|
||||
|
||||
case reflect.Uint32:
|
||||
num := new(big.Int).SetBytes(topics[0][:])
|
||||
field.Set(reflect.ValueOf(uint32(num.Uint64())))
|
||||
|
||||
case reflect.Uint64:
|
||||
num := new(big.Int).SetBytes(topics[0][:])
|
||||
field.Set(reflect.ValueOf(num.Uint64()))
|
||||
|
||||
default:
|
||||
// Ran out of plain primitive types, try custom types
|
||||
switch field.Type() {
|
||||
case reflectHash: // Also covers all dynamic types
|
||||
field.Set(reflect.ValueOf(topics[0]))
|
||||
|
||||
case reflectAddress:
|
||||
var addr common.Address
|
||||
copy(addr[:], topics[0][common.HashLength-common.AddressLength:])
|
||||
field.Set(reflect.ValueOf(addr))
|
||||
|
||||
case reflectBigInt:
|
||||
num := new(big.Int).SetBytes(topics[0][:])
|
||||
field.Set(reflect.ValueOf(num))
|
||||
|
||||
default:
|
||||
// Ran out of custom types, try the crazies
|
||||
switch {
|
||||
case arg.Type.T == abi.FixedBytesTy:
|
||||
reflect.Copy(field, reflect.ValueOf(topics[0][common.HashLength-arg.Type.Size:]))
|
||||
|
||||
default:
|
||||
return fmt.Errorf("unsupported indexed type: %v", arg.Type)
|
||||
}
|
||||
}
|
||||
}
|
||||
topics = topics[1:]
|
||||
}
|
||||
return nil
|
||||
}
|
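A small, self-contained sketch of the encoding rules makeTopics applies (and that parseTopics inverts where possible): value types are left-padded into a 32-byte topic word, while dynamic types are reduced to their Keccak256 hash, which is why only the hash can be recovered later. The imports are go-ethereum's own packages; the concrete values are illustrative.

```go
package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	// An integer topic: big-endian bytes, left-padded to 32 bytes (314 = 0x013a).
	var numTopic common.Hash
	blob := big.NewInt(314).Bytes()
	copy(numTopic[common.HashLength-len(blob):], blob)
	fmt.Println(numTopic.Hex()) // 0x00...013a

	// An address topic: the 20 address bytes occupy the low-order end of the word.
	addr := common.HexToAddress("0x00Ce0d46d924CC8437c806721496599FC3FFA268")
	var addrTopic common.Hash
	copy(addrTopic[common.HashLength-common.AddressLength:], addr[:])
	fmt.Println(addrTopic.Hex())

	// A string topic: only its Keccak256 hash is stored. This is the same
	// IndexedString hash the Eventer test above checks for "Hello".
	fmt.Println(crypto.Keccak256Hash([]byte("Hello")).Hex())
}
```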
@ -34,18 +34,18 @@ var testKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d
|
||||
|
||||
var waitDeployedTests = map[string]struct {
|
||||
code string
|
||||
gas *big.Int
|
||||
gas uint64
|
||||
wantAddress common.Address
|
||||
wantErr error
|
||||
}{
|
||||
"successful deploy": {
|
||||
code: `6060604052600a8060106000396000f360606040526008565b00`,
|
||||
gas: big.NewInt(3000000),
|
||||
gas: 3000000,
|
||||
wantAddress: common.HexToAddress("0x3a220f351252089d385b29beca14e27f204c296a"),
|
||||
},
|
||||
"empty code": {
|
||||
code: ``,
|
||||
gas: big.NewInt(300000),
|
||||
gas: 300000,
|
||||
wantErr: bind.ErrNoCodeAfterDeploy,
|
||||
wantAddress: common.HexToAddress("0x3a220f351252089d385b29beca14e27f204c296a"),
|
||||
},
|
||||
|
@ -39,22 +39,23 @@ func formatSliceString(kind reflect.Kind, sliceSize int) string {
|
||||
// type in t.
|
||||
func sliceTypeCheck(t Type, val reflect.Value) error {
|
||||
if val.Kind() != reflect.Slice && val.Kind() != reflect.Array {
|
||||
return typeErr(formatSliceString(t.Kind, t.SliceSize), val.Type())
|
||||
}
|
||||
if t.IsArray && val.Len() != t.SliceSize {
|
||||
return typeErr(formatSliceString(t.Elem.Kind, t.SliceSize), formatSliceString(val.Type().Elem().Kind(), val.Len()))
|
||||
return typeErr(formatSliceString(t.Kind, t.Size), val.Type())
|
||||
}
|
||||
|
||||
if t.Elem.IsSlice {
|
||||
if t.T == ArrayTy && val.Len() != t.Size {
|
||||
return typeErr(formatSliceString(t.Elem.Kind, t.Size), formatSliceString(val.Type().Elem().Kind(), val.Len()))
|
||||
}
|
||||
|
||||
if t.Elem.T == SliceTy {
|
||||
if val.Len() > 0 {
|
||||
return sliceTypeCheck(*t.Elem, val.Index(0))
|
||||
}
|
||||
} else if t.Elem.IsArray {
|
||||
} else if t.Elem.T == ArrayTy {
|
||||
return sliceTypeCheck(*t.Elem, val.Index(0))
|
||||
}
|
||||
|
||||
if elemKind := val.Type().Elem().Kind(); elemKind != t.Elem.Kind {
|
||||
return typeErr(formatSliceString(t.Elem.Kind, t.SliceSize), val.Type())
|
||||
return typeErr(formatSliceString(t.Elem.Kind, t.Size), val.Type())
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@ -62,20 +63,19 @@ func sliceTypeCheck(t Type, val reflect.Value) error {
|
||||
// typeCheck checks that the given reflection value can be assigned to the reflection
|
||||
// type in t.
|
||||
func typeCheck(t Type, value reflect.Value) error {
|
||||
if t.IsSlice || t.IsArray {
|
||||
if t.T == SliceTy || t.T == ArrayTy {
|
||||
return sliceTypeCheck(t, value)
|
||||
}
|
||||
|
||||
// Check base type validity. Element types will be checked later on.
|
||||
if t.Kind != value.Kind() {
|
||||
return typeErr(t.Kind, value.Kind())
|
||||
} else if t.T == FixedBytesTy && t.Size != value.Len() {
|
||||
return typeErr(t.Type, value.Type())
|
||||
} else {
|
||||
return nil
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// varErr returns a formatted error.
|
||||
func varErr(expected, got reflect.Kind) error {
|
||||
return typeErr(expected, got)
|
||||
}
|
||||
|
||||
// typeErr returns a formatted type casting error.
|
||||
|
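The checks above now branch on the T, Size and Elem fields instead of the removed IsSlice/IsArray/SliceSize flags. A rough sketch of what those fields hold, assuming the single-argument NewType of this revision (later releases changed its signature):

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/accounts/abi"
)

func main() {
	// "uint8[2]" is a fixed-size array: T is ArrayTy, Size is 2 and Elem
	// describes the uint8 element type.
	arr, _ := abi.NewType("uint8[2]")
	fmt.Println(arr.T == abi.ArrayTy, arr.Size, arr.Elem.T == abi.UintTy) // true 2 true

	// "uint8[]" is a dynamic slice: T is SliceTy and Size stays zero.
	slice, _ := abi.NewType("uint8[]")
	fmt.Println(slice.T == abi.SliceTy, slice.Size) // true 0
}
```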
@ -30,7 +30,18 @@ import (
|
||||
type Event struct {
|
||||
Name string
|
||||
Anonymous bool
|
||||
Inputs []Argument
|
||||
Inputs Arguments
|
||||
}
|
||||
|
||||
func (event Event) String() string {
|
||||
inputs := make([]string, len(event.Inputs))
|
||||
for i, input := range event.Inputs {
|
||||
inputs[i] = fmt.Sprintf("%v %v", input.Name, input.Type)
|
||||
if input.Indexed {
|
||||
inputs[i] = fmt.Sprintf("%v indexed %v", input.Name, input.Type)
|
||||
}
|
||||
}
|
||||
return fmt.Sprintf("event %v(%v)", event.Name, strings.Join(inputs, ", "))
|
||||
}
|
||||
|
||||
// Id returns the canonical representation of the event's signature used by the
|
||||
|
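A worked example of the new Event.String output, built the same way the tests below build their events (unmarshalling a JSON ABI fragment); the definition is a trimmed-down version of the SimpleEvent used in the binder tests above, and the snippet assumes the abi package at this revision.

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/ethereum/go-ethereum/accounts/abi"
)

func main() {
	// A trimmed-down SimpleEvent: one indexed and one non-indexed input.
	def := []byte(`{"name":"SimpleEvent","type":"event","anonymous":false,"inputs":[
		{"indexed":true,"name":"Addr","type":"address"},
		{"indexed":false,"name":"Value","type":"uint256"}]}`)

	var e abi.Event
	if err := json.Unmarshal(def, &e); err != nil {
		panic(err)
	}
	// Indexed inputs are rendered with the "indexed" marker:
	// event SimpleEvent(Addr indexed address, Value uint256)
	fmt.Println(e)
}
```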
@ -17,13 +17,53 @@
|
||||
package abi
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"math/big"
|
||||
"reflect"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/crypto"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
var jsonEventTransfer = []byte(`{
|
||||
"anonymous": false,
|
||||
"inputs": [
|
||||
{
|
||||
"indexed": true, "name": "from", "type": "address"
|
||||
}, {
|
||||
"indexed": true, "name": "to", "type": "address"
|
||||
}, {
|
||||
"indexed": false, "name": "value", "type": "uint256"
|
||||
}],
|
||||
"name": "Transfer",
|
||||
"type": "event"
|
||||
}`)
|
||||
|
||||
var jsonEventPledge = []byte(`{
|
||||
"anonymous": false,
|
||||
"inputs": [{
|
||||
"indexed": false, "name": "who", "type": "address"
|
||||
}, {
|
||||
"indexed": false, "name": "wad", "type": "uint128"
|
||||
}, {
|
||||
"indexed": false, "name": "currency", "type": "bytes3"
|
||||
}],
|
||||
"name": "Pledge",
|
||||
"type": "event"
|
||||
}`)
|
||||
|
||||
// 1000000
|
||||
var transferData1 = "00000000000000000000000000000000000000000000000000000000000f4240"
|
||||
|
||||
// "0x00Ce0d46d924CC8437c806721496599FC3FFA268", 2218516807680, "usd"
|
||||
var pledgeData1 = "00000000000000000000000000ce0d46d924cc8437c806721496599fc3ffa2680000000000000000000000000000000000000000000000000000020489e800007573640000000000000000000000000000000000000000000000000000000000"
|
||||
|
||||
func TestEventId(t *testing.T) {
|
||||
var table = []struct {
|
||||
definition string
|
||||
@ -31,7 +71,7 @@ func TestEventId(t *testing.T) {
|
||||
}{
|
||||
{
|
||||
definition: `[
|
||||
{ "type" : "event", "name" : "balance", "inputs": [{ "name" : "in", "type": "uint" }] },
|
||||
{ "type" : "event", "name" : "balance", "inputs": [{ "name" : "in", "type": "uint256" }] },
|
||||
{ "type" : "event", "name" : "check", "inputs": [{ "name" : "t", "type": "address" }, { "name": "b", "type": "uint256" }] }
|
||||
]`,
|
||||
expectations: map[string]common.Hash{
|
||||
@ -54,3 +94,223 @@ func TestEventId(t *testing.T) {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TestEventMultiValueWithArrayUnpack verifies that fields following an array are still unpacked at the correct offsets.
|
||||
func TestEventMultiValueWithArrayUnpack(t *testing.T) {
|
||||
definition := `[{"name": "test", "type": "event", "inputs": [{"indexed": false, "name":"value1", "type":"uint8[2]"},{"indexed": false, "name":"value2", "type":"uint8"}]}]`
|
||||
type testStruct struct {
|
||||
Value1 [2]uint8
|
||||
Value2 uint8
|
||||
}
|
||||
abi, err := JSON(strings.NewReader(definition))
|
||||
require.NoError(t, err)
|
||||
var b bytes.Buffer
|
||||
var i uint8 = 1
|
||||
for ; i <= 3; i++ {
|
||||
b.Write(packNum(reflect.ValueOf(i)))
|
||||
}
|
||||
var rst testStruct
|
||||
require.NoError(t, abi.Unpack(&rst, "test", b.Bytes()))
|
||||
require.Equal(t, [2]uint8{1, 2}, rst.Value1)
|
||||
require.Equal(t, uint8(3), rst.Value2)
|
||||
}
|
||||
|
||||
func TestEventTupleUnpack(t *testing.T) {
|
||||
|
||||
type EventTransfer struct {
|
||||
Value *big.Int
|
||||
}
|
||||
|
||||
type EventPledge struct {
|
||||
Who common.Address
|
||||
Wad *big.Int
|
||||
Currency [3]byte
|
||||
}
|
||||
|
||||
type BadEventPledge struct {
|
||||
Who string
|
||||
Wad int
|
||||
Currency [3]byte
|
||||
}
|
||||
|
||||
bigint := new(big.Int)
|
||||
bigintExpected := big.NewInt(1000000)
|
||||
bigintExpected2 := big.NewInt(2218516807680)
|
||||
addr := common.HexToAddress("0x00Ce0d46d924CC8437c806721496599FC3FFA268")
|
||||
var testCases = []struct {
|
||||
data string
|
||||
dest interface{}
|
||||
expected interface{}
|
||||
jsonLog []byte
|
||||
error string
|
||||
name string
|
||||
}{{
|
||||
transferData1,
|
||||
&EventTransfer{},
|
||||
&EventTransfer{Value: bigintExpected},
|
||||
jsonEventTransfer,
|
||||
"",
|
||||
"Can unpack ERC20 Transfer event into structure",
|
||||
}, {
|
||||
transferData1,
|
||||
&[]interface{}{&bigint},
|
||||
&[]interface{}{&bigintExpected},
|
||||
jsonEventTransfer,
|
||||
"",
|
||||
"Can unpack ERC20 Transfer event into slice",
|
||||
}, {
|
||||
pledgeData1,
|
||||
&EventPledge{},
|
||||
&EventPledge{
|
||||
addr,
|
||||
bigintExpected2,
|
||||
[3]byte{'u', 's', 'd'}},
|
||||
jsonEventPledge,
|
||||
"",
|
||||
"Can unpack Pledge event into structure",
|
||||
}, {
|
||||
pledgeData1,
|
||||
&[]interface{}{&common.Address{}, &bigint, &[3]byte{}},
|
||||
&[]interface{}{
|
||||
&addr,
|
||||
&bigintExpected2,
|
||||
&[3]byte{'u', 's', 'd'}},
|
||||
jsonEventPledge,
|
||||
"",
|
||||
"Can unpack Pledge event into slice",
|
||||
}, {
|
||||
pledgeData1,
|
||||
&[3]interface{}{&common.Address{}, &bigint, &[3]byte{}},
|
||||
&[3]interface{}{
|
||||
&addr,
|
||||
&bigintExpected2,
|
||||
&[3]byte{'u', 's', 'd'}},
|
||||
jsonEventPledge,
|
||||
"",
|
||||
"Can unpack Pledge event into an array",
|
||||
}, {
|
||||
pledgeData1,
|
||||
&[]interface{}{new(int), 0, 0},
|
||||
&[]interface{}{},
|
||||
jsonEventPledge,
|
||||
"abi: cannot unmarshal common.Address in to int",
|
||||
"Can not unpack Pledge event into slice with wrong types",
|
||||
}, {
|
||||
pledgeData1,
|
||||
&BadEventPledge{},
|
||||
&BadEventPledge{},
|
||||
jsonEventPledge,
|
||||
"abi: cannot unmarshal common.Address in to string",
|
||||
"Can not unpack Pledge event into struct with wrong filed types",
|
||||
}, {
|
||||
pledgeData1,
|
||||
&[]interface{}{common.Address{}, new(big.Int)},
|
||||
&[]interface{}{},
|
||||
jsonEventPledge,
|
||||
"abi: insufficient number of elements in the list/array for unpack, want 3, got 2",
|
||||
"Can not unpack Pledge event into too short slice",
|
||||
}, {
|
||||
pledgeData1,
|
||||
new(map[string]interface{}),
|
||||
&[]interface{}{},
|
||||
jsonEventPledge,
|
||||
"abi: cannot unmarshal tuple into map[string]interface {}",
|
||||
"Can not unpack Pledge event into map",
|
||||
}}
|
||||
|
||||
for _, tc := range testCases {
|
||||
assert := assert.New(t)
|
||||
tc := tc
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
err := unpackTestEventData(tc.dest, tc.data, tc.jsonLog, assert)
|
||||
if tc.error == "" {
|
||||
assert.Nil(err, "Should be able to unpack event data.")
|
||||
assert.Equal(tc.expected, tc.dest, tc.name)
|
||||
} else {
|
||||
assert.EqualError(err, tc.error)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func unpackTestEventData(dest interface{}, hexData string, jsonEvent []byte, assert *assert.Assertions) error {
|
||||
data, err := hex.DecodeString(hexData)
|
||||
assert.NoError(err, "Hex data should be a correct hex-string")
|
||||
var e Event
|
||||
assert.NoError(json.Unmarshal(jsonEvent, &e), "Should be able to unmarshal event ABI")
|
||||
a := ABI{Events: map[string]Event{"e": e}}
|
||||
return a.Unpack(dest, "e", data)
|
||||
}
|
||||
|
||||
/*
|
||||
Taken from
|
||||
https://github.com/ethereum/go-ethereum/pull/15568
|
||||
*/
|
||||
|
||||
type testResult struct {
|
||||
Values [2]*big.Int
|
||||
Value1 *big.Int
|
||||
Value2 *big.Int
|
||||
}
|
||||
|
||||
type testCase struct {
|
||||
definition string
|
||||
want testResult
|
||||
}
|
||||
|
||||
func (tc testCase) encoded(intType, arrayType Type) []byte {
|
||||
var b bytes.Buffer
|
||||
if tc.want.Value1 != nil {
|
||||
val, _ := intType.pack(reflect.ValueOf(tc.want.Value1))
|
||||
b.Write(val)
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(tc.want.Values, [2]*big.Int{nil, nil}) {
|
||||
val, _ := arrayType.pack(reflect.ValueOf(tc.want.Values))
|
||||
b.Write(val)
|
||||
}
|
||||
if tc.want.Value2 != nil {
|
||||
val, _ := intType.pack(reflect.ValueOf(tc.want.Value2))
|
||||
b.Write(val)
|
||||
}
|
||||
return b.Bytes()
|
||||
}
|
||||
|
||||
// TestEventUnpackIndexed verifies that indexed fields are skipped by the event decoder.
|
||||
func TestEventUnpackIndexed(t *testing.T) {
|
||||
definition := `[{"name": "test", "type": "event", "inputs": [{"indexed": true, "name":"value1", "type":"uint8"},{"indexed": false, "name":"value2", "type":"uint8"}]}]`
|
||||
type testStruct struct {
|
||||
Value1 uint8
|
||||
Value2 uint8
|
||||
}
|
||||
abi, err := JSON(strings.NewReader(definition))
|
||||
require.NoError(t, err)
|
||||
var b bytes.Buffer
|
||||
b.Write(packNum(reflect.ValueOf(uint8(8))))
|
||||
var rst testStruct
|
||||
require.NoError(t, abi.Unpack(&rst, "test", b.Bytes()))
|
||||
require.Equal(t, uint8(0), rst.Value1)
|
||||
require.Equal(t, uint8(8), rst.Value2)
|
||||
}
|
||||
|
||||
// TestEventIndexedWithArrayUnpack verifies that the decoder will not overflow when a static array is an indexed input.
|
||||
func TestEventIndexedWithArrayUnpack(t *testing.T) {
|
||||
definition := `[{"name": "test", "type": "event", "inputs": [{"indexed": true, "name":"value1", "type":"uint8[2]"},{"indexed": false, "name":"value2", "type":"string"}]}]`
|
||||
type testStruct struct {
|
||||
Value1 [2]uint8
|
||||
Value2 string
|
||||
}
|
||||
abi, err := JSON(strings.NewReader(definition))
|
||||
require.NoError(t, err)
|
||||
var b bytes.Buffer
|
||||
stringOut := "abc"
|
||||
// number of fields that will be encoded * 32
|
||||
b.Write(packNum(reflect.ValueOf(32)))
|
||||
b.Write(packNum(reflect.ValueOf(len(stringOut))))
|
||||
b.Write(common.RightPadBytes([]byte(stringOut), 32))
|
||||
|
||||
var rst testStruct
|
||||
require.NoError(t, abi.Unpack(&rst, "test", b.Bytes()))
|
||||
require.Equal(t, [2]uint8{0, 0}, rst.Value1)
|
||||
require.Equal(t, stringOut, rst.Value2)
|
||||
}
|
||||
|
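To tie the tests above together: for an ERC20 Transfer the two address arguments are indexed, so a log's data segment carries only the value word (the transferData1 payload above), and the decoder skips the indexed fields; those sit in the log topics and are recovered separately via parseTopics. A minimal sketch using the public API, with the same payload as the tests:

```go
package main

import (
	"fmt"
	"math/big"
	"strings"

	"github.com/ethereum/go-ethereum/accounts/abi"
	"github.com/ethereum/go-ethereum/common"
)

func main() {
	parsed, err := abi.JSON(strings.NewReader(`[{"type":"event","name":"Transfer","inputs":[
		{"indexed":true,"name":"from","type":"address"},
		{"indexed":true,"name":"to","type":"address"},
		{"indexed":false,"name":"value","type":"uint256"}]}]`))
	if err != nil {
		panic(err)
	}
	// The log data holds only the non-indexed `value` (1000000 here);
	// `from` and `to` live in the log topics instead.
	data := common.Hex2Bytes("00000000000000000000000000000000000000000000000000000000000f4240")

	var out struct{ Value *big.Int }
	if err := parsed.Unpack(&out, "Transfer", data); err != nil {
		panic(err)
	}
	fmt.Println(out.Value) // 1000000
}
```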
@ -18,13 +18,12 @@ package abi
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strings"
|
||||
|
||||
"github.com/ethereum/go-ethereum/crypto"
|
||||
)
|
||||
|
||||
// Callable method given a `Name` and whether the method is a constant.
|
||||
// Method represents a callable given a `Name` and whether the method is a constant.
|
||||
// If the method is `Const` no transaction needs to be created for this
|
||||
// particular Method call. It can easily be simulated using a local VM.
|
||||
// For example a `Balance()` method only needs to retrieve something
|
||||
@ -35,46 +34,8 @@ import (
|
||||
type Method struct {
|
||||
Name string
|
||||
Const bool
|
||||
Inputs []Argument
|
||||
Outputs []Argument
|
||||
}
|
||||
|
||||
func (method Method) pack(args ...interface{}) ([]byte, error) {
|
||||
// Make sure arguments match up and pack them
|
||||
if len(args) != len(method.Inputs) {
|
||||
return nil, fmt.Errorf("argument count mismatch: %d for %d", len(args), len(method.Inputs))
|
||||
}
|
||||
// variable input is the output appended at the end of packed
|
||||
// output. This is used for strings and bytes types input.
|
||||
var variableInput []byte
|
||||
|
||||
var ret []byte
|
||||
for i, a := range args {
|
||||
input := method.Inputs[i]
|
||||
// pack the input
|
||||
packed, err := input.Type.pack(reflect.ValueOf(a))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("`%s` %v", method.Name, err)
|
||||
}
|
||||
|
||||
// check for a slice type (string, bytes, slice)
|
||||
if input.Type.requiresLengthPrefix() {
|
||||
// calculate the offset
|
||||
offset := len(method.Inputs)*32 + len(variableInput)
|
||||
// set the offset
|
||||
ret = append(ret, packNum(reflect.ValueOf(offset))...)
|
||||
// Append the packed output to the variable input. The variable input
|
||||
// will be appended at the end of the input.
|
||||
variableInput = append(variableInput, packed...)
|
||||
} else {
|
||||
// append the packed value to the input
|
||||
ret = append(ret, packed...)
|
||||
}
|
||||
}
|
||||
// append the variable input at the end of the packed input
|
||||
ret = append(ret, variableInput...)
|
||||
|
||||
return ret, nil
|
||||
Inputs Arguments
|
||||
Outputs Arguments
|
||||
}
|
||||
|
||||
// Sig returns the methods string signature according to the ABI spec.
|
||||
@ -84,35 +45,35 @@ func (method Method) pack(args ...interface{}) ([]byte, error) {
|
||||
// function foo(uint32 a, int b) = "foo(uint32,int256)"
|
||||
//
|
||||
// Please note that "int" is substitute for its canonical representation "int256"
|
||||
func (m Method) Sig() string {
|
||||
types := make([]string, len(m.Inputs))
|
||||
func (method Method) Sig() string {
|
||||
types := make([]string, len(method.Inputs))
|
||||
i := 0
|
||||
for _, input := range m.Inputs {
|
||||
for _, input := range method.Inputs {
|
||||
types[i] = input.Type.String()
|
||||
i++
|
||||
}
|
||||
return fmt.Sprintf("%v(%v)", m.Name, strings.Join(types, ","))
|
||||
return fmt.Sprintf("%v(%v)", method.Name, strings.Join(types, ","))
|
||||
}
|
||||
|
||||
func (m Method) String() string {
|
||||
inputs := make([]string, len(m.Inputs))
|
||||
for i, input := range m.Inputs {
|
||||
func (method Method) String() string {
|
||||
inputs := make([]string, len(method.Inputs))
|
||||
for i, input := range method.Inputs {
|
||||
inputs[i] = fmt.Sprintf("%v %v", input.Name, input.Type)
|
||||
}
|
||||
outputs := make([]string, len(m.Outputs))
|
||||
for i, output := range m.Outputs {
|
||||
outputs := make([]string, len(method.Outputs))
|
||||
for i, output := range method.Outputs {
|
||||
if len(output.Name) > 0 {
|
||||
outputs[i] = fmt.Sprintf("%v ", output.Name)
|
||||
}
|
||||
outputs[i] += output.Type.String()
|
||||
}
|
||||
constant := ""
|
||||
if m.Const {
|
||||
if method.Const {
|
||||
constant = "constant "
|
||||
}
|
||||
return fmt.Sprintf("function %v(%v) %sreturns(%v)", m.Name, strings.Join(inputs, ", "), constant, strings.Join(outputs, ", "))
|
||||
return fmt.Sprintf("function %v(%v) %sreturns(%v)", method.Name, strings.Join(inputs, ", "), constant, strings.Join(outputs, ", "))
|
||||
}
|
||||
|
||||
func (m Method) Id() []byte {
|
||||
return crypto.Keccak256([]byte(m.Sig()))[:4]
|
||||
func (method Method) Id() []byte {
|
||||
return crypto.Keccak256([]byte(method.Sig()))[:4]
|
||||
}
|
||||
|
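Method.Id builds on Method.Sig exactly as before: the 4-byte selector is the first four bytes of the Keccak256 hash of the canonical signature. A quick worked example for the well-known ERC20 transfer method:

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	// Canonical signature, with "uint" already expanded to "uint256".
	sig := "transfer(address,uint256)"
	selector := crypto.Keccak256([]byte(sig))[:4]
	fmt.Printf("%x\n", selector) // a9059cbb
}
```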
@ -25,36 +25,23 @@ import (
|
||||
)
|
||||
|
||||
var (
|
||||
big_t = reflect.TypeOf(big.Int{})
|
||||
ubig_t = reflect.TypeOf(big.Int{})
|
||||
byte_t = reflect.TypeOf(byte(0))
|
||||
byte_ts = reflect.TypeOf([]byte(nil))
|
||||
uint_t = reflect.TypeOf(uint(0))
|
||||
uint8_t = reflect.TypeOf(uint8(0))
|
||||
uint16_t = reflect.TypeOf(uint16(0))
|
||||
uint32_t = reflect.TypeOf(uint32(0))
|
||||
uint64_t = reflect.TypeOf(uint64(0))
|
||||
int_t = reflect.TypeOf(int(0))
|
||||
int8_t = reflect.TypeOf(int8(0))
|
||||
int16_t = reflect.TypeOf(int16(0))
|
||||
int32_t = reflect.TypeOf(int32(0))
|
||||
int64_t = reflect.TypeOf(int64(0))
|
||||
hash_t = reflect.TypeOf(common.Hash{})
|
||||
address_t = reflect.TypeOf(common.Address{})
|
||||
|
||||
uint_ts = reflect.TypeOf([]uint(nil))
|
||||
uint8_ts = reflect.TypeOf([]uint8(nil))
|
||||
uint16_ts = reflect.TypeOf([]uint16(nil))
|
||||
uint32_ts = reflect.TypeOf([]uint32(nil))
|
||||
uint64_ts = reflect.TypeOf([]uint64(nil))
|
||||
ubig_ts = reflect.TypeOf([]*big.Int(nil))
|
||||
|
||||
int_ts = reflect.TypeOf([]int(nil))
|
||||
int8_ts = reflect.TypeOf([]int8(nil))
|
||||
int16_ts = reflect.TypeOf([]int16(nil))
|
||||
int32_ts = reflect.TypeOf([]int32(nil))
|
||||
int64_ts = reflect.TypeOf([]int64(nil))
|
||||
big_ts = reflect.TypeOf([]*big.Int(nil))
|
||||
big_t = reflect.TypeOf(&big.Int{})
|
||||
derefbig_t = reflect.TypeOf(big.Int{})
|
||||
uint8_t = reflect.TypeOf(uint8(0))
|
||||
uint16_t = reflect.TypeOf(uint16(0))
|
||||
uint32_t = reflect.TypeOf(uint32(0))
|
||||
uint64_t = reflect.TypeOf(uint64(0))
|
||||
int_t = reflect.TypeOf(int(0))
|
||||
int8_t = reflect.TypeOf(int8(0))
|
||||
int16_t = reflect.TypeOf(int16(0))
|
||||
int32_t = reflect.TypeOf(int32(0))
|
||||
int64_t = reflect.TypeOf(int64(0))
|
||||
address_t = reflect.TypeOf(common.Address{})
|
||||
int_ts = reflect.TypeOf([]int(nil))
|
||||
int8_ts = reflect.TypeOf([]int8(nil))
|
||||
int16_ts = reflect.TypeOf([]int16(nil))
|
||||
int32_ts = reflect.TypeOf([]int32(nil))
|
||||
int64_ts = reflect.TypeOf([]int64(nil))
|
||||
)
|
||||
|
||||
// U256 converts a big Int into a 256bit EVM number.
|
||||
|
@ -48,9 +48,8 @@ func packElement(t Type, reflectValue reflect.Value) []byte {
|
||||
case BoolTy:
|
||||
if reflectValue.Bool() {
|
||||
return math.PaddedBigBytes(common.Big1, 32)
|
||||
} else {
|
||||
return math.PaddedBigBytes(common.Big0, 32)
|
||||
}
|
||||
return math.PaddedBigBytes(common.Big0, 32)
|
||||
case BytesTy:
|
||||
if reflectValue.Kind() == reflect.Array {
|
||||
reflectValue = mustArrayToByteSlice(reflectValue)
|
||||
@ -61,8 +60,9 @@ func packElement(t Type, reflectValue reflect.Value) []byte {
|
||||
reflectValue = mustArrayToByteSlice(reflectValue)
|
||||
}
|
||||
return common.RightPadBytes(reflectValue.Bytes(), 32)
|
||||
default:
|
||||
panic("abi: fatal error")
|
||||
}
|
||||
panic("abi: fatal error")
|
||||
}
|
||||
|
||||
// packNum packs the given number (using the reflect value) and will cast it to appropriate number representation
|
||||
@ -74,6 +74,8 @@ func packNum(value reflect.Value) []byte {
|
||||
return U256(big.NewInt(value.Int()))
|
||||
case reflect.Ptr:
|
||||
return U256(value.Interface().(*big.Int))
|
||||
default:
|
||||
panic("abi: fatal error")
|
||||
}
|
||||
return nil
|
||||
|
||||
}
|
||||
|
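For reference, packNum routes integers through U256, so every static value ends up as one 32-byte, big-endian word, with negative values stored in two's-complement form. A small sketch of the resulting layout; PaddedBigBytes is the same helper pack.go uses for booleans above.

```go
package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/common/math"
)

func main() {
	// 42 packs to 0x000...02a: big-endian, left-padded to 32 bytes.
	fmt.Printf("%x\n", math.PaddedBigBytes(big.NewInt(42), 32))

	// -1 reduced modulo 2^256 (what U256 does) is 32 0xff bytes.
	twosComplement := new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 256), big.NewInt(1))
	fmt.Printf("%x\n", math.PaddedBigBytes(twosComplement, 32))
}
```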
@ -1,4 +1,4 @@
|
||||
// Copyright 2015 The go-ethereum Authors
|
||||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
@ -322,12 +322,12 @@ func TestPack(t *testing.T) {
|
||||
} {
|
||||
typ, err := NewType(test.typ)
|
||||
if err != nil {
|
||||
t.Fatal("unexpected parse error:", err)
|
||||
t.Fatalf("%v failed. Unexpected parse error: %v", i, err)
|
||||
}
|
||||
|
||||
output, err := typ.pack(reflect.ValueOf(test.input))
|
||||
if err != nil {
|
||||
t.Fatal("unexpected pack error:", err)
|
||||
t.Fatalf("%v failed. Unexpected pack error: %v", i, err)
|
||||
}
|
||||
|
||||
if !bytes.Equal(output, test.output) {
|
||||
@ -435,7 +435,4 @@ func TestPackNumber(t *testing.T) {
|
||||
t.Errorf("test %d: pack mismatch: have %x, want %x", i, packed, tt.packed)
|
||||
}
|
||||
}
|
||||
if packed := packNum(reflect.ValueOf("string")); packed != nil {
|
||||
t.Errorf("expected 'string' to pack to nil. got %x instead", packed)
|
||||
}
|
||||
}
|
||||
|
@ -24,7 +24,7 @@ import (
|
||||
// indirect recursively dereferences the value until it either gets the value
|
||||
// or finds a big.Int
|
||||
func indirect(v reflect.Value) reflect.Value {
|
||||
if v.Kind() == reflect.Ptr && v.Elem().Type() != big_t {
|
||||
if v.Kind() == reflect.Ptr && v.Elem().Type() != derefbig_t {
|
||||
return indirect(v.Elem())
|
||||
}
|
||||
return v
|
||||
@ -73,15 +73,9 @@ func mustArrayToByteSlice(value reflect.Value) reflect.Value {
|
||||
func set(dst, src reflect.Value, output Argument) error {
|
||||
dstType := dst.Type()
|
||||
srcType := src.Type()
|
||||
|
||||
switch {
|
||||
case dstType.AssignableTo(src.Type()):
|
||||
case dstType.AssignableTo(srcType):
|
||||
dst.Set(src)
|
||||
case dstType.Kind() == reflect.Array && srcType.Kind() == reflect.Slice:
|
||||
if dst.Len() < output.Type.SliceSize {
|
||||
return fmt.Errorf("abi: cannot unmarshal src (len=%d) in to dst (len=%d)", output.Type.SliceSize, dst.Len())
|
||||
}
|
||||
reflect.Copy(dst, src)
|
||||
case dstType.Kind() == reflect.Interface:
|
||||
dst.Set(src)
|
||||
case dstType.Kind() == reflect.Ptr:
|
||||
@ -91,3 +85,28 @@ func set(dst, src reflect.Value, output Argument) error {
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// requireAssignable ensures that `dest` is a pointer and not an interface.
|
||||
func requireAssignable(dst, src reflect.Value) error {
|
||||
if dst.Kind() != reflect.Ptr && dst.Kind() != reflect.Interface {
|
||||
return fmt.Errorf("abi: cannot unmarshal %v into %v", src.Type(), dst.Type())
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// requireUnpackKind verifies preconditions for unpacking `args` into `kind`
|
||||
func requireUnpackKind(v reflect.Value, t reflect.Type, k reflect.Kind,
|
||||
args Arguments) error {
|
||||
|
||||
switch k {
|
||||
case reflect.Struct:
|
||||
case reflect.Slice, reflect.Array:
|
||||
if minLen := args.LengthNonIndexed(); v.Len() < minLen {
|
||||
return fmt.Errorf("abi: insufficient number of elements in the list/array for unpack, want %d, got %d",
|
||||
minLen, v.Len())
|
||||
}
|
||||
default:
|
||||
return fmt.Errorf("abi: cannot unmarshal tuple into %v", t)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
@ -21,14 +21,17 @@ import (
|
||||
"reflect"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Type enumerator
|
||||
const (
|
||||
IntTy byte = iota
|
||||
UintTy
|
||||
BoolTy
|
||||
StringTy
|
||||
SliceTy
|
||||
ArrayTy
|
||||
AddressTy
|
||||
FixedBytesTy
|
||||
BytesTy
|
||||
@ -39,9 +42,6 @@ const (
|
||||
|
||||
// Type is the reflection of the supported argument type
|
||||
type Type struct {
|
||||
IsSlice, IsArray bool
|
||||
SliceSize int
|
||||
|
||||
Elem *Type
|
||||
|
||||
Kind reflect.Kind
|
||||
@ -53,54 +53,57 @@ type Type struct {
|
||||
}
|
||||
|
||||
var (
// fullTypeRegex parses the abi types
//
// Types can be in the format of:
//
// Input = Type [ "[" [ Number ] "]" ] Name .
// Type = [ "u" ] "int" [ Number ] [ x ] [ Number ].
//
// Examples:
//
// string int uint fixed
// string32 int8 uint8 uint[]
// address int256 uint256 fixed128x128[2]
fullTypeRegex = regexp.MustCompile(`([a-zA-Z0-9]+)(\[([0-9]*)\])?`)
// typeRegex parses the abi sub types
typeRegex = regexp.MustCompile("([a-zA-Z]+)(([0-9]+)(x([0-9]+))?)?")
)

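To make the grammar above concrete, a small sketch showing how a regex of the fullTypeRegex shape splits a type string into its element type and optional array size (an empty size meaning a slice):

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Same pattern as fullTypeRegex above: group 1 = element type,
	// group 3 = optional array size ("" when the brackets are empty).
	re := regexp.MustCompile(`([a-zA-Z0-9]+)(\[([0-9]*)\])?`)
	for _, s := range []string{"uint256", "uint256[]", "uint256[2]"} {
		m := re.FindAllStringSubmatch(s, -1)[0]
		fmt.Printf("%-11s base=%q size=%q\n", s, m[1], m[3])
	}
}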
// NewType creates a new reflection type of abi type given in t.
func NewType(t string) (typ Type, err error) {
res := fullTypeRegex.FindAllStringSubmatch(t, -1)[0]
// check if type is slice and parse type.
switch {
case res[3] != "":
// err is ignored. Already checked for number through the regexp
typ.SliceSize, _ = strconv.Atoi(res[3])
typ.IsArray = true
case res[2] != "":
typ.IsSlice, typ.SliceSize = true, -1
case res[0] == "":
return Type{}, fmt.Errorf("abi: type parse error: %s", t)
// check that array brackets are equal if they exist
if strings.Count(t, "[") != strings.Count(t, "]") {
return Type{}, fmt.Errorf("invalid arg type in abi")
}
if typ.IsArray || typ.IsSlice {
sliceType, err := NewType(res[1])

typ.stringKind = t

// if there are brackets, get ready to go into slice/array mode and
// recursively create the type
if strings.Count(t, "[") != 0 {
i := strings.LastIndex(t, "[")
// recursively embed the type
embeddedType, err := NewType(t[:i])
if err != nil {
return Type{}, err
}
typ.Elem = &sliceType
typ.stringKind = sliceType.stringKind + t[len(res[1]):]
// Although we know that this is an array, we cannot return
// as we don't know the type of the element, however, if it
// is still an array, then don't determine the type.
if typ.Elem.IsArray || typ.Elem.IsSlice {
return typ, nil
}
}
// grab the last cell and create a type from there
sliced := t[i:]
// grab the slice size with regexp
re := regexp.MustCompile("[0-9]+")
intz := re.FindAllString(sliced, -1)

if len(intz) == 0 {
// is a slice
typ.T = SliceTy
typ.Kind = reflect.Slice
typ.Elem = &embeddedType
typ.Type = reflect.SliceOf(embeddedType.Type)
} else if len(intz) == 1 {
// is a array
typ.T = ArrayTy
typ.Kind = reflect.Array
typ.Elem = &embeddedType
typ.Size, err = strconv.Atoi(intz[0])
if err != nil {
return Type{}, fmt.Errorf("abi: error parsing variable size: %v", err)
}
typ.Type = reflect.ArrayOf(typ.Size, embeddedType.Type)
} else {
return Type{}, fmt.Errorf("invalid formatting of array type")
}
return typ, err
}
// parse the type and size of the abi-type.
parsedType := typeRegex.FindAllStringSubmatch(res[1], -1)[0]
parsedType := typeRegex.FindAllStringSubmatch(t, -1)[0]
// varSize is the size of the variable
var varSize int
if len(parsedType[3]) > 0 {
@@ -109,22 +112,15 @@ func NewType(t string) (typ Type, err error) {
if err != nil {
return Type{}, fmt.Errorf("abi: error parsing variable size: %v", err)
}
} else {
if parsedType[0] == "uint" || parsedType[0] == "int" {
// this should fail because it means that there's something wrong with
// the abi type (the compiler should always format it to the size...always)
return Type{}, fmt.Errorf("unsupported arg type: %s", t)
}
}
// varType is the parsed abi type
varType := parsedType[1]
// substitute canonical integer
if varSize == 0 && (varType == "int" || varType == "uint") {
varSize = 256
t += "256"
}

// only set stringKind if not array or slice, as for those,
// the correct string type has been set
if !(typ.IsArray || typ.IsSlice) {
typ.stringKind = t
}

switch varType {
switch varType := parsedType[1]; varType {
case "int":
typ.Kind, typ.Type = reflectIntKindAndType(false, varSize)
typ.Size = varSize
@@ -136,6 +132,7 @@ func NewType(t string) (typ Type, err error) {
case "bool":
typ.Kind = reflect.Bool
typ.T = BoolTy
typ.Type = reflect.TypeOf(bool(false))
case "address":
typ.Kind = reflect.Array
typ.Type = address_t
@@ -143,26 +140,24 @@ func NewType(t string) (typ Type, err error) {
typ.T = AddressTy
case "string":
typ.Kind = reflect.String
typ.Size = -1
typ.Type = reflect.TypeOf("")
typ.T = StringTy
case "bytes":
sliceType, _ := NewType("uint8")
typ.Elem = &sliceType
if varSize == 0 {
typ.IsSlice = true
typ.T = BytesTy
typ.SliceSize = -1
typ.Kind = reflect.Slice
typ.Type = reflect.SliceOf(reflect.TypeOf(byte(0)))
} else {
typ.IsArray = true
typ.T = FixedBytesTy
typ.SliceSize = varSize
typ.Kind = reflect.Array
typ.Size = varSize
typ.Type = reflect.ArrayOf(varSize, reflect.TypeOf(byte(0)))
}
case "function":
sliceType, _ := NewType("uint8")
typ.Elem = &sliceType
typ.IsArray = true
typ.Kind = reflect.Array
typ.T = FunctionTy
typ.SliceSize = 24
typ.Size = 24
typ.Type = reflect.ArrayOf(24, reflect.TypeOf(byte(0)))
default:
return Type{}, fmt.Errorf("unsupported arg type: %s", t)
}
@@ -183,7 +178,7 @@ func (t Type) pack(v reflect.Value) ([]byte, error) {
return nil, err
}

if (t.IsSlice || t.IsArray) && t.T != BytesTy && t.T != FixedBytesTy && t.T != FunctionTy {
if t.T == SliceTy || t.T == ArrayTy {
var packed []byte

for i := 0; i < v.Len(); i++ {
@@ -193,18 +188,17 @@ func (t Type) pack(v reflect.Value) ([]byte, error) {
}
packed = append(packed, val...)
}
if t.IsSlice {
if t.T == SliceTy {
return packBytesSlice(packed, v.Len()), nil
} else if t.IsArray {
} else if t.T == ArrayTy {
return packed, nil
}
}

return packElement(t, v), nil
}

// requireLengthPrefix returns whether the type requires any sort of length
// prefixing.
func (t Type) requiresLengthPrefix() bool {
return t.T != FixedBytesTy && (t.T == StringTy || t.T == BytesTy || t.IsSlice)
return t.T == StringTy || t.T == BytesTy || t.T == SliceTy
}

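The net effect of the requiresLengthPrefix change is that only string, bytes and slice types carry a length prefix, while fixed-size arrays and value types do not. A hedged sketch of that rule expressed over type strings; isDynamic is an illustrative helper, not part of the package.

package main

import (
	"fmt"
	"strings"
)

// isDynamic mirrors the rule from the hunk above at the level of type strings:
// string, bytes and slices ("T[]") are length-prefixed, everything else is not.
func isDynamic(abiType string) bool {
	return abiType == "string" || abiType == "bytes" || strings.HasSuffix(abiType, "[]")
}

func main() {
	for _, t := range []string{"uint256", "bytes32", "bytes", "uint8[3]", "uint8[]"} {
		fmt.Printf("%-10s dynamic=%v\n", t, isDynamic(t))
	}
}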
@ -21,6 +21,7 @@ import (
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/davecgh/go-spew/spew"
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
)
|
||||
|
||||
@ -34,51 +35,58 @@ func TestTypeRegexp(t *testing.T) {
|
||||
blob string
|
||||
kind Type
|
||||
}{
|
||||
{"bool", Type{Kind: reflect.Bool, T: BoolTy, stringKind: "bool"}},
|
||||
{"bool[]", Type{IsSlice: true, SliceSize: -1, Kind: reflect.Bool, T: BoolTy, Elem: &Type{Kind: reflect.Bool, T: BoolTy, stringKind: "bool"}, stringKind: "bool[]"}},
|
||||
{"bool[2]", Type{IsArray: true, SliceSize: 2, Kind: reflect.Bool, T: BoolTy, Elem: &Type{Kind: reflect.Bool, T: BoolTy, stringKind: "bool"}, stringKind: "bool[2]"}},
|
||||
{"bool", Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}},
|
||||
{"bool[]", Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]bool(nil)), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[]"}},
|
||||
{"bool[2]", Type{Size: 2, Kind: reflect.Array, T: ArrayTy, Type: reflect.TypeOf([2]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[2]"}},
|
||||
{"bool[2][]", Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([][2]bool{}), Elem: &Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[2]"}, stringKind: "bool[2][]"}},
|
||||
{"bool[][]", Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([][]bool{}), Elem: &Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[]"}, stringKind: "bool[][]"}},
|
||||
{"bool[][2]", Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2][]bool{}), Elem: &Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[]"}, stringKind: "bool[][2]"}},
|
||||
{"bool[2][2]", Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2][2]bool{}), Elem: &Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[2]"}, stringKind: "bool[2][2]"}},
|
||||
{"bool[2][][2]", Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2][][2]bool{}), Elem: &Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([][2]bool{}), Elem: &Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[2]"}, stringKind: "bool[2][]"}, stringKind: "bool[2][][2]"}},
|
||||
{"bool[2][2][2]", Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2][2][2]bool{}), Elem: &Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2][2]bool{}), Elem: &Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[2]"}, stringKind: "bool[2][2]"}, stringKind: "bool[2][2][2]"}},
|
||||
{"bool[][][]", Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([][][]bool{}), Elem: &Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([][]bool{}), Elem: &Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[]"}, stringKind: "bool[][]"}, stringKind: "bool[][][]"}},
|
||||
{"bool[][2][]", Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([][2][]bool{}), Elem: &Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2][]bool{}), Elem: &Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[]"}, stringKind: "bool[][2]"}, stringKind: "bool[][2][]"}},
|
||||
{"int8", Type{Kind: reflect.Int8, Type: int8_t, Size: 8, T: IntTy, stringKind: "int8"}},
|
||||
{"int16", Type{Kind: reflect.Int16, Type: int16_t, Size: 16, T: IntTy, stringKind: "int16"}},
|
||||
{"int32", Type{Kind: reflect.Int32, Type: int32_t, Size: 32, T: IntTy, stringKind: "int32"}},
|
||||
{"int64", Type{Kind: reflect.Int64, Type: int64_t, Size: 64, T: IntTy, stringKind: "int64"}},
|
||||
{"int256", Type{Kind: reflect.Ptr, Type: big_t, Size: 256, T: IntTy, stringKind: "int256"}},
|
||||
{"int8[]", Type{IsSlice: true, SliceSize: -1, Kind: reflect.Int8, Type: int8_t, Size: 8, T: IntTy, Elem: &Type{Kind: reflect.Int8, Type: int8_t, Size: 8, T: IntTy, stringKind: "int8"}, stringKind: "int8[]"}},
|
||||
{"int8[2]", Type{IsArray: true, SliceSize: 2, Kind: reflect.Int8, Type: int8_t, Size: 8, T: IntTy, Elem: &Type{Kind: reflect.Int8, Type: int8_t, Size: 8, T: IntTy, stringKind: "int8"}, stringKind: "int8[2]"}},
|
||||
{"int16[]", Type{IsSlice: true, SliceSize: -1, Kind: reflect.Int16, Type: int16_t, Size: 16, T: IntTy, Elem: &Type{Kind: reflect.Int16, Type: int16_t, Size: 16, T: IntTy, stringKind: "int16"}, stringKind: "int16[]"}},
|
||||
{"int16[2]", Type{IsArray: true, SliceSize: 2, Kind: reflect.Int16, Type: int16_t, Size: 16, T: IntTy, Elem: &Type{Kind: reflect.Int16, Type: int16_t, Size: 16, T: IntTy, stringKind: "int16"}, stringKind: "int16[2]"}},
|
||||
{"int32[]", Type{IsSlice: true, SliceSize: -1, Kind: reflect.Int32, Type: int32_t, Size: 32, T: IntTy, Elem: &Type{Kind: reflect.Int32, Type: int32_t, Size: 32, T: IntTy, stringKind: "int32"}, stringKind: "int32[]"}},
|
||||
{"int32[2]", Type{IsArray: true, SliceSize: 2, Kind: reflect.Int32, Type: int32_t, Size: 32, T: IntTy, Elem: &Type{Kind: reflect.Int32, Type: int32_t, Size: 32, T: IntTy, stringKind: "int32"}, stringKind: "int32[2]"}},
|
||||
{"int64[]", Type{IsSlice: true, SliceSize: -1, Kind: reflect.Int64, Type: int64_t, Size: 64, T: IntTy, Elem: &Type{Kind: reflect.Int64, Type: int64_t, Size: 64, T: IntTy, stringKind: "int64"}, stringKind: "int64[]"}},
|
||||
{"int64[2]", Type{IsArray: true, SliceSize: 2, Kind: reflect.Int64, Type: int64_t, Size: 64, T: IntTy, Elem: &Type{Kind: reflect.Int64, Type: int64_t, Size: 64, T: IntTy, stringKind: "int64"}, stringKind: "int64[2]"}},
|
||||
{"int256[]", Type{IsSlice: true, SliceSize: -1, Kind: reflect.Ptr, Type: big_t, Size: 256, T: IntTy, Elem: &Type{Kind: reflect.Ptr, Type: big_t, Size: 256, T: IntTy, stringKind: "int256"}, stringKind: "int256[]"}},
|
||||
{"int256[2]", Type{IsArray: true, SliceSize: 2, Kind: reflect.Ptr, Type: big_t, Size: 256, T: IntTy, Elem: &Type{Kind: reflect.Ptr, Type: big_t, Size: 256, T: IntTy, stringKind: "int256"}, stringKind: "int256[2]"}},
|
||||
{"int8[]", Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]int8{}), Elem: &Type{Kind: reflect.Int8, Type: int8_t, Size: 8, T: IntTy, stringKind: "int8"}, stringKind: "int8[]"}},
|
||||
{"int8[2]", Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]int8{}), Elem: &Type{Kind: reflect.Int8, Type: int8_t, Size: 8, T: IntTy, stringKind: "int8"}, stringKind: "int8[2]"}},
|
||||
{"int16[]", Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]int16{}), Elem: &Type{Kind: reflect.Int16, Type: int16_t, Size: 16, T: IntTy, stringKind: "int16"}, stringKind: "int16[]"}},
|
||||
{"int16[2]", Type{Size: 2, Kind: reflect.Array, T: ArrayTy, Type: reflect.TypeOf([2]int16{}), Elem: &Type{Kind: reflect.Int16, Type: int16_t, Size: 16, T: IntTy, stringKind: "int16"}, stringKind: "int16[2]"}},
|
||||
{"int32[]", Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]int32{}), Elem: &Type{Kind: reflect.Int32, Type: int32_t, Size: 32, T: IntTy, stringKind: "int32"}, stringKind: "int32[]"}},
|
||||
{"int32[2]", Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]int32{}), Elem: &Type{Kind: reflect.Int32, Type: int32_t, Size: 32, T: IntTy, stringKind: "int32"}, stringKind: "int32[2]"}},
|
||||
{"int64[]", Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]int64{}), Elem: &Type{Kind: reflect.Int64, Type: int64_t, Size: 64, T: IntTy, stringKind: "int64"}, stringKind: "int64[]"}},
|
||||
{"int64[2]", Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]int64{}), Elem: &Type{Kind: reflect.Int64, Type: int64_t, Size: 64, T: IntTy, stringKind: "int64"}, stringKind: "int64[2]"}},
|
||||
{"int256[]", Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]*big.Int{}), Elem: &Type{Kind: reflect.Ptr, Type: big_t, Size: 256, T: IntTy, stringKind: "int256"}, stringKind: "int256[]"}},
|
||||
{"int256[2]", Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]*big.Int{}), Elem: &Type{Kind: reflect.Ptr, Type: big_t, Size: 256, T: IntTy, stringKind: "int256"}, stringKind: "int256[2]"}},
|
||||
{"uint8", Type{Kind: reflect.Uint8, Type: uint8_t, Size: 8, T: UintTy, stringKind: "uint8"}},
|
||||
{"uint16", Type{Kind: reflect.Uint16, Type: uint16_t, Size: 16, T: UintTy, stringKind: "uint16"}},
|
||||
{"uint32", Type{Kind: reflect.Uint32, Type: uint32_t, Size: 32, T: UintTy, stringKind: "uint32"}},
|
||||
{"uint64", Type{Kind: reflect.Uint64, Type: uint64_t, Size: 64, T: UintTy, stringKind: "uint64"}},
|
||||
{"uint256", Type{Kind: reflect.Ptr, Type: big_t, Size: 256, T: UintTy, stringKind: "uint256"}},
|
||||
{"uint8[]", Type{IsSlice: true, SliceSize: -1, Kind: reflect.Uint8, Type: uint8_t, Size: 8, T: UintTy, Elem: &Type{Kind: reflect.Uint8, Type: uint8_t, Size: 8, T: UintTy, stringKind: "uint8"}, stringKind: "uint8[]"}},
|
||||
{"uint8[2]", Type{IsArray: true, SliceSize: 2, Kind: reflect.Uint8, Type: uint8_t, Size: 8, T: UintTy, Elem: &Type{Kind: reflect.Uint8, Type: uint8_t, Size: 8, T: UintTy, stringKind: "uint8"}, stringKind: "uint8[2]"}},
|
||||
{"uint16[]", Type{IsSlice: true, SliceSize: -1, Kind: reflect.Uint16, Type: uint16_t, Size: 16, T: UintTy, Elem: &Type{Kind: reflect.Uint16, Type: uint16_t, Size: 16, T: UintTy, stringKind: "uint16"}, stringKind: "uint16[]"}},
|
||||
{"uint16[2]", Type{IsArray: true, SliceSize: 2, Kind: reflect.Uint16, Type: uint16_t, Size: 16, T: UintTy, Elem: &Type{Kind: reflect.Uint16, Type: uint16_t, Size: 16, T: UintTy, stringKind: "uint16"}, stringKind: "uint16[2]"}},
|
||||
{"uint32[]", Type{IsSlice: true, SliceSize: -1, Kind: reflect.Uint32, Type: uint32_t, Size: 32, T: UintTy, Elem: &Type{Kind: reflect.Uint32, Type: uint32_t, Size: 32, T: UintTy, stringKind: "uint32"}, stringKind: "uint32[]"}},
|
||||
{"uint32[2]", Type{IsArray: true, SliceSize: 2, Kind: reflect.Uint32, Type: uint32_t, Size: 32, T: UintTy, Elem: &Type{Kind: reflect.Uint32, Type: uint32_t, Size: 32, T: UintTy, stringKind: "uint32"}, stringKind: "uint32[2]"}},
|
||||
{"uint64[]", Type{IsSlice: true, SliceSize: -1, Kind: reflect.Uint64, Type: uint64_t, Size: 64, T: UintTy, Elem: &Type{Kind: reflect.Uint64, Type: uint64_t, Size: 64, T: UintTy, stringKind: "uint64"}, stringKind: "uint64[]"}},
|
||||
{"uint64[2]", Type{IsArray: true, SliceSize: 2, Kind: reflect.Uint64, Type: uint64_t, Size: 64, T: UintTy, Elem: &Type{Kind: reflect.Uint64, Type: uint64_t, Size: 64, T: UintTy, stringKind: "uint64"}, stringKind: "uint64[2]"}},
|
||||
{"uint256[]", Type{IsSlice: true, SliceSize: -1, Kind: reflect.Ptr, Type: big_t, Size: 256, T: UintTy, Elem: &Type{Kind: reflect.Ptr, Type: big_t, Size: 256, T: UintTy, stringKind: "uint256"}, stringKind: "uint256[]"}},
|
||||
{"uint256[2]", Type{IsArray: true, SliceSize: 2, Kind: reflect.Ptr, Type: big_t, Size: 256, T: UintTy, Elem: &Type{Kind: reflect.Ptr, Type: big_t, Size: 256, T: UintTy, stringKind: "uint256"}, stringKind: "uint256[2]"}},
|
||||
{"bytes32", Type{IsArray: true, SliceSize: 32, Elem: &Type{Kind: reflect.Uint8, Type: uint8_t, Size: 8, T: UintTy, stringKind: "uint8"}, T: FixedBytesTy, stringKind: "bytes32"}},
|
||||
{"bytes[]", Type{IsSlice: true, SliceSize: -1, Elem: &Type{IsSlice: true, SliceSize: -1, Elem: &Type{Kind: reflect.Uint8, Type: uint8_t, Size: 8, T: UintTy, stringKind: "uint8"}, T: BytesTy, stringKind: "bytes"}, stringKind: "bytes[]"}},
|
||||
{"bytes[2]", Type{IsArray: true, SliceSize: 2, Elem: &Type{IsSlice: true, SliceSize: -1, Elem: &Type{Kind: reflect.Uint8, Type: uint8_t, Size: 8, T: UintTy, stringKind: "uint8"}, T: BytesTy, stringKind: "bytes"}, stringKind: "bytes[2]"}},
|
||||
{"bytes32[]", Type{IsSlice: true, SliceSize: -1, Elem: &Type{IsArray: true, SliceSize: 32, Elem: &Type{Kind: reflect.Uint8, Type: uint8_t, Size: 8, T: UintTy, stringKind: "uint8"}, T: FixedBytesTy, stringKind: "bytes32"}, stringKind: "bytes32[]"}},
|
||||
{"bytes32[2]", Type{IsArray: true, SliceSize: 2, Elem: &Type{IsArray: true, SliceSize: 32, Elem: &Type{Kind: reflect.Uint8, Type: uint8_t, Size: 8, T: UintTy, stringKind: "uint8"}, T: FixedBytesTy, stringKind: "bytes32"}, stringKind: "bytes32[2]"}},
|
||||
{"string", Type{Kind: reflect.String, Size: -1, T: StringTy, stringKind: "string"}},
|
||||
{"string[]", Type{IsSlice: true, SliceSize: -1, Kind: reflect.String, T: StringTy, Size: -1, Elem: &Type{Kind: reflect.String, T: StringTy, Size: -1, stringKind: "string"}, stringKind: "string[]"}},
|
||||
{"string[2]", Type{IsArray: true, SliceSize: 2, Kind: reflect.String, T: StringTy, Size: -1, Elem: &Type{Kind: reflect.String, T: StringTy, Size: -1, stringKind: "string"}, stringKind: "string[2]"}},
|
||||
{"uint8[]", Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]uint8{}), Elem: &Type{Kind: reflect.Uint8, Type: uint8_t, Size: 8, T: UintTy, stringKind: "uint8"}, stringKind: "uint8[]"}},
|
||||
{"uint8[2]", Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]uint8{}), Elem: &Type{Kind: reflect.Uint8, Type: uint8_t, Size: 8, T: UintTy, stringKind: "uint8"}, stringKind: "uint8[2]"}},
|
||||
{"uint16[]", Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([]uint16{}), Elem: &Type{Kind: reflect.Uint16, Type: uint16_t, Size: 16, T: UintTy, stringKind: "uint16"}, stringKind: "uint16[]"}},
|
||||
{"uint16[2]", Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]uint16{}), Elem: &Type{Kind: reflect.Uint16, Type: uint16_t, Size: 16, T: UintTy, stringKind: "uint16"}, stringKind: "uint16[2]"}},
|
||||
{"uint32[]", Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([]uint32{}), Elem: &Type{Kind: reflect.Uint32, Type: uint32_t, Size: 32, T: UintTy, stringKind: "uint32"}, stringKind: "uint32[]"}},
|
||||
{"uint32[2]", Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]uint32{}), Elem: &Type{Kind: reflect.Uint32, Type: uint32_t, Size: 32, T: UintTy, stringKind: "uint32"}, stringKind: "uint32[2]"}},
|
||||
{"uint64[]", Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([]uint64{}), Elem: &Type{Kind: reflect.Uint64, Type: uint64_t, Size: 64, T: UintTy, stringKind: "uint64"}, stringKind: "uint64[]"}},
|
||||
{"uint64[2]", Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]uint64{}), Elem: &Type{Kind: reflect.Uint64, Type: uint64_t, Size: 64, T: UintTy, stringKind: "uint64"}, stringKind: "uint64[2]"}},
|
||||
{"uint256[]", Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([]*big.Int{}), Elem: &Type{Kind: reflect.Ptr, Type: big_t, Size: 256, T: UintTy, stringKind: "uint256"}, stringKind: "uint256[]"}},
|
||||
{"uint256[2]", Type{Kind: reflect.Array, T: ArrayTy, Type: reflect.TypeOf([2]*big.Int{}), Size: 2, Elem: &Type{Kind: reflect.Ptr, Type: big_t, Size: 256, T: UintTy, stringKind: "uint256"}, stringKind: "uint256[2]"}},
|
||||
{"bytes32", Type{Kind: reflect.Array, T: FixedBytesTy, Size: 32, Type: reflect.TypeOf([32]byte{}), stringKind: "bytes32"}},
|
||||
{"bytes[]", Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([][]byte{}), Elem: &Type{Kind: reflect.Slice, Type: reflect.TypeOf([]byte{}), T: BytesTy, stringKind: "bytes"}, stringKind: "bytes[]"}},
|
||||
{"bytes[2]", Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2][]byte{}), Elem: &Type{T: BytesTy, Type: reflect.TypeOf([]byte{}), Kind: reflect.Slice, stringKind: "bytes"}, stringKind: "bytes[2]"}},
|
||||
{"bytes32[]", Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([][32]byte{}), Elem: &Type{Kind: reflect.Array, Type: reflect.TypeOf([32]byte{}), T: FixedBytesTy, Size: 32, stringKind: "bytes32"}, stringKind: "bytes32[]"}},
|
||||
{"bytes32[2]", Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2][32]byte{}), Elem: &Type{Kind: reflect.Array, T: FixedBytesTy, Size: 32, Type: reflect.TypeOf([32]byte{}), stringKind: "bytes32"}, stringKind: "bytes32[2]"}},
|
||||
{"string", Type{Kind: reflect.String, T: StringTy, Type: reflect.TypeOf(""), stringKind: "string"}},
|
||||
{"string[]", Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([]string{}), Elem: &Type{Kind: reflect.String, Type: reflect.TypeOf(""), T: StringTy, stringKind: "string"}, stringKind: "string[]"}},
|
||||
{"string[2]", Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]string{}), Elem: &Type{Kind: reflect.String, T: StringTy, Type: reflect.TypeOf(""), stringKind: "string"}, stringKind: "string[2]"}},
|
||||
{"address", Type{Kind: reflect.Array, Type: address_t, Size: 20, T: AddressTy, stringKind: "address"}},
|
||||
{"address[]", Type{IsSlice: true, SliceSize: -1, Kind: reflect.Array, Type: address_t, T: AddressTy, Size: 20, Elem: &Type{Kind: reflect.Array, Type: address_t, Size: 20, T: AddressTy, stringKind: "address"}, stringKind: "address[]"}},
|
||||
{"address[2]", Type{IsArray: true, SliceSize: 2, Kind: reflect.Array, Type: address_t, T: AddressTy, Size: 20, Elem: &Type{Kind: reflect.Array, Type: address_t, Size: 20, T: AddressTy, stringKind: "address"}, stringKind: "address[2]"}},
|
||||
|
||||
{"address[]", Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([]common.Address{}), Elem: &Type{Kind: reflect.Array, Type: address_t, Size: 20, T: AddressTy, stringKind: "address"}, stringKind: "address[]"}},
|
||||
{"address[2]", Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]common.Address{}), Elem: &Type{Kind: reflect.Array, Type: address_t, Size: 20, T: AddressTy, stringKind: "address"}, stringKind: "address[2]"}},
|
||||
// TODO when fixed types are implemented properly
|
||||
// {"fixed", Type{}},
|
||||
// {"fixed128x128", Type{}},
|
||||
@@ -87,13 +95,14 @@ func TestTypeRegexp(t *testing.T) {
// {"fixed128x128[]", Type{}},
// {"fixed128x128[2]", Type{}},
}
for i, tt := range tests {

for _, tt := range tests {
typ, err := NewType(tt.blob)
if err != nil {
t.Errorf("type %d: failed to parse type string: %v", i, err)
t.Errorf("type %q: failed to parse type string: %v", tt.blob, err)
}
if !reflect.DeepEqual(typ, tt.kind) {
t.Errorf("type %d: parsed type mismatch:\n have %+v\n want %+v", i, typeWithoutStringer(typ), typeWithoutStringer(tt.kind))
t.Errorf("type %q: parsed type mismatch:\nGOT %s\nWANT %s ", tt.blob, spew.Sdump(typeWithoutStringer(typ)), spew.Sdump(typeWithoutStringer(tt.kind)))
}
}
}
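A short example of exercising NewType the way the test above does, assuming the one-argument signature from this diff; for a nested type the outer Type is a slice whose Elem chain carries the array and base types. A sketch written as if it lived in the abi package itself.

package abi

import "fmt"

func ExampleNewType() {
	// "uint16[2][]" parses to a slice of [2]uint16 per the table above.
	typ, _ := NewType("uint16[2][]")
	fmt.Println(typ.T == SliceTy, typ.Elem.T == ArrayTy, typ.Elem.Elem.Size)
	// Output: true true 16
}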
@ -104,15 +113,90 @@ func TestTypeCheck(t *testing.T) {
|
||||
input interface{}
|
||||
err string
|
||||
}{
|
||||
{"uint", big.NewInt(1), ""},
|
||||
{"int", big.NewInt(1), ""},
|
||||
{"uint30", big.NewInt(1), ""},
|
||||
{"uint", big.NewInt(1), "unsupported arg type: uint"},
|
||||
{"int", big.NewInt(1), "unsupported arg type: int"},
|
||||
{"uint256", big.NewInt(1), ""},
|
||||
{"uint256[][3][]", [][3][]*big.Int{{{}}}, ""},
|
||||
{"uint256[][][3]", [3][][]*big.Int{{{}}}, ""},
|
||||
{"uint256[3][][]", [][][3]*big.Int{{{}}}, ""},
|
||||
{"uint256[3][3][3]", [3][3][3]*big.Int{{{}}}, ""},
|
||||
{"uint8[][]", [][]uint8{}, ""},
|
||||
{"int256", big.NewInt(1), ""},
|
||||
{"uint8", uint8(1), ""},
|
||||
{"uint16", uint16(1), ""},
|
||||
{"uint32", uint32(1), ""},
|
||||
{"uint64", uint64(1), ""},
|
||||
{"int8", int8(1), ""},
|
||||
{"int16", int16(1), ""},
|
||||
{"int32", int32(1), ""},
|
||||
{"int64", int64(1), ""},
|
||||
{"uint24", big.NewInt(1), ""},
|
||||
{"uint40", big.NewInt(1), ""},
|
||||
{"uint48", big.NewInt(1), ""},
|
||||
{"uint56", big.NewInt(1), ""},
|
||||
{"uint72", big.NewInt(1), ""},
|
||||
{"uint80", big.NewInt(1), ""},
|
||||
{"uint88", big.NewInt(1), ""},
|
||||
{"uint96", big.NewInt(1), ""},
|
||||
{"uint104", big.NewInt(1), ""},
|
||||
{"uint112", big.NewInt(1), ""},
|
||||
{"uint120", big.NewInt(1), ""},
|
||||
{"uint128", big.NewInt(1), ""},
|
||||
{"uint136", big.NewInt(1), ""},
|
||||
{"uint144", big.NewInt(1), ""},
|
||||
{"uint152", big.NewInt(1), ""},
|
||||
{"uint160", big.NewInt(1), ""},
|
||||
{"uint168", big.NewInt(1), ""},
|
||||
{"uint176", big.NewInt(1), ""},
|
||||
{"uint184", big.NewInt(1), ""},
|
||||
{"uint192", big.NewInt(1), ""},
|
||||
{"uint200", big.NewInt(1), ""},
|
||||
{"uint208", big.NewInt(1), ""},
|
||||
{"uint216", big.NewInt(1), ""},
|
||||
{"uint224", big.NewInt(1), ""},
|
||||
{"uint232", big.NewInt(1), ""},
|
||||
{"uint240", big.NewInt(1), ""},
|
||||
{"uint248", big.NewInt(1), ""},
|
||||
{"int24", big.NewInt(1), ""},
|
||||
{"int40", big.NewInt(1), ""},
|
||||
{"int48", big.NewInt(1), ""},
|
||||
{"int56", big.NewInt(1), ""},
|
||||
{"int72", big.NewInt(1), ""},
|
||||
{"int80", big.NewInt(1), ""},
|
||||
{"int88", big.NewInt(1), ""},
|
||||
{"int96", big.NewInt(1), ""},
|
||||
{"int104", big.NewInt(1), ""},
|
||||
{"int112", big.NewInt(1), ""},
|
||||
{"int120", big.NewInt(1), ""},
|
||||
{"int128", big.NewInt(1), ""},
|
||||
{"int136", big.NewInt(1), ""},
|
||||
{"int144", big.NewInt(1), ""},
|
||||
{"int152", big.NewInt(1), ""},
|
||||
{"int160", big.NewInt(1), ""},
|
||||
{"int168", big.NewInt(1), ""},
|
||||
{"int176", big.NewInt(1), ""},
|
||||
{"int184", big.NewInt(1), ""},
|
||||
{"int192", big.NewInt(1), ""},
|
||||
{"int200", big.NewInt(1), ""},
|
||||
{"int208", big.NewInt(1), ""},
|
||||
{"int216", big.NewInt(1), ""},
|
||||
{"int224", big.NewInt(1), ""},
|
||||
{"int232", big.NewInt(1), ""},
|
||||
{"int240", big.NewInt(1), ""},
|
||||
{"int248", big.NewInt(1), ""},
|
||||
{"uint30", uint8(1), "abi: cannot use uint8 as type ptr as argument"},
|
||||
{"uint8", uint16(1), "abi: cannot use uint16 as type uint8 as argument"},
|
||||
{"uint8", uint32(1), "abi: cannot use uint32 as type uint8 as argument"},
|
||||
{"uint8", uint64(1), "abi: cannot use uint64 as type uint8 as argument"},
|
||||
{"uint8", int8(1), "abi: cannot use int8 as type uint8 as argument"},
|
||||
{"uint8", int16(1), "abi: cannot use int16 as type uint8 as argument"},
|
||||
{"uint8", int32(1), "abi: cannot use int32 as type uint8 as argument"},
|
||||
{"uint8", int64(1), "abi: cannot use int64 as type uint8 as argument"},
|
||||
{"uint16", uint16(1), ""},
|
||||
{"uint16", uint8(1), "abi: cannot use uint8 as type uint16 as argument"},
|
||||
{"uint16[]", []uint16{1, 2, 3}, ""},
|
||||
{"uint16[]", [3]uint16{1, 2, 3}, ""},
|
||||
{"uint16[]", []uint32{1, 2, 3}, "abi: cannot use []uint32 as type []uint16 as argument"},
|
||||
{"uint16[]", []uint32{1, 2, 3}, "abi: cannot use []uint32 as type [0]uint16 as argument"},
|
||||
{"uint16[3]", [3]uint32{1, 2, 3}, "abi: cannot use [3]uint32 as type [3]uint16 as argument"},
|
||||
{"uint16[3]", [4]uint16{1, 2, 3}, "abi: cannot use [4]uint16 as type [3]uint16 as argument"},
|
||||
{"uint16[3]", []uint16{1, 2, 3}, ""},
|
||||
@ -122,20 +206,61 @@ func TestTypeCheck(t *testing.T) {
|
||||
{"address[1]", [1]common.Address{{1}}, ""},
|
||||
{"address[2]", [1]common.Address{{1}}, "abi: cannot use [1]array as type [2]array as argument"},
|
||||
{"bytes32", [32]byte{}, ""},
|
||||
{"bytes31", [31]byte{}, ""},
|
||||
{"bytes30", [30]byte{}, ""},
|
||||
{"bytes29", [29]byte{}, ""},
|
||||
{"bytes28", [28]byte{}, ""},
|
||||
{"bytes27", [27]byte{}, ""},
|
||||
{"bytes26", [26]byte{}, ""},
|
||||
{"bytes25", [25]byte{}, ""},
|
||||
{"bytes24", [24]byte{}, ""},
|
||||
{"bytes23", [23]byte{}, ""},
|
||||
{"bytes22", [22]byte{}, ""},
|
||||
{"bytes21", [21]byte{}, ""},
|
||||
{"bytes20", [20]byte{}, ""},
|
||||
{"bytes19", [19]byte{}, ""},
|
||||
{"bytes18", [18]byte{}, ""},
|
||||
{"bytes17", [17]byte{}, ""},
|
||||
{"bytes16", [16]byte{}, ""},
|
||||
{"bytes15", [15]byte{}, ""},
|
||||
{"bytes14", [14]byte{}, ""},
|
||||
{"bytes13", [13]byte{}, ""},
|
||||
{"bytes12", [12]byte{}, ""},
|
||||
{"bytes11", [11]byte{}, ""},
|
||||
{"bytes10", [10]byte{}, ""},
|
||||
{"bytes9", [9]byte{}, ""},
|
||||
{"bytes8", [8]byte{}, ""},
|
||||
{"bytes7", [7]byte{}, ""},
|
||||
{"bytes6", [6]byte{}, ""},
|
||||
{"bytes5", [5]byte{}, ""},
|
||||
{"bytes4", [4]byte{}, ""},
|
||||
{"bytes3", [3]byte{}, ""},
|
||||
{"bytes2", [2]byte{}, ""},
|
||||
{"bytes1", [1]byte{}, ""},
|
||||
{"bytes32", [33]byte{}, "abi: cannot use [33]uint8 as type [32]uint8 as argument"},
|
||||
{"bytes32", common.Hash{1}, ""},
|
||||
{"bytes31", [31]byte{}, ""},
|
||||
{"bytes31", common.Hash{1}, "abi: cannot use common.Hash as type [31]uint8 as argument"},
|
||||
{"bytes31", [32]byte{}, "abi: cannot use [32]uint8 as type [31]uint8 as argument"},
|
||||
{"bytes", []byte{0, 1}, ""},
|
||||
{"bytes", [2]byte{0, 1}, ""},
|
||||
{"bytes", common.Hash{1}, ""},
|
||||
{"bytes", [2]byte{0, 1}, "abi: cannot use array as type slice as argument"},
|
||||
{"bytes", common.Hash{1}, "abi: cannot use array as type slice as argument"},
|
||||
{"string", "hello world", ""},
|
||||
{"string", string(""), ""},
|
||||
{"string", []byte{}, "abi: cannot use slice as type string as argument"},
|
||||
{"bytes32[]", [][32]byte{{}}, ""},
|
||||
{"function", [24]byte{}, ""},
|
||||
{"bytes20", common.Address{}, ""},
|
||||
{"address", [20]byte{}, ""},
|
||||
{"address", common.Address{}, ""},
|
||||
} {
|
||||
typ, err := NewType(test.typ)
|
||||
if err != nil {
|
||||
if err != nil && len(test.err) == 0 {
|
||||
t.Fatal("unexpected parse error:", err)
|
||||
} else if err != nil && len(test.err) != 0 {
|
||||
if err.Error() != test.err {
|
||||
t.Errorf("%d failed. Expected err: '%v' got err: '%v'", i, test.err, err)
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
err = typeCheck(typ, reflect.ValueOf(test.input))
|
||||
|
@ -1,4 +1,4 @@
|
||||
// Copyright 2015 The go-ethereum Authors
|
||||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
@ -25,122 +25,11 @@ import (
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
)
|
||||
|
||||
// toGoSliceType parses the input and casts it to the proper slice defined by the ABI
|
||||
// argument in T.
|
||||
func toGoSlice(i int, t Argument, output []byte) (interface{}, error) {
|
||||
index := i * 32
|
||||
// The slice must, at very least be large enough for the index+32 which is exactly the size required
|
||||
// for the [offset in output, size of offset].
|
||||
if index+32 > len(output) {
|
||||
return nil, fmt.Errorf("abi: cannot marshal in to go slice: insufficient size output %d require %d", len(output), index+32)
|
||||
}
|
||||
elem := t.Type.Elem
|
||||
|
||||
// first we need to create a slice of the type
|
||||
var refSlice reflect.Value
|
||||
switch elem.T {
|
||||
case IntTy, UintTy, BoolTy:
|
||||
// create a new reference slice matching the element type
|
||||
switch t.Type.Kind {
|
||||
case reflect.Bool:
|
||||
refSlice = reflect.ValueOf([]bool(nil))
|
||||
case reflect.Uint8:
|
||||
refSlice = reflect.ValueOf([]uint8(nil))
|
||||
case reflect.Uint16:
|
||||
refSlice = reflect.ValueOf([]uint16(nil))
|
||||
case reflect.Uint32:
|
||||
refSlice = reflect.ValueOf([]uint32(nil))
|
||||
case reflect.Uint64:
|
||||
refSlice = reflect.ValueOf([]uint64(nil))
|
||||
case reflect.Int8:
|
||||
refSlice = reflect.ValueOf([]int8(nil))
|
||||
case reflect.Int16:
|
||||
refSlice = reflect.ValueOf([]int16(nil))
|
||||
case reflect.Int32:
|
||||
refSlice = reflect.ValueOf([]int32(nil))
|
||||
case reflect.Int64:
|
||||
refSlice = reflect.ValueOf([]int64(nil))
|
||||
default:
|
||||
refSlice = reflect.ValueOf([]*big.Int(nil))
|
||||
}
|
||||
case AddressTy: // address must be of slice Address
|
||||
refSlice = reflect.ValueOf([]common.Address(nil))
|
||||
case HashTy: // hash must be of slice hash
|
||||
refSlice = reflect.ValueOf([]common.Hash(nil))
|
||||
case FixedBytesTy:
|
||||
refSlice = reflect.ValueOf([][]byte(nil))
|
||||
default: // no other types are supported
|
||||
return nil, fmt.Errorf("abi: unsupported slice type %v", elem.T)
|
||||
}
|
||||
|
||||
var slice []byte
|
||||
var size int
|
||||
var offset int
|
||||
if t.Type.IsSlice {
|
||||
// get the offset which determines the start of this array ...
|
||||
offset = int(binary.BigEndian.Uint64(output[index+24 : index+32]))
|
||||
if offset+32 > len(output) {
|
||||
return nil, fmt.Errorf("abi: cannot marshal in to go slice: offset %d would go over slice boundary (len=%d)", len(output), offset+32)
|
||||
}
|
||||
|
||||
slice = output[offset:]
|
||||
// ... starting with the size of the array in elements ...
|
||||
size = int(binary.BigEndian.Uint64(slice[24:32]))
|
||||
slice = slice[32:]
|
||||
// ... and make sure that we've at the very least the amount of bytes
|
||||
// available in the buffer.
|
||||
if size*32 > len(slice) {
|
||||
return nil, fmt.Errorf("abi: cannot marshal in to go slice: insufficient size output %d require %d", len(output), offset+32+size*32)
|
||||
}
|
||||
|
||||
// reslice to match the required size
|
||||
slice = slice[:size*32]
|
||||
} else if t.Type.IsArray {
|
||||
//get the number of elements in the array
|
||||
size = t.Type.SliceSize
|
||||
|
||||
//check to make sure array size matches up
|
||||
if index+32*size > len(output) {
|
||||
return nil, fmt.Errorf("abi: cannot marshal in to go array: offset %d would go over slice boundary (len=%d)", len(output), index+32*size)
|
||||
}
|
||||
//slice is there for a fixed amount of times
|
||||
slice = output[index : index+size*32]
|
||||
}
|
||||
|
||||
for i := 0; i < size; i++ {
|
||||
var (
|
||||
inter interface{} // interface type
|
||||
returnOutput = slice[i*32 : i*32+32] // the return output
|
||||
err error
|
||||
)
|
||||
// set inter to the correct type (cast)
|
||||
switch elem.T {
|
||||
case IntTy, UintTy:
|
||||
inter = readInteger(t.Type.Kind, returnOutput)
|
||||
case BoolTy:
|
||||
inter, err = readBool(returnOutput)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
case AddressTy:
|
||||
inter = common.BytesToAddress(returnOutput)
|
||||
case HashTy:
|
||||
inter = common.BytesToHash(returnOutput)
|
||||
case FixedBytesTy:
|
||||
inter = returnOutput
|
||||
}
|
||||
// append the item to our reflect slice
|
||||
refSlice = reflect.Append(refSlice, reflect.ValueOf(inter))
|
||||
}
|
||||
|
||||
// return the interface
|
||||
return refSlice.Interface(), nil
|
||||
}
|
||||
|
||||
// reads the integer based on its kind
|
||||
func readInteger(kind reflect.Kind, b []byte) interface{} {
|
||||
switch kind {
|
||||
case reflect.Uint8:
|
||||
return uint8(b[len(b)-1])
|
||||
return b[len(b)-1]
|
||||
case reflect.Uint16:
|
||||
return binary.BigEndian.Uint16(b[len(b)-2:])
|
||||
case reflect.Uint32:
|
||||
@ -160,13 +49,10 @@ func readInteger(kind reflect.Kind, b []byte) interface{} {
|
||||
}
|
||||
}
|
||||
|
||||
// reads a bool
func readBool(word []byte) (bool, error) {
if len(word) != 32 {
return false, fmt.Errorf("abi: fatal error: incorrect word length")
}

for i, b := range word {
if b != 0 && i != 31 {
for _, b := range word[:31] {
if b != 0 {
return false, errBadBool
}
}
@@ -178,58 +64,133 @@ func readBool(word []byte) (bool, error) {
default:
return false, errBadBool
}
}

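A tiny illustration of the guard above: every byte of the 32-byte word except the last must be zero, and the last must be 0 or 1. validBoolWord is an illustrative helper, not the package's readBool.

package main

import "fmt"

// validBoolWord applies the same rule readBool enforces in the hunk above.
func validBoolWord(word [32]byte) bool {
	for _, b := range word[:31] {
		if b != 0 {
			return false
		}
	}
	return word[31] <= 1
}

func main() {
	var tru, garbage [32]byte
	tru[31] = 1
	garbage[0] = 1 // nonzero high byte would trip errBadBool
	fmt.Println(validBoolWord(tru), validBoolWord(garbage)) // true false
}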
// A function type is simply the address with the function selection signature at the end.
|
||||
// This enforces that standard by always presenting it as a 24-array (address + sig = 24 bytes)
|
||||
func readFunctionType(t Type, word []byte) (funcTy [24]byte, err error) {
|
||||
if t.T != FunctionTy {
|
||||
return [24]byte{}, fmt.Errorf("abi: invalid type in call to make function type byte array")
|
||||
}
|
||||
if garbage := binary.BigEndian.Uint64(word[24:32]); garbage != 0 {
|
||||
err = fmt.Errorf("abi: got improperly encoded function type, got %v", word)
|
||||
} else {
|
||||
copy(funcTy[:], word[0:24])
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// through reflection, creates a fixed array to be read from
func readFixedBytes(t Type, word []byte) (interface{}, error) {
if t.T != FixedBytesTy {
return nil, fmt.Errorf("abi: invalid type in call to make fixed byte array")
}
// convert
array := reflect.New(t.Type).Elem()

reflect.Copy(array, reflect.ValueOf(word[0:t.Size]))
return array.Interface(), nil

}

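The reflection trick readFixedBytes relies on can be seen in isolation: allocate a fresh fixed-size array via reflect.New on the array type, then reflect.Copy the word into it. A standalone sketch:

package main

import (
	"fmt"
	"reflect"
)

func main() {
	word := []byte{0xde, 0xad, 0xbe, 0xef}
	// Build the [4]byte type, allocate an addressable value of it, copy in.
	arrayType := reflect.ArrayOf(4, reflect.TypeOf(byte(0)))
	array := reflect.New(arrayType).Elem()
	reflect.Copy(array, reflect.ValueOf(word))
	fmt.Printf("%x\n", array.Interface()) // deadbeef
}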
// toGoType parses the input and casts it to the proper type defined by the ABI
|
||||
// argument in T.
|
||||
func toGoType(i int, t Argument, output []byte) (interface{}, error) {
|
||||
// we need to treat slices differently
|
||||
if (t.Type.IsSlice || t.Type.IsArray) && t.Type.T != BytesTy && t.Type.T != StringTy && t.Type.T != FixedBytesTy && t.Type.T != FunctionTy {
|
||||
return toGoSlice(i, t, output)
|
||||
// iteratively unpack elements
|
||||
func forEachUnpack(t Type, output []byte, start, size int) (interface{}, error) {
|
||||
if start+32*size > len(output) {
|
||||
return nil, fmt.Errorf("abi: cannot marshal in to go array: offset %d would go over slice boundary (len=%d)", len(output), start+32*size)
|
||||
}
|
||||
|
||||
index := i * 32
|
||||
// this value will become our slice or our array, depending on the type
|
||||
var refSlice reflect.Value
|
||||
slice := output[start : start+size*32]
|
||||
|
||||
if t.T == SliceTy {
|
||||
// declare our slice
|
||||
refSlice = reflect.MakeSlice(t.Type, size, size)
|
||||
} else if t.T == ArrayTy {
|
||||
// declare our array
|
||||
refSlice = reflect.New(t.Type).Elem()
|
||||
} else {
|
||||
return nil, fmt.Errorf("abi: invalid type in array/slice unpacking stage")
|
||||
}
|
||||
|
||||
for i, j := start, 0; j*32 < len(slice); i, j = i+32, j+1 {
|
||||
// this corrects the arrangement so that we get all the underlying array values
|
||||
if t.Elem.T == ArrayTy && j != 0 {
|
||||
i = start + t.Elem.Size*32*j
|
||||
}
|
||||
inter, err := toGoType(i, *t.Elem, output)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// append the item to our reflect slice
|
||||
refSlice.Index(j).Set(reflect.ValueOf(inter))
|
||||
}
|
||||
|
||||
// return the interface
|
||||
return refSlice.Interface(), nil
|
||||
}
|
||||
|
||||
// toGoType parses the output bytes and recursively assigns the value of these bytes
|
||||
// into a go type with accordance with the ABI spec.
|
||||
func toGoType(index int, t Type, output []byte) (interface{}, error) {
|
||||
if index+32 > len(output) {
|
||||
return nil, fmt.Errorf("abi: cannot marshal in to go type: length insufficient %d require %d", len(output), index+32)
|
||||
}
|
||||
|
||||
// Parse the given index output and check whether we need to read
|
||||
// a different offset and length based on the type (i.e. string, bytes)
|
||||
var returnOutput []byte
|
||||
switch t.Type.T {
|
||||
case StringTy, BytesTy: // variable arrays are written at the end of the return bytes
|
||||
// parse offset from which we should start reading
|
||||
offset := int(binary.BigEndian.Uint64(output[index+24 : index+32]))
|
||||
if offset+32 > len(output) {
|
||||
return nil, fmt.Errorf("abi: cannot marshal in to go type: length insufficient %d require %d", len(output), offset+32)
|
||||
}
|
||||
// parse the size up until we should be reading
|
||||
size := int(binary.BigEndian.Uint64(output[offset+24 : offset+32]))
|
||||
if offset+32+size > len(output) {
|
||||
return nil, fmt.Errorf("abi: cannot marshal in to go type: length insufficient %d require %d", len(output), offset+32+size)
|
||||
}
|
||||
var (
|
||||
returnOutput []byte
|
||||
begin, end int
|
||||
err error
|
||||
)
|
||||
|
||||
// get the bytes for this return value
|
||||
returnOutput = output[offset+32 : offset+32+size]
|
||||
default:
|
||||
// if we require a length prefix, find the beginning word and size returned.
|
||||
if t.requiresLengthPrefix() {
|
||||
begin, end, err = lengthPrefixPointsTo(index, output)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
} else {
|
||||
returnOutput = output[index : index+32]
|
||||
}
|
||||
|
||||
// convert the bytes to whatever is specified by the ABI.
|
||||
switch t.Type.T {
|
||||
switch t.T {
|
||||
case SliceTy:
|
||||
return forEachUnpack(t, output, begin, end)
|
||||
case ArrayTy:
|
||||
return forEachUnpack(t, output, index, t.Size)
|
||||
case StringTy: // variable arrays are written at the end of the return bytes
|
||||
return string(output[begin : begin+end]), nil
|
||||
case IntTy, UintTy:
|
||||
return readInteger(t.Type.Kind, returnOutput), nil
|
||||
return readInteger(t.Kind, returnOutput), nil
|
||||
case BoolTy:
|
||||
return readBool(returnOutput)
|
||||
case AddressTy:
|
||||
return common.BytesToAddress(returnOutput), nil
|
||||
case HashTy:
|
||||
return common.BytesToHash(returnOutput), nil
|
||||
case BytesTy, FixedBytesTy, FunctionTy:
|
||||
return returnOutput, nil
|
||||
case StringTy:
|
||||
return string(returnOutput), nil
|
||||
case BytesTy:
|
||||
return output[begin : begin+end], nil
|
||||
case FixedBytesTy:
|
||||
return readFixedBytes(t, returnOutput)
|
||||
case FunctionTy:
|
||||
return readFunctionType(t, returnOutput)
|
||||
default:
|
||||
return nil, fmt.Errorf("abi: unknown type %v", t.T)
|
||||
}
|
||||
return nil, fmt.Errorf("abi: unknown type %v", t.Type.T)
|
||||
}
|
||||
|
||||
// interprets a 32 byte slice as an offset and then determines which indice to look to decode the type.
func lengthPrefixPointsTo(index int, output []byte) (start int, length int, err error) {
offset := int(binary.BigEndian.Uint64(output[index+24 : index+32]))
if offset+32 > len(output) {
return 0, 0, fmt.Errorf("abi: cannot marshal in to go slice: offset %d would go over slice boundary (len=%d)", len(output), offset+32)
}
length = int(binary.BigEndian.Uint64(output[offset+24 : offset+32]))
if offset+32+length > len(output) {
return 0, 0, fmt.Errorf("abi: cannot marshal in to go type: length insufficient %d require %d", len(output), offset+32+length)
}
start = offset + 32

//fmt.Printf("LENGTH PREFIX INFO: \nsize: %v\noffset: %v\nstart: %v\n", length, offset, start)
return
}

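A hand-rolled walk through the same offset/length dance, assuming a single dynamic bytes value encoding "abc": the head word holds an offset, the word at that offset holds the length, and the payload starts 32 bytes later.

package main

import (
	"encoding/binary"
	"encoding/hex"
	"fmt"
)

func main() {
	enc, _ := hex.DecodeString(
		"0000000000000000000000000000000000000000000000000000000000000020" + // offset = 0x20
			"0000000000000000000000000000000000000000000000000000000000000003" + // length = 3
			"6162630000000000000000000000000000000000000000000000000000000000") // "abc" padded
	// Same arithmetic as lengthPrefixPointsTo above.
	offset := int(binary.BigEndian.Uint64(enc[24:32]))
	length := int(binary.BigEndian.Uint64(enc[offset+24 : offset+32]))
	start := offset + 32
	fmt.Printf("offset=%d length=%d payload=%q\n", offset, length, enc[start:start+length])
}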
@ -1,4 +1,4 @@
|
||||
// Copyright 2015 The go-ethereum Authors
|
||||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
@ -18,417 +18,420 @@ package abi
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"math/big"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestSimpleMethodUnpack(t *testing.T) {
|
||||
for i, test := range []struct {
|
||||
def string // definition of the **output** ABI params
|
||||
marshalledOutput []byte // evm return data
|
||||
expectedOut interface{} // the expected output
|
||||
outVar string // the output variable (e.g. uint32, *big.Int, etc)
|
||||
err string // empty or error if expected
|
||||
}{
|
||||
{
|
||||
`[ { "type": "bool" } ]`,
|
||||
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001"),
|
||||
bool(true),
|
||||
"bool",
|
||||
"",
|
||||
},
|
||||
{
|
||||
`[ { "type": "uint32" } ]`,
|
||||
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001"),
|
||||
uint32(1),
|
||||
"uint32",
|
||||
"",
|
||||
},
|
||||
{
|
||||
`[ { "type": "uint32" } ]`,
|
||||
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001"),
|
||||
nil,
|
||||
"uint16",
|
||||
"abi: cannot unmarshal uint32 in to uint16",
|
||||
},
|
||||
{
|
||||
`[ { "type": "uint17" } ]`,
|
||||
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001"),
|
||||
nil,
|
||||
"uint16",
|
||||
"abi: cannot unmarshal *big.Int in to uint16",
|
||||
},
|
||||
{
|
||||
`[ { "type": "uint17" } ]`,
|
||||
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001"),
|
||||
big.NewInt(1),
|
||||
"*big.Int",
|
||||
"",
|
||||
},
|
||||
type unpackTest struct {
|
||||
def string // ABI definition JSON
|
||||
enc string // evm return data
|
||||
want interface{} // the expected output
|
||||
err string // empty or error if expected
|
||||
}
|
||||
|
||||
{
|
||||
`[ { "type": "int32" } ]`,
|
||||
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001"),
|
||||
int32(1),
|
||||
"int32",
|
||||
"",
|
||||
},
|
||||
{
|
||||
`[ { "type": "int32" } ]`,
|
||||
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001"),
|
||||
nil,
|
||||
"int16",
|
||||
"abi: cannot unmarshal int32 in to int16",
|
||||
},
|
||||
{
|
||||
`[ { "type": "int17" } ]`,
|
||||
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001"),
|
||||
nil,
|
||||
"int16",
|
||||
"abi: cannot unmarshal *big.Int in to int16",
|
||||
},
|
||||
{
|
||||
`[ { "type": "int17" } ]`,
|
||||
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001"),
|
||||
big.NewInt(1),
|
||||
"*big.Int",
|
||||
"",
|
||||
},
|
||||
func (test unpackTest) checkError(err error) error {
|
||||
if err != nil {
|
||||
if len(test.err) == 0 {
|
||||
return fmt.Errorf("expected no err but got: %v", err)
|
||||
} else if err.Error() != test.err {
|
||||
return fmt.Errorf("expected err: '%v' got err: %q", test.err, err)
|
||||
}
|
||||
} else if len(test.err) > 0 {
|
||||
return fmt.Errorf("expected err: %v but got none", test.err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
{
|
||||
`[ { "type": "address" } ]`,
|
||||
common.Hex2Bytes("0000000000000000000000000100000000000000000000000000000000000000"),
|
||||
common.Address{1},
|
||||
"address",
|
||||
"",
|
||||
},
|
||||
{
|
||||
`[ { "type": "bytes32" } ]`,
|
||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
||||
"bytes",
|
||||
"",
|
||||
},
|
||||
{
|
||||
`[ { "type": "bytes32" } ]`,
|
||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
||||
"hash",
|
||||
"",
|
||||
},
|
||||
{
|
||||
`[ { "type": "bytes32" } ]`,
|
||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
||||
"interface",
|
||||
"",
|
||||
},
|
||||
{
|
||||
`[ { "type": "function" } ]`,
|
||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
||||
[24]byte{1},
|
||||
"function",
|
||||
"",
|
||||
},
|
||||
} {
|
||||
abiDefinition := fmt.Sprintf(`[{ "name" : "method", "outputs": %s}]`, test.def)
|
||||
abi, err := JSON(strings.NewReader(abiDefinition))
|
||||
if err != nil {
|
||||
t.Errorf("%d failed. %v", i, err)
|
||||
continue
|
||||
}
|
||||
var unpackTests = []unpackTest{
|
||||
{
|
||||
def: `[{ "type": "bool" }]`,
|
||||
enc: "0000000000000000000000000000000000000000000000000000000000000001",
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
def: `[{"type": "uint32"}]`,
|
||||
enc: "0000000000000000000000000000000000000000000000000000000000000001",
|
||||
want: uint32(1),
|
||||
},
|
||||
{
|
||||
def: `[{"type": "uint32"}]`,
|
||||
enc: "0000000000000000000000000000000000000000000000000000000000000001",
|
||||
want: uint16(0),
|
||||
err: "abi: cannot unmarshal uint32 in to uint16",
|
||||
},
|
||||
{
|
||||
def: `[{"type": "uint17"}]`,
|
||||
enc: "0000000000000000000000000000000000000000000000000000000000000001",
|
||||
want: uint16(0),
|
||||
err: "abi: cannot unmarshal *big.Int in to uint16",
|
||||
},
|
||||
{
|
||||
def: `[{"type": "uint17"}]`,
|
||||
enc: "0000000000000000000000000000000000000000000000000000000000000001",
|
||||
want: big.NewInt(1),
|
||||
},
|
||||
{
|
||||
def: `[{"type": "int32"}]`,
|
||||
enc: "0000000000000000000000000000000000000000000000000000000000000001",
|
||||
want: int32(1),
|
||||
},
|
||||
{
|
||||
def: `[{"type": "int32"}]`,
|
||||
enc: "0000000000000000000000000000000000000000000000000000000000000001",
|
||||
want: int16(0),
|
||||
err: "abi: cannot unmarshal int32 in to int16",
|
||||
},
|
||||
{
|
||||
def: `[{"type": "int17"}]`,
|
||||
enc: "0000000000000000000000000000000000000000000000000000000000000001",
|
||||
want: int16(0),
|
||||
err: "abi: cannot unmarshal *big.Int in to int16",
|
||||
},
|
||||
{
|
||||
def: `[{"type": "int17"}]`,
|
||||
enc: "0000000000000000000000000000000000000000000000000000000000000001",
|
||||
want: big.NewInt(1),
|
||||
},
|
||||
{
|
||||
def: `[{"type": "address"}]`,
|
||||
enc: "0000000000000000000000000100000000000000000000000000000000000000",
|
||||
want: common.Address{1},
|
||||
},
|
||||
{
|
||||
def: `[{"type": "bytes32"}]`,
|
||||
enc: "0100000000000000000000000000000000000000000000000000000000000000",
|
||||
want: [32]byte{1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
|
||||
},
|
||||
{
|
||||
def: `[{"type": "bytes"}]`,
|
||||
enc: "000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000200100000000000000000000000000000000000000000000000000000000000000",
|
||||
want: common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
||||
},
|
||||
{
|
||||
def: `[{"type": "bytes"}]`,
|
||||
enc: "000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000200100000000000000000000000000000000000000000000000000000000000000",
|
||||
want: [32]byte{},
|
||||
err: "abi: cannot unmarshal []uint8 in to [32]uint8",
|
||||
},
|
||||
{
|
||||
def: `[{"type": "bytes32"}]`,
|
||||
enc: "000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000200100000000000000000000000000000000000000000000000000000000000000",
|
||||
want: []byte(nil),
|
||||
err: "abi: cannot unmarshal [32]uint8 in to []uint8",
|
||||
},
|
||||
{
|
||||
def: `[{"type": "bytes32"}]`,
|
||||
enc: "0100000000000000000000000000000000000000000000000000000000000000",
|
||||
want: common.HexToHash("0100000000000000000000000000000000000000000000000000000000000000"),
|
||||
},
|
||||
{
|
||||
def: `[{"type": "function"}]`,
|
||||
enc: "0100000000000000000000000000000000000000000000000000000000000000",
|
||||
want: [24]byte{1},
|
||||
},
|
||||
// slices
|
||||
{
|
||||
def: `[{"type": "uint8[]"}]`,
|
||||
enc: "0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
|
||||
want: []uint8{1, 2},
|
||||
},
|
||||
{
|
||||
def: `[{"type": "uint8[2]"}]`,
|
||||
enc: "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
|
||||
want: [2]uint8{1, 2},
|
||||
},
|
||||
// multi dimensional, if these pass, all types that don't require length prefix should pass
|
||||
{
|
||||
def: `[{"type": "uint8[][]"}]`,
|
||||
enc: "00000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000E0000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
|
||||
want: [][]uint8{{1, 2}, {1, 2}},
|
||||
},
|
||||
{
|
||||
def: `[{"type": "uint8[2][2]"}]`,
|
||||
enc: "0000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
|
||||
want: [2][2]uint8{{1, 2}, {1, 2}},
|
||||
},
|
||||
{
|
||||
def: `[{"type": "uint8[][2]"}]`,
|
||||
enc: "000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000001",
|
||||
want: [2][]uint8{{1}, {1}},
|
||||
},
|
||||
{
|
||||
def: `[{"type": "uint8[2][]"}]`,
|
||||
enc: "0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
|
||||
want: [][2]uint8{{1, 2}},
|
||||
},
|
||||
{
|
||||
def: `[{"type": "uint16[]"}]`,
|
||||
enc: "0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
|
||||
want: []uint16{1, 2},
|
||||
},
|
||||
{
|
||||
def: `[{"type": "uint16[2]"}]`,
|
||||
enc: "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
|
||||
want: [2]uint16{1, 2},
|
||||
},
|
||||
{
|
||||
def: `[{"type": "uint32[]"}]`,
|
||||
enc: "0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
|
||||
want: []uint32{1, 2},
|
||||
},
|
||||
{
|
||||
def: `[{"type": "uint32[2]"}]`,
|
||||
enc: "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
|
||||
want: [2]uint32{1, 2},
|
||||
},
|
||||
{
|
||||
def: `[{"type": "uint64[]"}]`,
|
||||
enc: "0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
|
||||
want: []uint64{1, 2},
|
||||
},
|
||||
{
|
||||
def: `[{"type": "uint64[2]"}]`,
|
||||
enc: "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
|
||||
want: [2]uint64{1, 2},
|
||||
},
|
||||
{
|
||||
def: `[{"type": "uint256[]"}]`,
|
||||
enc: "0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
|
||||
want: []*big.Int{big.NewInt(1), big.NewInt(2)},
|
||||
},
|
||||
{
|
||||
def: `[{"type": "uint256[3]"}]`,
|
||||
enc: "000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000003",
|
||||
want: [3]*big.Int{big.NewInt(1), big.NewInt(2), big.NewInt(3)},
|
||||
},
|
||||
{
|
||||
def: `[{"type": "int8[]"}]`,
|
||||
enc: "0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
|
||||
want: []int8{1, 2},
|
||||
},
|
||||
{
|
||||
def: `[{"type": "int8[2]"}]`,
|
||||
enc: "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
|
||||
want: [2]int8{1, 2},
|
||||
},
|
||||
{
|
||||
def: `[{"type": "int16[]"}]`,
|
||||
enc: "0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
|
||||
want: []int16{1, 2},
|
||||
},
|
||||
{
|
||||
def: `[{"type": "int16[2]"}]`,
|
||||
enc: "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
|
||||
want: [2]int16{1, 2},
|
||||
},
|
||||
{
|
||||
def: `[{"type": "int32[]"}]`,
|
||||
enc: "0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
|
||||
want: []int32{1, 2},
|
||||
},
|
||||
{
|
||||
def: `[{"type": "int32[2]"}]`,
|
||||
enc: "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
|
||||
want: [2]int32{1, 2},
|
||||
},
|
||||
{
|
||||
def: `[{"type": "int64[]"}]`,
|
||||
enc: "0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
|
||||
want: []int64{1, 2},
|
||||
},
|
||||
{
|
||||
def: `[{"type": "int64[2]"}]`,
|
||||
enc: "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
|
||||
want: [2]int64{1, 2},
|
||||
},
|
||||
{
|
||||
def: `[{"type": "int256[]"}]`,
|
||||
enc: "0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
|
||||
want: []*big.Int{big.NewInt(1), big.NewInt(2)},
|
||||
},
|
||||
{
|
||||
def: `[{"type": "int256[3]"}]`,
|
||||
enc: "000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000003",
|
||||
want: [3]*big.Int{big.NewInt(1), big.NewInt(2), big.NewInt(3)},
|
||||
},
|
||||
// struct outputs
|
||||
{
|
||||
def: `[{"name":"int1","type":"int256"},{"name":"int2","type":"int256"}]`,
|
||||
enc: "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
|
||||
want: struct {
|
||||
Int1 *big.Int
|
||||
Int2 *big.Int
|
||||
}{big.NewInt(1), big.NewInt(2)},
|
||||
},
|
||||
{
|
||||
def: `[{"name":"int","type":"int256"},{"name":"Int","type":"int256"}]`,
|
||||
enc: "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
|
||||
want: struct {
|
||||
Int1 *big.Int
|
||||
Int2 *big.Int
|
||||
}{},
|
||||
err: "abi: multiple outputs mapping to the same struct field 'Int'",
|
||||
},
|
||||
{
|
||||
def: `[{"name":"int","type":"int256"},{"name":"_int","type":"int256"}]`,
|
||||
enc: "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
|
||||
want: struct {
|
||||
Int1 *big.Int
|
||||
Int2 *big.Int
|
||||
}{},
|
||||
err: "abi: multiple outputs mapping to the same struct field 'Int'",
|
||||
},
|
||||
{
|
||||
def: `[{"name":"Int","type":"int256"},{"name":"_int","type":"int256"}]`,
|
||||
enc: "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
|
||||
want: struct {
|
||||
Int1 *big.Int
|
||||
Int2 *big.Int
|
||||
}{},
|
||||
err: "abi: multiple outputs mapping to the same struct field 'Int'",
|
||||
},
|
||||
{
|
||||
def: `[{"name":"Int","type":"int256"},{"name":"_","type":"int256"}]`,
|
||||
enc: "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
|
||||
want: struct {
|
||||
Int1 *big.Int
|
||||
Int2 *big.Int
|
||||
}{},
|
||||
err: "abi: purely underscored output cannot unpack to struct",
|
||||
},
|
||||
}
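For orientation, one of the fixed-size array entries above can be exercised by hand the same way the table loop further down does. A minimal sketch, assuming it sits alongside the test file (same package and imports); the helper name is illustrative only:

// Sketch: decode the uint8[2] entry manually, mirroring the table-driven loop below.
func exampleUnpackFixedArray() {
	def := `[{ "name" : "method", "outputs": [{"type": "uint8[2]"}] }]`
	parsed, err := JSON(strings.NewReader(def))
	if err != nil {
		panic(err) // the definition above is static, so this should not happen
	}
	enc := "0000000000000000000000000000000000000000000000000000000000000001" +
		"0000000000000000000000000000000000000000000000000000000000000002"
	encb, err := hex.DecodeString(enc)
	if err != nil {
		panic(err)
	}
	var got [2]uint8
	if err := parsed.Unpack(&got, "method", encb); err != nil {
		panic(err)
	}
	fmt.Println(got) // prints [1 2]
}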
|
||||
|
||||
var outvar interface{}
|
||||
switch test.outVar {
|
||||
case "bool":
|
||||
var v bool
|
||||
err = abi.Unpack(&v, "method", test.marshalledOutput)
|
||||
outvar = v
|
||||
case "uint8":
|
||||
var v uint8
|
||||
err = abi.Unpack(&v, "method", test.marshalledOutput)
|
||||
outvar = v
|
||||
case "uint16":
|
||||
var v uint16
|
||||
err = abi.Unpack(&v, "method", test.marshalledOutput)
|
||||
outvar = v
|
||||
case "uint32":
|
||||
var v uint32
|
||||
err = abi.Unpack(&v, "method", test.marshalledOutput)
|
||||
outvar = v
|
||||
case "uint64":
|
||||
var v uint64
|
||||
err = abi.Unpack(&v, "method", test.marshalledOutput)
|
||||
outvar = v
|
||||
case "int8":
|
||||
var v int8
|
||||
err = abi.Unpack(&v, "method", test.marshalledOutput)
|
||||
outvar = v
|
||||
case "int16":
|
||||
var v int16
|
||||
err = abi.Unpack(&v, "method", test.marshalledOutput)
|
||||
outvar = v
|
||||
case "int32":
|
||||
var v int32
|
||||
err = abi.Unpack(&v, "method", test.marshalledOutput)
|
||||
outvar = v
|
||||
case "int64":
|
||||
var v int64
|
||||
err = abi.Unpack(&v, "method", test.marshalledOutput)
|
||||
outvar = v
|
||||
case "*big.Int":
|
||||
var v *big.Int
|
||||
err = abi.Unpack(&v, "method", test.marshalledOutput)
|
||||
outvar = v
|
||||
case "address":
|
||||
var v common.Address
|
||||
err = abi.Unpack(&v, "method", test.marshalledOutput)
|
||||
outvar = v
|
||||
case "bytes":
|
||||
var v []byte
|
||||
err = abi.Unpack(&v, "method", test.marshalledOutput)
|
||||
outvar = v
|
||||
case "hash":
|
||||
var v common.Hash
|
||||
err = abi.Unpack(&v, "method", test.marshalledOutput)
|
||||
outvar = v.Bytes()[:]
|
||||
case "function":
|
||||
var v [24]byte
|
||||
err = abi.Unpack(&v, "method", test.marshalledOutput)
|
||||
outvar = v
|
||||
case "interface":
|
||||
err = abi.Unpack(&outvar, "method", test.marshalledOutput)
|
||||
default:
|
||||
t.Errorf("unsupported type '%v' please add it to the switch statement in this test", test.outVar)
|
||||
continue
|
||||
}
|
||||
|
||||
if err != nil && len(test.err) == 0 {
|
||||
t.Errorf("%d failed. Expected no err but got: %v", i, err)
|
||||
continue
|
||||
}
|
||||
if err == nil && len(test.err) != 0 {
|
||||
t.Errorf("%d failed. Expected err: %v but got none", i, test.err)
|
||||
continue
|
||||
}
|
||||
if err != nil && len(test.err) != 0 && err.Error() != test.err {
|
||||
t.Errorf("%d failed. Expected err: '%v' got err: '%v'", i, test.err, err)
|
||||
continue
|
||||
}
|
||||
|
||||
if err == nil {
|
||||
if !reflect.DeepEqual(test.expectedOut, outvar) {
|
||||
t.Errorf("%d failed. Output error: expected %v, got %v", i, test.expectedOut, outvar)
|
||||
func TestUnpack(t *testing.T) {
|
||||
for i, test := range unpackTests {
|
||||
t.Run(strconv.Itoa(i), func(t *testing.T) {
|
||||
def := fmt.Sprintf(`[{ "name" : "method", "outputs": %s}]`, test.def)
|
||||
abi, err := JSON(strings.NewReader(def))
|
||||
if err != nil {
|
||||
t.Fatalf("invalid ABI definition %s: %v", def, err)
|
||||
}
|
||||
}
|
||||
encb, err := hex.DecodeString(test.enc)
|
||||
if err != nil {
|
||||
t.Fatalf("invalid hex: %s" + test.enc)
|
||||
}
|
||||
outptr := reflect.New(reflect.TypeOf(test.want))
|
||||
err = abi.Unpack(outptr.Interface(), "method", encb)
|
||||
if err := test.checkError(err); err != nil {
|
||||
t.Errorf("test %d (%v) failed: %v", i, test.def, err)
|
||||
return
|
||||
}
|
||||
out := outptr.Elem().Interface()
|
||||
if !reflect.DeepEqual(test.want, out) {
|
||||
t.Errorf("test %d (%v) failed: expected %v, got %v", i, test.def, test.want, out)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestUnpackSetInterfaceSlice(t *testing.T) {
|
||||
var (
|
||||
var1 = new(uint8)
|
||||
var2 = new(uint8)
|
||||
)
|
||||
out := []interface{}{var1, var2}
|
||||
abi, err := JSON(strings.NewReader(`[{"type":"function", "name":"ints", "outputs":[{"type":"uint8"}, {"type":"uint8"}]}]`))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
marshalledReturn := append(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001"), common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002")...)
|
||||
err = abi.Unpack(&out, "ints", marshalledReturn)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if *var1 != 1 {
|
||||
t.Error("expected var1 to be 1, got", *var1)
|
||||
}
|
||||
if *var2 != 2 {
|
||||
t.Error("expected var2 to be 2, got", *var2)
|
||||
}
|
||||
|
||||
out = []interface{}{var1}
|
||||
err = abi.Unpack(&out, "ints", marshalledReturn)
|
||||
|
||||
expErr := "abi: cannot marshal in to slices of unequal size (require: 2, got: 1)"
|
||||
if err == nil || err.Error() != expErr {
|
||||
t.Error("expected err:", expErr, "Got:", err)
|
||||
}
|
||||
type methodMultiOutput struct {
|
||||
Int *big.Int
|
||||
String string
|
||||
}
|
||||
|
||||
func TestUnpackSetInterfaceArrayOutput(t *testing.T) {
|
||||
var (
|
||||
var1 = new([1]uint32)
|
||||
var2 = new([1]uint32)
|
||||
)
|
||||
out := []interface{}{var1, var2}
|
||||
abi, err := JSON(strings.NewReader(`[{"type":"function", "name":"ints", "outputs":[{"type":"uint32[1]"}, {"type":"uint32[1]"}]}]`))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
marshalledReturn := append(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001"), common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002")...)
|
||||
err = abi.Unpack(&out, "ints", marshalledReturn)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if *var1 != [1]uint32{1} {
|
||||
t.Error("expected var1 to be [1], got", *var1)
|
||||
}
|
||||
if *var2 != [1]uint32{2} {
|
||||
t.Error("expected var2 to be [2], got", *var2)
|
||||
}
|
||||
}
|
||||
|
||||
func TestMultiReturnWithStruct(t *testing.T) {
|
||||
func methodMultiReturn(require *require.Assertions) (ABI, []byte, methodMultiOutput) {
|
||||
const definition = `[
|
||||
{ "name" : "multi", "constant" : false, "outputs": [ { "name": "Int", "type": "uint256" }, { "name": "String", "type": "string" } ] }]`
|
||||
var expected = methodMultiOutput{big.NewInt(1), "hello"}
|
||||
|
||||
abi, err := JSON(strings.NewReader(definition))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
require.NoError(err)
|
||||
// using buff to make the code readable
|
||||
buff := new(bytes.Buffer)
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001"))
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000040"))
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000005"))
|
||||
stringOut := "hello"
|
||||
buff.Write(common.RightPadBytes([]byte(stringOut), 32))
|
||||
buff.Write(common.RightPadBytes([]byte(expected.String), 32))
|
||||
return abi, buff.Bytes(), expected
|
||||
}
|
||||
|
||||
var inter struct {
|
||||
Int *big.Int
|
||||
String string
|
||||
}
|
||||
err = abi.Unpack(&inter, "multi", buff.Bytes())
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if inter.Int == nil || inter.Int.Cmp(big.NewInt(1)) != 0 {
|
||||
t.Error("expected Int to be 1 got", inter.Int)
|
||||
}
|
||||
|
||||
if inter.String != stringOut {
|
||||
t.Error("expected String to be", stringOut, "got", inter.String)
|
||||
}
|
||||
|
||||
var reversed struct {
|
||||
func TestMethodMultiReturn(t *testing.T) {
|
||||
type reversed struct {
|
||||
String string
|
||||
Int *big.Int
|
||||
}
|
||||
|
||||
err = abi.Unpack(&reversed, "multi", buff.Bytes())
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if reversed.Int == nil || reversed.Int.Cmp(big.NewInt(1)) != 0 {
|
||||
t.Error("expected Int to be 1 got", reversed.Int)
|
||||
}
|
||||
|
||||
if reversed.String != stringOut {
|
||||
t.Error("expected String to be", stringOut, "got", reversed.String)
|
||||
abi, data, expected := methodMultiReturn(require.New(t))
|
||||
bigint := new(big.Int)
|
||||
var testCases = []struct {
|
||||
dest interface{}
|
||||
expected interface{}
|
||||
error string
|
||||
name string
|
||||
}{{
|
||||
&methodMultiOutput{},
|
||||
&expected,
|
||||
"",
|
||||
"Can unpack into structure",
|
||||
}, {
|
||||
&reversed{},
|
||||
&reversed{expected.String, expected.Int},
|
||||
"",
|
||||
"Can unpack into reversed structure",
|
||||
}, {
|
||||
&[]interface{}{&bigint, new(string)},
|
||||
&[]interface{}{&expected.Int, &expected.String},
|
||||
"",
|
||||
"Can unpack into a slice",
|
||||
}, {
|
||||
&[2]interface{}{&bigint, new(string)},
|
||||
&[2]interface{}{&expected.Int, &expected.String},
|
||||
"",
|
||||
"Can unpack into an array",
|
||||
}, {
|
||||
&[]interface{}{new(int), new(int)},
|
||||
&[]interface{}{&expected.Int, &expected.String},
|
||||
"abi: cannot unmarshal *big.Int in to int",
|
||||
"Can not unpack into a slice with wrong types",
|
||||
}, {
|
||||
&[]interface{}{new(int)},
|
||||
&[]interface{}{},
|
||||
"abi: insufficient number of elements in the list/array for unpack, want 2, got 1",
|
||||
"Can not unpack into a slice with wrong types",
|
||||
}}
|
||||
for _, tc := range testCases {
|
||||
tc := tc
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
require := require.New(t)
|
||||
err := abi.Unpack(tc.dest, "multi", data)
|
||||
if tc.error == "" {
|
||||
require.Nil(err, "Should be able to unpack method outputs.")
|
||||
require.Equal(tc.expected, tc.dest)
|
||||
} else {
|
||||
require.EqualError(err, tc.error)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestMultiReturnWithSlice(t *testing.T) {
|
||||
const definition = `[
|
||||
{ "name" : "multi", "constant" : false, "outputs": [ { "name": "Int", "type": "uint256" }, { "name": "String", "type": "string" } ] }]`
|
||||
|
||||
func TestMultiReturnWithArray(t *testing.T) {
|
||||
const definition = `[{"name" : "multi", "outputs": [{"type": "uint64[3]"}, {"type": "uint64"}]}]`
|
||||
abi, err := JSON(strings.NewReader(definition))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// using buff to make the code readable
|
||||
buff := new(bytes.Buffer)
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001"))
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000040"))
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000005"))
|
||||
stringOut := "hello"
|
||||
buff.Write(common.RightPadBytes([]byte(stringOut), 32))
|
||||
buff.Write(common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000900000000000000000000000000000000000000000000000000000000000000090000000000000000000000000000000000000000000000000000000000000009"))
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000008"))
|
||||
|
||||
var inter []interface{}
|
||||
err = abi.Unpack(&inter, "multi", buff.Bytes())
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if len(inter) != 2 {
|
||||
t.Fatal("expected 2 results got", len(inter))
|
||||
}
|
||||
|
||||
if num, ok := inter[0].(*big.Int); !ok || num.Cmp(big.NewInt(1)) != 0 {
|
||||
t.Error("expected index 0 to be 1 got", num)
|
||||
}
|
||||
|
||||
if str, ok := inter[1].(string); !ok || str != stringOut {
|
||||
t.Error("expected index 1 to be", stringOut, "got", str)
|
||||
}
|
||||
}
|
||||
|
||||
func TestMarshalArrays(t *testing.T) {
|
||||
const definition = `[
|
||||
{ "name" : "bytes32", "constant" : false, "outputs": [ { "type": "bytes32" } ] },
|
||||
{ "name" : "bytes10", "constant" : false, "outputs": [ { "type": "bytes10" } ] }
|
||||
]`
|
||||
|
||||
abi, err := JSON(strings.NewReader(definition))
|
||||
if err != nil {
|
||||
ret1, ret1Exp := new([3]uint64), [3]uint64{9, 9, 9}
|
||||
ret2, ret2Exp := new(uint64), uint64(8)
|
||||
if err := abi.Unpack(&[]interface{}{ret1, ret2}, "multi", buff.Bytes()); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
output := common.LeftPadBytes([]byte{1}, 32)
|
||||
|
||||
var bytes10 [10]byte
|
||||
err = abi.Unpack(&bytes10, "bytes32", output)
|
||||
if err == nil || err.Error() != "abi: cannot unmarshal src (len=32) in to dst (len=10)" {
|
||||
t.Error("expected error or bytes32 not be assignable to bytes10:", err)
|
||||
if !reflect.DeepEqual(*ret1, ret1Exp) {
|
||||
t.Error("array result", *ret1, "!= Expected", ret1Exp)
|
||||
}
|
||||
|
||||
var bytes32 [32]byte
|
||||
err = abi.Unpack(&bytes32, "bytes32", output)
|
||||
if err != nil {
|
||||
t.Error("didn't expect error:", err)
|
||||
}
|
||||
if !bytes.Equal(bytes32[:], output) {
|
||||
t.Error("expected bytes32[31] to be 1 got", bytes32[31])
|
||||
}
|
||||
|
||||
type (
|
||||
B10 [10]byte
|
||||
B32 [32]byte
|
||||
)
|
||||
|
||||
var b10 B10
|
||||
err = abi.Unpack(&b10, "bytes32", output)
|
||||
if err == nil || err.Error() != "abi: cannot unmarshal src (len=32) in to dst (len=10)" {
|
||||
t.Error("expected error or bytes32 not be assignable to bytes10:", err)
|
||||
}
|
||||
|
||||
var b32 B32
|
||||
err = abi.Unpack(&b32, "bytes32", output)
|
||||
if err != nil {
|
||||
t.Error("didn't expect error:", err)
|
||||
}
|
||||
if !bytes.Equal(b32[:], output) {
|
||||
t.Error("expected bytes32[31] to be 1 got", bytes32[31])
|
||||
}
|
||||
|
||||
output[10] = 1
|
||||
var shortAssignLong [32]byte
|
||||
err = abi.Unpack(&shortAssignLong, "bytes10", output)
|
||||
if err != nil {
|
||||
t.Error("didn't expect error:", err)
|
||||
}
|
||||
if !bytes.Equal(output, shortAssignLong[:]) {
|
||||
t.Errorf("expected %x to be %x", shortAssignLong, output)
|
||||
if *ret2 != ret2Exp {
|
||||
t.Error("int result", *ret2, "!= Expected", ret2Exp)
|
||||
}
|
||||
}
|
||||
|
||||
@ -450,6 +453,29 @@ func TestUnmarshal(t *testing.T) {
|
||||
}
|
||||
buff := new(bytes.Buffer)
|
||||
|
||||
// marshal mixed bytes (mixedBytes)
|
||||
p0, p0Exp := []byte{}, common.Hex2Bytes("01020000000000000000")
|
||||
p1, p1Exp := [32]byte{}, common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000ddeeff")
|
||||
mixedBytes := []interface{}{&p0, &p1}
|
||||
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000040"))
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000ddeeff"))
|
||||
buff.Write(common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000a"))
|
||||
buff.Write(common.Hex2Bytes("0102000000000000000000000000000000000000000000000000000000000000"))
|
||||
|
||||
err = abi.Unpack(&mixedBytes, "mixedBytes", buff.Bytes())
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
} else {
|
||||
if !bytes.Equal(p0, p0Exp) {
|
||||
t.Errorf("unexpected value unpacked: want %x, got %x", p0Exp, p0)
|
||||
}
|
||||
|
||||
if !bytes.Equal(p1[:], p1Exp) {
|
||||
t.Errorf("unexpected value unpacked: want %x, got %x", p1Exp, p1)
|
||||
}
|
||||
}
|
||||
|
||||
// marshal int
|
||||
var Int *big.Int
|
||||
err = abi.Unpack(&Int, "int", common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001"))
|
||||
@ -473,6 +499,7 @@ func TestUnmarshal(t *testing.T) {
|
||||
}
|
||||
|
||||
// marshal dynamic bytes max length 32
|
||||
buff.Reset()
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000020"))
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000020"))
|
||||
bytesOut := common.RightPadBytes([]byte("hello"), 32)
|
||||
@ -504,11 +531,11 @@ func TestUnmarshal(t *testing.T) {
|
||||
t.Errorf("expected %x got %x", bytesOut, Bytes)
|
||||
}
|
||||
|
||||
// marshall dynamic bytes max length 63
|
||||
// marshall dynamic bytes max length 64
|
||||
buff.Reset()
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000020"))
|
||||
buff.Write(common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000003f"))
|
||||
bytesOut = common.RightPadBytes([]byte("hello"), 63)
|
||||
bytesOut = common.RightPadBytes([]byte("hello"), 64)
|
||||
buff.Write(bytesOut)
|
||||
|
||||
err = abi.Unpack(&Bytes, "bytes", buff.Bytes())
|
||||
@ -516,8 +543,8 @@ func TestUnmarshal(t *testing.T) {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if !bytes.Equal(Bytes, bytesOut) {
|
||||
t.Errorf("expected %x got %x", bytesOut, Bytes)
|
||||
if !bytes.Equal(Bytes, bytesOut[:len(bytesOut)-1]) {
|
||||
t.Errorf("expected %x got %x", bytesOut[:len(bytesOut)-1], Bytes)
|
||||
}
|
||||
|
||||
// marshal dynamic bytes output empty
|
||||
@ -569,29 +596,6 @@ func TestUnmarshal(t *testing.T) {
|
||||
t.Error("expected error")
|
||||
}
|
||||
|
||||
// marshal mixed bytes
|
||||
buff.Reset()
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000040"))
|
||||
fixed := common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001")
|
||||
buff.Write(fixed)
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000020"))
|
||||
bytesOut = common.RightPadBytes([]byte("hello"), 32)
|
||||
buff.Write(bytesOut)
|
||||
|
||||
var out []interface{}
|
||||
err = abi.Unpack(&out, "mixedBytes", buff.Bytes())
|
||||
if err != nil {
|
||||
t.Fatal("didn't expect error:", err)
|
||||
}
|
||||
|
||||
if !bytes.Equal(bytesOut, out[0].([]byte)) {
|
||||
t.Errorf("expected %x, got %x", bytesOut, out[0])
|
||||
}
|
||||
|
||||
if !bytes.Equal(fixed, out[1].([]byte)) {
|
||||
t.Errorf("expected %x, got %x", fixed, out[1])
|
||||
}
|
||||
|
||||
buff.Reset()
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001"))
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002"))
|
||||
|
@ -42,8 +42,9 @@ type Wallet interface {
|
||||
URL() URL
|
||||
|
||||
// Status returns a textual status to aid the user in the current state of the
|
||||
// wallet.
|
||||
Status() string
|
||||
// wallet. It also returns an error indicating any failure the wallet might have
|
||||
// encountered.
|
||||
Status() (string, error)
|
||||
|
||||
// Open initializes access to a wallet instance. It is not meant to unlock or
|
||||
// decrypt account keys, rather simply to establish a connection to hardware
|
||||
@ -147,9 +148,26 @@ type Backend interface {
|
||||
Subscribe(sink chan<- WalletEvent) event.Subscription
|
||||
}
|
||||
|
||||
// WalletEventType represents the different event types that can be fired by
|
||||
// the wallet subscription subsystem.
|
||||
type WalletEventType int
|
||||
|
||||
const (
|
||||
// WalletArrived is fired when a new wallet is detected either via USB or via
|
||||
// a filesystem event in the keystore.
|
||||
WalletArrived WalletEventType = iota
|
||||
|
||||
// WalletOpened is fired when a wallet is successfully opened with the purpose
|
||||
// of starting any background processes such as automatic key derivation.
|
||||
WalletOpened
|
||||
|
||||
// WalletDropped is fired when a wallet is removed, either via USB or due to
// a filesystem event in the keystore.
|
||||
WalletDropped
|
||||
)
|
||||
|
||||
// WalletEvent is an event fired by an account backend when a wallet arrival or
|
||||
// departure is detected.
|
||||
type WalletEvent struct {
|
||||
Wallet Wallet // Wallet instance arrived or departed
|
||||
Arrive bool // Whether the wallet was added or removed
|
||||
Wallet Wallet // Wallet instance arrived or departed
|
||||
Kind WalletEventType // Event type that happened in the system
|
||||
}
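A minimal consumer-side sketch of the reworked event type, assuming some accounts.Backend value named backend and the go-ethereum log package; it shows how the Kind field replaces the old Arrive boolean:

// Sketch: subscribe to a backend and branch on the event kind.
updates := make(chan accounts.WalletEvent, 16)
sub := backend.Subscribe(updates) // backend is any accounts.Backend (assumption)
defer sub.Unsubscribe()

for ev := range updates {
	switch ev.Kind {
	case accounts.WalletArrived:
		log.Info("Wallet arrived", "url", ev.Wallet.URL())
	case accounts.WalletOpened:
		log.Info("Wallet opened", "url", ev.Wallet.URL())
	case accounts.WalletDropped:
		log.Info("Wallet dropped", "url", ev.Wallet.URL())
	}
}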
|
||||
|
@ -62,7 +62,7 @@ func NewAuthNeededError(needed string) error {
|
||||
}
|
||||
}
|
||||
|
||||
// Error implements the standard error interfacel.
|
||||
// Error implements the standard error interface.
|
||||
func (err *AuthNeededError) Error() string {
|
||||
return fmt.Sprintf("authentication needed: %s", err.Needed)
|
||||
}
|
||||
|
@ -27,12 +27,17 @@ import (
|
||||
// DefaultRootDerivationPath is the root path to which custom derivation endpoints
|
||||
// are appended. As such, the first account will be at m/44'/60'/0'/0, the second
|
||||
// at m/44'/60'/0'/1, etc.
|
||||
var DefaultRootDerivationPath = DerivationPath{0x80000000 + 44, 0x80000000 + 60, 0x80000000 + 0}
|
||||
var DefaultRootDerivationPath = DerivationPath{0x80000000 + 44, 0x80000000 + 60, 0x80000000 + 0, 0}
|
||||
|
||||
// DefaultBaseDerivationPath is the base path from which custom derivation endpoints
|
||||
// are incremented. As such, the first account will be at m/44'/60'/0'/0, the second
|
||||
// at m/44'/60'/0'/1, etc.
|
||||
var DefaultBaseDerivationPath = DerivationPath{0x80000000 + 44, 0x80000000 + 60, 0x80000000 + 0, 0}
|
||||
var DefaultBaseDerivationPath = DerivationPath{0x80000000 + 44, 0x80000000 + 60, 0x80000000 + 0, 0, 0}
|
||||
|
||||
// DefaultLedgerBaseDerivationPath is the base path from which custom derivation endpoints
|
||||
// are incremented. As such, the first account will be at m/44'/60'/0'/0, the second
|
||||
// at m/44'/60'/0'/1, etc.
|
||||
var DefaultLedgerBaseDerivationPath = DerivationPath{0x80000000 + 44, 0x80000000 + 60, 0x80000000 + 0, 0}
|
||||
|
||||
// DerivationPath represents the computer friendly version of a hierarchical
|
||||
// deterministic wallet account derivation path.
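As a quick reference for the constants above, the numeric components map onto the usual BIP-32 notation as follows (a sketch, not part of the diff):

// Sketch: how the numeric components read in BIP-32 notation.
// 0x80000000 marks a hardened component, written with an apostrophe.
path := DerivationPath{0x80000000 + 44, 0x80000000 + 60, 0x80000000 + 0, 0}
_ = path // m/44'/60'/0'/0: purpose' / coin_type' (60 = ether) / account' / address_index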
|
||||
|
@ -37,11 +37,11 @@ func TestHDPathParsing(t *testing.T) {
|
||||
{"m/2147483692/2147483708/2147483648/2147483648", DerivationPath{0x80000000 + 44, 0x80000000 + 60, 0x80000000 + 0, 0x80000000 + 0}},
|
||||
|
||||
// Plain relative derivation paths
|
||||
{"0", DerivationPath{0x80000000 + 44, 0x80000000 + 60, 0x80000000 + 0, 0}},
|
||||
{"128", DerivationPath{0x80000000 + 44, 0x80000000 + 60, 0x80000000 + 0, 128}},
|
||||
{"0'", DerivationPath{0x80000000 + 44, 0x80000000 + 60, 0x80000000 + 0, 0x80000000 + 0}},
|
||||
{"128'", DerivationPath{0x80000000 + 44, 0x80000000 + 60, 0x80000000 + 0, 0x80000000 + 128}},
|
||||
{"2147483648", DerivationPath{0x80000000 + 44, 0x80000000 + 60, 0x80000000 + 0, 0x80000000 + 0}},
|
||||
{"0", DerivationPath{0x80000000 + 44, 0x80000000 + 60, 0x80000000 + 0, 0, 0}},
|
||||
{"128", DerivationPath{0x80000000 + 44, 0x80000000 + 60, 0x80000000 + 0, 0, 128}},
|
||||
{"0'", DerivationPath{0x80000000 + 44, 0x80000000 + 60, 0x80000000 + 0, 0, 0x80000000 + 0}},
|
||||
{"128'", DerivationPath{0x80000000 + 44, 0x80000000 + 60, 0x80000000 + 0, 0, 0x80000000 + 128}},
|
||||
{"2147483648", DerivationPath{0x80000000 + 44, 0x80000000 + 60, 0x80000000 + 0, 0, 0x80000000 + 0}},
|
||||
|
||||
// Hexadecimal absolute derivation paths
|
||||
{"m/0x2C'/0x3c'/0x00'/0x00", DerivationPath{0x80000000 + 44, 0x80000000 + 60, 0x80000000 + 0, 0}},
|
||||
@ -52,11 +52,11 @@ func TestHDPathParsing(t *testing.T) {
|
||||
{"m/0x8000002C/0x8000003c/0x80000000/0x80000000", DerivationPath{0x80000000 + 44, 0x80000000 + 60, 0x80000000 + 0, 0x80000000 + 0}},
|
||||
|
||||
// Hexadecimal relative derivation paths
|
||||
{"0x00", DerivationPath{0x80000000 + 44, 0x80000000 + 60, 0x80000000 + 0, 0}},
|
||||
{"0x80", DerivationPath{0x80000000 + 44, 0x80000000 + 60, 0x80000000 + 0, 128}},
|
||||
{"0x00'", DerivationPath{0x80000000 + 44, 0x80000000 + 60, 0x80000000 + 0, 0x80000000 + 0}},
|
||||
{"0x80'", DerivationPath{0x80000000 + 44, 0x80000000 + 60, 0x80000000 + 0, 0x80000000 + 128}},
|
||||
{"0x80000000", DerivationPath{0x80000000 + 44, 0x80000000 + 60, 0x80000000 + 0, 0x80000000 + 0}},
|
||||
{"0x00", DerivationPath{0x80000000 + 44, 0x80000000 + 60, 0x80000000 + 0, 0, 0}},
|
||||
{"0x80", DerivationPath{0x80000000 + 44, 0x80000000 + 60, 0x80000000 + 0, 0, 128}},
|
||||
{"0x00'", DerivationPath{0x80000000 + 44, 0x80000000 + 60, 0x80000000 + 0, 0, 0x80000000 + 0}},
|
||||
{"0x80'", DerivationPath{0x80000000 + 44, 0x80000000 + 60, 0x80000000 + 0, 0, 0x80000000 + 128}},
|
||||
{"0x80000000", DerivationPath{0x80000000 + 44, 0x80000000 + 60, 0x80000000 + 0, 0, 0x80000000 + 0}},
|
||||
|
||||
// Weird inputs just to ensure they work
|
||||
{" m / 44 '\n/\n 60 \n\n\t' /\n0 ' /\t\t 0", DerivationPath{0x80000000 + 44, 0x80000000 + 60, 0x80000000 + 0, 0}},
|
||||
|
@ -20,7 +20,6 @@ import (
|
||||
"bufio"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
@ -31,6 +30,7 @@ import (
|
||||
"github.com/ethereum/go-ethereum/accounts"
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"gopkg.in/fatih/set.v0"
|
||||
)
|
||||
|
||||
// Minimum amount of time between cache reloads. This limit applies if the platform does
|
||||
@ -71,6 +71,7 @@ type accountCache struct {
|
||||
byAddr map[common.Address][]accounts.Account
|
||||
throttle *time.Timer
|
||||
notify chan struct{}
|
||||
fileC fileCache
|
||||
}
|
||||
|
||||
func newAccountCache(keydir string) (*accountCache, chan struct{}) {
|
||||
@ -78,6 +79,7 @@ func newAccountCache(keydir string) (*accountCache, chan struct{}) {
|
||||
keydir: keydir,
|
||||
byAddr: make(map[common.Address][]accounts.Account),
|
||||
notify: make(chan struct{}, 1),
|
||||
fileC: fileCache{all: set.NewNonTS()},
|
||||
}
|
||||
ac.watcher = newWatcher(ac)
|
||||
return ac, ac.notify
|
||||
@ -127,6 +129,23 @@ func (ac *accountCache) delete(removed accounts.Account) {
|
||||
}
|
||||
}
|
||||
|
||||
// deleteByFile removes an account referenced by the given path.
|
||||
func (ac *accountCache) deleteByFile(path string) {
|
||||
ac.mu.Lock()
|
||||
defer ac.mu.Unlock()
|
||||
i := sort.Search(len(ac.all), func(i int) bool { return ac.all[i].URL.Path >= path })
|
||||
|
||||
if i < len(ac.all) && ac.all[i].URL.Path == path {
|
||||
removed := ac.all[i]
|
||||
ac.all = append(ac.all[:i], ac.all[i+1:]...)
|
||||
if ba := removeAccount(ac.byAddr[removed.Address], removed); len(ba) == 0 {
|
||||
delete(ac.byAddr, removed.Address)
|
||||
} else {
|
||||
ac.byAddr[removed.Address] = ba
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func removeAccount(slice []accounts.Account, elem accounts.Account) []accounts.Account {
|
||||
for i := range slice {
|
||||
if slice[i] == elem {
|
||||
@ -167,15 +186,16 @@ func (ac *accountCache) find(a accounts.Account) (accounts.Account, error) {
|
||||
default:
|
||||
err := &AmbiguousAddrError{Addr: a.Address, Matches: make([]accounts.Account, len(matches))}
|
||||
copy(err.Matches, matches)
|
||||
sort.Sort(accountsByURL(err.Matches))
|
||||
return accounts.Account{}, err
|
||||
}
|
||||
}
|
||||
|
||||
func (ac *accountCache) maybeReload() {
|
||||
ac.mu.Lock()
|
||||
defer ac.mu.Unlock()
|
||||
|
||||
if ac.watcher.running {
|
||||
ac.mu.Unlock()
|
||||
return // A watcher is running and will keep the cache up-to-date.
|
||||
}
|
||||
if ac.throttle == nil {
|
||||
@ -184,12 +204,15 @@ func (ac *accountCache) maybeReload() {
|
||||
select {
|
||||
case <-ac.throttle.C:
|
||||
default:
|
||||
ac.mu.Unlock()
|
||||
return // The cache was reloaded recently.
|
||||
}
|
||||
}
|
||||
// No watcher running, start it.
|
||||
ac.watcher.start()
|
||||
ac.reload()
|
||||
ac.throttle.Reset(minReloadInterval)
|
||||
ac.mu.Unlock()
|
||||
ac.scanAccounts()
|
||||
}
|
||||
|
||||
func (ac *accountCache) close() {
|
||||
@ -205,80 +228,71 @@ func (ac *accountCache) close() {
|
||||
ac.mu.Unlock()
|
||||
}
|
||||
|
||||
// reload caches addresses of existing accounts.
|
||||
// Callers must hold ac.mu.
|
||||
func (ac *accountCache) reload() {
|
||||
accounts, err := ac.scan()
|
||||
// scanAccounts checks if any changes have occurred on the filesystem, and
|
||||
// updates the account cache accordingly
|
||||
func (ac *accountCache) scanAccounts() error {
|
||||
// Scan the entire folder metadata for file changes
|
||||
creates, deletes, updates, err := ac.fileC.scan(ac.keydir)
|
||||
if err != nil {
|
||||
log.Debug("Failed to reload keystore contents", "err", err)
|
||||
return err
|
||||
}
|
||||
ac.all = accounts
|
||||
sort.Sort(ac.all)
|
||||
for k := range ac.byAddr {
|
||||
delete(ac.byAddr, k)
|
||||
if creates.Size() == 0 && deletes.Size() == 0 && updates.Size() == 0 {
|
||||
return nil
|
||||
}
|
||||
for _, a := range accounts {
|
||||
ac.byAddr[a.Address] = append(ac.byAddr[a.Address], a)
|
||||
// Create a helper method to scan the contents of the key files
|
||||
var (
|
||||
buf = new(bufio.Reader)
|
||||
key struct {
|
||||
Address string `json:"address"`
|
||||
}
|
||||
)
|
||||
readAccount := func(path string) *accounts.Account {
|
||||
fd, err := os.Open(path)
|
||||
if err != nil {
|
||||
log.Trace("Failed to open keystore file", "path", path, "err", err)
|
||||
return nil
|
||||
}
|
||||
defer fd.Close()
|
||||
buf.Reset(fd)
|
||||
// Parse the address.
|
||||
key.Address = ""
|
||||
err = json.NewDecoder(buf).Decode(&key)
|
||||
addr := common.HexToAddress(key.Address)
|
||||
switch {
|
||||
case err != nil:
|
||||
log.Debug("Failed to decode keystore key", "path", path, "err", err)
|
||||
case (addr == common.Address{}):
|
||||
log.Debug("Failed to decode keystore key", "path", path, "err", "missing or zero address")
|
||||
default:
|
||||
return &accounts.Account{Address: addr, URL: accounts.URL{Scheme: KeyStoreScheme, Path: path}}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
// Process all the file diffs
|
||||
start := time.Now()
|
||||
|
||||
for _, p := range creates.List() {
|
||||
if a := readAccount(p.(string)); a != nil {
|
||||
ac.add(*a)
|
||||
}
|
||||
}
|
||||
for _, p := range deletes.List() {
|
||||
ac.deleteByFile(p.(string))
|
||||
}
|
||||
for _, p := range updates.List() {
|
||||
path := p.(string)
|
||||
ac.deleteByFile(path)
|
||||
if a := readAccount(path); a != nil {
|
||||
ac.add(*a)
|
||||
}
|
||||
}
|
||||
end := time.Now()
|
||||
|
||||
select {
|
||||
case ac.notify <- struct{}{}:
|
||||
default:
|
||||
}
|
||||
log.Debug("Reloaded keystore contents", "accounts", len(ac.all))
|
||||
}
|
||||
|
||||
func (ac *accountCache) scan() ([]accounts.Account, error) {
|
||||
files, err := ioutil.ReadDir(ac.keydir)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var (
|
||||
buf = new(bufio.Reader)
|
||||
addrs []accounts.Account
|
||||
keyJSON struct {
|
||||
Address string `json:"address"`
|
||||
}
|
||||
)
|
||||
for _, fi := range files {
|
||||
path := filepath.Join(ac.keydir, fi.Name())
|
||||
if skipKeyFile(fi) {
|
||||
log.Trace("Ignoring file on account scan", "path", path)
|
||||
continue
|
||||
}
|
||||
logger := log.New("path", path)
|
||||
|
||||
fd, err := os.Open(path)
|
||||
if err != nil {
|
||||
logger.Trace("Failed to open keystore file", "err", err)
|
||||
continue
|
||||
}
|
||||
buf.Reset(fd)
|
||||
// Parse the address.
|
||||
keyJSON.Address = ""
|
||||
err = json.NewDecoder(buf).Decode(&keyJSON)
|
||||
addr := common.HexToAddress(keyJSON.Address)
|
||||
switch {
|
||||
case err != nil:
|
||||
logger.Debug("Failed to decode keystore key", "err", err)
|
||||
case (addr == common.Address{}):
|
||||
logger.Debug("Failed to decode keystore key", "err", "missing or zero address")
|
||||
default:
|
||||
addrs = append(addrs, accounts.Account{Address: addr, URL: accounts.URL{Scheme: KeyStoreScheme, Path: path}})
|
||||
}
|
||||
fd.Close()
|
||||
}
|
||||
return addrs, err
|
||||
}
|
||||
|
||||
func skipKeyFile(fi os.FileInfo) bool {
|
||||
// Skip editor backups and UNIX-style hidden files.
|
||||
if strings.HasSuffix(fi.Name(), "~") || strings.HasPrefix(fi.Name(), ".") {
|
||||
return true
|
||||
}
|
||||
// Skip misc special files, directories (yes, symlinks too).
|
||||
if fi.IsDir() || fi.Mode()&os.ModeType != 0 {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
log.Trace("Handled keystore changes", "time", end.Sub(start))
|
||||
return nil
|
||||
}
|
||||
|
@ -18,6 +18,7 @@ package keystore
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"math/rand"
|
||||
"os"
|
||||
"path/filepath"
|
||||
@ -58,7 +59,7 @@ func TestWatchNewFile(t *testing.T) {
|
||||
|
||||
// Ensure the watcher is started before adding any files.
|
||||
ks.Accounts()
|
||||
time.Sleep(200 * time.Millisecond)
|
||||
time.Sleep(1000 * time.Millisecond)
|
||||
|
||||
// Move in the files.
|
||||
wantAccounts := make([]accounts.Account, len(cachetestAccounts))
|
||||
@ -295,3 +296,111 @@ func TestCacheFind(t *testing.T) {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func waitForAccounts(wantAccounts []accounts.Account, ks *KeyStore) error {
|
||||
var list []accounts.Account
|
||||
for d := 200 * time.Millisecond; d < 8*time.Second; d *= 2 {
|
||||
list = ks.Accounts()
|
||||
if reflect.DeepEqual(list, wantAccounts) {
|
||||
// ks should have also received change notifications
|
||||
select {
|
||||
case <-ks.changes:
|
||||
default:
|
||||
return fmt.Errorf("wasn't notified of new accounts")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
time.Sleep(d)
|
||||
}
|
||||
return fmt.Errorf("\ngot %v\nwant %v", list, wantAccounts)
|
||||
}
|
||||
|
||||
// TestUpdatedKeyfileContents tests that updating the contents of a keystore file
|
||||
// is noticed by the watcher, and the account cache is updated accordingly
|
||||
func TestUpdatedKeyfileContents(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
// Create a temporary keystore to test with
|
||||
rand.Seed(time.Now().UnixNano())
|
||||
dir := filepath.Join(os.TempDir(), fmt.Sprintf("eth-keystore-watch-test-%d-%d", os.Getpid(), rand.Int()))
|
||||
ks := NewKeyStore(dir, LightScryptN, LightScryptP)
|
||||
|
||||
list := ks.Accounts()
|
||||
if len(list) > 0 {
|
||||
t.Error("initial account list not empty:", list)
|
||||
}
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
|
||||
// Create the directory and copy a key file into it.
|
||||
os.MkdirAll(dir, 0700)
|
||||
defer os.RemoveAll(dir)
|
||||
file := filepath.Join(dir, "aaa")
|
||||
|
||||
// Place one of our testfiles in there
|
||||
if err := cp.CopyFile(file, cachetestAccounts[0].URL.Path); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// ks should see the account.
|
||||
wantAccounts := []accounts.Account{cachetestAccounts[0]}
|
||||
wantAccounts[0].URL = accounts.URL{Scheme: KeyStoreScheme, Path: file}
|
||||
if err := waitForAccounts(wantAccounts, ks); err != nil {
|
||||
t.Error(err)
|
||||
return
|
||||
}
|
||||
|
||||
// needed so that modTime of `file` is different to its current value after forceCopyFile
|
||||
time.Sleep(1000 * time.Millisecond)
|
||||
|
||||
// Now replace file contents
|
||||
if err := forceCopyFile(file, cachetestAccounts[1].URL.Path); err != nil {
|
||||
t.Fatal(err)
|
||||
return
|
||||
}
|
||||
wantAccounts = []accounts.Account{cachetestAccounts[1]}
|
||||
wantAccounts[0].URL = accounts.URL{Scheme: KeyStoreScheme, Path: file}
|
||||
if err := waitForAccounts(wantAccounts, ks); err != nil {
|
||||
t.Errorf("First replacement failed")
|
||||
t.Error(err)
|
||||
return
|
||||
}
|
||||
|
||||
// needed so that modTime of `file` is different to its current value after forceCopyFile
|
||||
time.Sleep(1000 * time.Millisecond)
|
||||
|
||||
// Now replace file contents again
|
||||
if err := forceCopyFile(file, cachetestAccounts[2].URL.Path); err != nil {
|
||||
t.Fatal(err)
|
||||
return
|
||||
}
|
||||
wantAccounts = []accounts.Account{cachetestAccounts[2]}
|
||||
wantAccounts[0].URL = accounts.URL{Scheme: KeyStoreScheme, Path: file}
|
||||
if err := waitForAccounts(wantAccounts, ks); err != nil {
|
||||
t.Errorf("Second replacement failed")
|
||||
t.Error(err)
|
||||
return
|
||||
}
|
||||
|
||||
// needed so that modTime of `file` is different to its current value after ioutil.WriteFile
|
||||
time.Sleep(1000 * time.Millisecond)
|
||||
|
||||
// Now replace file contents with crap
|
||||
if err := ioutil.WriteFile(file, []byte("foo"), 0644); err != nil {
|
||||
t.Fatal(err)
|
||||
return
|
||||
}
|
||||
if err := waitForAccounts([]accounts.Account{}, ks); err != nil {
|
||||
t.Errorf("Emptying account file failed")
|
||||
t.Error(err)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// forceCopyFile is like cp.CopyFile, but doesn't complain if the destination exists.
|
||||
func forceCopyFile(dst, src string) error {
|
||||
data, err := ioutil.ReadFile(src)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return ioutil.WriteFile(dst, data, 0644)
|
||||
}
|
||||
|
102
accounts/keystore/file_cache.go
Normal file
@ -0,0 +1,102 @@
|
||||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package keystore
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
set "gopkg.in/fatih/set.v0"
|
||||
)
|
||||
|
||||
// fileCache is a cache of files seen during a scan of the keystore.
|
||||
type fileCache struct {
|
||||
all *set.SetNonTS // Set of all files from the keystore folder
|
||||
lastMod time.Time // Last time instance when a file was modified
|
||||
mu sync.RWMutex
|
||||
}
|
||||
|
||||
// scan performs a new scan on the given directory, compares against the already
|
||||
// cached filenames, and returns file sets: creates, deletes, updates.
|
||||
func (fc *fileCache) scan(keyDir string) (set.Interface, set.Interface, set.Interface, error) {
|
||||
t0 := time.Now()
|
||||
|
||||
// List all the files from the keystore folder
|
||||
files, err := ioutil.ReadDir(keyDir)
|
||||
if err != nil {
|
||||
return nil, nil, nil, err
|
||||
}
|
||||
t1 := time.Now()
|
||||
|
||||
fc.mu.Lock()
|
||||
defer fc.mu.Unlock()
|
||||
|
||||
// Iterate all the files and gather their metadata
|
||||
all := set.NewNonTS()
|
||||
mods := set.NewNonTS()
|
||||
|
||||
var newLastMod time.Time
|
||||
for _, fi := range files {
|
||||
// Skip any non-key files from the folder
|
||||
path := filepath.Join(keyDir, fi.Name())
|
||||
if skipKeyFile(fi) {
|
||||
log.Trace("Ignoring file on account scan", "path", path)
|
||||
continue
|
||||
}
|
||||
// Gather the set of all and freshly modified files
|
||||
all.Add(path)
|
||||
|
||||
modified := fi.ModTime()
|
||||
if modified.After(fc.lastMod) {
|
||||
mods.Add(path)
|
||||
}
|
||||
if modified.After(newLastMod) {
|
||||
newLastMod = modified
|
||||
}
|
||||
}
|
||||
t2 := time.Now()
|
||||
|
||||
// Update the tracked files and return the three sets
|
||||
deletes := set.Difference(fc.all, all) // Deletes = previous - current
|
||||
creates := set.Difference(all, fc.all) // Creates = current - previous
|
||||
updates := set.Difference(mods, creates) // Updates = modified - creates
|
||||
|
||||
fc.all, fc.lastMod = all, newLastMod
|
||||
t3 := time.Now()
|
||||
|
||||
// Report on the scanning stats and return
|
||||
log.Debug("FS scan times", "list", t1.Sub(t0), "set", t2.Sub(t1), "diff", t3.Sub(t2))
|
||||
return creates, deletes, updates, nil
|
||||
}
|
||||
|
||||
// skipKeyFile ignores editor backups, hidden files and folders/symlinks.
|
||||
func skipKeyFile(fi os.FileInfo) bool {
|
||||
// Skip editor backups and UNIX-style hidden files.
|
||||
if strings.HasSuffix(fi.Name(), "~") || strings.HasPrefix(fi.Name(), ".") {
|
||||
return true
|
||||
}
|
||||
// Skip misc special files, directories (yes, symlinks too).
|
||||
if fi.IsDir() || fi.Mode()&os.ModeType != 0 {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
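A compressed usage sketch of the cache above, mirroring what scanAccounts in account_cache.go does with the three returned sets (the keystore path is a placeholder):

// Sketch: run one scan and react to the create/delete/update sets.
fc := &fileCache{all: set.NewNonTS()}
creates, deletes, updates, err := fc.scan("/path/to/keystore")
if err != nil {
	log.Debug("Keystore scan failed", "err", err)
	return
}
for _, p := range creates.List() {
	// parse the new key file at p.(string) and add the account to the cache
}
for _, p := range deletes.List() {
	// drop the cached account backed by the file p.(string)
}
for _, p := range updates.List() {
	// re-read the key file at p.(string) and replace the cached entry
}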
|
@ -91,14 +91,6 @@ type cipherparamsJSON struct {
|
||||
IV string `json:"iv"`
|
||||
}
|
||||
|
||||
type scryptParamsJSON struct {
|
||||
N int `json:"n"`
|
||||
R int `json:"r"`
|
||||
P int `json:"p"`
|
||||
DkLen int `json:"dklen"`
|
||||
Salt string `json:"salt"`
|
||||
}
|
||||
|
||||
func (k *Key) MarshalJSON() (j []byte, err error) {
|
||||
jStruct := plainKeyJSON{
|
||||
hex.EncodeToString(k.Address[:]),
|
||||
|
@ -143,14 +143,14 @@ func (ks *KeyStore) refreshWallets() {
|
||||
for _, account := range accs {
|
||||
// Drop wallets while they were in front of the next account
|
||||
for len(ks.wallets) > 0 && ks.wallets[0].URL().Cmp(account.URL) < 0 {
|
||||
events = append(events, accounts.WalletEvent{Wallet: ks.wallets[0], Arrive: false})
|
||||
events = append(events, accounts.WalletEvent{Wallet: ks.wallets[0], Kind: accounts.WalletDropped})
|
||||
ks.wallets = ks.wallets[1:]
|
||||
}
|
||||
// If there are no more wallets or the account is before the next, wrap new wallet
|
||||
if len(ks.wallets) == 0 || ks.wallets[0].URL().Cmp(account.URL) > 0 {
|
||||
wallet := &keystoreWallet{account: account, keystore: ks}
|
||||
|
||||
events = append(events, accounts.WalletEvent{Wallet: wallet, Arrive: true})
|
||||
events = append(events, accounts.WalletEvent{Wallet: wallet, Kind: accounts.WalletArrived})
|
||||
wallets = append(wallets, wallet)
|
||||
continue
|
||||
}
|
||||
@ -163,7 +163,7 @@ func (ks *KeyStore) refreshWallets() {
|
||||
}
|
||||
// Drop any leftover wallets and set the new batch
|
||||
for _, wallet := range ks.wallets {
|
||||
events = append(events, accounts.WalletEvent{Wallet: wallet, Arrive: false})
|
||||
events = append(events, accounts.WalletEvent{Wallet: wallet, Kind: accounts.WalletDropped})
|
||||
}
|
||||
ks.wallets = wallets
|
||||
ks.mu.Unlock()
|
||||
|
@ -28,6 +28,7 @@ package keystore
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/aes"
|
||||
crand "crypto/rand"
|
||||
"crypto/sha256"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
@ -90,6 +91,12 @@ func (ks keyStorePassphrase) GetKey(addr common.Address, filename, auth string)
|
||||
return key, nil
|
||||
}
|
||||
|
||||
// StoreKey generates a key, encrypts with 'auth' and stores in the given directory
|
||||
func StoreKey(dir, auth string, scryptN, scryptP int) (common.Address, error) {
|
||||
_, a, err := storeNewKey(&keyStorePassphrase{dir, scryptN, scryptP}, crand.Reader, auth)
|
||||
return a.Address, err
|
||||
}
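A caller-side sketch of the new convenience function; the directory and passphrase are placeholders, and the light scrypt parameters are the same ones the keystore tests in this change use:

// Sketch: generate a fresh key, encrypt it with the passphrase and write it to disk.
addr, err := StoreKey("/path/to/keystore", "my-passphrase", LightScryptN, LightScryptP)
if err != nil {
	log.Crit("Failed to store new key", "err", err)
}
log.Info("New key stored", "address", addr.Hex())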
|
||||
|
||||
func (ks keyStorePassphrase) StoreKey(filename string, key *Key, auth string) error {
|
||||
keyjson, err := EncryptKey(key, auth, ks.scryptN, ks.scryptP)
|
||||
if err != nil {
|
||||
@ -140,7 +147,7 @@ func EncryptKey(key *Key, auth string, scryptN, scryptP int) ([]byte, error) {
|
||||
Cipher: "aes-128-ctr",
|
||||
CipherText: hex.EncodeToString(cipherText),
|
||||
CipherParams: cipherParamsJSON,
|
||||
KDF: "scrypt",
|
||||
KDF: keyHeaderKDF,
|
||||
KDFParams: scryptParamsJSON,
|
||||
MAC: hex.EncodeToString(mac),
|
||||
}
|
||||
@ -275,7 +282,7 @@ func getKDFKey(cryptoJSON cryptoJSON, auth string) ([]byte, error) {
|
||||
}
|
||||
dkLen := ensureInt(cryptoJSON.KDFParams["dklen"])
|
||||
|
||||
if cryptoJSON.KDF == "scrypt" {
|
||||
if cryptoJSON.KDF == keyHeaderKDF {
|
||||
n := ensureInt(cryptoJSON.KDFParams["n"])
|
||||
r := ensureInt(cryptoJSON.KDFParams["r"])
|
||||
p := ensureInt(cryptoJSON.KDFParams["p"])
|
||||
|
@ -272,82 +272,104 @@ func TestWalletNotifierLifecycle(t *testing.T) {
|
||||
t.Errorf("wallet notifier didn't terminate after unsubscribe")
|
||||
}
|
||||
|
||||
type walletEvent struct {
|
||||
accounts.WalletEvent
|
||||
a accounts.Account
|
||||
}
|
||||
|
||||
// Tests that wallet notifications are correctly fired when accounts are added
|
||||
// or deleted from the keystore.
|
||||
func TestWalletNotifications(t *testing.T) {
|
||||
// Create a temporary keystore to test with
|
||||
dir, ks := tmpKeyStore(t, false)
|
||||
defer os.RemoveAll(dir)
|
||||
|
||||
// Subscribe to the wallet feed
|
||||
updates := make(chan accounts.WalletEvent, 1)
|
||||
sub := ks.Subscribe(updates)
|
||||
// Subscribe to the wallet feed and collect events.
|
||||
var (
|
||||
events []walletEvent
|
||||
updates = make(chan accounts.WalletEvent)
|
||||
sub = ks.Subscribe(updates)
|
||||
)
|
||||
defer sub.Unsubscribe()
|
||||
go func() {
|
||||
for {
|
||||
select {
|
||||
case ev := <-updates:
|
||||
events = append(events, walletEvent{ev, ev.Wallet.Accounts()[0]})
|
||||
case <-sub.Err():
|
||||
close(updates)
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
// Randomly add and remove account and make sure events and wallets are in sync
|
||||
live := make(map[common.Address]accounts.Account)
|
||||
// Randomly add and remove accounts.
|
||||
var (
|
||||
live = make(map[common.Address]accounts.Account)
|
||||
wantEvents []walletEvent
|
||||
)
|
||||
for i := 0; i < 1024; i++ {
|
||||
// Execute a creation or deletion and ensure event arrival
|
||||
if create := len(live) == 0 || rand.Int()%4 > 0; create {
|
||||
// Add a new account and ensure wallet notifications arrives
|
||||
account, err := ks.NewAccount("")
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create test account: %v", err)
|
||||
}
|
||||
select {
|
||||
case event := <-updates:
|
||||
if !event.Arrive {
|
||||
t.Errorf("departure event on account creation")
|
||||
}
|
||||
if event.Wallet.Accounts()[0] != account {
|
||||
t.Errorf("account mismatch on created wallet: have %v, want %v", event.Wallet.Accounts()[0], account)
|
||||
}
|
||||
default:
|
||||
t.Errorf("wallet arrival event not fired on account creation")
|
||||
}
|
||||
live[account.Address] = account
|
||||
wantEvents = append(wantEvents, walletEvent{accounts.WalletEvent{Kind: accounts.WalletArrived}, account})
|
||||
} else {
|
||||
// Select a random account to delete (crude, but works)
|
||||
// Delete a random account.
|
||||
var account accounts.Account
|
||||
for _, a := range live {
|
||||
account = a
|
||||
break
|
||||
}
|
||||
// Remove an account and ensure wallet notification arrives
|
||||
if err := ks.Delete(account, ""); err != nil {
|
||||
t.Fatalf("failed to delete test account: %v", err)
|
||||
}
|
||||
select {
|
||||
case event := <-updates:
|
||||
if event.Arrive {
|
||||
t.Errorf("arrival event on account deletion")
|
||||
}
|
||||
if event.Wallet.Accounts()[0] != account {
|
||||
t.Errorf("account mismatch on deleted wallet: have %v, want %v", event.Wallet.Accounts()[0], account)
|
||||
}
|
||||
default:
|
||||
t.Errorf("wallet departure event not fired on account creation")
|
||||
}
|
||||
delete(live, account.Address)
|
||||
wantEvents = append(wantEvents, walletEvent{accounts.WalletEvent{Kind: accounts.WalletDropped}, account})
|
||||
}
|
||||
// Retrieve the list of wallets and ensure it matches with our required live set
|
||||
liveList := make([]accounts.Account, 0, len(live))
|
||||
for _, account := range live {
|
||||
liveList = append(liveList, account)
|
||||
}
|
||||
sort.Sort(accountsByURL(liveList))
|
||||
}
|
||||
|
||||
wallets := ks.Wallets()
|
||||
if len(liveList) != len(wallets) {
|
||||
t.Errorf("wallet list doesn't match required accounts: have %v, want %v", wallets, liveList)
|
||||
} else {
|
||||
for j, wallet := range wallets {
|
||||
if accs := wallet.Accounts(); len(accs) != 1 {
|
||||
t.Errorf("wallet %d: contains invalid number of accounts: have %d, want 1", j, len(accs))
|
||||
} else if accs[0] != liveList[j] {
|
||||
t.Errorf("wallet %d: account mismatch: have %v, want %v", j, accs[0], liveList[j])
|
||||
}
|
||||
// Shut down the event collector and check events.
|
||||
sub.Unsubscribe()
|
||||
<-updates
|
||||
checkAccounts(t, live, ks.Wallets())
|
||||
checkEvents(t, wantEvents, events)
|
||||
}
|
||||
|
||||
// checkAccounts checks that all known live accounts are present in the wallet list.
|
||||
func checkAccounts(t *testing.T, live map[common.Address]accounts.Account, wallets []accounts.Wallet) {
|
||||
if len(live) != len(wallets) {
|
||||
t.Errorf("wallet list doesn't match required accounts: have %d, want %d", len(wallets), len(live))
|
||||
return
|
||||
}
|
||||
liveList := make([]accounts.Account, 0, len(live))
|
||||
for _, account := range live {
|
||||
liveList = append(liveList, account)
|
||||
}
|
||||
sort.Sort(accountsByURL(liveList))
|
||||
for j, wallet := range wallets {
|
||||
if accs := wallet.Accounts(); len(accs) != 1 {
|
||||
t.Errorf("wallet %d: contains invalid number of accounts: have %d, want 1", j, len(accs))
|
||||
} else if accs[0] != liveList[j] {
|
||||
t.Errorf("wallet %d: account mismatch: have %v, want %v", j, accs[0], liveList[j])
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// checkEvents checks that all events in 'want' are present in 'have'. Events may be present multiple times.
|
||||
func checkEvents(t *testing.T, want []walletEvent, have []walletEvent) {
|
||||
for _, wantEv := range want {
|
||||
nmatch := 0
|
||||
for ; len(have) > 0; nmatch++ {
|
||||
if have[0].Kind != wantEv.Kind || have[0].a != wantEv.a {
|
||||
break
|
||||
}
|
||||
have = have[1:]
|
||||
}
|
||||
if nmatch == 0 {
|
||||
t.Fatalf("can't find event with Kind=%v for %x", wantEv.Kind, wantEv.a.Address)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -36,16 +36,16 @@ func (w *keystoreWallet) URL() accounts.URL {
|
||||
return w.account.URL
|
||||
}
|
||||
|
||||
// Status implements accounts.Wallet, always returning "open", since there is no
|
||||
// concept of open/close for plain keystore accounts.
|
||||
func (w *keystoreWallet) Status() string {
|
||||
// Status implements accounts.Wallet, returning whether the account held by the
|
||||
// keystore wallet is unlocked or not.
|
||||
func (w *keystoreWallet) Status() (string, error) {
|
||||
w.keystore.mu.RLock()
|
||||
defer w.keystore.mu.RUnlock()
|
||||
|
||||
if _, ok := w.keystore.unlocked[w.account.Address]; ok {
|
||||
return "Unlocked"
|
||||
return "Unlocked", nil
|
||||
}
|
||||
return "Locked"
|
||||
return "Locked", nil
|
||||
}
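A caller-side sketch of the new signature (hypothetical helper, not part of this diff): the extra error return lets hardware backends report communication failures, while plain keystore wallets always return a nil error.

func isUnlocked(w accounts.Wallet) bool {
	status, err := w.Status()
	if err != nil {
		return false // e.g. a USB wallet that failed to respond
	}
	return status == "Unlocked"
}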
|
||||
|
||||
// Open implements accounts.Wallet, but is a noop for plain wallets since there
|
||||
|
@ -58,6 +58,9 @@ func decryptPreSaleKey(fileContent []byte, password string) (key *Key, err error
|
||||
if err != nil {
|
||||
return nil, errors.New("invalid hex in encSeed")
|
||||
}
|
||||
if len(encSeedBytes) < 16 {
|
||||
return nil, errors.New("invalid encSeed, too short")
|
||||
}
|
||||
iv := encSeedBytes[:16]
|
||||
cipherText := encSeedBytes[16:]
|
||||
/*
|
||||
|
@ -70,7 +70,6 @@ func (w *watcher) loop() {
|
||||
return
|
||||
}
|
||||
defer notify.Stop(w.ev)
|
||||
|
||||
logger.Trace("Started watching keystore folder")
|
||||
defer logger.Trace("Stopped watching keystore folder")
|
||||
|
||||
@ -82,32 +81,28 @@ func (w *watcher) loop() {
|
||||
// When an event occurs, the reload call is delayed a bit so that
|
||||
// multiple events arriving quickly only cause a single reload.
|
||||
var (
|
||||
debounce = time.NewTimer(0)
|
||||
debounceDuration = 500 * time.Millisecond
|
||||
inCycle, hadEvent bool
|
||||
debounceDuration = 500 * time.Millisecond
|
||||
rescanTriggered = false
|
||||
debounce = time.NewTimer(0)
|
||||
)
|
||||
// Ignore initial trigger
|
||||
if !debounce.Stop() {
|
||||
<-debounce.C
|
||||
}
|
||||
defer debounce.Stop()
|
||||
for {
|
||||
select {
|
||||
case <-w.quit:
|
||||
return
|
||||
case <-w.ev:
|
||||
if !inCycle {
|
||||
// Trigger the scan (with delay), if not already triggered
|
||||
if !rescanTriggered {
|
||||
debounce.Reset(debounceDuration)
|
||||
inCycle = true
|
||||
} else {
|
||||
hadEvent = true
|
||||
rescanTriggered = true
|
||||
}
|
||||
case <-debounce.C:
|
||||
w.ac.mu.Lock()
|
||||
w.ac.reload()
|
||||
w.ac.mu.Unlock()
|
||||
if hadEvent {
|
||||
debounce.Reset(debounceDuration)
|
||||
inCycle, hadEvent = true, false
|
||||
} else {
|
||||
inCycle, hadEvent = false, false
|
||||
}
|
||||
w.ac.scanAccounts()
|
||||
rescanTriggered = false
|
||||
}
|
||||
}
|
||||
}
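The stop-and-drain idiom at the top of the loop is easy to get wrong; a standalone sketch of the same pattern (hypothetical helper name), usable wherever a timer must start disarmed:

func newDisarmedTimer() *time.Timer {
	t := time.NewTimer(0) // fires immediately...
	if !t.Stop() {
		<-t.C // ...so drain the pending tick before the timer is first Reset
	}
	return t
}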
|
||||
|
@ -41,6 +41,11 @@ type Manager struct {
|
||||
// NewManager creates a generic account manager to sign transactions via various
|
||||
// supported backends.
|
||||
func NewManager(backends ...Backend) *Manager {
|
||||
// Retrieve the initial list of wallets from the backends and sort by URL
|
||||
var wallets []Wallet
|
||||
for _, backend := range backends {
|
||||
wallets = merge(wallets, backend.Wallets()...)
|
||||
}
|
||||
// Subscribe to wallet notifications from all backends
|
||||
updates := make(chan WalletEvent, 4*len(backends))
|
||||
|
||||
@ -48,11 +53,6 @@ func NewManager(backends ...Backend) *Manager {
|
||||
for i, backend := range backends {
|
||||
subs[i] = backend.Subscribe(updates)
|
||||
}
|
||||
// Retrieve the initial list of wallets from the backends and sort by URL
|
||||
var wallets []Wallet
|
||||
for _, backend := range backends {
|
||||
wallets = merge(wallets, backend.Wallets()...)
|
||||
}
|
||||
// Assemble the account manager and return
|
||||
am := &Manager{
|
||||
backends: make(map[reflect.Type][]Backend),
|
||||
@ -96,9 +96,10 @@ func (am *Manager) update() {
|
||||
case event := <-am.updates:
|
||||
// Wallet event arrived, update local cache
|
||||
am.lock.Lock()
|
||||
if event.Arrive {
|
||||
switch event.Kind {
|
||||
case WalletArrived:
|
||||
am.wallets = merge(am.wallets, event.Wallet)
|
||||
} else {
|
||||
case WalletDropped:
|
||||
am.wallets = drop(am.wallets, event.Wallet)
|
||||
}
|
||||
am.lock.Unlock()
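A caller-side wiring sketch under stated assumptions (import paths github.com/ethereum/go-ethereum/accounts, accounts/keystore and accounts/usbwallet plus fmt; the key directory is made up), showing how the new Kind field is consumed:

func watchWallets() {
	ks := keystore.NewKeyStore("/tmp/keystore", keystore.StandardScryptN, keystore.StandardScryptP)
	backends := []accounts.Backend{ks}
	if hub, err := usbwallet.NewLedgerHub(); err == nil { // skipped on unsupported platforms
		backends = append(backends, hub)
	}
	manager := accounts.NewManager(backends...)

	updates := make(chan accounts.WalletEvent, 16)
	sub := manager.Subscribe(updates)
	defer sub.Unsubscribe()

	for event := range updates {
		switch event.Kind {
		case accounts.WalletArrived:
			fmt.Println("wallet arrived:", event.Wallet.URL())
		case accounts.WalletDropped:
			fmt.Println("wallet dropped:", event.Wallet.URL())
		}
	}
}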
|
||||
|
@ -14,10 +14,6 @@
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
// This file contains the implementation for interacting with the Ledger hardware
|
||||
// wallets. The wire protocol spec can be found in the Ledger Blue GitHub repo:
|
||||
// https://raw.githubusercontent.com/LedgerHQ/blue-app-eth/master/doc/ethapp.asc
|
||||
|
||||
package usbwallet
|
||||
|
||||
import (
|
||||
@ -33,24 +29,30 @@ import (
|
||||
)
|
||||
|
||||
// LedgerScheme is the protocol scheme prefixing account and wallet URLs.
|
||||
var LedgerScheme = "ledger"
|
||||
const LedgerScheme = "ledger"
|
||||
|
||||
// ledgerDeviceIDs are the known device IDs that Ledger wallets use.
|
||||
var ledgerDeviceIDs = []deviceID{
|
||||
{Vendor: 0x2c97, Product: 0x0000}, // Ledger Blue
|
||||
{Vendor: 0x2c97, Product: 0x0001}, // Ledger Nano S
|
||||
}
|
||||
// TrezorScheme is the protocol scheme prefixing account and wallet URLs.
|
||||
const TrezorScheme = "trezor"
|
||||
|
||||
// Maximum time between wallet refreshes (if USB hotplug notifications don't work).
|
||||
const ledgerRefreshCycle = time.Second
|
||||
// refreshCycle is the maximum time between wallet refreshes (if USB hotplug
|
||||
// notifications don't work).
|
||||
const refreshCycle = time.Second
|
||||
|
||||
// Minimum time between wallet refreshes to avoid USB thrashing.
|
||||
const ledgerRefreshThrottling = 500 * time.Millisecond
|
||||
// refreshThrottling is the minimum time between wallet refreshes to avoid USB
|
||||
// thrashing.
|
||||
const refreshThrottling = 500 * time.Millisecond
|
||||
|
||||
// Hub is an accounts.Backend that can find and handle generic USB hardware wallets.
|
||||
type Hub struct {
|
||||
scheme string // Protocol scheme prefixing account and wallet URLs.
|
||||
vendorID uint16 // USB vendor identifier used for device discovery
|
||||
productIDs []uint16 // USB product identifiers used for device discovery
|
||||
usageID uint16 // USB usage page identifier used for macOS device discovery
|
||||
endpointID int // USB endpoint identifier used for non-macOS device discovery
|
||||
makeDriver func(log.Logger) driver // Factory method to construct a vendor specific driver
|
||||
|
||||
// LedgerHub is an accounts.Backend that can find and handle Ledger hardware wallets.
|
||||
type LedgerHub struct {
|
||||
refreshed time.Time // Time instance when the list of wallets was last refreshed
|
||||
wallets []accounts.Wallet // List of Ledger devices currently tracking
|
||||
wallets []accounts.Wallet // List of USB wallet devices currently tracking
|
||||
updateFeed event.Feed // Event feed to notify wallet additions/removals
|
||||
updateScope event.SubscriptionScope // Subscription scope tracking current live listeners
|
||||
updating bool // Whether the event notification loop is running
|
||||
@ -65,20 +67,36 @@ type LedgerHub struct {
|
||||
}
|
||||
|
||||
// NewLedgerHub creates a new hardware wallet manager for Ledger devices.
|
||||
func NewLedgerHub() (*LedgerHub, error) {
|
||||
func NewLedgerHub() (*Hub, error) {
|
||||
return newHub(LedgerScheme, 0x2c97, []uint16{0x0000 /* Ledger Blue */, 0x0001 /* Ledger Nano S */}, 0xffa0, 0, newLedgerDriver)
|
||||
}
|
||||
|
||||
// NewTrezorHub creates a new hardware wallet manager for Trezor devices.
|
||||
func NewTrezorHub() (*Hub, error) {
|
||||
return newHub(TrezorScheme, 0x534c, []uint16{0x0001 /* Trezor 1 */}, 0xff00, 0, newTrezorDriver)
|
||||
}
|
||||
|
||||
// newHub creates a new hardware wallet manager for generic USB devices.
|
||||
func newHub(scheme string, vendorID uint16, productIDs []uint16, usageID uint16, endpointID int, makeDriver func(log.Logger) driver) (*Hub, error) {
|
||||
if !hid.Supported() {
|
||||
return nil, errors.New("unsupported platform")
|
||||
}
|
||||
hub := &LedgerHub{
|
||||
quit: make(chan chan error),
|
||||
hub := &Hub{
|
||||
scheme: scheme,
|
||||
vendorID: vendorID,
|
||||
productIDs: productIDs,
|
||||
usageID: usageID,
|
||||
endpointID: endpointID,
|
||||
makeDriver: makeDriver,
|
||||
quit: make(chan chan error),
|
||||
}
|
||||
hub.refreshWallets()
|
||||
return hub, nil
|
||||
}
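Adding support for another vendor would follow the same pattern; a purely hypothetical example (the scheme, IDs and driver choice below are made up for illustration):

// NewExampleHub shows how a further vendor would plug into the generic hub.
func NewExampleHub() (*Hub, error) {
	return newHub("example", 0x1234, []uint16{0x0001}, 0xff00, 0, newLedgerDriver)
}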
|
||||
|
||||
// Wallets implements accounts.Backend, returning all the currently tracked USB
|
||||
// devices that appear to be Ledger hardware wallets.
|
||||
func (hub *LedgerHub) Wallets() []accounts.Wallet {
|
||||
// devices that appear to be hardware wallets.
|
||||
func (hub *Hub) Wallets() []accounts.Wallet {
|
||||
// Make sure the list of wallets is up to date
|
||||
hub.refreshWallets()
|
||||
|
||||
@ -92,17 +110,17 @@ func (hub *LedgerHub) Wallets() []accounts.Wallet {
|
||||
|
||||
// refreshWallets scans the USB devices attached to the machine and updates the
|
||||
// list of wallets based on the found devices.
|
||||
func (hub *LedgerHub) refreshWallets() {
|
||||
func (hub *Hub) refreshWallets() {
|
||||
// Don't scan the USB like crazy if the user fetches wallets in a loop
|
||||
hub.stateLock.RLock()
|
||||
elapsed := time.Since(hub.refreshed)
|
||||
hub.stateLock.RUnlock()
|
||||
|
||||
if elapsed < ledgerRefreshThrottling {
|
||||
if elapsed < refreshThrottling {
|
||||
return
|
||||
}
|
||||
// Retrieve the current list of Ledger devices
|
||||
var ledgers []hid.DeviceInfo
|
||||
// Retrieve the current list of USB wallet devices
|
||||
var devices []hid.DeviceInfo
|
||||
|
||||
if runtime.GOOS == "linux" {
|
||||
// hidapi on Linux opens the device during enumeration to retrieve some infos,
|
||||
@ -117,10 +135,10 @@ func (hub *LedgerHub) refreshWallets() {
|
||||
return
|
||||
}
|
||||
}
|
||||
for _, info := range hid.Enumerate(0, 0) { // Can't enumerate directly, one valid ID is the 0 wildcard
|
||||
for _, id := range ledgerDeviceIDs {
|
||||
if info.VendorID == id.Vendor && info.ProductID == id.Product {
|
||||
ledgers = append(ledgers, info)
|
||||
for _, info := range hid.Enumerate(hub.vendorID, 0) {
|
||||
for _, id := range hub.productIDs {
|
||||
if info.ProductID == id && (info.UsagePage == hub.usageID || info.Interface == hub.endpointID) {
|
||||
devices = append(devices, info)
|
||||
break
|
||||
}
|
||||
}
|
||||
@ -132,22 +150,29 @@ func (hub *LedgerHub) refreshWallets() {
|
||||
// Transform the current list of wallets into the new one
|
||||
hub.stateLock.Lock()
|
||||
|
||||
wallets := make([]accounts.Wallet, 0, len(ledgers))
|
||||
wallets := make([]accounts.Wallet, 0, len(devices))
|
||||
events := []accounts.WalletEvent{}
|
||||
|
||||
for _, ledger := range ledgers {
|
||||
url := accounts.URL{Scheme: LedgerScheme, Path: ledger.Path}
|
||||
for _, device := range devices {
|
||||
url := accounts.URL{Scheme: hub.scheme, Path: device.Path}
|
||||
|
||||
// Drop wallets in front of the next device or those that failed for some reason
|
||||
for len(hub.wallets) > 0 && (hub.wallets[0].URL().Cmp(url) < 0 || hub.wallets[0].(*ledgerWallet).failed()) {
|
||||
events = append(events, accounts.WalletEvent{Wallet: hub.wallets[0], Arrive: false})
|
||||
for len(hub.wallets) > 0 {
|
||||
// Abort if we're past the current device and found an operational one
|
||||
_, failure := hub.wallets[0].Status()
|
||||
if hub.wallets[0].URL().Cmp(url) >= 0 || failure == nil {
|
||||
break
|
||||
}
|
||||
// Drop the stale and failed devices
|
||||
events = append(events, accounts.WalletEvent{Wallet: hub.wallets[0], Kind: accounts.WalletDropped})
|
||||
hub.wallets = hub.wallets[1:]
|
||||
}
|
||||
// If there are no more wallets or the device is before the next, wrap new wallet
|
||||
if len(hub.wallets) == 0 || hub.wallets[0].URL().Cmp(url) > 0 {
|
||||
wallet := &ledgerWallet{hub: hub, url: &url, info: ledger, log: log.New("url", url)}
|
||||
logger := log.New("url", url)
|
||||
wallet := &wallet{hub: hub, driver: hub.makeDriver(logger), url: &url, info: device, log: logger}
|
||||
|
||||
events = append(events, accounts.WalletEvent{Wallet: wallet, Arrive: true})
|
||||
events = append(events, accounts.WalletEvent{Wallet: wallet, Kind: accounts.WalletArrived})
|
||||
wallets = append(wallets, wallet)
|
||||
continue
|
||||
}
|
||||
@ -160,7 +185,7 @@ func (hub *LedgerHub) refreshWallets() {
|
||||
}
|
||||
// Drop any leftover wallets and set the new batch
|
||||
for _, wallet := range hub.wallets {
|
||||
events = append(events, accounts.WalletEvent{Wallet: wallet, Arrive: false})
|
||||
events = append(events, accounts.WalletEvent{Wallet: wallet, Kind: accounts.WalletDropped})
|
||||
}
|
||||
hub.refreshed = time.Now()
|
||||
hub.wallets = wallets
|
||||
@ -173,8 +198,8 @@ func (hub *LedgerHub) refreshWallets() {
|
||||
}
|
||||
|
||||
// Subscribe implements accounts.Backend, creating an async subscription to
|
||||
// receive notifications on the addition or removal of Ledger wallets.
|
||||
func (hub *LedgerHub) Subscribe(sink chan<- accounts.WalletEvent) event.Subscription {
|
||||
// receive notifications on the addition or removal of USB wallets.
|
||||
func (hub *Hub) Subscribe(sink chan<- accounts.WalletEvent) event.Subscription {
|
||||
// We need the mutex to reliably start/stop the update loop
|
||||
hub.stateLock.Lock()
|
||||
defer hub.stateLock.Unlock()
|
||||
@ -190,18 +215,14 @@ func (hub *LedgerHub) Subscribe(sink chan<- accounts.WalletEvent) event.Subscrip
|
||||
return sub
|
||||
}
|
||||
|
||||
// updater is responsible for maintaining an up-to-date list of wallets stored in
|
||||
// the keystore, and for firing wallet addition/removal events. It listens for
|
||||
// account change events from the underlying account cache, and also periodically
|
||||
// forces a manual refresh (only triggers for systems where the filesystem notifier
|
||||
// is not running).
|
||||
func (hub *LedgerHub) updater() {
|
||||
// updater is responsible for maintaining an up-to-date list of wallets managed
|
||||
// by the USB hub, and for firing wallet addition/removal events.
|
||||
func (hub *Hub) updater() {
|
||||
for {
|
||||
// Wait for a USB hotplug event (not supported yet) or a refresh timeout
|
||||
select {
|
||||
//case <-hub.changes: // re-enable on hotplug implementation
|
||||
case <-time.After(ledgerRefreshCycle):
|
||||
}
|
||||
// TODO: Wait for a USB hotplug event (not supported yet) or a refresh timeout
|
||||
// <-hub.changes
|
||||
time.Sleep(refreshCycle)
|
||||
|
||||
// Run the wallet refresher
|
||||
hub.refreshWallets()
|
||||
|
3081 accounts/usbwallet/internal/trezor/messages.pb.go (Normal file)
File diff suppressed because it is too large
905 accounts/usbwallet/internal/trezor/messages.proto (Normal file)
@ -0,0 +1,905 @@
|
||||
// This file originates from the SatoshiLabs Trezor `common` repository at:
|
||||
// https://github.com/trezor/trezor-common/blob/master/protob/messages.proto
|
||||
// dated 28.07.2017, commit dd8ec3231fb5f7992360aff9bdfe30bb58130f4b.
|
||||
|
||||
syntax = "proto2";
|
||||
|
||||
/**
|
||||
* Messages for TREZOR communication
|
||||
*/
|
||||
|
||||
// Sugar for easier handling in Java
|
||||
option java_package = "com.satoshilabs.trezor.lib.protobuf";
|
||||
option java_outer_classname = "TrezorMessage";
|
||||
|
||||
import "types.proto";
|
||||
|
||||
/**
|
||||
* Mapping between Trezor wire identifier (uint) and a protobuf message
|
||||
*/
|
||||
enum MessageType {
|
||||
MessageType_Initialize = 0 [(wire_in) = true];
|
||||
MessageType_Ping = 1 [(wire_in) = true];
|
||||
MessageType_Success = 2 [(wire_out) = true];
|
||||
MessageType_Failure = 3 [(wire_out) = true];
|
||||
MessageType_ChangePin = 4 [(wire_in) = true];
|
||||
MessageType_WipeDevice = 5 [(wire_in) = true];
|
||||
MessageType_FirmwareErase = 6 [(wire_in) = true, (wire_bootloader) = true];
|
||||
MessageType_FirmwareUpload = 7 [(wire_in) = true, (wire_bootloader) = true];
|
||||
MessageType_FirmwareRequest = 8 [(wire_out) = true, (wire_bootloader) = true];
|
||||
MessageType_GetEntropy = 9 [(wire_in) = true];
|
||||
MessageType_Entropy = 10 [(wire_out) = true];
|
||||
MessageType_GetPublicKey = 11 [(wire_in) = true];
|
||||
MessageType_PublicKey = 12 [(wire_out) = true];
|
||||
MessageType_LoadDevice = 13 [(wire_in) = true];
|
||||
MessageType_ResetDevice = 14 [(wire_in) = true];
|
||||
MessageType_SignTx = 15 [(wire_in) = true];
|
||||
MessageType_SimpleSignTx = 16 [(wire_in) = true, deprecated = true];
|
||||
MessageType_Features = 17 [(wire_out) = true];
|
||||
MessageType_PinMatrixRequest = 18 [(wire_out) = true];
|
||||
MessageType_PinMatrixAck = 19 [(wire_in) = true, (wire_tiny) = true];
|
||||
MessageType_Cancel = 20 [(wire_in) = true];
|
||||
MessageType_TxRequest = 21 [(wire_out) = true];
|
||||
MessageType_TxAck = 22 [(wire_in) = true];
|
||||
MessageType_CipherKeyValue = 23 [(wire_in) = true];
|
||||
MessageType_ClearSession = 24 [(wire_in) = true];
|
||||
MessageType_ApplySettings = 25 [(wire_in) = true];
|
||||
MessageType_ButtonRequest = 26 [(wire_out) = true];
|
||||
MessageType_ButtonAck = 27 [(wire_in) = true, (wire_tiny) = true];
|
||||
MessageType_ApplyFlags = 28 [(wire_in) = true];
|
||||
MessageType_GetAddress = 29 [(wire_in) = true];
|
||||
MessageType_Address = 30 [(wire_out) = true];
|
||||
MessageType_SelfTest = 32 [(wire_in) = true, (wire_bootloader) = true];
|
||||
MessageType_BackupDevice = 34 [(wire_in) = true];
|
||||
MessageType_EntropyRequest = 35 [(wire_out) = true];
|
||||
MessageType_EntropyAck = 36 [(wire_in) = true];
|
||||
MessageType_SignMessage = 38 [(wire_in) = true];
|
||||
MessageType_VerifyMessage = 39 [(wire_in) = true];
|
||||
MessageType_MessageSignature = 40 [(wire_out) = true];
|
||||
MessageType_PassphraseRequest = 41 [(wire_out) = true];
|
||||
MessageType_PassphraseAck = 42 [(wire_in) = true, (wire_tiny) = true];
|
||||
MessageType_EstimateTxSize = 43 [(wire_in) = true, deprecated = true];
|
||||
MessageType_TxSize = 44 [(wire_out) = true, deprecated = true];
|
||||
MessageType_RecoveryDevice = 45 [(wire_in) = true];
|
||||
MessageType_WordRequest = 46 [(wire_out) = true];
|
||||
MessageType_WordAck = 47 [(wire_in) = true];
|
||||
MessageType_CipheredKeyValue = 48 [(wire_out) = true];
|
||||
MessageType_EncryptMessage = 49 [(wire_in) = true, deprecated = true];
|
||||
MessageType_EncryptedMessage = 50 [(wire_out) = true, deprecated = true];
|
||||
MessageType_DecryptMessage = 51 [(wire_in) = true, deprecated = true];
|
||||
MessageType_DecryptedMessage = 52 [(wire_out) = true, deprecated = true];
|
||||
MessageType_SignIdentity = 53 [(wire_in) = true];
|
||||
MessageType_SignedIdentity = 54 [(wire_out) = true];
|
||||
MessageType_GetFeatures = 55 [(wire_in) = true];
|
||||
MessageType_EthereumGetAddress = 56 [(wire_in) = true];
|
||||
MessageType_EthereumAddress = 57 [(wire_out) = true];
|
||||
MessageType_EthereumSignTx = 58 [(wire_in) = true];
|
||||
MessageType_EthereumTxRequest = 59 [(wire_out) = true];
|
||||
MessageType_EthereumTxAck = 60 [(wire_in) = true];
|
||||
MessageType_GetECDHSessionKey = 61 [(wire_in) = true];
|
||||
MessageType_ECDHSessionKey = 62 [(wire_out) = true];
|
||||
MessageType_SetU2FCounter = 63 [(wire_in) = true];
|
||||
MessageType_EthereumSignMessage = 64 [(wire_in) = true];
|
||||
MessageType_EthereumVerifyMessage = 65 [(wire_in) = true];
|
||||
MessageType_EthereumMessageSignature = 66 [(wire_out) = true];
|
||||
MessageType_DebugLinkDecision = 100 [(wire_debug_in) = true, (wire_tiny) = true];
|
||||
MessageType_DebugLinkGetState = 101 [(wire_debug_in) = true];
|
||||
MessageType_DebugLinkState = 102 [(wire_debug_out) = true];
|
||||
MessageType_DebugLinkStop = 103 [(wire_debug_in) = true];
|
||||
MessageType_DebugLinkLog = 104 [(wire_debug_out) = true];
|
||||
MessageType_DebugLinkMemoryRead = 110 [(wire_debug_in) = true];
|
||||
MessageType_DebugLinkMemory = 111 [(wire_debug_out) = true];
|
||||
MessageType_DebugLinkMemoryWrite = 112 [(wire_debug_in) = true];
|
||||
MessageType_DebugLinkFlashErase = 113 [(wire_debug_in) = true];
|
||||
}
|
||||
|
||||
////////////////////
|
||||
// Basic messages //
|
||||
////////////////////
|
||||
|
||||
/**
|
||||
* Request: Reset device to default state and ask for device details
|
||||
* @next Features
|
||||
*/
|
||||
message Initialize {
|
||||
}
|
||||
|
||||
/**
|
||||
* Request: Ask for device details (no device reset)
|
||||
* @next Features
|
||||
*/
|
||||
message GetFeatures {
|
||||
}
|
||||
|
||||
/**
|
||||
* Response: Reports various information about the device
|
||||
* @prev Initialize
|
||||
* @prev GetFeatures
|
||||
*/
|
||||
message Features {
|
||||
optional string vendor = 1; // name of the manufacturer, e.g. "bitcointrezor.com"
|
||||
optional uint32 major_version = 2; // major version of the device, e.g. 1
|
||||
optional uint32 minor_version = 3; // minor version of the device, e.g. 0
|
||||
optional uint32 patch_version = 4; // patch version of the device, e.g. 0
|
||||
optional bool bootloader_mode = 5; // is device in bootloader mode?
|
||||
optional string device_id = 6; // device's unique identifier
|
||||
optional bool pin_protection = 7; // is device protected by PIN?
|
||||
optional bool passphrase_protection = 8; // is node/mnemonic encrypted using passphrase?
|
||||
optional string language = 9; // device language
|
||||
optional string label = 10; // device description label
|
||||
repeated CoinType coins = 11; // supported coins
|
||||
optional bool initialized = 12; // does device contain seed?
|
||||
optional bytes revision = 13; // SCM revision of firmware
|
||||
optional bytes bootloader_hash = 14; // hash of the bootloader
|
||||
optional bool imported = 15; // was storage imported from an external source?
|
||||
optional bool pin_cached = 16; // is PIN already cached in session?
|
||||
optional bool passphrase_cached = 17; // is passphrase already cached in session?
|
||||
optional bool firmware_present = 18; // is valid firmware loaded?
|
||||
optional bool needs_backup = 19; // does storage need backup? (equals to Storage.needs_backup)
|
||||
optional uint32 flags = 20; // device flags (equals to Storage.flags)
|
||||
}
|
||||
|
||||
/**
|
||||
* Request: clear session (removes cached PIN, passphrase, etc).
|
||||
* @next Success
|
||||
*/
|
||||
message ClearSession {
|
||||
}
|
||||
|
||||
/**
|
||||
* Request: change language and/or label of the device
|
||||
* @next Success
|
||||
* @next Failure
|
||||
* @next ButtonRequest
|
||||
* @next PinMatrixRequest
|
||||
*/
|
||||
message ApplySettings {
|
||||
optional string language = 1;
|
||||
optional string label = 2;
|
||||
optional bool use_passphrase = 3;
|
||||
optional bytes homescreen = 4;
|
||||
}
|
||||
|
||||
/**
|
||||
* Request: set flags of the device
|
||||
* @next Success
|
||||
* @next Failure
|
||||
*/
|
||||
message ApplyFlags {
|
||||
optional uint32 flags = 1; // bitmask, can only set bits, not unset
|
||||
}
|
||||
|
||||
/**
|
||||
* Request: Starts workflow for setting/changing/removing the PIN
|
||||
* @next ButtonRequest
|
||||
* @next PinMatrixRequest
|
||||
*/
|
||||
message ChangePin {
|
||||
optional bool remove = 1; // is PIN removal requested?
|
||||
}
|
||||
|
||||
/**
|
||||
* Request: Test if the device is alive, device sends back the message in Success response
|
||||
* @next Success
|
||||
*/
|
||||
message Ping {
|
||||
optional string message = 1; // message to send back in Success message
|
||||
optional bool button_protection = 2; // ask for button press
|
||||
optional bool pin_protection = 3; // ask for PIN if set in device
|
||||
optional bool passphrase_protection = 4; // ask for passphrase if set in device
|
||||
}
|
||||
|
||||
/**
|
||||
* Response: Success of the previous request
|
||||
*/
|
||||
message Success {
|
||||
optional string message = 1; // human readable description of action or request-specific payload
|
||||
}
|
||||
|
||||
/**
|
||||
* Response: Failure of the previous request
|
||||
*/
|
||||
message Failure {
|
||||
optional FailureType code = 1; // computer-readable definition of the error state
|
||||
optional string message = 2; // human-readable message of the error state
|
||||
}
|
||||
|
||||
/**
|
||||
* Response: Device is waiting for HW button press.
|
||||
* @next ButtonAck
|
||||
* @next Cancel
|
||||
*/
|
||||
message ButtonRequest {
|
||||
optional ButtonRequestType code = 1;
|
||||
optional string data = 2;
|
||||
}
|
||||
|
||||
/**
|
||||
* Request: Computer agrees to wait for HW button press
|
||||
* @prev ButtonRequest
|
||||
*/
|
||||
message ButtonAck {
|
||||
}
|
||||
|
||||
/**
|
||||
* Response: Device is asking computer to show PIN matrix and awaits PIN encoded using this matrix scheme
|
||||
* @next PinMatrixAck
|
||||
* @next Cancel
|
||||
*/
|
||||
message PinMatrixRequest {
|
||||
optional PinMatrixRequestType type = 1;
|
||||
}
|
||||
|
||||
/**
|
||||
* Request: Computer responds with encoded PIN
|
||||
* @prev PinMatrixRequest
|
||||
*/
|
||||
message PinMatrixAck {
|
||||
required string pin = 1; // matrix encoded PIN entered by user
|
||||
}
|
||||
|
||||
/**
|
||||
* Request: Abort last operation that required user interaction
|
||||
* @prev ButtonRequest
|
||||
* @prev PinMatrixRequest
|
||||
* @prev PassphraseRequest
|
||||
*/
|
||||
message Cancel {
|
||||
}
|
||||
|
||||
/**
|
||||
* Response: Device awaits encryption passphrase
|
||||
* @next PassphraseAck
|
||||
* @next Cancel
|
||||
*/
|
||||
message PassphraseRequest {
|
||||
}
|
||||
|
||||
/**
|
||||
* Request: Send passphrase back
|
||||
* @prev PassphraseRequest
|
||||
*/
|
||||
message PassphraseAck {
|
||||
required string passphrase = 1;
|
||||
}
|
||||
|
||||
/**
|
||||
* Request: Request a sample of random data generated by hardware RNG. May be used for testing.
|
||||
* @next ButtonRequest
|
||||
* @next Entropy
|
||||
* @next Failure
|
||||
*/
|
||||
message GetEntropy {
|
||||
required uint32 size = 1; // size of requested entropy
|
||||
}
|
||||
|
||||
/**
|
||||
* Response: Reply with random data generated by internal RNG
|
||||
* @prev GetEntropy
|
||||
*/
|
||||
message Entropy {
|
||||
required bytes entropy = 1; // stream of random generated bytes
|
||||
}
|
||||
|
||||
/**
|
||||
* Request: Ask device for public key corresponding to address_n path
|
||||
* @next PassphraseRequest
|
||||
* @next PublicKey
|
||||
* @next Failure
|
||||
*/
|
||||
message GetPublicKey {
|
||||
repeated uint32 address_n = 1; // BIP-32 path to derive the key from master node
|
||||
optional string ecdsa_curve_name = 2; // ECDSA curve name to use
|
||||
optional bool show_display = 3; // optionally show on display before sending the result
|
||||
optional string coin_name = 4 [default='Bitcoin'];
|
||||
}
|
||||
|
||||
/**
|
||||
* Response: Contains public key derived from device private seed
|
||||
* @prev GetPublicKey
|
||||
*/
|
||||
message PublicKey {
|
||||
required HDNodeType node = 1; // BIP32 public node
|
||||
optional string xpub = 2; // serialized form of public node
|
||||
}
|
||||
|
||||
/**
|
||||
* Request: Ask device for address corresponding to address_n path
|
||||
* @next PassphraseRequest
|
||||
* @next Address
|
||||
* @next Failure
|
||||
*/
|
||||
message GetAddress {
|
||||
repeated uint32 address_n = 1; // BIP-32 path to derive the key from master node
|
||||
optional string coin_name = 2 [default='Bitcoin'];
|
||||
optional bool show_display = 3 ; // optionally show on display before sending the result
|
||||
optional MultisigRedeemScriptType multisig = 4; // filled if we are showing a multisig address
|
||||
optional InputScriptType script_type = 5 [default=SPENDADDRESS]; // used to distinguish between various address formats (non-segwit, segwit, etc.)
|
||||
}
|
||||
|
||||
/**
|
||||
* Request: Ask device for Ethereum address corresponding to address_n path
|
||||
* @next PassphraseRequest
|
||||
* @next EthereumAddress
|
||||
* @next Failure
|
||||
*/
|
||||
message EthereumGetAddress {
|
||||
repeated uint32 address_n = 1; // BIP-32 path to derive the key from master node
|
||||
optional bool show_display = 2; // optionally show on display before sending the result
|
||||
}
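A small Go sketch of how address_n is populated (assumption: the conventional BIP-44 layout for Ethereum, m/44'/60'/0'/0/0, with hardened components marked by the top bit):

func ethereumPath() []uint32 {
	const hardened = 0x80000000
	return []uint32{hardened | 44, hardened | 60, hardened | 0, 0, 0}
}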
|
||||
|
||||
/**
|
||||
* Response: Contains address derived from device private seed
|
||||
* @prev GetAddress
|
||||
*/
|
||||
message Address {
|
||||
required string address = 1; // Coin address in Base58 encoding
|
||||
}
|
||||
|
||||
/**
|
||||
* Response: Contains an Ethereum address derived from device private seed
|
||||
* @prev EthereumGetAddress
|
||||
*/
|
||||
message EthereumAddress {
|
||||
required bytes address = 1; // Coin address as an Ethereum 160 bit hash
|
||||
}
|
||||
|
||||
/**
|
||||
* Request: Request device to wipe all sensitive data and settings
|
||||
* @next ButtonRequest
|
||||
*/
|
||||
message WipeDevice {
|
||||
}
|
||||
|
||||
/**
|
||||
* Request: Load seed and related internal settings from the computer
|
||||
* @next ButtonRequest
|
||||
* @next Success
|
||||
* @next Failure
|
||||
*/
|
||||
message LoadDevice {
|
||||
optional string mnemonic = 1; // seed encoded as BIP-39 mnemonic (12, 18 or 24 words)
|
||||
optional HDNodeType node = 2; // BIP-32 node
|
||||
optional string pin = 3; // set PIN protection
|
||||
optional bool passphrase_protection = 4; // enable master node encryption using passphrase
|
||||
optional string language = 5 [default='english']; // device language
|
||||
optional string label = 6; // device label
|
||||
optional bool skip_checksum = 7; // do not test mnemonic for valid BIP-39 checksum
|
||||
optional uint32 u2f_counter = 8; // U2F counter
|
||||
}
|
||||
|
||||
/**
|
||||
* Request: Ask device to do initialization involving user interaction
|
||||
* @next EntropyRequest
|
||||
* @next Failure
|
||||
*/
|
||||
message ResetDevice {
|
||||
optional bool display_random = 1; // display entropy generated by the device before asking for additional entropy
|
||||
optional uint32 strength = 2 [default=256]; // strength of seed in bits
|
||||
optional bool passphrase_protection = 3; // enable master node encryption using passphrase
|
||||
optional bool pin_protection = 4; // enable PIN protection
|
||||
optional string language = 5 [default='english']; // device language
|
||||
optional string label = 6; // device label
|
||||
optional uint32 u2f_counter = 7; // U2F counter
|
||||
optional bool skip_backup = 8; // postpone seed backup to BackupDevice workflow
|
||||
}
|
||||
|
||||
/**
|
||||
* Request: Perform backup of the device seed if not backed up using ResetDevice
|
||||
* @next ButtonRequest
|
||||
*/
|
||||
message BackupDevice {
|
||||
}
|
||||
|
||||
/**
|
||||
* Response: Ask for additional entropy from host computer
|
||||
* @prev ResetDevice
|
||||
* @next EntropyAck
|
||||
*/
|
||||
message EntropyRequest {
|
||||
}
|
||||
|
||||
/**
|
||||
* Request: Provide additional entropy for seed generation function
|
||||
* @prev EntropyRequest
|
||||
* @next ButtonRequest
|
||||
*/
|
||||
message EntropyAck {
|
||||
optional bytes entropy = 1; // 256 bits (32 bytes) of random data
|
||||
}
|
||||
|
||||
/**
|
||||
* Request: Start recovery workflow asking user for specific words of mnemonic
|
||||
* Used to recover the device safely even on an untrusted computer.
|
||||
* @next WordRequest
|
||||
*/
|
||||
message RecoveryDevice {
|
||||
optional uint32 word_count = 1; // number of words in BIP-39 mnemonic
|
||||
optional bool passphrase_protection = 2; // enable master node encryption using passphrase
|
||||
optional bool pin_protection = 3; // enable PIN protection
|
||||
optional string language = 4 [default='english']; // device language
|
||||
optional string label = 5; // device label
|
||||
optional bool enforce_wordlist = 6; // enforce BIP-39 wordlist during the process
|
||||
// 7 reserved for unused recovery method
|
||||
optional uint32 type = 8; // supported recovery type (see RecoveryType)
|
||||
optional uint32 u2f_counter = 9; // U2F counter
|
||||
optional bool dry_run = 10; // perform dry-run recovery workflow (for safe mnemonic validation)
|
||||
}
|
||||
|
||||
/**
|
||||
* Response: Device is waiting for user to enter word of the mnemonic
|
||||
* Its position is shown only on device's internal display.
|
||||
* @prev RecoveryDevice
|
||||
* @prev WordAck
|
||||
*/
|
||||
message WordRequest {
|
||||
optional WordRequestType type = 1;
|
||||
}
|
||||
|
||||
/**
|
||||
* Request: Computer replies with word from the mnemonic
|
||||
* @prev WordRequest
|
||||
* @next WordRequest
|
||||
* @next Success
|
||||
* @next Failure
|
||||
*/
|
||||
message WordAck {
|
||||
required string word = 1; // one word of mnemonic on asked position
|
||||
}
|
||||
|
||||
//////////////////////////////
|
||||
// Message signing messages //
|
||||
//////////////////////////////
|
||||
|
||||
/**
|
||||
* Request: Ask device to sign message
|
||||
* @next MessageSignature
|
||||
* @next Failure
|
||||
*/
|
||||
message SignMessage {
|
||||
repeated uint32 address_n = 1; // BIP-32 path to derive the key from master node
|
||||
required bytes message = 2; // message to be signed
|
||||
optional string coin_name = 3 [default='Bitcoin']; // coin to use for signing
|
||||
optional InputScriptType script_type = 4 [default=SPENDADDRESS]; // used to distinguish between various address formats (non-segwit, segwit, etc.)
|
||||
}
|
||||
|
||||
/**
|
||||
* Request: Ask device to verify message
|
||||
* @next Success
|
||||
* @next Failure
|
||||
*/
|
||||
message VerifyMessage {
|
||||
optional string address = 1; // address to verify
|
||||
optional bytes signature = 2; // signature to verify
|
||||
optional bytes message = 3; // message to verify
|
||||
optional string coin_name = 4 [default='Bitcoin']; // coin to use for verifying
|
||||
}
|
||||
|
||||
/**
|
||||
* Response: Signed message
|
||||
* @prev SignMessage
|
||||
*/
|
||||
message MessageSignature {
|
||||
optional string address = 1; // address used to sign the message
|
||||
optional bytes signature = 2; // signature of the message
|
||||
}
|
||||
|
||||
///////////////////////////
|
||||
// Encryption/decryption //
|
||||
///////////////////////////
|
||||
|
||||
/**
|
||||
* Request: Ask device to encrypt message
|
||||
* @next EncryptedMessage
|
||||
* @next Failure
|
||||
*/
|
||||
message EncryptMessage {
|
||||
optional bytes pubkey = 1; // public key
|
||||
optional bytes message = 2; // message to encrypt
|
||||
optional bool display_only = 3; // show just on display? (don't send back via wire)
|
||||
repeated uint32 address_n = 4; // BIP-32 path to derive the signing key from master node
|
||||
optional string coin_name = 5 [default='Bitcoin']; // coin to use for signing
|
||||
}
|
||||
|
||||
/**
|
||||
* Response: Encrypted message
|
||||
* @prev EncryptMessage
|
||||
*/
|
||||
message EncryptedMessage {
|
||||
optional bytes nonce = 1; // nonce used during encryption
|
||||
optional bytes message = 2; // encrypted message
|
||||
optional bytes hmac = 3; // message hmac
|
||||
}
|
||||
|
||||
/**
|
||||
* Request: Ask device to decrypt message
|
||||
* @next Success
|
||||
* @next Failure
|
||||
*/
|
||||
message DecryptMessage {
|
||||
repeated uint32 address_n = 1; // BIP-32 path to derive the decryption key from master node
|
||||
optional bytes nonce = 2; // nonce used during encryption
|
||||
optional bytes message = 3; // message to decrypt
|
||||
optional bytes hmac = 4; // message hmac
|
||||
}
|
||||
|
||||
/**
|
||||
* Response: Decrypted message
|
||||
* @prev DecryptedMessage
|
||||
*/
|
||||
message DecryptedMessage {
|
||||
optional bytes message = 1; // decrypted message
|
||||
optional string address = 2; // address used to sign the message (if used)
|
||||
}
|
||||
|
||||
/**
|
||||
* Request: Ask device to encrypt or decrypt value of given key
|
||||
* @next CipheredKeyValue
|
||||
* @next Failure
|
||||
*/
|
||||
message CipherKeyValue {
|
||||
repeated uint32 address_n = 1; // BIP-32 path to derive the key from master node
|
||||
optional string key = 2; // key component of key:value
|
||||
optional bytes value = 3; // value component of key:value
|
||||
optional bool encrypt = 4; // are we encrypting (True) or decrypting (False)?
|
||||
optional bool ask_on_encrypt = 5; // should we ask on encrypt operation?
|
||||
optional bool ask_on_decrypt = 6; // should we ask on decrypt operation?
|
||||
optional bytes iv = 7; // initialization vector (will be computed if not set)
|
||||
}
|
||||
|
||||
/**
|
||||
* Response: Return ciphered/deciphered value
|
||||
* @prev CipherKeyValue
|
||||
*/
|
||||
message CipheredKeyValue {
|
||||
optional bytes value = 1; // ciphered/deciphered value
|
||||
}
|
||||
|
||||
//////////////////////////////////
|
||||
// Transaction signing messages //
|
||||
//////////////////////////////////
|
||||
|
||||
/**
|
||||
* Request: Estimated size of the transaction
|
||||
* This behaves exactly like SignTx, which means that it can ask using TxRequest
|
||||
* This call is non-blocking (except possible PassphraseRequest to unlock the seed)
|
||||
* @next TxSize
|
||||
* @next Failure
|
||||
*/
|
||||
message EstimateTxSize {
|
||||
required uint32 outputs_count = 1; // number of transaction outputs
|
||||
required uint32 inputs_count = 2; // number of transaction inputs
|
||||
optional string coin_name = 3 [default='Bitcoin']; // coin to use
|
||||
}
|
||||
|
||||
/**
|
||||
* Response: Estimated size of the transaction
|
||||
* @prev EstimateTxSize
|
||||
*/
|
||||
message TxSize {
|
||||
optional uint32 tx_size = 1; // estimated size of transaction in bytes
|
||||
}
|
||||
|
||||
/**
|
||||
* Request: Ask device to sign transaction
|
||||
* @next PassphraseRequest
|
||||
* @next PinMatrixRequest
|
||||
* @next TxRequest
|
||||
* @next Failure
|
||||
*/
|
||||
message SignTx {
|
||||
required uint32 outputs_count = 1; // number of transaction outputs
|
||||
required uint32 inputs_count = 2; // number of transaction inputs
|
||||
optional string coin_name = 3 [default='Bitcoin']; // coin to use
|
||||
optional uint32 version = 4 [default=1]; // transaction version
|
||||
optional uint32 lock_time = 5 [default=0]; // transaction lock_time
|
||||
}
|
||||
|
||||
/**
|
||||
* Request: Simplified transaction signing
|
||||
* This method doesn't support streaming, so there are hardware limits in number of inputs and outputs.
|
||||
* In case of success, the result is returned using TxRequest message.
|
||||
* @next PassphraseRequest
|
||||
* @next PinMatrixRequest
|
||||
* @next TxRequest
|
||||
* @next Failure
|
||||
*/
|
||||
message SimpleSignTx {
|
||||
repeated TxInputType inputs = 1; // transaction inputs
|
||||
repeated TxOutputType outputs = 2; // transaction outputs
|
||||
repeated TransactionType transactions = 3; // transactions whose outputs are used to build current inputs
|
||||
optional string coin_name = 4 [default='Bitcoin']; // coin to use
|
||||
optional uint32 version = 5 [default=1]; // transaction version
|
||||
optional uint32 lock_time = 6 [default=0]; // transaction lock_time
|
||||
}
|
||||
|
||||
/**
|
||||
* Response: Device asks for information for signing transaction or returns the last result
|
||||
* If request_index is set, device awaits TxAck message (with fields filled in according to request_type)
|
||||
* If signature_index is set, 'signature' contains signed input of signature_index's input
|
||||
* @prev SignTx
|
||||
* @prev SimpleSignTx
|
||||
* @prev TxAck
|
||||
*/
|
||||
message TxRequest {
|
||||
optional RequestType request_type = 1; // what should be filled in TxAck message?
|
||||
optional TxRequestDetailsType details = 2; // request for tx details
|
||||
optional TxRequestSerializedType serialized = 3; // serialized data and request for next
|
||||
}
|
||||
|
||||
/**
|
||||
* Request: Reported transaction data
|
||||
* @prev TxRequest
|
||||
* @next TxRequest
|
||||
*/
|
||||
message TxAck {
|
||||
optional TransactionType tx = 1;
|
||||
}
|
||||
|
||||
/**
|
||||
* Request: Ask device to sign transaction
|
||||
* All fields are optional from the protocol's point of view. Each field defaults to value `0` if missing.
|
||||
* Note: the first at most 1024 bytes of data MUST be transmitted as part of this message.
|
||||
* @next PassphraseRequest
|
||||
* @next PinMatrixRequest
|
||||
* @next EthereumTxRequest
|
||||
* @next Failure
|
||||
*/
|
||||
message EthereumSignTx {
|
||||
repeated uint32 address_n = 1; // BIP-32 path to derive the key from master node
|
||||
optional bytes nonce = 2; // <=256 bit unsigned big endian
|
||||
optional bytes gas_price = 3; // <=256 bit unsigned big endian (in wei)
|
||||
optional bytes gas_limit = 4; // <=256 bit unsigned big endian
|
||||
optional bytes to = 5; // 160 bit address hash
|
||||
optional bytes value = 6; // <=256 bit unsigned big endian (in wei)
|
||||
optional bytes data_initial_chunk = 7; // The initial data chunk (<= 1024 bytes)
|
||||
optional uint32 data_length = 8; // Length of transaction payload
|
||||
optional uint32 chain_id = 9; // Chain Id for EIP 155
|
||||
}
|
||||
|
||||
/**
|
||||
* Response: Device asks for more data from transaction payload, or returns the signature.
|
||||
* If data_length is set, device awaits that many more bytes of payload.
|
||||
* Otherwise, the signature_* fields contain the computed transaction signature. All three fields will be present.
|
||||
* @prev EthereumSignTx
|
||||
* @next EthereumTxAck
|
||||
*/
|
||||
message EthereumTxRequest {
|
||||
optional uint32 data_length = 1; // Number of bytes being requested (<= 1024)
|
||||
optional uint32 signature_v = 2; // Computed signature (recovery parameter, limited to 27 or 28)
|
||||
optional bytes signature_r = 3; // Computed signature R component (256 bit)
|
||||
optional bytes signature_s = 4; // Computed signature S component (256 bit)
|
||||
}
|
||||
|
||||
/**
|
||||
* Request: Transaction payload data.
|
||||
* @prev EthereumTxRequest
|
||||
* @next EthereumTxRequest
|
||||
*/
|
||||
message EthereumTxAck {
|
||||
optional bytes data_chunk = 1; // Bytes from transaction payload (<= 1024 bytes)
|
||||
}
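A host-side sketch of the chunked signing flow described above, under assumptions: the exchange callback is hypothetical and stands in for one request/response round-trip over the HID wire, and the Go field names follow the usual proto2 mapping of the generated code.

func signTxChunked(exchange func(req, res proto.Message) error, path []uint32, payload []byte) (*trezor.EthereumTxRequest, error) {
	first := payload
	if len(first) > 1024 { // at most 1024 bytes may ride along with EthereumSignTx
		first = first[:1024]
	}
	request := &trezor.EthereumSignTx{
		AddressN:         path,
		DataInitialChunk: first,
		DataLength:       proto.Uint32(uint32(len(payload))),
	}
	payload = payload[len(first):]

	response := new(trezor.EthereumTxRequest)
	if err := exchange(request, response); err != nil {
		return nil, err
	}
	for response.GetDataLength() > 0 { // device keeps asking until it has the full payload
		chunk := payload[:response.GetDataLength()]
		payload = payload[response.GetDataLength():]
		if err := exchange(&trezor.EthereumTxAck{DataChunk: chunk}, response); err != nil {
			return nil, err
		}
	}
	return response, nil // SignatureV/R/S are populated once no more data is requested
}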
|
||||
|
||||
////////////////////////////////////////
|
||||
// Ethereum: Message signing messages //
|
||||
////////////////////////////////////////
|
||||
|
||||
/**
|
||||
* Request: Ask device to sign message
|
||||
* @next EthereumMessageSignature
|
||||
* @next Failure
|
||||
*/
|
||||
message EthereumSignMessage {
|
||||
repeated uint32 address_n = 1; // BIP-32 path to derive the key from master node
|
||||
required bytes message = 2; // message to be signed
|
||||
}
|
||||
|
||||
/**
|
||||
* Request: Ask device to verify message
|
||||
* @next Success
|
||||
* @next Failure
|
||||
*/
|
||||
message EthereumVerifyMessage {
|
||||
optional bytes address = 1; // address to verify
|
||||
optional bytes signature = 2; // signature to verify
|
||||
optional bytes message = 3; // message to verify
|
||||
}
|
||||
|
||||
/**
|
||||
* Response: Signed message
|
||||
* @prev EthereumSignMessage
|
||||
*/
|
||||
message EthereumMessageSignature {
|
||||
optional bytes address = 1; // address used to sign the message
|
||||
optional bytes signature = 2; // signature of the message
|
||||
}
|
||||
|
||||
///////////////////////
|
||||
// Identity messages //
|
||||
///////////////////////
|
||||
|
||||
/**
|
||||
* Request: Ask device to sign identity
|
||||
* @next SignedIdentity
|
||||
* @next Failure
|
||||
*/
|
||||
message SignIdentity {
|
||||
optional IdentityType identity = 1; // identity
|
||||
optional bytes challenge_hidden = 2; // non-visible challenge
|
||||
optional string challenge_visual = 3; // challenge shown on display (e.g. date+time)
|
||||
optional string ecdsa_curve_name = 4; // ECDSA curve name to use
|
||||
}
|
||||
|
||||
/**
|
||||
* Response: Device provides signed identity
|
||||
* @prev SignIdentity
|
||||
*/
|
||||
message SignedIdentity {
|
||||
optional string address = 1; // identity address
|
||||
optional bytes public_key = 2; // identity public key
|
||||
optional bytes signature = 3; // signature of the identity data
|
||||
}
|
||||
|
||||
///////////////////
|
||||
// ECDH messages //
|
||||
///////////////////
|
||||
|
||||
/**
|
||||
* Request: Ask device to generate ECDH session key
|
||||
* @next ECDHSessionKey
|
||||
* @next Failure
|
||||
*/
|
||||
message GetECDHSessionKey {
|
||||
optional IdentityType identity = 1; // identity
|
||||
optional bytes peer_public_key = 2; // peer's public key
|
||||
optional string ecdsa_curve_name = 3; // ECDSA curve name to use
|
||||
}
|
||||
|
||||
/**
|
||||
* Response: Device provides ECDH session key
|
||||
* @prev GetECDHSessionKey
|
||||
*/
|
||||
message ECDHSessionKey {
|
||||
optional bytes session_key = 1; // ECDH session key
|
||||
}
|
||||
|
||||
///////////////////
|
||||
// U2F messages //
|
||||
///////////////////
|
||||
|
||||
/**
|
||||
* Request: Set U2F counter
|
||||
* @next Success
|
||||
*/
|
||||
message SetU2FCounter {
|
||||
optional uint32 u2f_counter = 1; // counter
|
||||
}
|
||||
|
||||
/////////////////////////
|
||||
// Bootloader messages //
|
||||
/////////////////////////
|
||||
|
||||
/**
|
||||
* Request: Ask device to erase its firmware (so it can be replaced via FirmwareUpload)
|
||||
* @next Success
|
||||
* @next FirmwareRequest
|
||||
* @next Failure
|
||||
*/
|
||||
message FirmwareErase {
|
||||
optional uint32 length = 1; // length of new firmware
|
||||
}
|
||||
|
||||
/**
|
||||
* Response: Ask for firmware chunk
|
||||
* @next FirmwareUpload
|
||||
*/
|
||||
message FirmwareRequest {
|
||||
optional uint32 offset = 1; // offset of requested firmware chunk
|
||||
optional uint32 length = 2; // length of requested firmware chunk
|
||||
}
|
||||
|
||||
/**
|
||||
* Request: Send firmware in binary form to the device
|
||||
* @next Success
|
||||
* @next Failure
|
||||
*/
|
||||
message FirmwareUpload {
|
||||
required bytes payload = 1; // firmware to be loaded into device
|
||||
optional bytes hash = 2; // hash of the payload
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Request: Perform a device self-test
|
||||
* @next Success
|
||||
* @next Failure
|
||||
*/
|
||||
message SelfTest {
|
||||
optional bytes payload = 1; // payload to be used in self-test
|
||||
}
|
||||
|
||||
/////////////////////////////////////////////////////////////
|
||||
// Debug messages (only available if DebugLink is enabled) //
|
||||
/////////////////////////////////////////////////////////////
|
||||
|
||||
/**
|
||||
* Request: "Press" the button on the device
|
||||
* @next Success
|
||||
*/
|
||||
message DebugLinkDecision {
|
||||
required bool yes_no = 1; // true for "Confirm", false for "Cancel"
|
||||
}
|
||||
|
||||
/**
|
||||
* Request: Computer asks for device state
|
||||
* @next DebugLinkState
|
||||
*/
|
||||
message DebugLinkGetState {
|
||||
}
|
||||
|
||||
/**
|
||||
* Response: Device current state
|
||||
* @prev DebugLinkGetState
|
||||
*/
|
||||
message DebugLinkState {
|
||||
optional bytes layout = 1; // raw buffer of display
|
||||
optional string pin = 2; // current PIN, blank if PIN is not set/enabled
|
||||
optional string matrix = 3; // current PIN matrix
|
||||
optional string mnemonic = 4; // current BIP-39 mnemonic
|
||||
optional HDNodeType node = 5; // current BIP-32 node
|
||||
optional bool passphrase_protection = 6; // is node/mnemonic encrypted using passphrase?
|
||||
optional string reset_word = 7; // word on device display during ResetDevice workflow
|
||||
optional bytes reset_entropy = 8; // current entropy during ResetDevice workflow
|
||||
optional string recovery_fake_word = 9; // (fake) word on display during RecoveryDevice workflow
|
||||
optional uint32 recovery_word_pos = 10; // index of mnemonic word the device is expecting during RecoveryDevice workflow
|
||||
}
|
||||
|
||||
/**
|
||||
* Request: Ask device to restart
|
||||
*/
|
||||
message DebugLinkStop {
|
||||
}
|
||||
|
||||
/**
|
||||
* Response: Device wants host to log event
|
||||
*/
|
||||
message DebugLinkLog {
|
||||
optional uint32 level = 1;
|
||||
optional string bucket = 2;
|
||||
optional string text = 3;
|
||||
}
|
||||
|
||||
/**
|
||||
* Request: Read memory from device
|
||||
* @next DebugLinkMemory
|
||||
*/
|
||||
message DebugLinkMemoryRead {
|
||||
optional uint32 address = 1;
|
||||
optional uint32 length = 2;
|
||||
}
|
||||
|
||||
/**
|
||||
* Response: Device sends memory back
|
||||
* @prev DebugLinkMemoryRead
|
||||
*/
|
||||
message DebugLinkMemory {
|
||||
optional bytes memory = 1;
|
||||
}
|
||||
|
||||
/**
|
||||
* Request: Write memory to device.
|
||||
* WARNING: Writing to the wrong location can irreparably break the device.
|
||||
*/
|
||||
message DebugLinkMemoryWrite {
|
||||
optional uint32 address = 1;
|
||||
optional bytes memory = 2;
|
||||
optional bool flash = 3;
|
||||
}
|
||||
|
||||
/**
|
||||
* Request: Erase block of flash on device
|
||||
* WARNING: Writing to the wrong location can irreparably break the device.
|
||||
*/
|
||||
message DebugLinkFlashErase {
|
||||
optional uint32 sector = 1;
|
||||
}
|
46 accounts/usbwallet/internal/trezor/trezor.go (Normal file)
@ -0,0 +1,46 @@
|
||||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
// This file contains the implementation for interacting with the Trezor hardware
|
||||
// wallets. The wire protocol spec can be found on the SatoshiLabs website:
|
||||
// https://doc.satoshilabs.com/trezor-tech/api-protobuf.html
|
||||
|
||||
//go:generate protoc --go_out=import_path=trezor:. types.proto messages.proto
|
||||
|
||||
// Package trezor contains the wire protocol wrapper in Go.
|
||||
package trezor
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
)
|
||||
|
||||
// Type returns the protocol buffer type number of a specific message. If the
|
||||
// message is nil, this method panics!
|
||||
func Type(msg proto.Message) uint16 {
|
||||
return uint16(MessageType_value["MessageType_"+reflect.TypeOf(msg).Elem().Name()])
|
||||
}
|
||||
|
||||
// Name returns the friendly message type name of a specific protocol buffer
|
||||
// type numbers.
|
||||
func Name(kind uint16) string {
|
||||
name := MessageType_name[int32(kind)]
|
||||
if len(name) < 12 {
|
||||
return name
|
||||
}
|
||||
return name[12:]
|
||||
}
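Usage sketch for the two helpers (assuming the generated message types from messages.pb.go are in scope):

kind := trezor.Type(new(trezor.Initialize)) // numeric wire identifier, 0 for Initialize
fmt.Println(kind, trezor.Name(kind))        // prints: 0 Initialize (the MessageType_ prefix is stripped)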
|
1333 accounts/usbwallet/internal/trezor/types.pb.go (Normal file)
File diff suppressed because it is too large
278 accounts/usbwallet/internal/trezor/types.proto (Normal file)
@ -0,0 +1,278 @@
|
||||
// This file originates from the SatoshiLabs Trezor `common` repository at:
|
||||
// https://github.com/trezor/trezor-common/blob/master/protob/types.proto
|
||||
// dated 28.07.2017, commit dd8ec3231fb5f7992360aff9bdfe30bb58130f4b.
|
||||
|
||||
syntax = "proto2";
|
||||
|
||||
/**
|
||||
* Types for TREZOR communication
|
||||
*
|
||||
* @author Marek Palatinus <slush@satoshilabs.com>
|
||||
* @version 1.2
|
||||
*/
|
||||
|
||||
// Sugar for easier handling in Java
|
||||
option java_package = "com.satoshilabs.trezor.lib.protobuf";
|
||||
option java_outer_classname = "TrezorType";
|
||||
|
||||
import "google/protobuf/descriptor.proto";
|
||||
|
||||
/**
|
||||
* Options for specifying message direction and type of wire (normal/debug)
|
||||
*/
|
||||
extend google.protobuf.EnumValueOptions {
|
||||
optional bool wire_in = 50002; // message can be transmitted via wire from PC to TREZOR
|
||||
optional bool wire_out = 50003; // message can be transmitted via wire from TREZOR to PC
|
||||
optional bool wire_debug_in = 50004; // message can be transmitted via debug wire from PC to TREZOR
|
||||
optional bool wire_debug_out = 50005; // message can be transmitted via debug wire from TREZOR to PC
|
||||
optional bool wire_tiny = 50006; // message is handled by TREZOR when the USB stack is in tiny mode
|
||||
optional bool wire_bootloader = 50007; // message is only handled by TREZOR Bootloader
|
||||
}
|
||||
|
||||
/**
|
||||
* Type of failures returned by Failure message
|
||||
* @used_in Failure
|
||||
*/
|
||||
enum FailureType {
|
||||
Failure_UnexpectedMessage = 1;
|
||||
Failure_ButtonExpected = 2;
|
||||
Failure_DataError = 3;
|
||||
Failure_ActionCancelled = 4;
|
||||
Failure_PinExpected = 5;
|
||||
Failure_PinCancelled = 6;
|
||||
Failure_PinInvalid = 7;
|
||||
Failure_InvalidSignature = 8;
|
||||
Failure_ProcessError = 9;
|
||||
Failure_NotEnoughFunds = 10;
|
||||
Failure_NotInitialized = 11;
|
||||
Failure_FirmwareError = 99;
|
||||
}
|
||||
|
||||
/**
|
||||
* Type of script which will be used for transaction output
|
||||
* @used_in TxOutputType
|
||||
*/
|
||||
enum OutputScriptType {
|
||||
PAYTOADDRESS = 0; // used for all addresses (bitcoin, p2sh, witness)
|
||||
PAYTOSCRIPTHASH = 1; // p2sh address (deprecated; use PAYTOADDRESS)
|
||||
PAYTOMULTISIG = 2; // only for change output
|
||||
PAYTOOPRETURN = 3; // op_return
|
||||
PAYTOWITNESS = 4; // only for change output
|
||||
PAYTOP2SHWITNESS = 5; // only for change output
|
||||
}
|
||||
|
||||
/**
|
||||
* Type of script which will be used for transaction input
|
||||
* @used_in TxInputType
|
||||
*/
|
||||
enum InputScriptType {
|
||||
SPENDADDRESS = 0; // standard p2pkh address
|
||||
SPENDMULTISIG = 1; // p2sh multisig address
|
||||
EXTERNAL = 2; // reserved for external inputs (coinjoin)
|
||||
SPENDWITNESS = 3; // native segwit
|
||||
SPENDP2SHWITNESS = 4; // segwit over p2sh (backward compatible)
|
||||
}
|
||||
|
||||
/**
|
||||
* Type of information required by transaction signing process
|
||||
* @used_in TxRequest
|
||||
*/
|
||||
enum RequestType {
|
||||
TXINPUT = 0;
|
||||
TXOUTPUT = 1;
|
||||
TXMETA = 2;
|
||||
TXFINISHED = 3;
|
||||
TXEXTRADATA = 4;
|
||||
}
|
||||
|
||||
/**
|
||||
* Type of button request
|
||||
* @used_in ButtonRequest
|
||||
*/
|
||||
enum ButtonRequestType {
|
||||
ButtonRequest_Other = 1;
|
||||
ButtonRequest_FeeOverThreshold = 2;
|
||||
ButtonRequest_ConfirmOutput = 3;
|
||||
ButtonRequest_ResetDevice = 4;
|
||||
ButtonRequest_ConfirmWord = 5;
|
||||
ButtonRequest_WipeDevice = 6;
|
||||
ButtonRequest_ProtectCall = 7;
|
||||
ButtonRequest_SignTx = 8;
|
||||
ButtonRequest_FirmwareCheck = 9;
|
||||
ButtonRequest_Address = 10;
|
||||
ButtonRequest_PublicKey = 11;
|
||||
}
|
||||
|
||||
/**
|
||||
* Type of PIN request
|
||||
* @used_in PinMatrixRequest
|
||||
*/
|
||||
enum PinMatrixRequestType {
|
||||
PinMatrixRequestType_Current = 1;
|
||||
PinMatrixRequestType_NewFirst = 2;
|
||||
PinMatrixRequestType_NewSecond = 3;
|
||||
}
|
||||
|
||||
/**
|
||||
* Type of recovery procedure. These should be used as bitmask, e.g.,
|
||||
* `RecoveryDeviceType_ScrambledWords | RecoveryDeviceType_Matrix`
|
||||
* listing every method supported by the host computer.
|
||||
*
|
||||
* Note that ScrambledWords must be supported by every implementation
|
||||
* for backward compatibility; there is no way to not support it.
|
||||
*
|
||||
* @used_in RecoveryDevice
|
||||
*/
|
||||
enum RecoveryDeviceType {
|
||||
// use powers of two when extending this field
|
||||
RecoveryDeviceType_ScrambledWords = 0; // words in scrambled order
|
||||
RecoveryDeviceType_Matrix = 1; // matrix recovery type
|
||||
}

/**
* Type of Recovery Word request
* @used_in WordRequest
*/
enum WordRequestType {
WordRequestType_Plain = 0;
WordRequestType_Matrix9 = 1;
WordRequestType_Matrix6 = 2;
}

/**
* Structure representing BIP32 (hierarchical deterministic) node
* Used for importing a private key into the device and exporting a public key out of the device
* @used_in PublicKey
* @used_in LoadDevice
* @used_in DebugLinkState
* @used_in Storage
*/
message HDNodeType {
required uint32 depth = 1;
required uint32 fingerprint = 2;
required uint32 child_num = 3;
required bytes chain_code = 4;
optional bytes private_key = 5;
optional bytes public_key = 6;
}

message HDNodePathType {
required HDNodeType node = 1; // BIP-32 node in deserialized form
repeated uint32 address_n = 2; // BIP-32 path to derive the key from node
}

/**
* Structure representing Coin
* @used_in Features
*/
message CoinType {
optional string coin_name = 1;
optional string coin_shortcut = 2;
optional uint32 address_type = 3 [default=0];
optional uint64 maxfee_kb = 4;
optional uint32 address_type_p2sh = 5 [default=5];
optional string signed_message_header = 8;
optional uint32 xpub_magic = 9 [default=76067358]; // default=0x0488b21e
optional uint32 xprv_magic = 10 [default=76066276]; // default=0x0488ade4
optional bool segwit = 11;
optional uint32 forkid = 12;
}

/**
* Type of redeem script used in input
* @used_in TxInputType
*/
message MultisigRedeemScriptType {
repeated HDNodePathType pubkeys = 1; // pubkeys from multisig address (sorted lexicographically)
repeated bytes signatures = 2; // existing signatures for partially signed input
optional uint32 m = 3; // "m" from n, how many valid signatures are necessary for spending
}
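To make the "m of n" comment concrete, here is a minimal, hypothetical helper (not part of the protocol definitions) that counts collected signatures; it assumes missing signatures are encoded as empty byte strings, which is how partially signed inputs are commonly represented:

```go
package main

import "fmt"

// spendable reports whether a multisig input has gathered at least m
// signatures. Hypothetical helper for illustration only.
func spendable(signatures [][]byte, m uint32) bool {
	collected := uint32(0)
	for _, sig := range signatures {
		if len(sig) > 0 {
			collected++
		}
	}
	return collected >= m
}

func main() {
	// A 2-of-3 input with only one signature present is not yet spendable.
	sigs := [][]byte{{0x30, 0x44}, nil, nil}
	fmt.Println(spendable(sigs, 2)) // false
}
```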

/**
* Structure representing transaction input
* @used_in SimpleSignTx
* @used_in TransactionType
*/
message TxInputType {
repeated uint32 address_n = 1; // BIP-32 path to derive the key from master node
required bytes prev_hash = 2; // hash of previous transaction output to spend by this input
required uint32 prev_index = 3; // index of previous output to spend
optional bytes script_sig = 4; // script signature, unset for tx to sign
optional uint32 sequence = 5 [default=4294967295]; // sequence (default=0xffffffff)
optional InputScriptType script_type = 6 [default=SPENDADDRESS]; // defines template of input script
optional MultisigRedeemScriptType multisig = 7; // Filled if input is going to spend multisig tx
optional uint64 amount = 8; // amount of previous transaction output (for segwit only)
}

/**
* Structure representing transaction output
* @used_in SimpleSignTx
* @used_in TransactionType
*/
message TxOutputType {
optional string address = 1; // target coin address in Base58 encoding
repeated uint32 address_n = 2; // BIP-32 path to derive the key from master node; has higher priority than "address"
required uint64 amount = 3; // amount to spend in satoshis
required OutputScriptType script_type = 4; // output script type
optional MultisigRedeemScriptType multisig = 5; // defines multisig address; script_type must be PAYTOMULTISIG
optional bytes op_return_data = 6; // defines op_return data; script_type must be PAYTOOPRETURN, amount must be 0
}

/**
* Structure representing compiled transaction output
* @used_in TransactionType
*/
message TxOutputBinType {
required uint64 amount = 1;
required bytes script_pubkey = 2;
}

/**
* Structure representing transaction
* @used_in SimpleSignTx
*/
message TransactionType {
optional uint32 version = 1;
repeated TxInputType inputs = 2;
repeated TxOutputBinType bin_outputs = 3;
repeated TxOutputType outputs = 5;
optional uint32 lock_time = 4;
optional uint32 inputs_cnt = 6;
optional uint32 outputs_cnt = 7;
optional bytes extra_data = 8;
optional uint32 extra_data_len = 9;
}

/**
* Structure representing request details
* @used_in TxRequest
*/
message TxRequestDetailsType {
optional uint32 request_index = 1; // device expects TxAck message from the computer
optional bytes tx_hash = 2; // tx_hash of requested transaction
optional uint32 extra_data_len = 3; // length of requested extra data
optional uint32 extra_data_offset = 4; // offset of requested extra data
}

/**
* Structure representing serialized data
* @used_in TxRequest
*/
message TxRequestSerializedType {
optional uint32 signature_index = 1; // 'signature' field contains signed input of this index
optional bytes signature = 2; // signature of the signature_index input
optional bytes serialized_tx = 3; // part of serialized and signed transaction
}

/**
* Structure representing identity data
* @used_in IdentityType
*/
message IdentityType {
optional string proto = 1; // proto part of URI
optional string user = 2; // user part of URI
optional string host = 3; // host part of URI
optional string port = 4; // port part of URI
optional string path = 5; // path part of URI
optional uint32 index = 6 [default=0]; // identity index
}
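As a rough illustration of how a sign-identity URI decomposes into the fields above, here is a minimal Go sketch using the standard library URL parser; the struct is a hand-rolled stand-in, not the generated protobuf type:

```go
package main

import (
	"fmt"
	"net/url"
)

// identity is a hand-rolled stand-in for the IdentityType message above.
type identity struct {
	Proto, User, Host, Port, Path string
	Index                         uint32
}

func main() {
	// Purely illustrative URI; any scheme://user@host:port/path splits the same way.
	u, err := url.Parse("https://alice@example.com:443/login")
	if err != nil {
		panic(err)
	}
	id := identity{
		Proto: u.Scheme,
		User:  u.User.Username(),
		Host:  u.Hostname(),
		Port:  u.Port(),
		Path:  u.Path,
	}
	fmt.Printf("%+v\n", id) // {Proto:https User:alice Host:example.com Port:443 Path:/login Index:0}
}
```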
464
accounts/usbwallet/ledger.go
Normal file
@ -0,0 +1,464 @@
|
||||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
// This file contains the implementation for interacting with the Ledger hardware
|
||||
// wallets. The wire protocol spec can be found in the Ledger Blue GitHub repo:
|
||||
// https://raw.githubusercontent.com/LedgerHQ/blue-app-eth/master/doc/ethapp.asc
|
||||
|
||||
package usbwallet
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"encoding/hex"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"math/big"
|
||||
|
||||
"github.com/ethereum/go-ethereum/accounts"
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"github.com/ethereum/go-ethereum/rlp"
|
||||
)
|
||||
|
||||
// ledgerOpcode is an enumeration encoding the supported Ledger opcodes.
|
||||
type ledgerOpcode byte
|
||||
|
||||
// ledgerParam1 is an enumeration encoding the supported Ledger parameters for
|
||||
// specific opcodes. The same parameter values may be reused between opcodes.
|
||||
type ledgerParam1 byte
|
||||
|
||||
// ledgerParam2 is an enumeration encoding the supported Ledger parameters for
|
||||
// specific opcodes. The same parameter values may be reused between opcodes.
|
||||
type ledgerParam2 byte
|
||||
|
||||
const (
|
||||
ledgerOpRetrieveAddress ledgerOpcode = 0x02 // Returns the public key and Ethereum address for a given BIP 32 path
|
||||
ledgerOpSignTransaction ledgerOpcode = 0x04 // Signs an Ethereum transaction after having the user validate the parameters
|
||||
ledgerOpGetConfiguration ledgerOpcode = 0x06 // Returns specific wallet application configuration
|
||||
|
||||
ledgerP1DirectlyFetchAddress ledgerParam1 = 0x00 // Return address directly from the wallet
|
||||
ledgerP1ConfirmFetchAddress ledgerParam1 = 0x01 // Require a user confirmation before returning the address
|
||||
ledgerP1InitTransactionData ledgerParam1 = 0x00 // First transaction data block for signing
|
||||
ledgerP1ContTransactionData ledgerParam1 = 0x80 // Subsequent transaction data block for signing
|
||||
ledgerP2DiscardAddressChainCode ledgerParam2 = 0x00 // Do not return the chain code along with the address
|
||||
ledgerP2ReturnAddressChainCode ledgerParam2 = 0x01 // Return the chain code along with the address
|
||||
)
|
||||
|
||||
// errLedgerReplyInvalidHeader is the error message returned by a Ledger data exchange
|
||||
// if the device replies with a mismatching header. This usually means the device
|
||||
// is in browser mode.
|
||||
var errLedgerReplyInvalidHeader = errors.New("ledger: invalid reply header")
|
||||
|
||||
// errLedgerInvalidVersionReply is the error message returned by a Ledger version retrieval
|
||||
// when a response does arrive, but it does not contain the expected data.
|
||||
var errLedgerInvalidVersionReply = errors.New("ledger: invalid version reply")
|
||||
|
||||
// ledgerDriver implements the communication with a Ledger hardware wallet.
|
||||
type ledgerDriver struct {
|
||||
device io.ReadWriter // USB device connection to communicate through
|
||||
version [3]byte // Current version of the Ledger firmware (zero if app is offline)
|
||||
browser bool // Flag whether the Ledger is in browser mode (reply channel mismatch)
|
||||
failure error // Any failure that would make the device unusable
|
||||
log log.Logger // Contextual logger to tag the ledger with its id
|
||||
}
|
||||
|
||||
// newLedgerDriver creates a new instance of a Ledger USB protocol driver.
|
||||
func newLedgerDriver(logger log.Logger) driver {
|
||||
return &ledgerDriver{
|
||||
log: logger,
|
||||
}
|
||||
}
|
||||
|
||||
// Status implements usbwallet.driver, returning various states the Ledger can
|
||||
// currently be in.
|
||||
func (w *ledgerDriver) Status() (string, error) {
|
||||
if w.failure != nil {
|
||||
return fmt.Sprintf("Failed: %v", w.failure), w.failure
|
||||
}
|
||||
if w.browser {
|
||||
return "Ethereum app in browser mode", w.failure
|
||||
}
|
||||
if w.offline() {
|
||||
return "Ethereum app offline", w.failure
|
||||
}
|
||||
return fmt.Sprintf("Ethereum app v%d.%d.%d online", w.version[0], w.version[1], w.version[2]), w.failure
|
||||
}
|
||||
|
||||
// offline returns whether the wallet and the Ethereum app are offline.
|
||||
//
|
||||
// The method assumes that the state lock is held!
|
||||
func (w *ledgerDriver) offline() bool {
|
||||
return w.version == [3]byte{0, 0, 0}
|
||||
}
|
||||
|
||||
// Open implements usbwallet.driver, attempting to initialize the connection to the
|
||||
// Ledger hardware wallet. The Ledger does not require a user passphrase, so that
|
||||
// parameter is silently discarded.
|
||||
func (w *ledgerDriver) Open(device io.ReadWriter, passphrase string) error {
|
||||
w.device, w.failure = device, nil
|
||||
|
||||
_, err := w.ledgerDerive(accounts.DefaultBaseDerivationPath)
|
||||
if err != nil {
|
||||
// Ethereum app is not running or in browser mode, nothing more to do, return
|
||||
if err == errLedgerReplyInvalidHeader {
|
||||
w.browser = true
|
||||
}
|
||||
return nil
|
||||
}
|
||||
// Try to resolve the Ethereum app's version, will fail prior to v1.0.2
|
||||
if w.version, err = w.ledgerVersion(); err != nil {
|
||||
w.version = [3]byte{1, 0, 0} // Assume worst case, can't verify if v1.0.0 or v1.0.1
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Close implements usbwallet.driver, cleaning up any metadata maintained within
|
||||
// the Ledger driver.
|
||||
func (w *ledgerDriver) Close() error {
|
||||
w.browser, w.version = false, [3]byte{}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Heartbeat implements usbwallet.driver, performing a sanity check against the
|
||||
// Ledger to see if it's still online.
|
||||
func (w *ledgerDriver) Heartbeat() error {
|
||||
if _, err := w.ledgerVersion(); err != nil && err != errLedgerInvalidVersionReply {
|
||||
w.failure = err
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Derive implements usbwallet.driver, sending a derivation request to the Ledger
|
||||
// and returning the Ethereum address located on that derivation path.
|
||||
func (w *ledgerDriver) Derive(path accounts.DerivationPath) (common.Address, error) {
|
||||
return w.ledgerDerive(path)
|
||||
}
|
||||
|
||||
// SignTx implements usbwallet.driver, sending the transaction to the Ledger and
|
||||
// waiting for the user to confirm or deny the transaction.
|
||||
//
|
||||
// Note, if the version of the Ethereum application running on the Ledger wallet is
|
||||
// too old to sign EIP-155 transactions, but such is requested nonetheless, an error
|
||||
// will be returned as opposed to silently signing in Homestead mode.
|
||||
func (w *ledgerDriver) SignTx(path accounts.DerivationPath, tx *types.Transaction, chainID *big.Int) (common.Address, *types.Transaction, error) {
|
||||
// If the Ethereum app doesn't run, abort
|
||||
if w.offline() {
|
||||
return common.Address{}, nil, accounts.ErrWalletClosed
|
||||
}
|
||||
// Ensure the wallet is capable of signing the given transaction
|
||||
if chainID != nil && w.version[0] <= 1 && w.version[1] <= 0 && w.version[2] <= 2 {
|
||||
return common.Address{}, nil, fmt.Errorf("Ledger v%d.%d.%d doesn't support signing this transaction, please update to v1.0.3 at least", w.version[0], w.version[1], w.version[2])
|
||||
}
|
||||
// All information gathered and metadata checks out, request signing
|
||||
return w.ledgerSign(path, tx, chainID)
|
||||
}
|
||||
|
||||
// ledgerVersion retrieves the current version of the Ethereum wallet app running
|
||||
// on the Ledger wallet.
|
||||
//
|
||||
// The version retrieval protocol is defined as follows:
|
||||
//
|
||||
// CLA | INS | P1 | P2 | Lc | Le
|
||||
// ----+-----+----+----+----+---
|
||||
// E0 | 06 | 00 | 00 | 00 | 04
|
||||
//
|
||||
// With no input data, and the output data being:
|
||||
//
|
||||
// Description | Length
|
||||
// ---------------------------------------------------+--------
|
||||
// Flags 01: arbitrary data signature enabled by user | 1 byte
|
||||
// Application major version | 1 byte
|
||||
// Application minor version | 1 byte
|
||||
// Application patch version | 1 byte
|
||||
func (w *ledgerDriver) ledgerVersion() ([3]byte, error) {
|
||||
// Send the request and wait for the response
|
||||
reply, err := w.ledgerExchange(ledgerOpGetConfiguration, 0, 0, nil)
|
||||
if err != nil {
|
||||
return [3]byte{}, err
|
||||
}
|
||||
if len(reply) != 4 {
|
||||
return [3]byte{}, errLedgerInvalidVersionReply
|
||||
}
|
||||
// Cache the version for future reference
|
||||
var version [3]byte
|
||||
copy(version[:], reply[1:])
|
||||
return version, nil
|
||||
}
|
||||
|
||||
// ledgerDerive retrieves the currently active Ethereum address from a Ledger
|
||||
// wallet at the specified derivation path.
|
||||
//
|
||||
// The address derivation protocol is defined as follows:
|
||||
//
|
||||
// CLA | INS | P1 | P2 | Lc | Le
|
||||
// ----+-----+----+----+-----+---
|
||||
// E0 | 02 | 00 return address
|
||||
// 01 display address and confirm before returning
|
||||
// | 00: do not return the chain code
|
||||
// | 01: return the chain code
|
||||
// | var | 00
|
||||
//
|
||||
// Where the input data is:
|
||||
//
|
||||
// Description | Length
|
||||
// -------------------------------------------------+--------
|
||||
// Number of BIP 32 derivations to perform (max 10) | 1 byte
|
||||
// First derivation index (big endian) | 4 bytes
|
||||
// ... | 4 bytes
|
||||
// Last derivation index (big endian) | 4 bytes
|
||||
//
|
||||
// And the output data is:
|
||||
//
|
||||
// Description | Length
|
||||
// ------------------------+-------------------
|
||||
// Public Key length | 1 byte
|
||||
// Uncompressed Public Key | arbitrary
|
||||
// Ethereum address length | 1 byte
|
||||
// Ethereum address | 40 bytes hex ascii
|
||||
// Chain code if requested | 32 bytes
|
||||
func (w *ledgerDriver) ledgerDerive(derivationPath []uint32) (common.Address, error) {
|
||||
// Flatten the derivation path into the Ledger request
|
||||
path := make([]byte, 1+4*len(derivationPath))
|
||||
path[0] = byte(len(derivationPath))
|
||||
for i, component := range derivationPath {
|
||||
binary.BigEndian.PutUint32(path[1+4*i:], component)
|
||||
}
|
||||
// Send the request and wait for the response
|
||||
reply, err := w.ledgerExchange(ledgerOpRetrieveAddress, ledgerP1DirectlyFetchAddress, ledgerP2DiscardAddressChainCode, path)
|
||||
if err != nil {
|
||||
return common.Address{}, err
|
||||
}
|
||||
// Discard the public key, we don't need that for now
|
||||
if len(reply) < 1 || len(reply) < 1+int(reply[0]) {
|
||||
return common.Address{}, errors.New("reply lacks public key entry")
|
||||
}
|
||||
reply = reply[1+int(reply[0]):]
|
||||
|
||||
// Extract the Ethereum hex address string
|
||||
if len(reply) < 1 || len(reply) < 1+int(reply[0]) {
|
||||
return common.Address{}, errors.New("reply lacks address entry")
|
||||
}
|
||||
hexstr := reply[1 : 1+int(reply[0])]
|
||||
|
||||
// Decode the hex string into an Ethereum address and return
|
||||
var address common.Address
|
||||
hex.Decode(address[:], hexstr)
|
||||
return address, nil
|
||||
}
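The path flattening at the top of ledgerDerive matches the input table in the comment above. A standalone sketch, using the common Ethereum base path m/44'/60'/0'/0 purely as an example input:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// Hardened components carry the 0x80000000 flag; the payload is one count
	// byte followed by big-endian 4-byte derivation indices.
	derivationPath := []uint32{0x80000000 + 44, 0x80000000 + 60, 0x80000000, 0}

	path := make([]byte, 1+4*len(derivationPath))
	path[0] = byte(len(derivationPath))
	for i, component := range derivationPath {
		binary.BigEndian.PutUint32(path[1+4*i:], component)
	}
	fmt.Printf("% x\n", path)
	// Output: 04 80 00 00 2c 80 00 00 3c 80 00 00 00 00 00 00 00
}
```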
|
||||
|
||||
// ledgerSign sends the transaction to the Ledger wallet, and waits for the user
|
||||
// to confirm or deny the transaction.
|
||||
//
|
||||
// The transaction signing protocol is defined as follows:
|
||||
//
|
||||
// CLA | INS | P1 | P2 | Lc | Le
|
||||
// ----+-----+----+----+-----+---
|
||||
// E0 | 04 | 00: first transaction data block
|
||||
// 80: subsequent transaction data block
|
||||
// | 00 | variable | variable
|
||||
//
|
||||
// Where the input for the first transaction block (first 255 bytes) is:
|
||||
//
|
||||
// Description | Length
|
||||
// -------------------------------------------------+----------
|
||||
// Number of BIP 32 derivations to perform (max 10) | 1 byte
|
||||
// First derivation index (big endian) | 4 bytes
|
||||
// ... | 4 bytes
|
||||
// Last derivation index (big endian) | 4 bytes
|
||||
// RLP transaction chunk | arbitrary
|
||||
//
|
||||
// And the input for subsequent transaction blocks (first 255 bytes) are:
|
||||
//
|
||||
// Description | Length
|
||||
// ----------------------+----------
|
||||
// RLP transaction chunk | arbitrary
|
||||
//
|
||||
// And the output data is:
|
||||
//
|
||||
// Description | Length
|
||||
// ------------+---------
|
||||
// signature V | 1 byte
|
||||
// signature R | 32 bytes
|
||||
// signature S | 32 bytes
|
||||
func (w *ledgerDriver) ledgerSign(derivationPath []uint32, tx *types.Transaction, chainID *big.Int) (common.Address, *types.Transaction, error) {
|
||||
// Flatten the derivation path into the Ledger request
|
||||
path := make([]byte, 1+4*len(derivationPath))
|
||||
path[0] = byte(len(derivationPath))
|
||||
for i, component := range derivationPath {
|
||||
binary.BigEndian.PutUint32(path[1+4*i:], component)
|
||||
}
|
||||
// Create the transaction RLP based on whether legacy or EIP155 signing was requested
|
||||
var (
|
||||
txrlp []byte
|
||||
err error
|
||||
)
|
||||
if chainID == nil {
|
||||
if txrlp, err = rlp.EncodeToBytes([]interface{}{tx.Nonce(), tx.GasPrice(), tx.Gas(), tx.To(), tx.Value(), tx.Data()}); err != nil {
|
||||
return common.Address{}, nil, err
|
||||
}
|
||||
} else {
|
||||
if txrlp, err = rlp.EncodeToBytes([]interface{}{tx.Nonce(), tx.GasPrice(), tx.Gas(), tx.To(), tx.Value(), tx.Data(), chainID, big.NewInt(0), big.NewInt(0)}); err != nil {
|
||||
return common.Address{}, nil, err
|
||||
}
|
||||
}
|
||||
payload := append(path, txrlp...)
|
||||
|
||||
// Send the request and wait for the response
|
||||
var (
|
||||
op = ledgerP1InitTransactionData
|
||||
reply []byte
|
||||
)
|
||||
for len(payload) > 0 {
|
||||
// Calculate the size of the next data chunk
|
||||
chunk := 255
|
||||
if chunk > len(payload) {
|
||||
chunk = len(payload)
|
||||
}
|
||||
// Send the chunk over, ensuring it's processed correctly
|
||||
reply, err = w.ledgerExchange(ledgerOpSignTransaction, op, 0, payload[:chunk])
|
||||
if err != nil {
|
||||
return common.Address{}, nil, err
|
||||
}
|
||||
// Shift the payload and ensure subsequent chunks are marked as such
|
||||
payload = payload[chunk:]
|
||||
op = ledgerP1ContTransactionData
|
||||
}
|
||||
// Extract the Ethereum signature and do a sanity validation
|
||||
if len(reply) != 65 {
|
||||
return common.Address{}, nil, errors.New("reply lacks signature")
|
||||
}
|
||||
signature := append(reply[1:], reply[0])
|
||||
|
||||
// Create the correct signer and signature transform based on the chain ID
|
||||
var signer types.Signer
|
||||
if chainID == nil {
|
||||
signer = new(types.HomesteadSigner)
|
||||
} else {
|
||||
signer = types.NewEIP155Signer(chainID)
|
||||
signature[64] = signature[64] - byte(chainID.Uint64()*2+35)
|
||||
}
|
||||
signed, err := tx.WithSignature(signer, signature)
|
||||
if err != nil {
|
||||
return common.Address{}, nil, err
|
||||
}
|
||||
sender, err := types.Sender(signer, signed)
|
||||
if err != nil {
|
||||
return common.Address{}, nil, err
|
||||
}
|
||||
return sender, signed, nil
|
||||
}
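The `signature[64] = signature[64] - byte(chainID.Uint64()*2+35)` adjustment above converts the V byte into the 0/1 recovery identifier that types.NewEIP155Signer expects. A worked sketch, assuming chain ID 1 and a device that reports the EIP-155-encoded V:

```go
package main

import "fmt"

func main() {
	// Under EIP-155 the encoded V is chainID*2 + 35 + recoveryID, so for
	// chain ID 1 the device reports 37 or 38; subtracting 37 recovers the
	// raw 0/1 recovery identifier used by the signer.
	chainID := uint64(1)
	for _, deviceV := range []byte{37, 38} {
		recID := deviceV - byte(chainID*2+35)
		fmt.Printf("device V=%d -> recovery id=%d\n", deviceV, recID)
	}
}
```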
|
||||
|
||||
// ledgerExchange performs a data exchange with the Ledger wallet, sending it a
|
||||
// message and retrieving the response.
|
||||
//
|
||||
// The common transport header is defined as follows:
|
||||
//
|
||||
// Description | Length
|
||||
// --------------------------------------+----------
|
||||
// Communication channel ID (big endian) | 2 bytes
|
||||
// Command tag | 1 byte
|
||||
// Packet sequence index (big endian) | 2 bytes
|
||||
// Payload | arbitrary
|
||||
//
|
||||
// The Communication channel ID allows commands multiplexing over the same
|
||||
// physical link. It is not used for the time being, and should be set to 0101
|
||||
// to avoid compatibility issues with implementations ignoring a leading 00 byte.
|
||||
//
|
||||
// The Command tag describes the message content. Use TAG_APDU (0x05) for standard
|
||||
// APDU payloads, or TAG_PING (0x02) for a simple link test.
|
||||
//
|
||||
// The Packet sequence index describes the current sequence for fragmented payloads.
|
||||
// The first fragment index is 0x00.
|
||||
//
|
||||
// APDU Command payloads are encoded as follows:
|
||||
//
|
||||
// Description | Length
|
||||
// -----------------------------------
|
||||
// APDU length (big endian) | 2 bytes
|
||||
// APDU CLA | 1 byte
|
||||
// APDU INS | 1 byte
|
||||
// APDU P1 | 1 byte
|
||||
// APDU P2 | 1 byte
|
||||
// APDU length | 1 byte
|
||||
// Optional APDU data | arbitrary
|
||||
func (w *ledgerDriver) ledgerExchange(opcode ledgerOpcode, p1 ledgerParam1, p2 ledgerParam2, data []byte) ([]byte, error) {
|
||||
// Construct the message payload, possibly split into multiple chunks
|
||||
apdu := make([]byte, 2, 7+len(data))
|
||||
|
||||
binary.BigEndian.PutUint16(apdu, uint16(5+len(data)))
|
||||
apdu = append(apdu, []byte{0xe0, byte(opcode), byte(p1), byte(p2), byte(len(data))}...)
|
||||
apdu = append(apdu, data...)
|
||||
|
||||
// Stream all the chunks to the device
|
||||
header := []byte{0x01, 0x01, 0x05, 0x00, 0x00} // Channel ID and command tag appended
|
||||
chunk := make([]byte, 64)
|
||||
space := len(chunk) - len(header)
|
||||
|
||||
for i := 0; len(apdu) > 0; i++ {
|
||||
// Construct the new message to stream
|
||||
chunk = append(chunk[:0], header...)
|
||||
binary.BigEndian.PutUint16(chunk[3:], uint16(i))
|
||||
|
||||
if len(apdu) > space {
|
||||
chunk = append(chunk, apdu[:space]...)
|
||||
apdu = apdu[space:]
|
||||
} else {
|
||||
chunk = append(chunk, apdu...)
|
||||
apdu = nil
|
||||
}
|
||||
// Send over to the device
|
||||
w.log.Trace("Data chunk sent to the Ledger", "chunk", hexutil.Bytes(chunk))
|
||||
if _, err := w.device.Write(chunk); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
// Stream the reply back from the wallet in 64 byte chunks
|
||||
var reply []byte
|
||||
chunk = chunk[:64] // Yeah, we surely have enough space
|
||||
for {
|
||||
// Read the next chunk from the Ledger wallet
|
||||
if _, err := io.ReadFull(w.device, chunk); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
w.log.Trace("Data chunk received from the Ledger", "chunk", hexutil.Bytes(chunk))
|
||||
|
||||
// Make sure the transport header matches
|
||||
if chunk[0] != 0x01 || chunk[1] != 0x01 || chunk[2] != 0x05 {
|
||||
return nil, errLedgerReplyInvalidHeader
|
||||
}
|
||||
// If it's the first chunk, retrieve the total message length
|
||||
var payload []byte
|
||||
|
||||
if chunk[3] == 0x00 && chunk[4] == 0x00 {
|
||||
reply = make([]byte, 0, int(binary.BigEndian.Uint16(chunk[5:7])))
|
||||
payload = chunk[7:]
|
||||
} else {
|
||||
payload = chunk[5:]
|
||||
}
|
||||
// Append to the reply and stop when filled up
|
||||
if left := cap(reply) - len(reply); left > len(payload) {
|
||||
reply = append(reply, payload...)
|
||||
} else {
|
||||
reply = append(reply, payload[:left]...)
|
||||
break
|
||||
}
|
||||
}
|
||||
return reply[:len(reply)-2], nil
|
||||
}
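To see the transport layout from the comment above in isolation, here is a minimal sketch that frames the parameterless GET CONFIGURATION command (CLA 0xE0, INS 0x06); the real method additionally pads each chunk to a 64-byte HID report and performs the device I/O:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	data := []byte(nil) // GET CONFIGURATION carries no APDU data

	// APDU: 2-byte big-endian length, then CLA, INS, P1, P2, Lc and the data.
	apdu := make([]byte, 2, 7+len(data))
	binary.BigEndian.PutUint16(apdu, uint16(5+len(data)))
	apdu = append(apdu, 0xe0, 0x06, 0x00, 0x00, byte(len(data)))
	apdu = append(apdu, data...)

	// Transport header: channel ID 0x0101, command tag 0x05, sequence 0x0000.
	frame := append([]byte{0x01, 0x01, 0x05, 0x00, 0x00}, apdu...)
	fmt.Printf("% x\n", frame)
	// Output: 01 01 05 00 00 00 05 e0 06 00 00 00
}
```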
|
@ -1,903 +0,0 @@
|
||||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
// This file contains the implementation for interacting with the Ledger hardware
|
||||
// wallets. The wire protocol spec can be found in the Ledger Blue GitHub repo:
|
||||
// https://raw.githubusercontent.com/LedgerHQ/blue-app-eth/master/doc/ethapp.asc
|
||||
|
||||
package usbwallet
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/binary"
|
||||
"encoding/hex"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"math/big"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
ethereum "github.com/ethereum/go-ethereum"
|
||||
"github.com/ethereum/go-ethereum/accounts"
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"github.com/ethereum/go-ethereum/rlp"
|
||||
"github.com/karalabe/hid"
|
||||
)
|
||||
|
||||
// Maximum time between wallet health checks to detect USB unplugs.
|
||||
const ledgerHeartbeatCycle = time.Second
|
||||
|
||||
// Minimum time to wait between self derivation attempts, even if the user is
|
||||
// requesting accounts like crazy.
|
||||
const ledgerSelfDeriveThrottling = time.Second
|
||||
|
||||
// ledgerOpcode is an enumeration encoding the supported Ledger opcodes.
|
||||
type ledgerOpcode byte
|
||||
|
||||
// ledgerParam1 is an enumeration encoding the supported Ledger parameters for
|
||||
// specific opcodes. The same parameter values may be reused between opcodes.
|
||||
type ledgerParam1 byte
|
||||
|
||||
// ledgerParam2 is an enumeration encoding the supported Ledger parameters for
|
||||
// specific opcodes. The same parameter values may be reused between opcodes.
|
||||
type ledgerParam2 byte
|
||||
|
||||
const (
|
||||
ledgerOpRetrieveAddress ledgerOpcode = 0x02 // Returns the public key and Ethereum address for a given BIP 32 path
|
||||
ledgerOpSignTransaction ledgerOpcode = 0x04 // Signs an Ethereum transaction after having the user validate the parameters
|
||||
ledgerOpGetConfiguration ledgerOpcode = 0x06 // Returns specific wallet application configuration
|
||||
|
||||
ledgerP1DirectlyFetchAddress ledgerParam1 = 0x00 // Return address directly from the wallet
|
||||
ledgerP1ConfirmFetchAddress ledgerParam1 = 0x01 // Require a user confirmation before returning the address
|
||||
ledgerP1InitTransactionData ledgerParam1 = 0x00 // First transaction data block for signing
|
||||
ledgerP1ContTransactionData ledgerParam1 = 0x80 // Subsequent transaction data block for signing
|
||||
ledgerP2DiscardAddressChainCode ledgerParam2 = 0x00 // Do not return the chain code along with the address
|
||||
ledgerP2ReturnAddressChainCode ledgerParam2 = 0x01 // Return the chain code along with the address
|
||||
)
|
||||
|
||||
// errReplyInvalidHeader is the error message returned by a Ledger data exchange
|
||||
// if the device replies with a mismatching header. This usually means the device
|
||||
// is in browser mode.
|
||||
var errReplyInvalidHeader = errors.New("invalid reply header")
|
||||
|
||||
// errInvalidVersionReply is the error message returned by a Ledger version retrieval
|
||||
// when a response does arrive, but it does not contain the expected data.
|
||||
var errInvalidVersionReply = errors.New("invalid version reply")
|
||||
|
||||
// ledgerWallet represents a live USB Ledger hardware wallet.
|
||||
type ledgerWallet struct {
|
||||
hub *LedgerHub // USB hub the device originates from (TODO(karalabe): remove if hotplug lands on Windows)
|
||||
url *accounts.URL // Textual URL uniquely identifying this wallet
|
||||
|
||||
info hid.DeviceInfo // Known USB device infos about the wallet
|
||||
device *hid.Device // USB device advertising itself as a Ledger wallet
|
||||
failure error // Any failure that would make the device unusable
|
||||
|
||||
version [3]byte // Current version of the Ledger Ethereum app (zero if app is offline)
|
||||
browser bool // Flag whether the Ledger is in browser mode (reply channel mismatch)
|
||||
accounts []accounts.Account // List of derived accounts pinned on the Ledger
|
||||
paths map[common.Address]accounts.DerivationPath // Known derivation paths for signing operations
|
||||
|
||||
deriveNextPath accounts.DerivationPath // Next derivation path for account auto-discovery
|
||||
deriveNextAddr common.Address // Next derived account address for auto-discovery
|
||||
deriveChain ethereum.ChainStateReader // Blockchain state reader to discover used account with
|
||||
deriveReq chan chan struct{} // Channel to request a self-derivation on
|
||||
deriveQuit chan chan error // Channel to terminate the self-deriver with
|
||||
|
||||
healthQuit chan chan error
|
||||
|
||||
// Locking a hardware wallet is a bit special. Since hardware devices are lower
|
||||
// performing, any communication with them might take a non negligible amount of
|
||||
// time. Worse still, waiting for user confirmation can take arbitrarily long,
|
||||
// but exclusive communication must be upheld during. Locking the entire wallet
|
||||
// in the meantime, however, would stall any parts of the system that don't want
|
||||
// to communicate, just read some state (e.g. list the accounts).
|
||||
//
|
||||
// As such, a hardware wallet needs two locks to function correctly. A state
|
||||
// lock can be used to protect the wallet's software-side internal state, which
|
||||
// must not be held exclusively during hardware communication. A communication
|
||||
// lock can be used to achieve exclusive access to the device itself, this one
|
||||
// however should allow "skipping" waiting for operations that might want to
|
||||
// use the device, but can live without too (e.g. account self-derivation).
|
||||
//
|
||||
// Since we have two locks, it's important to know how to properly use them:
|
||||
// - Communication requires the `device` to not change, so obtaining the
|
||||
// commsLock should be done after having a stateLock.
|
||||
// - Communication must not disable read access to the wallet state, so it
|
||||
// must only ever hold a *read* lock to stateLock.
|
||||
commsLock chan struct{} // Mutex (buf=1) for the USB comms without keeping the state locked
|
||||
stateLock sync.RWMutex // Protects read and write access to the wallet struct fields
|
||||
|
||||
log log.Logger // Contextual logger to tag the ledger with its id
|
||||
}
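The two-lock discipline described in the comment above can be distilled into a small sketch (not part of the original file): state reads take a read lock, and only then is the buffered comms channel drained for exclusive device access:

```go
package main

import (
	"errors"
	"fmt"
	"sync"
)

// walletSketch mirrors just the two locks of ledgerWallet for illustration.
type walletSketch struct {
	stateLock sync.RWMutex  // protects the struct fields
	commsLock chan struct{} // buffered (size 1) mutex for device I/O
	device    *struct{}     // stand-in for the USB handle
}

// withDevice follows the ordering rules above: a *read* lock on the state,
// then the comms token for exclusive hardware access.
func (w *walletSketch) withDevice(fn func() error) error {
	w.stateLock.RLock()
	defer w.stateLock.RUnlock()

	if w.device == nil {
		return errors.New("wallet closed")
	}
	<-w.commsLock
	defer func() { w.commsLock <- struct{}{} }()
	return fn()
}

func main() {
	w := &walletSketch{commsLock: make(chan struct{}, 1), device: &struct{}{}}
	w.commsLock <- struct{}{} // arm the comms mutex
	fmt.Println(w.withDevice(func() error { return nil })) // <nil>
}
```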
|
||||
|
||||
// URL implements accounts.Wallet, returning the URL of the Ledger device.
|
||||
func (w *ledgerWallet) URL() accounts.URL {
|
||||
return *w.url // Immutable, no need for a lock
|
||||
}
|
||||
|
||||
// Status implements accounts.Wallet, returning whether the Ledger is opened, closed,
|
||||
// or whether the Ethereum app was not started on it.
|
||||
func (w *ledgerWallet) Status() string {
|
||||
w.stateLock.RLock() // No device communication, state lock is enough
|
||||
defer w.stateLock.RUnlock()
|
||||
|
||||
if w.failure != nil {
|
||||
return fmt.Sprintf("Failed: %v", w.failure)
|
||||
}
|
||||
if w.device == nil {
|
||||
return "Closed"
|
||||
}
|
||||
if w.browser {
|
||||
return "Ethereum app in browser mode"
|
||||
}
|
||||
if w.offline() {
|
||||
return "Ethereum app offline"
|
||||
}
|
||||
return fmt.Sprintf("Ethereum app v%d.%d.%d online", w.version[0], w.version[1], w.version[2])
|
||||
}
|
||||
|
||||
// offline returns whether the wallet and the Ethereum app are offline.
|
||||
//
|
||||
// The method assumes that the state lock is held!
|
||||
func (w *ledgerWallet) offline() bool {
|
||||
return w.version == [3]byte{0, 0, 0}
|
||||
}
|
||||
|
||||
// failed returns if the USB device wrapped by the wallet failed for some reason.
|
||||
// This is used by the device scanner to report failed wallets as departed.
|
||||
//
|
||||
// The method assumes that the state lock is *not* held!
|
||||
func (w *ledgerWallet) failed() bool {
|
||||
w.stateLock.RLock() // No device communication, state lock is enough
|
||||
defer w.stateLock.RUnlock()
|
||||
|
||||
return w.failure != nil
|
||||
}
|
||||
|
||||
// Open implements accounts.Wallet, attempting to open a USB connection to the
|
||||
// Ledger hardware wallet. The Ledger does not require a user passphrase, so that
|
||||
// parameter is silently discarded.
|
||||
func (w *ledgerWallet) Open(passphrase string) error {
|
||||
w.stateLock.Lock() // State lock is enough since there's no connection yet at this point
|
||||
defer w.stateLock.Unlock()
|
||||
|
||||
// If the wallet was already opened, don't try to open again
|
||||
if w.device != nil {
|
||||
return accounts.ErrWalletAlreadyOpen
|
||||
}
|
||||
// Otherwise iterate over all USB devices and find this again (no way to directly do this)
|
||||
device, err := w.info.Open()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// Wallet seems to be successfully opened, guess if the Ethereum app is running
|
||||
w.device = device
|
||||
w.commsLock = make(chan struct{}, 1)
|
||||
w.commsLock <- struct{}{} // Enable lock
|
||||
|
||||
w.paths = make(map[common.Address]accounts.DerivationPath)
|
||||
|
||||
w.deriveReq = make(chan chan struct{})
|
||||
w.deriveQuit = make(chan chan error)
|
||||
w.healthQuit = make(chan chan error)
|
||||
|
||||
defer func() {
|
||||
go w.heartbeat()
|
||||
go w.selfDerive()
|
||||
}()
|
||||
|
||||
if _, err = w.ledgerDerive(accounts.DefaultBaseDerivationPath); err != nil {
|
||||
// Ethereum app is not running or in browser mode, nothing more to do, return
|
||||
if err == errReplyInvalidHeader {
|
||||
w.browser = true
|
||||
}
|
||||
return nil
|
||||
}
|
||||
// Try to resolve the Ethereum app's version, will fail prior to v1.0.2
|
||||
if w.version, err = w.ledgerVersion(); err != nil {
|
||||
w.version = [3]byte{1, 0, 0} // Assume worst case, can't verify if v1.0.0 or v1.0.1
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// heartbeat is a health check loop for the Ledger wallets to periodically verify
|
||||
// whether they are still present or if they malfunctioned. It is needed because:
|
||||
// - libusb on Windows doesn't support hotplug, so we can't detect USB unplugs
|
||||
// - communication timeout on the Ledger requires a device power cycle to fix
|
||||
func (w *ledgerWallet) heartbeat() {
|
||||
w.log.Debug("Ledger health-check started")
|
||||
defer w.log.Debug("Ledger health-check stopped")
|
||||
|
||||
// Execute heartbeat checks until termination or error
|
||||
var (
|
||||
errc chan error
|
||||
err error
|
||||
)
|
||||
for errc == nil && err == nil {
|
||||
// Wait until termination is requested or the heartbeat cycle arrives
|
||||
select {
|
||||
case errc = <-w.healthQuit:
|
||||
// Termination requested
|
||||
continue
|
||||
case <-time.After(ledgerHeartbeatCycle):
|
||||
// Heartbeat time
|
||||
}
|
||||
// Execute a tiny data exchange to see responsiveness
|
||||
w.stateLock.RLock()
|
||||
if w.device == nil {
|
||||
// Terminated while waiting for the lock
|
||||
w.stateLock.RUnlock()
|
||||
continue
|
||||
}
|
||||
<-w.commsLock // Don't lock state while resolving version
|
||||
_, err = w.ledgerVersion()
|
||||
w.commsLock <- struct{}{}
|
||||
w.stateLock.RUnlock()
|
||||
|
||||
if err != nil && err != errInvalidVersionReply {
|
||||
w.stateLock.Lock() // Lock state to tear the wallet down
|
||||
w.failure = err
|
||||
w.close()
|
||||
w.stateLock.Unlock()
|
||||
}
|
||||
// Ignore non hardware related errors
|
||||
err = nil
|
||||
}
|
||||
// In case of error, wait for termination
|
||||
if err != nil {
|
||||
w.log.Debug("Ledger health-check failed", "err", err)
|
||||
errc = <-w.healthQuit
|
||||
}
|
||||
errc <- err
|
||||
}
|
||||
|
||||
// Close implements accounts.Wallet, closing the USB connection to the Ledger.
|
||||
func (w *ledgerWallet) Close() error {
|
||||
// Ensure the wallet was opened
|
||||
w.stateLock.RLock()
|
||||
hQuit, dQuit := w.healthQuit, w.deriveQuit
|
||||
w.stateLock.RUnlock()
|
||||
|
||||
// Terminate the health checks
|
||||
var herr error
|
||||
if hQuit != nil {
|
||||
errc := make(chan error)
|
||||
hQuit <- errc
|
||||
herr = <-errc // Save for later, we *must* close the USB
|
||||
}
|
||||
// Terminate the self-derivations
|
||||
var derr error
|
||||
if dQuit != nil {
|
||||
errc := make(chan error)
|
||||
dQuit <- errc
|
||||
derr = <-errc // Save for later, we *must* close the USB
|
||||
}
|
||||
// Terminate the device connection
|
||||
w.stateLock.Lock()
|
||||
defer w.stateLock.Unlock()
|
||||
|
||||
w.healthQuit = nil
|
||||
w.deriveQuit = nil
|
||||
w.deriveReq = nil
|
||||
|
||||
if err := w.close(); err != nil {
|
||||
return err
|
||||
}
|
||||
if herr != nil {
|
||||
return herr
|
||||
}
|
||||
return derr
|
||||
}
|
||||
|
||||
// close is the internal wallet closer that terminates the USB connection and
|
||||
// resets all the fields to their defaults.
|
||||
//
|
||||
// Note, close assumes the state lock is held!
|
||||
func (w *ledgerWallet) close() error {
|
||||
// Allow duplicate closes, especially for health-check failures
|
||||
if w.device == nil {
|
||||
return nil
|
||||
}
|
||||
// Close the device, clear everything, then return
|
||||
w.device.Close()
|
||||
w.device = nil
|
||||
|
||||
w.browser, w.version = false, [3]byte{}
|
||||
w.accounts, w.paths = nil, nil
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Accounts implements accounts.Wallet, returning the list of accounts pinned to
|
||||
// the Ledger hardware wallet. If self-derivation was enabled, the account list
|
||||
// is periodically expanded based on current chain state.
|
||||
func (w *ledgerWallet) Accounts() []accounts.Account {
|
||||
// Attempt self-derivation if it's running
|
||||
reqc := make(chan struct{}, 1)
|
||||
select {
|
||||
case w.deriveReq <- reqc:
|
||||
// Self-derivation request accepted, wait for it
|
||||
<-reqc
|
||||
default:
|
||||
// Self-derivation offline, throttled or busy, skip
|
||||
}
|
||||
// Return whatever account list we ended up with
|
||||
w.stateLock.RLock()
|
||||
defer w.stateLock.RUnlock()
|
||||
|
||||
cpy := make([]accounts.Account, len(w.accounts))
|
||||
copy(cpy, w.accounts)
|
||||
return cpy
|
||||
}
|
||||
|
||||
// selfDerive is an account derivation loop that upon request attempts to find
|
||||
// new non-zero accounts.
|
||||
func (w *ledgerWallet) selfDerive() {
|
||||
w.log.Debug("Ledger self-derivation started")
|
||||
defer w.log.Debug("Ledger self-derivation stopped")
|
||||
|
||||
// Execute self-derivations until termination or error
|
||||
var (
|
||||
reqc chan struct{}
|
||||
errc chan error
|
||||
err error
|
||||
)
|
||||
for errc == nil && err == nil {
|
||||
// Wait until either derivation or termination is requested
|
||||
select {
|
||||
case errc = <-w.deriveQuit:
|
||||
// Termination requested
|
||||
continue
|
||||
case reqc = <-w.deriveReq:
|
||||
// Account discovery requested
|
||||
}
|
||||
// Derivation needs a chain and device access, skip if either unavailable
|
||||
w.stateLock.RLock()
|
||||
if w.device == nil || w.deriveChain == nil || w.offline() {
|
||||
w.stateLock.RUnlock()
|
||||
reqc <- struct{}{}
|
||||
continue
|
||||
}
|
||||
select {
|
||||
case <-w.commsLock:
|
||||
default:
|
||||
w.stateLock.RUnlock()
|
||||
reqc <- struct{}{}
|
||||
continue
|
||||
}
|
||||
// Device lock obtained, derive the next batch of accounts
|
||||
var (
|
||||
accs []accounts.Account
|
||||
paths []accounts.DerivationPath
|
||||
|
||||
nextAddr = w.deriveNextAddr
|
||||
nextPath = w.deriveNextPath
|
||||
|
||||
context = context.Background()
|
||||
)
|
||||
for empty := false; !empty; {
|
||||
// Retrieve the next derived Ethereum account
|
||||
if nextAddr == (common.Address{}) {
|
||||
if nextAddr, err = w.ledgerDerive(nextPath); err != nil {
|
||||
w.log.Warn("Ledger account derivation failed", "err", err)
|
||||
break
|
||||
}
|
||||
}
|
||||
// Check the account's status against the current chain state
|
||||
var (
|
||||
balance *big.Int
|
||||
nonce uint64
|
||||
)
|
||||
balance, err = w.deriveChain.BalanceAt(context, nextAddr, nil)
|
||||
if err != nil {
|
||||
w.log.Warn("Ledger balance retrieval failed", "err", err)
|
||||
break
|
||||
}
|
||||
nonce, err = w.deriveChain.NonceAt(context, nextAddr, nil)
|
||||
if err != nil {
|
||||
w.log.Warn("Ledger nonce retrieval failed", "err", err)
|
||||
break
|
||||
}
|
||||
// If the next account is empty, stop self-derivation, but add it nonetheless
|
||||
if balance.Sign() == 0 && nonce == 0 {
|
||||
empty = true
|
||||
}
|
||||
// We've just self-derived a new account, start tracking it locally
|
||||
path := make(accounts.DerivationPath, len(nextPath))
|
||||
copy(path[:], nextPath[:])
|
||||
paths = append(paths, path)
|
||||
|
||||
account := accounts.Account{
|
||||
Address: nextAddr,
|
||||
URL: accounts.URL{Scheme: w.url.Scheme, Path: fmt.Sprintf("%s/%s", w.url.Path, path)},
|
||||
}
|
||||
accs = append(accs, account)
|
||||
|
||||
// Display a log message to the user for new (or previously empty) accounts
|
||||
if _, known := w.paths[nextAddr]; !known || (!empty && nextAddr == w.deriveNextAddr) {
|
||||
w.log.Info("Ledger discovered new account", "address", nextAddr, "path", path, "balance", balance, "nonce", nonce)
|
||||
}
|
||||
// Fetch the next potential account
|
||||
if !empty {
|
||||
nextAddr = common.Address{}
|
||||
nextPath[len(nextPath)-1]++
|
||||
}
|
||||
}
|
||||
// Self derivation complete, release device lock
|
||||
w.commsLock <- struct{}{}
|
||||
w.stateLock.RUnlock()
|
||||
|
||||
// Insert any accounts successfully derived
|
||||
w.stateLock.Lock()
|
||||
for i := 0; i < len(accs); i++ {
|
||||
if _, ok := w.paths[accs[i].Address]; !ok {
|
||||
w.accounts = append(w.accounts, accs[i])
|
||||
w.paths[accs[i].Address] = paths[i]
|
||||
}
|
||||
}
|
||||
// Shift the self-derivation forward
|
||||
// TODO(karalabe): don't overwrite changes from wallet.SelfDerive
|
||||
w.deriveNextAddr = nextAddr
|
||||
w.deriveNextPath = nextPath
|
||||
w.stateLock.Unlock()
|
||||
|
||||
// Notify the user of termination and loop after a bit of time (to avoid thrashing)
|
||||
reqc <- struct{}{}
|
||||
if err == nil {
|
||||
select {
|
||||
case errc = <-w.deriveQuit:
|
||||
// Termination requested, abort
|
||||
case <-time.After(ledgerSelfDeriveThrottling):
|
||||
// Waited enough, willing to self-derive again
|
||||
}
|
||||
}
|
||||
}
|
||||
// In case of error, wait for termination
|
||||
if err != nil {
|
||||
w.log.Debug("Ledger self-derivation failed", "err", err)
|
||||
errc = <-w.deriveQuit
|
||||
}
|
||||
errc <- err
|
||||
}
|
||||
|
||||
// Contains implements accounts.Wallet, returning whether a particular account is
|
||||
// or is not pinned into this Ledger instance. Although we could attempt to resolve
|
||||
// unpinned accounts, that would be a non-negligible hardware operation.
|
||||
func (w *ledgerWallet) Contains(account accounts.Account) bool {
|
||||
w.stateLock.RLock()
|
||||
defer w.stateLock.RUnlock()
|
||||
|
||||
_, exists := w.paths[account.Address]
|
||||
return exists
|
||||
}
|
||||
|
||||
// Derive implements accounts.Wallet, deriving a new account at the specific
|
||||
// derivation path. If pin is set to true, the account will be added to the list
|
||||
// of tracked accounts.
|
||||
func (w *ledgerWallet) Derive(path accounts.DerivationPath, pin bool) (accounts.Account, error) {
|
||||
// Try to derive the actual account and update its URL if successful
|
||||
w.stateLock.RLock() // Avoid device disappearing during derivation
|
||||
|
||||
if w.device == nil || w.offline() {
|
||||
w.stateLock.RUnlock()
|
||||
return accounts.Account{}, accounts.ErrWalletClosed
|
||||
}
|
||||
<-w.commsLock // Avoid concurrent hardware access
|
||||
address, err := w.ledgerDerive(path)
|
||||
w.commsLock <- struct{}{}
|
||||
|
||||
w.stateLock.RUnlock()
|
||||
|
||||
// If an error occurred or no pinning was requested, return
|
||||
if err != nil {
|
||||
return accounts.Account{}, err
|
||||
}
|
||||
account := accounts.Account{
|
||||
Address: address,
|
||||
URL: accounts.URL{Scheme: w.url.Scheme, Path: fmt.Sprintf("%s/%s", w.url.Path, path)},
|
||||
}
|
||||
if !pin {
|
||||
return account, nil
|
||||
}
|
||||
// Pinning needs to modify the state
|
||||
w.stateLock.Lock()
|
||||
defer w.stateLock.Unlock()
|
||||
|
||||
if _, ok := w.paths[address]; !ok {
|
||||
w.accounts = append(w.accounts, account)
|
||||
w.paths[address] = path
|
||||
}
|
||||
return account, nil
|
||||
}
|
||||
|
||||
// SelfDerive implements accounts.Wallet, trying to discover accounts that the
|
||||
// user used previously (based on the chain state), but ones that he/she did not
|
||||
// explicitly pin to the wallet manually. To avoid chain head monitoring, self
|
||||
// derivation only runs during account listing (and even then throttled).
|
||||
func (w *ledgerWallet) SelfDerive(base accounts.DerivationPath, chain ethereum.ChainStateReader) {
|
||||
w.stateLock.Lock()
|
||||
defer w.stateLock.Unlock()
|
||||
|
||||
w.deriveNextPath = make(accounts.DerivationPath, len(base))
|
||||
copy(w.deriveNextPath[:], base[:])
|
||||
|
||||
w.deriveNextAddr = common.Address{}
|
||||
w.deriveChain = chain
|
||||
}
|
||||
|
||||
// SignHash implements accounts.Wallet, however signing arbitrary data is not
|
||||
// supported for Ledger wallets, so this method will always return an error.
|
||||
func (w *ledgerWallet) SignHash(acc accounts.Account, hash []byte) ([]byte, error) {
|
||||
return nil, accounts.ErrNotSupported
|
||||
}
|
||||
|
||||
// SignTx implements accounts.Wallet. It sends the transaction over to the Ledger
|
||||
// wallet to request a confirmation from the user. It returns either the signed
|
||||
// transaction or a failure if the user denied the transaction.
|
||||
//
|
||||
// Note, if the version of the Ethereum application running on the Ledger wallet is
|
||||
// too old to sign EIP-155 transactions, but such is requested nonetheless, an error
|
||||
// will be returned opposed to silently signing in Homestead mode.
|
||||
func (w *ledgerWallet) SignTx(account accounts.Account, tx *types.Transaction, chainID *big.Int) (*types.Transaction, error) {
|
||||
w.stateLock.RLock() // Comms have own mutex, this is for the state fields
|
||||
defer w.stateLock.RUnlock()
|
||||
|
||||
// If the wallet is closed, or the Ethereum app doesn't run, abort
|
||||
if w.device == nil || w.offline() {
|
||||
return nil, accounts.ErrWalletClosed
|
||||
}
|
||||
// Make sure the requested account is contained within
|
||||
path, ok := w.paths[account.Address]
|
||||
if !ok {
|
||||
return nil, accounts.ErrUnknownAccount
|
||||
}
|
||||
// Ensure the wallet is capable of signing the given transaction
|
||||
if chainID != nil && w.version[0] <= 1 && w.version[1] <= 0 && w.version[2] <= 2 {
|
||||
return nil, fmt.Errorf("Ledger v%d.%d.%d doesn't support signing this transaction, please update to v1.0.3 at least", w.version[0], w.version[1], w.version[2])
|
||||
}
|
||||
// All information gathered and metadata checks out, request signing
|
||||
<-w.commsLock
|
||||
defer func() { w.commsLock <- struct{}{} }()
|
||||
|
||||
// Ensure the device isn't screwed with while user confirmation is pending
|
||||
// TODO(karalabe): remove if hotplug lands on Windows
|
||||
w.hub.commsLock.Lock()
|
||||
w.hub.commsPend++
|
||||
w.hub.commsLock.Unlock()
|
||||
|
||||
defer func() {
|
||||
w.hub.commsLock.Lock()
|
||||
w.hub.commsPend--
|
||||
w.hub.commsLock.Unlock()
|
||||
}()
|
||||
return w.ledgerSign(path, account.Address, tx, chainID)
|
||||
}
|
||||
|
||||
// SignHashWithPassphrase implements accounts.Wallet, however signing arbitrary
|
||||
// data is not supported for Ledger wallets, so this method will always return
|
||||
// an error.
|
||||
func (w *ledgerWallet) SignHashWithPassphrase(account accounts.Account, passphrase string, hash []byte) ([]byte, error) {
|
||||
return nil, accounts.ErrNotSupported
|
||||
}
|
||||
|
||||
// SignTxWithPassphrase implements accounts.Wallet, attempting to sign the given
|
||||
// transaction with the given account using passphrase as extra authentication.
|
||||
// Since the Ledger does not support extra passphrases, it is silently ignored.
|
||||
func (w *ledgerWallet) SignTxWithPassphrase(account accounts.Account, passphrase string, tx *types.Transaction, chainID *big.Int) (*types.Transaction, error) {
|
||||
return w.SignTx(account, tx, chainID)
|
||||
}
|
||||
|
||||
// ledgerVersion retrieves the current version of the Ethereum wallet app running
|
||||
// on the Ledger wallet.
|
||||
//
|
||||
// The version retrieval protocol is defined as follows:
|
||||
//
|
||||
// CLA | INS | P1 | P2 | Lc | Le
|
||||
// ----+-----+----+----+----+---
|
||||
// E0 | 06 | 00 | 00 | 00 | 04
|
||||
//
|
||||
// With no input data, and the output data being:
|
||||
//
|
||||
// Description | Length
|
||||
// ---------------------------------------------------+--------
|
||||
// Flags 01: arbitrary data signature enabled by user | 1 byte
|
||||
// Application major version | 1 byte
|
||||
// Application minor version | 1 byte
|
||||
// Application patch version | 1 byte
|
||||
func (w *ledgerWallet) ledgerVersion() ([3]byte, error) {
|
||||
// Send the request and wait for the response
|
||||
reply, err := w.ledgerExchange(ledgerOpGetConfiguration, 0, 0, nil)
|
||||
if err != nil {
|
||||
return [3]byte{}, err
|
||||
}
|
||||
if len(reply) != 4 {
|
||||
return [3]byte{}, errInvalidVersionReply
|
||||
}
|
||||
// Cache the version for future reference
|
||||
var version [3]byte
|
||||
copy(version[:], reply[1:])
|
||||
return version, nil
|
||||
}
|
||||
|
||||
// ledgerDerive retrieves the currently active Ethereum address from a Ledger
|
||||
// wallet at the specified derivation path.
|
||||
//
|
||||
// The address derivation protocol is defined as follows:
|
||||
//
|
||||
// CLA | INS | P1 | P2 | Lc | Le
|
||||
// ----+-----+----+----+-----+---
|
||||
// E0 | 02 | 00 return address
|
||||
// 01 display address and confirm before returning
|
||||
// | 00: do not return the chain code
|
||||
// | 01: return the chain code
|
||||
// | var | 00
|
||||
//
|
||||
// Where the input data is:
|
||||
//
|
||||
// Description | Length
|
||||
// -------------------------------------------------+--------
|
||||
// Number of BIP 32 derivations to perform (max 10) | 1 byte
|
||||
// First derivation index (big endian) | 4 bytes
|
||||
// ... | 4 bytes
|
||||
// Last derivation index (big endian) | 4 bytes
|
||||
//
|
||||
// And the output data is:
|
||||
//
|
||||
// Description | Length
|
||||
// ------------------------+-------------------
|
||||
// Public Key length | 1 byte
|
||||
// Uncompressed Public Key | arbitrary
|
||||
// Ethereum address length | 1 byte
|
||||
// Ethereum address | 40 bytes hex ascii
|
||||
// Chain code if requested | 32 bytes
|
||||
func (w *ledgerWallet) ledgerDerive(derivationPath []uint32) (common.Address, error) {
|
||||
// Flatten the derivation path into the Ledger request
|
||||
path := make([]byte, 1+4*len(derivationPath))
|
||||
path[0] = byte(len(derivationPath))
|
||||
for i, component := range derivationPath {
|
||||
binary.BigEndian.PutUint32(path[1+4*i:], component)
|
||||
}
|
||||
// Send the request and wait for the response
|
||||
reply, err := w.ledgerExchange(ledgerOpRetrieveAddress, ledgerP1DirectlyFetchAddress, ledgerP2DiscardAddressChainCode, path)
|
||||
if err != nil {
|
||||
return common.Address{}, err
|
||||
}
|
||||
// Discard the public key, we don't need that for now
|
||||
if len(reply) < 1 || len(reply) < 1+int(reply[0]) {
|
||||
return common.Address{}, errors.New("reply lacks public key entry")
|
||||
}
|
||||
reply = reply[1+int(reply[0]):]
|
||||
|
||||
// Extract the Ethereum hex address string
|
||||
if len(reply) < 1 || len(reply) < 1+int(reply[0]) {
|
||||
return common.Address{}, errors.New("reply lacks address entry")
|
||||
}
|
||||
hexstr := reply[1 : 1+int(reply[0])]
|
||||
|
||||
// Decode the hex string into an Ethereum address and return
|
||||
var address common.Address
|
||||
hex.Decode(address[:], hexstr)
|
||||
return address, nil
|
||||
}
|
||||
|
||||
// ledgerSign sends the transaction to the Ledger wallet, and waits for the user
|
||||
// to confirm or deny the transaction.
|
||||
//
|
||||
// The transaction signing protocol is defined as follows:
|
||||
//
|
||||
// CLA | INS | P1 | P2 | Lc | Le
|
||||
// ----+-----+----+----+-----+---
|
||||
// E0 | 04 | 00: first transaction data block
|
||||
// 80: subsequent transaction data block
|
||||
// | 00 | variable | variable
|
||||
//
|
||||
// Where the input for the first transaction block (first 255 bytes) is:
|
||||
//
|
||||
// Description | Length
|
||||
// -------------------------------------------------+----------
|
||||
// Number of BIP 32 derivations to perform (max 10) | 1 byte
|
||||
// First derivation index (big endian) | 4 bytes
|
||||
// ... | 4 bytes
|
||||
// Last derivation index (big endian) | 4 bytes
|
||||
// RLP transaction chunk | arbitrary
|
||||
//
|
||||
// And the input for subsequent transaction blocks (first 255 bytes) are:
|
||||
//
|
||||
// Description | Length
|
||||
// ----------------------+----------
|
||||
// RLP transaction chunk | arbitrary
|
||||
//
|
||||
// And the output data is:
|
||||
//
|
||||
// Description | Length
|
||||
// ------------+---------
|
||||
// signature V | 1 byte
|
||||
// signature R | 32 bytes
|
||||
// signature S | 32 bytes
|
||||
func (w *ledgerWallet) ledgerSign(derivationPath []uint32, address common.Address, tx *types.Transaction, chainID *big.Int) (*types.Transaction, error) {
|
||||
// Flatten the derivation path into the Ledger request
|
||||
path := make([]byte, 1+4*len(derivationPath))
|
||||
path[0] = byte(len(derivationPath))
|
||||
for i, component := range derivationPath {
|
||||
binary.BigEndian.PutUint32(path[1+4*i:], component)
|
||||
}
|
||||
// Create the transaction RLP based on whether legacy or EIP155 signing was requested
|
||||
var (
|
||||
txrlp []byte
|
||||
err error
|
||||
)
|
||||
if chainID == nil {
|
||||
if txrlp, err = rlp.EncodeToBytes([]interface{}{tx.Nonce(), tx.GasPrice(), tx.Gas(), tx.To(), tx.Value(), tx.Data()}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
} else {
|
||||
if txrlp, err = rlp.EncodeToBytes([]interface{}{tx.Nonce(), tx.GasPrice(), tx.Gas(), tx.To(), tx.Value(), tx.Data(), chainID, big.NewInt(0), big.NewInt(0)}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
payload := append(path, txrlp...)
|
||||
|
||||
// Send the request and wait for the response
|
||||
var (
|
||||
op = ledgerP1InitTransactionData
|
||||
reply []byte
|
||||
)
|
||||
for len(payload) > 0 {
|
||||
// Calculate the size of the next data chunk
|
||||
chunk := 255
|
||||
if chunk > len(payload) {
|
||||
chunk = len(payload)
|
||||
}
|
||||
// Send the chunk over, ensuring it's processed correctly
|
||||
reply, err = w.ledgerExchange(ledgerOpSignTransaction, op, 0, payload[:chunk])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// Shift the payload and ensure subsequent chunks are marked as such
|
||||
payload = payload[chunk:]
|
||||
op = ledgerP1ContTransactionData
|
||||
}
|
||||
// Extract the Ethereum signature and do a sanity validation
|
||||
if len(reply) != 65 {
|
||||
return nil, errors.New("reply lacks signature")
|
||||
}
|
||||
signature := append(reply[1:], reply[0])
|
||||
|
||||
// Create the correct signer and signature transform based on the chain ID
|
||||
var signer types.Signer
|
||||
if chainID == nil {
|
||||
signer = new(types.HomesteadSigner)
|
||||
} else {
|
||||
signer = types.NewEIP155Signer(chainID)
|
||||
signature[64] = signature[64] - byte(chainID.Uint64()*2+35)
|
||||
}
|
||||
// Inject the final signature into the transaction and sanity check the sender
|
||||
signed, err := tx.WithSignature(signer, signature)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
sender, err := types.Sender(signer, signed)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if sender != address {
|
||||
return nil, fmt.Errorf("signer mismatch: expected %s, got %s", address.Hex(), sender.Hex())
|
||||
}
|
||||
return signed, nil
|
||||
}
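A tiny, standalone illustration (chain ID 1 is an assumed example value) of the V-byte normalisation done in ledgerSign above: for EIP-155 requests the device reports V as chainID*2 + 35 + recoveryID, and the code subtracts chainID*2 + 35 so that WithSignature receives the plain recovery id of 0 or 1.

package main

import "fmt"

func main() {
	chainID := uint64(1)              // assumed example chain ID (mainnet)
	deviceV := byte(chainID*2+35) + 1 // what the device would report for recovery id 1

	recoveryID := deviceV - byte(chainID*2+35) // the transform applied to signature[64]
	fmt.Println(recoveryID)                    // 1
}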
|
||||
|
||||
// ledgerExchange performs a data exchange with the Ledger wallet, sending it a
|
||||
// message and retrieving the response.
|
||||
//
|
||||
// The common transport header is defined as follows:
|
||||
//
|
||||
// Description | Length
|
||||
// --------------------------------------+----------
|
||||
// Communication channel ID (big endian) | 2 bytes
|
||||
// Command tag | 1 byte
|
||||
// Packet sequence index (big endian) | 2 bytes
|
||||
// Payload | arbitrary
|
||||
//
|
||||
// The Communication channel ID allows commands multiplexing over the same
|
||||
// physical link. It is not used for the time being, and should be set to 0101
|
||||
// to avoid compatibility issues with implementations ignoring a leading 00 byte.
|
||||
//
|
||||
// The Command tag describes the message content. Use TAG_APDU (0x05) for standard
|
||||
// APDU payloads, or TAG_PING (0x02) for a simple link test.
|
||||
//
|
||||
// The Packet sequence index describes the current sequence for fragmented payloads.
|
||||
// The first fragment index is 0x00.
|
||||
//
|
||||
// APDU Command payloads are encoded as follows:
|
||||
//
|
||||
// Description | Length
|
||||
// -----------------------------------
|
||||
// APDU length (big endian) | 2 bytes
|
||||
// APDU CLA | 1 byte
|
||||
// APDU INS | 1 byte
|
||||
// APDU P1 | 1 byte
|
||||
// APDU P2 | 1 byte
|
||||
// APDU length | 1 byte
|
||||
// Optional APDU data | arbitrary
|
||||
func (w *ledgerWallet) ledgerExchange(opcode ledgerOpcode, p1 ledgerParam1, p2 ledgerParam2, data []byte) ([]byte, error) {
|
||||
// Construct the message payload, possibly split into multiple chunks
|
||||
apdu := make([]byte, 2, 7+len(data))
|
||||
|
||||
binary.BigEndian.PutUint16(apdu, uint16(5+len(data)))
|
||||
apdu = append(apdu, []byte{0xe0, byte(opcode), byte(p1), byte(p2), byte(len(data))}...)
|
||||
apdu = append(apdu, data...)
|
||||
|
||||
// Stream all the chunks to the device
|
||||
header := []byte{0x01, 0x01, 0x05, 0x00, 0x00} // Channel ID and command tag appended
|
||||
chunk := make([]byte, 64)
|
||||
space := len(chunk) - len(header)
|
||||
|
||||
for i := 0; len(apdu) > 0; i++ {
|
||||
// Construct the new message to stream
|
||||
chunk = append(chunk[:0], header...)
|
||||
binary.BigEndian.PutUint16(chunk[3:], uint16(i))
|
||||
|
||||
if len(apdu) > space {
|
||||
chunk = append(chunk, apdu[:space]...)
|
||||
apdu = apdu[space:]
|
||||
} else {
|
||||
chunk = append(chunk, apdu...)
|
||||
apdu = nil
|
||||
}
|
||||
// Send over to the device
|
||||
w.log.Trace("Data chunk sent to the Ledger", "chunk", hexutil.Bytes(chunk))
|
||||
if _, err := w.device.Write(chunk); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
// Stream the reply back from the wallet in 64 byte chunks
|
||||
var reply []byte
|
||||
chunk = chunk[:64] // Yeah, we surely have enough space
|
||||
for {
|
||||
// Read the next chunk from the Ledger wallet
|
||||
if _, err := io.ReadFull(w.device, chunk); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
w.log.Trace("Data chunk received from the Ledger", "chunk", hexutil.Bytes(chunk))
|
||||
|
||||
// Make sure the transport header matches
|
||||
if chunk[0] != 0x01 || chunk[1] != 0x01 || chunk[2] != 0x05 {
|
||||
return nil, errReplyInvalidHeader
|
||||
}
|
||||
// If it's the first chunk, retrieve the total message length
|
||||
var payload []byte
|
||||
|
||||
if chunk[3] == 0x00 && chunk[4] == 0x00 {
|
||||
reply = make([]byte, 0, int(binary.BigEndian.Uint16(chunk[5:7])))
|
||||
payload = chunk[7:]
|
||||
} else {
|
||||
payload = chunk[5:]
|
||||
}
|
||||
// Append to the reply and stop when filled up
|
||||
if left := cap(reply) - len(reply); left > len(payload) {
|
||||
reply = append(reply, payload...)
|
||||
} else {
|
||||
reply = append(reply, payload[:left]...)
|
||||
break
|
||||
}
|
||||
}
|
||||
return reply[:len(reply)-2], nil
|
||||
}
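A standalone sketch of the HID framing documented above: every report starts with the channel ID 0x0101, the APDU tag 0x05 and a big-endian sequence index, with the APDU payload spread across as many reports as needed. The dummy payload length is an arbitrary choice for the example; the real code above additionally keeps each report at a fixed 64 bytes.

package main

import (
	"encoding/binary"
	"fmt"
)

// frame splits an already encoded APDU into transport reports carrying the
// channel ID, command tag and sequence index described in the comment above.
func frame(apdu []byte) [][]byte {
	header := []byte{0x01, 0x01, 0x05, 0x00, 0x00}
	space := 64 - len(header)

	var reports [][]byte
	for i := 0; len(apdu) > 0; i++ {
		report := make([]byte, 0, 64)
		report = append(report, header...)
		binary.BigEndian.PutUint16(report[3:], uint16(i)) // patch in the sequence index

		n := space
		if n > len(apdu) {
			n = len(apdu)
		}
		report = append(report, apdu[:n]...)
		apdu = apdu[n:]
		reports = append(reports, report)
	}
	return reports
}

func main() {
	for i, r := range frame(make([]byte, 100)) { // 100-byte dummy APDU -> two reports
		fmt.Printf("report %d header: % x\n", i, r[:5])
	}
}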
|
330
accounts/usbwallet/trezor.go
Normal file
@@ -0,0 +1,330 @@
|
||||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
// This file contains the implementation for interacting with the Trezor hardware
|
||||
// wallets. The wire protocol spec can be found on the SatoshiLabs website:
|
||||
// https://doc.satoshilabs.com/trezor-tech/api-protobuf.html
|
||||
|
||||
package usbwallet
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"math/big"
|
||||
|
||||
"github.com/ethereum/go-ethereum/accounts"
|
||||
"github.com/ethereum/go-ethereum/accounts/usbwallet/internal/trezor"
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"github.com/golang/protobuf/proto"
|
||||
)
|
||||
|
||||
// ErrTrezorPINNeeded is returned if opening the trezor requires a PIN code. In
|
||||
// this case, the calling application should display a pinpad and send back the
|
||||
// encoded passphrase.
|
||||
var ErrTrezorPINNeeded = errors.New("trezor: pin needed")
|
||||
|
||||
// errTrezorReplyInvalidHeader is the error message returned by a Trezor data exchange
|
||||
// if the device replies with a mismatching header. This usually means the device
|
||||
// is in browser mode.
|
||||
var errTrezorReplyInvalidHeader = errors.New("trezor: invalid reply header")
|
||||
|
||||
// trezorDriver implements the communication with a Trezor hardware wallet.
|
||||
type trezorDriver struct {
|
||||
device io.ReadWriter // USB device connection to communicate through
|
||||
version [3]uint32 // Current version of the Trezor firmware
|
||||
label string // Current textual label of the Trezor device
|
||||
pinwait bool // Flags whether the device is waiting for PIN entry
|
||||
failure error // Any failure that would make the device unusable
|
||||
log log.Logger // Contextual logger to tag the trezor with its id
|
||||
}
|
||||
|
||||
// newTrezorDriver creates a new instance of a Trezor USB protocol driver.
|
||||
func newTrezorDriver(logger log.Logger) driver {
|
||||
return &trezorDriver{
|
||||
log: logger,
|
||||
}
|
||||
}
|
||||
|
||||
// Status implements accounts.Wallet, always returning whether the Trezor is opened, closed
|
||||
// or whether the Ethereum app was not started on it.
|
||||
func (w *trezorDriver) Status() (string, error) {
|
||||
if w.failure != nil {
|
||||
return fmt.Sprintf("Failed: %v", w.failure), w.failure
|
||||
}
|
||||
if w.device == nil {
|
||||
return "Closed", w.failure
|
||||
}
|
||||
if w.pinwait {
|
||||
return fmt.Sprintf("Trezor v%d.%d.%d '%s' waiting for PIN", w.version[0], w.version[1], w.version[2], w.label), w.failure
|
||||
}
|
||||
return fmt.Sprintf("Trezor v%d.%d.%d '%s' online", w.version[0], w.version[1], w.version[2], w.label), w.failure
|
||||
}
|
||||
|
||||
// Open implements usbwallet.driver, attempting to initialize the connection to
|
||||
// the Trezor hardware wallet. Initializing the Trezor is a two phase operation:
|
||||
// * The first phase is to initialize the connection and read the wallet's
|
||||
// features. This phase is invoked if the provided passphrase is empty. The
|
||||
// device will display the pinpad as a result and will return an appropriate
|
||||
// error to notify the user that a second open phase is needed.
|
||||
// * The second phase is to unlock access to the Trezor, which is done by the
|
||||
// user actually providing a passphrase mapping a keyboard keypad to the pin
|
||||
// number of the user (shuffled according to the pinpad displayed).
|
||||
func (w *trezorDriver) Open(device io.ReadWriter, passphrase string) error {
|
||||
w.device, w.failure = device, nil
|
||||
|
||||
// If phase 1 is requested, init the connection and wait for user callback
|
||||
if passphrase == "" {
|
||||
// If we're already waiting for a PIN entry, insta-return
|
||||
if w.pinwait {
|
||||
return ErrTrezorPINNeeded
|
||||
}
|
||||
// Initialize a connection to the device
|
||||
features := new(trezor.Features)
|
||||
if _, err := w.trezorExchange(&trezor.Initialize{}, features); err != nil {
|
||||
return err
|
||||
}
|
||||
w.version = [3]uint32{features.GetMajorVersion(), features.GetMinorVersion(), features.GetPatchVersion()}
|
||||
w.label = features.GetLabel()
|
||||
|
||||
// Do a manual ping, forcing the device to ask for its PIN
|
||||
askPin := true
|
||||
res, err := w.trezorExchange(&trezor.Ping{PinProtection: &askPin}, new(trezor.PinMatrixRequest), new(trezor.Success))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// Only return the PIN request if the device wasn't unlocked until now
|
||||
if res == 1 {
|
||||
return nil // Device responded with trezor.Success
|
||||
}
|
||||
w.pinwait = true
|
||||
return ErrTrezorPINNeeded
|
||||
}
|
||||
// Phase 2 requested with actual PIN entry
|
||||
w.pinwait = false
|
||||
|
||||
if _, err := w.trezorExchange(&trezor.PinMatrixAck{Pin: &passphrase}, new(trezor.Success)); err != nil {
|
||||
w.failure = err
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Close implements usbwallet.driver, cleaning up any metadata maintained within
|
||||
// the Trezor driver.
|
||||
func (w *trezorDriver) Close() error {
|
||||
w.version, w.label, w.pinwait = [3]uint32{}, "", false
|
||||
return nil
|
||||
}
|
||||
|
||||
// Heartbeat implements usbwallet.driver, performing a sanity check against the
|
||||
// Trezor to see if it's still online.
|
||||
func (w *trezorDriver) Heartbeat() error {
|
||||
if _, err := w.trezorExchange(&trezor.Ping{}, new(trezor.Success)); err != nil {
|
||||
w.failure = err
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Derive implements usbwallet.driver, sending a derivation request to the Trezor
|
||||
// and returning the Ethereum address located on that derivation path.
|
||||
func (w *trezorDriver) Derive(path accounts.DerivationPath) (common.Address, error) {
|
||||
return w.trezorDerive(path)
|
||||
}
|
||||
|
||||
// SignTx implements usbwallet.driver, sending the transaction to the Trezor and
|
||||
// waiting for the user to confirm or deny the transaction.
|
||||
func (w *trezorDriver) SignTx(path accounts.DerivationPath, tx *types.Transaction, chainID *big.Int) (common.Address, *types.Transaction, error) {
|
||||
if w.device == nil {
|
||||
return common.Address{}, nil, accounts.ErrWalletClosed
|
||||
}
|
||||
return w.trezorSign(path, tx, chainID)
|
||||
}
|
||||
|
||||
// trezorDerive sends a derivation request to the Trezor device and returns the
|
||||
// Ethereum address located on that path.
|
||||
func (w *trezorDriver) trezorDerive(derivationPath []uint32) (common.Address, error) {
|
||||
address := new(trezor.EthereumAddress)
|
||||
if _, err := w.trezorExchange(&trezor.EthereumGetAddress{AddressN: derivationPath}, address); err != nil {
|
||||
return common.Address{}, err
|
||||
}
|
||||
return common.BytesToAddress(address.GetAddress()), nil
|
||||
}
|
||||
|
||||
// trezorSign sends the transaction to the Trezor wallet, and waits for the user
|
||||
// to confirm or deny the transaction.
|
||||
func (w *trezorDriver) trezorSign(derivationPath []uint32, tx *types.Transaction, chainID *big.Int) (common.Address, *types.Transaction, error) {
|
||||
// Create the transaction initiation message
|
||||
data := tx.Data()
|
||||
length := uint32(len(data))
|
||||
|
||||
request := &trezor.EthereumSignTx{
|
||||
AddressN: derivationPath,
|
||||
Nonce: new(big.Int).SetUint64(tx.Nonce()).Bytes(),
|
||||
GasPrice: tx.GasPrice().Bytes(),
|
||||
GasLimit: new(big.Int).SetUint64(tx.Gas()).Bytes(),
|
||||
Value: tx.Value().Bytes(),
|
||||
DataLength: &length,
|
||||
}
|
||||
if to := tx.To(); to != nil {
|
||||
request.To = (*to)[:] // Non contract deploy, set recipient explicitly
|
||||
}
|
||||
if length > 1024 { // Send the data chunked if that was requested
|
||||
request.DataInitialChunk, data = data[:1024], data[1024:]
|
||||
} else {
|
||||
request.DataInitialChunk, data = data, nil
|
||||
}
|
||||
if chainID != nil { // EIP-155 transaction, set chain ID explicitly (only 32 bit is supported!?)
|
||||
id := uint32(chainID.Int64())
|
||||
request.ChainId = &id
|
||||
}
|
||||
// Send the initiation message and stream content until a signature is returned
|
||||
response := new(trezor.EthereumTxRequest)
|
||||
if _, err := w.trezorExchange(request, response); err != nil {
|
||||
return common.Address{}, nil, err
|
||||
}
|
||||
for response.DataLength != nil && int(*response.DataLength) <= len(data) {
|
||||
chunk := data[:*response.DataLength]
|
||||
data = data[*response.DataLength:]
|
||||
|
||||
if _, err := w.trezorExchange(&trezor.EthereumTxAck{DataChunk: chunk}, response); err != nil {
|
||||
return common.Address{}, nil, err
|
||||
}
|
||||
}
|
||||
// Extract the Ethereum signature and do a sanity validation
|
||||
if len(response.GetSignatureR()) == 0 || len(response.GetSignatureS()) == 0 || response.GetSignatureV() == 0 {
|
||||
return common.Address{}, nil, errors.New("reply lacks signature")
|
||||
}
|
||||
signature := append(append(response.GetSignatureR(), response.GetSignatureS()...), byte(response.GetSignatureV()))
|
||||
|
||||
// Create the correct signer and signature transform based on the chain ID
|
||||
var signer types.Signer
|
||||
if chainID == nil {
|
||||
signer = new(types.HomesteadSigner)
|
||||
} else {
|
||||
signer = types.NewEIP155Signer(chainID)
|
||||
signature[64] = signature[64] - byte(chainID.Uint64()*2+35)
|
||||
}
|
||||
// Inject the final signature into the transaction and sanity check the sender
|
||||
signed, err := tx.WithSignature(signer, signature)
|
||||
if err != nil {
|
||||
return common.Address{}, nil, err
|
||||
}
|
||||
sender, err := types.Sender(signer, signed)
|
||||
if err != nil {
|
||||
return common.Address{}, nil, err
|
||||
}
|
||||
return sender, signed, nil
|
||||
}
|
||||
|
||||
// trezorExchange performs a data exchange with the Trezor wallet, sending it a
|
||||
// message and retrieving the response. If multiple responses are possible, the
|
||||
// method will also return the index of the destination object used.
|
||||
func (w *trezorDriver) trezorExchange(req proto.Message, results ...proto.Message) (int, error) {
|
||||
// Construct the original message payload to chunk up
|
||||
data, err := proto.Marshal(req)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
payload := make([]byte, 8+len(data))
|
||||
copy(payload, []byte{0x23, 0x23})
|
||||
binary.BigEndian.PutUint16(payload[2:], trezor.Type(req))
|
||||
binary.BigEndian.PutUint32(payload[4:], uint32(len(data)))
|
||||
copy(payload[8:], data)
|
||||
|
||||
// Stream all the chunks to the device
|
||||
chunk := make([]byte, 64)
|
||||
chunk[0] = 0x3f // Report ID magic number
|
||||
|
||||
for len(payload) > 0 {
|
||||
// Construct the new message to stream, padding with zeroes if needed
|
||||
if len(payload) > 63 {
|
||||
copy(chunk[1:], payload[:63])
|
||||
payload = payload[63:]
|
||||
} else {
|
||||
copy(chunk[1:], payload)
|
||||
copy(chunk[1+len(payload):], make([]byte, 63-len(payload)))
|
||||
payload = nil
|
||||
}
|
||||
// Send over to the device
|
||||
w.log.Trace("Data chunk sent to the Trezor", "chunk", hexutil.Bytes(chunk))
|
||||
if _, err := w.device.Write(chunk); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
}
|
||||
// Stream the reply back from the wallet in 64 byte chunks
|
||||
var (
|
||||
kind uint16
|
||||
reply []byte
|
||||
)
|
||||
for {
|
||||
// Read the next chunk from the Trezor wallet
|
||||
if _, err := io.ReadFull(w.device, chunk); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
w.log.Trace("Data chunk received from the Trezor", "chunk", hexutil.Bytes(chunk))
|
||||
|
||||
// Make sure the transport header matches
|
||||
if chunk[0] != 0x3f || (len(reply) == 0 && (chunk[1] != 0x23 || chunk[2] != 0x23)) {
|
||||
return 0, errTrezorReplyInvalidHeader
|
||||
}
|
||||
// If it's the first chunk, retrieve the reply message type and total message length
|
||||
var payload []byte
|
||||
|
||||
if len(reply) == 0 {
|
||||
kind = binary.BigEndian.Uint16(chunk[3:5])
|
||||
reply = make([]byte, 0, int(binary.BigEndian.Uint32(chunk[5:9])))
|
||||
payload = chunk[9:]
|
||||
} else {
|
||||
payload = chunk[1:]
|
||||
}
|
||||
// Append to the reply and stop when filled up
|
||||
if left := cap(reply) - len(reply); left > len(payload) {
|
||||
reply = append(reply, payload...)
|
||||
} else {
|
||||
reply = append(reply, payload[:left]...)
|
||||
break
|
||||
}
|
||||
}
|
||||
// Try to parse the reply into the requested reply message
|
||||
if kind == uint16(trezor.MessageType_MessageType_Failure) {
|
||||
// Trezor returned a failure, extract and return the message
|
||||
failure := new(trezor.Failure)
|
||||
if err := proto.Unmarshal(reply, failure); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return 0, errors.New("trezor: " + failure.GetMessage())
|
||||
}
|
||||
if kind == uint16(trezor.MessageType_MessageType_ButtonRequest) {
|
||||
// Trezor is waiting for user confirmation, ack and wait for the next message
|
||||
return w.trezorExchange(&trezor.ButtonAck{}, results...)
|
||||
}
|
||||
for i, res := range results {
|
||||
if trezor.Type(res) == kind {
|
||||
return i, proto.Unmarshal(reply, res)
|
||||
}
|
||||
}
|
||||
expected := make([]string, len(results))
|
||||
for i, res := range results {
|
||||
expected[i] = trezor.Name(trezor.Type(res))
|
||||
}
|
||||
return 0, fmt.Errorf("trezor: expected reply types %s, got %s", expected, trezor.Name(kind))
|
||||
}
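A standalone sketch (dummy payload, no protobuf involved) of the Trezor wire framing used by trezorExchange above: the message is prefixed with the '##' magic, a big-endian 2-byte message type and a 4-byte payload length, and is then cut into 64-byte HID reports, each led by the 0x3f report ID.

package main

import (
	"encoding/binary"
	"fmt"
)

// frames wraps a raw payload into the '##'-prefixed envelope and splits it into
// 0x3f-prefixed 64-byte reports, zero padding the final one.
func frames(msgType uint16, data []byte) [][]byte {
	payload := make([]byte, 8+len(data))
	copy(payload, []byte{0x23, 0x23})
	binary.BigEndian.PutUint16(payload[2:], msgType)
	binary.BigEndian.PutUint32(payload[4:], uint32(len(data)))
	copy(payload[8:], data)

	var out [][]byte
	for len(payload) > 0 {
		report := make([]byte, 64)
		report[0] = 0x3f // report ID magic number
		n := copy(report[1:], payload)
		payload = payload[n:]
		out = append(out, report)
	}
	return out
}

func main() {
	for i, r := range frames(0, make([]byte, 70)) { // 70-byte dummy payload -> two reports
		fmt.Printf("report %d header: % x\n", i, r[:9])
	}
}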
|
562
accounts/usbwallet/wallet.go
Normal file
@@ -0,0 +1,562 @@
|
||||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
// Package usbwallet implements support for USB hardware wallets.
|
||||
package usbwallet
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"math/big"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
ethereum "github.com/ethereum/go-ethereum"
|
||||
"github.com/ethereum/go-ethereum/accounts"
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"github.com/karalabe/hid"
|
||||
)
|
||||
|
||||
// Maximum time between wallet health checks to detect USB unplugs.
|
||||
const heartbeatCycle = time.Second
|
||||
|
||||
// Minimum time to wait between self derivation attempts, even if the user is
|
||||
// requesting accounts like crazy.
|
||||
const selfDeriveThrottling = time.Second
|
||||
|
||||
// driver defines the vendor specific functionality hardware wallet instances
|
||||
// must implement to allow using them with the wallet lifecycle management.
|
||||
type driver interface {
|
||||
// Status returns a textual status to aid the user in the current state of the
|
||||
// wallet. It also returns an error indicating any failure the wallet might have
|
||||
// encountered.
|
||||
Status() (string, error)
|
||||
|
||||
// Open initializes access to a wallet instance. The passphrase parameter may
|
||||
// or may not be used by the implementation of a particular wallet instance.
|
||||
Open(device io.ReadWriter, passphrase string) error
|
||||
|
||||
// Close releases any resources held by an open wallet instance.
|
||||
Close() error
|
||||
|
||||
// Heartbeat performs a sanity check against the hardware wallet to see if it
|
||||
// is still online and healthy.
|
||||
Heartbeat() error
|
||||
|
||||
// Derive sends a derivation request to the USB device and returns the Ethereum
|
||||
// address located on that path.
|
||||
Derive(path accounts.DerivationPath) (common.Address, error)
|
||||
|
||||
// SignTx sends the transaction to the USB device and waits for the user to confirm
|
||||
// or deny the transaction.
|
||||
SignTx(path accounts.DerivationPath, tx *types.Transaction, chainID *big.Int) (common.Address, *types.Transaction, error)
|
||||
}
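A hedged sketch of a do-nothing vendor driver satisfying the interface above, mainly to make explicit which methods a new hardware backend has to provide. The type name and behaviour are invented for illustration, and the snippet assumes the go-ethereum packages are importable; derive and sign simply fail.

package usbwallet

import (
	"errors"
	"io"
	"math/big"

	"github.com/ethereum/go-ethereum/accounts"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
)

// noopDriver is a stand-in backend: it opens and closes cleanly but cannot
// actually derive accounts or sign transactions.
type noopDriver struct {
	device io.ReadWriter // USB device connection handed over by Open
}

// Compile-time check that the stub really satisfies the driver interface.
var _ driver = (*noopDriver)(nil)

func (d *noopDriver) Status() (string, error) { return "noop driver online", nil }

func (d *noopDriver) Open(device io.ReadWriter, passphrase string) error {
	d.device = device
	return nil
}

func (d *noopDriver) Close() error     { d.device = nil; return nil }
func (d *noopDriver) Heartbeat() error { return nil }

func (d *noopDriver) Derive(path accounts.DerivationPath) (common.Address, error) {
	return common.Address{}, errors.New("noop driver cannot derive accounts")
}

func (d *noopDriver) SignTx(path accounts.DerivationPath, tx *types.Transaction, chainID *big.Int) (common.Address, *types.Transaction, error) {
	return common.Address{}, nil, errors.New("noop driver cannot sign transactions")
}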
|
||||
|
||||
// wallet represents the common functionality shared by all USB hardware
|
||||
// wallets to prevent reimplementing the same complex maintenance mechanisms
|
||||
// for different vendors.
|
||||
type wallet struct {
|
||||
hub *Hub // USB hub scanning
|
||||
driver driver // Hardware implementation of the low level device operations
|
||||
url *accounts.URL // Textual URL uniquely identifying this wallet
|
||||
|
||||
info hid.DeviceInfo // Known USB device infos about the wallet
|
||||
device *hid.Device // USB device advertising itself as a hardware wallet
|
||||
|
||||
accounts []accounts.Account // List of derived accounts pinned on the hardware wallet
|
||||
paths map[common.Address]accounts.DerivationPath // Known derivation paths for signing operations
|
||||
|
||||
deriveNextPath accounts.DerivationPath // Next derivation path for account auto-discovery
|
||||
deriveNextAddr common.Address // Next derived account address for auto-discovery
|
||||
deriveChain ethereum.ChainStateReader // Blockchain state reader to discover used accounts with
|
||||
deriveReq chan chan struct{} // Channel to request a self-derivation on
|
||||
deriveQuit chan chan error // Channel to terminate the self-deriver with
|
||||
|
||||
healthQuit chan chan error
|
||||
|
||||
// Locking a hardware wallet is a bit special. Since hardware devices are lower
|
||||
// performing, any communication with them might take a non-negligible amount of
|
||||
// time. Worse still, waiting for user confirmation can take arbitrarily long,
|
||||
// but exclusive communication must be upheld during it. Locking the entire wallet
|
||||
// in the meantime, however, would stall any parts of the system that don't want
|
||||
// to communicate, just to read some state (e.g. list the accounts).
|
||||
//
|
||||
// As such, a hardware wallet needs two locks to function correctly. A state
|
||||
// lock can be used to protect the wallet's software-side internal state, which
|
||||
// must not be held exclusively during hardware communication. A communication
|
||||
// lock can be used to achieve exclusive access to the device itself, this one
|
||||
// however should allow "skipping" waiting for operations that might want to
|
||||
// use the device, but can live without it too (e.g. account self-derivation); a
// small sketch of this locking pattern follows the struct definition below.
|
||||
//
|
||||
// Since we have two locks, it's important to know how to properly use them:
|
||||
// - Communication requires the `device` to not change, so obtaining the
|
||||
// commsLock should be done after having a stateLock.
|
||||
// - Communication must not disable read access to the wallet state, so it
|
||||
// must only ever hold a *read* lock to stateLock.
|
||||
commsLock chan struct{} // Mutex (buf=1) for the USB comms without keeping the state locked
|
||||
stateLock sync.RWMutex // Protects read and write access to the wallet struct fields
|
||||
|
||||
log log.Logger // Contextual logger to tag the base with its id
|
||||
}
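A minimal, standalone sketch of the buffered-channel communications lock described in the comments above: a channel of capacity one holds a single token, a plain receive blocks until the device is free, and a select with a default branch gives the non-blocking "skip if busy" behaviour that account self-derivation relies on. All names and timings here are illustrative only.

package main

import (
	"fmt"
	"time"
)

func main() {
	commsLock := make(chan struct{}, 1)
	commsLock <- struct{}{} // enable the lock by depositing the single token

	// Exclusive user: take the token, talk to the device, put the token back.
	go func() {
		<-commsLock
		time.Sleep(20 * time.Millisecond) // pretend to wait on user confirmation
		commsLock <- struct{}{}
	}()
	time.Sleep(time.Millisecond) // let the goroutine grab the token first

	// Opportunistic user: only proceed if the device happens to be idle right now.
	select {
	case <-commsLock:
		fmt.Println("device idle, doing optional work")
		commsLock <- struct{}{}
	default:
		fmt.Println("device busy, skipping optional work")
	}
}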
|
||||
|
||||
// URL implements accounts.Wallet, returning the URL of the USB hardware device.
|
||||
func (w *wallet) URL() accounts.URL {
|
||||
return *w.url // Immutable, no need for a lock
|
||||
}
|
||||
|
||||
// Status implements accounts.Wallet, returning a custom status message from the
|
||||
// underlying vendor-specific hardware wallet implementation.
|
||||
func (w *wallet) Status() (string, error) {
|
||||
w.stateLock.RLock() // No device communication, state lock is enough
|
||||
defer w.stateLock.RUnlock()
|
||||
|
||||
status, failure := w.driver.Status()
|
||||
if w.device == nil {
|
||||
return "Closed", failure
|
||||
}
|
||||
return status, failure
|
||||
}
|
||||
|
||||
// Open implements accounts.Wallet, attempting to open a USB connection to the
|
||||
// hardware wallet.
|
||||
func (w *wallet) Open(passphrase string) error {
|
||||
w.stateLock.Lock() // State lock is enough since there's no connection yet at this point
|
||||
defer w.stateLock.Unlock()
|
||||
|
||||
// If the device was already opened once, refuse to try again
|
||||
if w.paths != nil {
|
||||
return accounts.ErrWalletAlreadyOpen
|
||||
}
|
||||
// Make sure the actual device connection is done only once
|
||||
if w.device == nil {
|
||||
device, err := w.info.Open()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
w.device = device
|
||||
w.commsLock = make(chan struct{}, 1)
|
||||
w.commsLock <- struct{}{} // Enable lock
|
||||
}
|
||||
// Delegate device initialization to the underlying driver
|
||||
if err := w.driver.Open(w.device, passphrase); err != nil {
|
||||
return err
|
||||
}
|
||||
// Connection successful, start life-cycle management
|
||||
w.paths = make(map[common.Address]accounts.DerivationPath)
|
||||
|
||||
w.deriveReq = make(chan chan struct{})
|
||||
w.deriveQuit = make(chan chan error)
|
||||
w.healthQuit = make(chan chan error)
|
||||
|
||||
go w.heartbeat()
|
||||
go w.selfDerive()
|
||||
|
||||
// Notify anyone listening for wallet events that a new device is accessible
|
||||
go w.hub.updateFeed.Send(accounts.WalletEvent{Wallet: w, Kind: accounts.WalletOpened})
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// heartbeat is a health check loop for the USB wallets to periodically verify
|
||||
// whether they are still present or if they malfunctioned.
|
||||
func (w *wallet) heartbeat() {
|
||||
w.log.Debug("USB wallet health-check started")
|
||||
defer w.log.Debug("USB wallet health-check stopped")
|
||||
|
||||
// Execute heartbeat checks until termination or error
|
||||
var (
|
||||
errc chan error
|
||||
err error
|
||||
)
|
||||
for errc == nil && err == nil {
|
||||
// Wait until termination is requested or the heartbeat cycle arrives
|
||||
select {
|
||||
case errc = <-w.healthQuit:
|
||||
// Termination requested
|
||||
continue
|
||||
case <-time.After(heartbeatCycle):
|
||||
// Heartbeat time
|
||||
}
|
||||
// Execute a tiny data exchange to see responsiveness
|
||||
w.stateLock.RLock()
|
||||
if w.device == nil {
|
||||
// Terminated while waiting for the lock
|
||||
w.stateLock.RUnlock()
|
||||
continue
|
||||
}
|
||||
<-w.commsLock // Don't lock state while resolving version
|
||||
err = w.driver.Heartbeat()
|
||||
w.commsLock <- struct{}{}
|
||||
w.stateLock.RUnlock()
|
||||
|
||||
if err != nil {
|
||||
w.stateLock.Lock() // Lock state to tear the wallet down
|
||||
w.close()
|
||||
w.stateLock.Unlock()
|
||||
}
|
||||
// Ignore non hardware related errors
|
||||
err = nil
|
||||
}
|
||||
// In case of error, wait for termination
|
||||
if err != nil {
|
||||
w.log.Debug("USB wallet health-check failed", "err", err)
|
||||
errc = <-w.healthQuit
|
||||
}
|
||||
errc <- err
|
||||
}
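A standalone sketch of the shutdown handshake used by heartbeat above (and mirrored by selfDerive and Close): the worker owns a chan chan error, the closer sends a fresh error channel into it and then waits for the worker's final error on that channel. The worker/interval names are illustrative.

package main

import (
	"fmt"
	"time"
)

func worker(quit chan chan error, interval time.Duration) {
	var (
		errc chan error
		err  error
	)
	for errc == nil && err == nil {
		select {
		case errc = <-quit: // termination requested
			continue
		case <-time.After(interval): // periodic tick
		}
		// ... one round of health checking would go here ...
	}
	if err != nil {
		errc = <-quit // broke out on an error, wait for the closer to show up
	}
	errc <- err
}

func main() {
	quit := make(chan chan error)
	go worker(quit, 10*time.Millisecond)

	time.Sleep(25 * time.Millisecond)
	errc := make(chan error)
	quit <- errc
	fmt.Println("worker stopped, error:", <-errc)
}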
|
||||
|
||||
// Close implements accounts.Wallet, closing the USB connection to the device.
|
||||
func (w *wallet) Close() error {
|
||||
// Ensure the wallet was opened
|
||||
w.stateLock.RLock()
|
||||
hQuit, dQuit := w.healthQuit, w.deriveQuit
|
||||
w.stateLock.RUnlock()
|
||||
|
||||
// Terminate the health checks
|
||||
var herr error
|
||||
if hQuit != nil {
|
||||
errc := make(chan error)
|
||||
hQuit <- errc
|
||||
herr = <-errc // Save for later, we *must* close the USB
|
||||
}
|
||||
// Terminate the self-derivations
|
||||
var derr error
|
||||
if dQuit != nil {
|
||||
errc := make(chan error)
|
||||
dQuit <- errc
|
||||
derr = <-errc // Save for later, we *must* close the USB
|
||||
}
|
||||
// Terminate the device connection
|
||||
w.stateLock.Lock()
|
||||
defer w.stateLock.Unlock()
|
||||
|
||||
w.healthQuit = nil
|
||||
w.deriveQuit = nil
|
||||
w.deriveReq = nil
|
||||
|
||||
if err := w.close(); err != nil {
|
||||
return err
|
||||
}
|
||||
if herr != nil {
|
||||
return herr
|
||||
}
|
||||
return derr
|
||||
}
|
||||
|
||||
// close is the internal wallet closer that terminates the USB connection and
|
||||
// resets all the fields to their defaults.
|
||||
//
|
||||
// Note, close assumes the state lock is held!
|
||||
func (w *wallet) close() error {
|
||||
// Allow duplicate closes, especially for health-check failures
|
||||
if w.device == nil {
|
||||
return nil
|
||||
}
|
||||
// Close the device, clear everything, then return
|
||||
w.device.Close()
|
||||
w.device = nil
|
||||
|
||||
w.accounts, w.paths = nil, nil
|
||||
w.driver.Close()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Accounts implements accounts.Wallet, returning the list of accounts pinned to
|
||||
// the USB hardware wallet. If self-derivation was enabled, the account list is
|
||||
// periodically expanded based on current chain state.
|
||||
func (w *wallet) Accounts() []accounts.Account {
|
||||
// Attempt self-derivation if it's running
|
||||
reqc := make(chan struct{}, 1)
|
||||
select {
|
||||
case w.deriveReq <- reqc:
|
||||
// Self-derivation request accepted, wait for it
|
||||
<-reqc
|
||||
default:
|
||||
// Self-derivation offline, throttled or busy, skip
|
||||
}
|
||||
// Return whatever account list we ended up with
|
||||
w.stateLock.RLock()
|
||||
defer w.stateLock.RUnlock()
|
||||
|
||||
cpy := make([]accounts.Account, len(w.accounts))
|
||||
copy(cpy, w.accounts)
|
||||
return cpy
|
||||
}
|
||||
|
||||
// selfDerive is an account derivation loop that upon request attempts to find
|
||||
// new non-zero accounts.
|
||||
func (w *wallet) selfDerive() {
|
||||
w.log.Debug("USB wallet self-derivation started")
|
||||
defer w.log.Debug("USB wallet self-derivation stopped")
|
||||
|
||||
// Execute self-derivations until termination or error
|
||||
var (
|
||||
reqc chan struct{}
|
||||
errc chan error
|
||||
err error
|
||||
)
|
||||
for errc == nil && err == nil {
|
||||
// Wait until either derivation or termination is requested
|
||||
select {
|
||||
case errc = <-w.deriveQuit:
|
||||
// Termination requested
|
||||
continue
|
||||
case reqc = <-w.deriveReq:
|
||||
// Account discovery requested
|
||||
}
|
||||
// Derivation needs a chain and device access, skip if either unavailable
|
||||
w.stateLock.RLock()
|
||||
if w.device == nil || w.deriveChain == nil {
|
||||
w.stateLock.RUnlock()
|
||||
reqc <- struct{}{}
|
||||
continue
|
||||
}
|
||||
select {
|
||||
case <-w.commsLock:
|
||||
default:
|
||||
w.stateLock.RUnlock()
|
||||
reqc <- struct{}{}
|
||||
continue
|
||||
}
|
||||
// Device lock obtained, derive the next batch of accounts
|
||||
var (
|
||||
accs []accounts.Account
|
||||
paths []accounts.DerivationPath
|
||||
|
||||
nextAddr = w.deriveNextAddr
|
||||
nextPath = w.deriveNextPath
|
||||
|
||||
context = context.Background()
|
||||
)
|
||||
for empty := false; !empty; {
|
||||
// Retrieve the next derived Ethereum account
|
||||
if nextAddr == (common.Address{}) {
|
||||
if nextAddr, err = w.driver.Derive(nextPath); err != nil {
|
||||
w.log.Warn("USB wallet account derivation failed", "err", err)
|
||||
break
|
||||
}
|
||||
}
|
||||
// Check the account's status against the current chain state
|
||||
var (
|
||||
balance *big.Int
|
||||
nonce uint64
|
||||
)
|
||||
balance, err = w.deriveChain.BalanceAt(context, nextAddr, nil)
|
||||
if err != nil {
|
||||
w.log.Warn("USB wallet balance retrieval failed", "err", err)
|
||||
break
|
||||
}
|
||||
nonce, err = w.deriveChain.NonceAt(context, nextAddr, nil)
|
||||
if err != nil {
|
||||
w.log.Warn("USB wallet nonce retrieval failed", "err", err)
|
||||
break
|
||||
}
|
||||
// If the next account is empty, stop self-derivation, but add it nonetheless
|
||||
if balance.Sign() == 0 && nonce == 0 {
|
||||
empty = true
|
||||
}
|
||||
// We've just self-derived a new account, start tracking it locally
|
||||
path := make(accounts.DerivationPath, len(nextPath))
|
||||
copy(path[:], nextPath[:])
|
||||
paths = append(paths, path)
|
||||
|
||||
account := accounts.Account{
|
||||
Address: nextAddr,
|
||||
URL: accounts.URL{Scheme: w.url.Scheme, Path: fmt.Sprintf("%s/%s", w.url.Path, path)},
|
||||
}
|
||||
accs = append(accs, account)
|
||||
|
||||
// Display a log message to the user for new (or previously empty) accounts
|
||||
if _, known := w.paths[nextAddr]; !known || (!empty && nextAddr == w.deriveNextAddr) {
|
||||
w.log.Info("USB wallet discovered new account", "address", nextAddr, "path", path, "balance", balance, "nonce", nonce)
|
||||
}
|
||||
// Fetch the next potential account
|
||||
if !empty {
|
||||
nextAddr = common.Address{}
|
||||
nextPath[len(nextPath)-1]++
|
||||
}
|
||||
}
|
||||
// Self derivation complete, release device lock
|
||||
w.commsLock <- struct{}{}
|
||||
w.stateLock.RUnlock()
|
||||
|
||||
// Insert any accounts successfully derived
|
||||
w.stateLock.Lock()
|
||||
for i := 0; i < len(accs); i++ {
|
||||
if _, ok := w.paths[accs[i].Address]; !ok {
|
||||
w.accounts = append(w.accounts, accs[i])
|
||||
w.paths[accs[i].Address] = paths[i]
|
||||
}
|
||||
}
|
||||
// Shift the self-derivation forward
|
||||
// TODO(karalabe): don't overwrite changes from wallet.SelfDerive
|
||||
w.deriveNextAddr = nextAddr
|
||||
w.deriveNextPath = nextPath
|
||||
w.stateLock.Unlock()
|
||||
|
||||
// Notify the user of termination and loop after a bit of time (to avoid thrashing)
|
||||
reqc <- struct{}{}
|
||||
if err == nil {
|
||||
select {
|
||||
case errc = <-w.deriveQuit:
|
||||
// Termination requested, abort
|
||||
case <-time.After(selfDeriveThrottling):
|
||||
// Waited enough, willing to self-derive again
|
||||
}
|
||||
}
|
||||
}
|
||||
// In case of error, wait for termination
|
||||
if err != nil {
|
||||
w.log.Debug("USB wallet self-derivation failed", "err", err)
|
||||
errc = <-w.deriveQuit
|
||||
}
|
||||
errc <- err
|
||||
}
|
||||
|
||||
// Contains implements accounts.Wallet, returning whether a particular account is
|
||||
// or is not pinned into this wallet instance. Although we could attempt to resolve
|
||||
// unpinned accounts, that would be a non-negligible hardware operation.
|
||||
func (w *wallet) Contains(account accounts.Account) bool {
|
||||
w.stateLock.RLock()
|
||||
defer w.stateLock.RUnlock()
|
||||
|
||||
_, exists := w.paths[account.Address]
|
||||
return exists
|
||||
}
|
||||
|
||||
// Derive implements accounts.Wallet, deriving a new account at the specific
|
||||
// derivation path. If pin is set to true, the account will be added to the list
|
||||
// of tracked accounts.
|
||||
func (w *wallet) Derive(path accounts.DerivationPath, pin bool) (accounts.Account, error) {
|
||||
// Try to derive the actual account and update its URL if successful
|
||||
w.stateLock.RLock() // Avoid device disappearing during derivation
|
||||
|
||||
if w.device == nil {
|
||||
w.stateLock.RUnlock()
|
||||
return accounts.Account{}, accounts.ErrWalletClosed
|
||||
}
|
||||
<-w.commsLock // Avoid concurrent hardware access
|
||||
address, err := w.driver.Derive(path)
|
||||
w.commsLock <- struct{}{}
|
||||
|
||||
w.stateLock.RUnlock()
|
||||
|
||||
// If an error occurred or no pinning was requested, return
|
||||
if err != nil {
|
||||
return accounts.Account{}, err
|
||||
}
|
||||
account := accounts.Account{
|
||||
Address: address,
|
||||
URL: accounts.URL{Scheme: w.url.Scheme, Path: fmt.Sprintf("%s/%s", w.url.Path, path)},
|
||||
}
|
||||
if !pin {
|
||||
return account, nil
|
||||
}
|
||||
// Pinning needs to modify the state
|
||||
w.stateLock.Lock()
|
||||
defer w.stateLock.Unlock()
|
||||
|
||||
if _, ok := w.paths[address]; !ok {
|
||||
w.accounts = append(w.accounts, account)
|
||||
w.paths[address] = path
|
||||
}
|
||||
return account, nil
|
||||
}
|
||||
|
||||
// SelfDerive implements accounts.Wallet, trying to discover accounts that the
|
||||
// user used previously (based on the chain state), but ones that he/she did not
|
||||
// explicitly pin to the wallet manually. To avoid chain head monitoring, self
|
||||
// derivation only runs during account listing (and even then throttled).
|
||||
func (w *wallet) SelfDerive(base accounts.DerivationPath, chain ethereum.ChainStateReader) {
|
||||
w.stateLock.Lock()
|
||||
defer w.stateLock.Unlock()
|
||||
|
||||
w.deriveNextPath = make(accounts.DerivationPath, len(base))
|
||||
copy(w.deriveNextPath[:], base[:])
|
||||
|
||||
w.deriveNextAddr = common.Address{}
|
||||
w.deriveChain = chain
|
||||
}
|
||||
|
||||
// SignHash implements accounts.Wallet, however signing arbitrary data is not
|
||||
// supported for hardware wallets, so this method will always return an error.
|
||||
func (w *wallet) SignHash(account accounts.Account, hash []byte) ([]byte, error) {
|
||||
return nil, accounts.ErrNotSupported
|
||||
}
|
||||
|
||||
// SignTx implements accounts.Wallet. It sends the transaction over to the Ledger
|
||||
// wallet to request a confirmation from the user. It returns either the signed
|
||||
// transaction or a failure if the user denied the transaction.
|
||||
//
|
||||
// Note, if the version of the Ethereum application running on the Ledger wallet is
|
||||
// too old to sign EIP-155 transactions, but such is requested nonetheless, an error
|
||||
// will be returned as opposed to silently signing in Homestead mode.
|
||||
func (w *wallet) SignTx(account accounts.Account, tx *types.Transaction, chainID *big.Int) (*types.Transaction, error) {
|
||||
w.stateLock.RLock() // Comms have own mutex, this is for the state fields
|
||||
defer w.stateLock.RUnlock()
|
||||
|
||||
// If the wallet is closed, abort
|
||||
if w.device == nil {
|
||||
return nil, accounts.ErrWalletClosed
|
||||
}
|
||||
// Make sure the requested account is contained within
|
||||
path, ok := w.paths[account.Address]
|
||||
if !ok {
|
||||
return nil, accounts.ErrUnknownAccount
|
||||
}
|
||||
// All infos gathered and metadata checks out, request signing
|
||||
<-w.commsLock
|
||||
defer func() { w.commsLock <- struct{}{} }()
|
||||
|
||||
// Ensure the device isn't screwed with while user confirmation is pending
|
||||
// TODO(karalabe): remove if hotplug lands on Windows
|
||||
w.hub.commsLock.Lock()
|
||||
w.hub.commsPend++
|
||||
w.hub.commsLock.Unlock()
|
||||
|
||||
defer func() {
|
||||
w.hub.commsLock.Lock()
|
||||
w.hub.commsPend--
|
||||
w.hub.commsLock.Unlock()
|
||||
}()
|
||||
// Sign the transaction and verify the sender to avoid hardware fault surprises
|
||||
sender, signed, err := w.driver.SignTx(path, tx, chainID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if sender != account.Address {
|
||||
return nil, fmt.Errorf("signer mismatch: expected %s, got %s", account.Address.Hex(), sender.Hex())
|
||||
}
|
||||
return signed, nil
|
||||
}
|
||||
|
||||
// SignHashWithPassphrase implements accounts.Wallet, however signing arbitrary
|
||||
// data is not supported for Ledger wallets, so this method will always return
|
||||
// an error.
|
||||
func (w *wallet) SignHashWithPassphrase(account accounts.Account, passphrase string, hash []byte) ([]byte, error) {
|
||||
return w.SignHash(account, hash)
|
||||
}
|
||||
|
||||
// SignTxWithPassphrase implements accounts.Wallet, attempting to sign the given
|
||||
// transaction with the given account using passphrase as extra authentication.
|
||||
// Since USB wallets don't rely on passphrases, these are silently ignored.
|
||||
func (w *wallet) SignTxWithPassphrase(account accounts.Account, passphrase string, tx *types.Transaction, chainID *big.Int) (*types.Transaction, error) {
|
||||
return w.SignTx(account, tx, chainID)
|
||||
}
|
@@ -23,8 +23,8 @@ environment:
|
||||
install:
|
||||
- git submodule update --init
|
||||
- rmdir C:\go /s /q
|
||||
- appveyor DownloadFile https://storage.googleapis.com/golang/go1.8.3.windows-%GETH_ARCH%.zip
|
||||
- 7z x go1.8.3.windows-%GETH_ARCH%.zip -y -oC:\ > NUL
|
||||
- appveyor DownloadFile https://storage.googleapis.com/golang/go1.9.2.windows-%GETH_ARCH%.zip
|
||||
- 7z x go1.9.2.windows-%GETH_ARCH%.zip -y -oC:\ > NUL
|
||||
- go version
|
||||
- gcc --version
|
||||
|
||||
|
561
bmt/bmt.go
Normal file
@@ -0,0 +1,561 @@
|
||||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
// Package bmt provides a binary merkle tree implementation
|
||||
package bmt
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"hash"
|
||||
"io"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
)
|
||||
|
||||
/*
|
||||
Binary Merkle Tree Hash is a hash function over arbitrary data chunks of limited size
|
||||
It is defined as the root hash of the binary merkle tree built over fixed size segments
|
||||
of the underlying chunk using any base hash function (e.g. Keccak-256 SHA3)
|
||||
|
||||
It is used as the chunk hash function in swarm which in turn is the basis for the
|
||||
128 branching swarm hash http://swarm-guide.readthedocs.io/en/latest/architecture.html#swarm-hash
|
||||
|
||||
The BMT is optimal for providing compact inclusion proofs, i.e. prove that a
|
||||
segment is a substring of a chunk starting at a particular offset
|
||||
The size of the underlying segments is fixed at 32 bytes (called the resolution
|
||||
of the BMT hash), the EVM word size to optimize for on-chain BMT verification
|
||||
as well as the hash size optimal for inclusion proofs in the merkle tree of the swarm hash.
|
||||
|
||||
Two implementations are provided:
|
||||
|
||||
* RefHasher is optimized for code simplicity and meant as a reference implementation
|
||||
* Hasher is optimized for speed taking advantage of concurrency with minimalistic
|
||||
control structure to coordinate the concurrent routines
|
||||
It implements the ChunkHash interface as well as the go standard hash.Hash interface
|
||||
|
||||
*/
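A deliberately simplified, standalone sketch of the idea described above: split a chunk into fixed-size segments and hash adjacent nodes pairwise until a single root remains. SHA-256 stands in for the base hash, and the zero-padding and odd-node handling are assumptions of this sketch; the RefHasher and Hasher in this file deal with sizing, unbalanced trees and concurrency quite differently.

package main

import (
	"crypto/sha256"
	"fmt"
)

const segmentSize = 32 // resolution of the BMT hash

// bmtRoot computes a binary-Merkle-tree root over fixed-size segments of chunk.
func bmtRoot(chunk []byte) []byte {
	// Split the chunk into segments, zero-padding the final partial one.
	var level [][]byte
	for len(chunk) > 0 {
		seg := make([]byte, segmentSize)
		n := copy(seg, chunk)
		chunk = chunk[n:]
		level = append(level, seg)
	}
	if len(level) == 0 {
		level = [][]byte{make([]byte, segmentSize)}
	}
	// Hash adjacent pairs upwards until a single root remains.
	for len(level) > 1 {
		var next [][]byte
		for i := 0; i < len(level); i += 2 {
			if i+1 == len(level) { // odd node out is carried up unchanged
				next = append(next, level[i])
				continue
			}
			sum := sha256.Sum256(append(append([]byte{}, level[i]...), level[i+1]...))
			next = append(next, sum[:])
		}
		level = next
	}
	return level[0]
}

func main() {
	fmt.Printf("%x\n", bmtRoot([]byte("hello bmt")))
}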
|
||||
|
||||
const (
|
||||
// DefaultSegmentCount is the maximum number of segments of the underlying chunk
|
||||
DefaultSegmentCount = 128 // Should be equal to storage.DefaultBranches
|
||||
// DefaultPoolSize is the maximum number of bmt trees used by the hashers, i.e.,
|
||||
// the maximum number of concurrent BMT hashing operations performed by the same hasher
|
||||
DefaultPoolSize = 8
|
||||
)
|
||||
|
||||
// BaseHasher is a hash.Hash constructor function used for the base hash of the BMT.
|
||||
type BaseHasher func() hash.Hash
|
||||
|
||||
// Hasher is a reusable hasher for fixed maximum size chunks representing a BMT
|
||||
// implements the hash.Hash interface
|
||||
// reuse pool of Tree-s for amortised memory allocation and resource control
|
||||
// supports order-agnostic concurrent segment writes
|
||||
// as well as sequential read and write
|
||||
// can not be called concurrently on more than one chunk
|
||||
// can be further appended after Sum
|
||||
// Reset gives back the Tree to the pool and is guaranteed to leave
|
||||
// the tree and itself in a state reusable for hashing a new chunk
|
||||
type Hasher struct {
|
||||
pool *TreePool // BMT resource pool
|
||||
bmt *Tree // prebuilt BMT resource for flowcontrol and proofs
|
||||
blocksize int // segment size (size of hash) also for hash.Hash
|
||||
count int // segment count
|
||||
size int // for hash.Hash same as hashsize
|
||||
cur int // cursor position for rightmost currently open chunk
|
||||
segment []byte // the rightmost open segment (not complete)
|
||||
depth int // index of last level
|
||||
result chan []byte // result channel
|
||||
hash []byte // to record the result
|
||||
max int32 // max segments for SegmentWriter interface
|
||||
blockLength []byte // The block length that needs to be added in Sum
|
||||
}
|
||||
|
||||
// New creates a reusable Hasher
|
||||
// implements the hash.Hash interface
|
||||
// pulls a new Tree from a resource pool for hashing each chunk
|
||||
func New(p *TreePool) *Hasher {
|
||||
return &Hasher{
|
||||
pool: p,
|
||||
depth: depth(p.SegmentCount),
|
||||
size: p.SegmentSize,
|
||||
blocksize: p.SegmentSize,
|
||||
count: p.SegmentCount,
|
||||
result: make(chan []byte),
|
||||
}
|
||||
}
|
||||
|
||||
// Node is a reusable segment hasher representing a node in a BMT
|
||||
// it allows for continued writes after a Sum
|
||||
// and is left in completely reusable state after Reset
|
||||
type Node struct {
|
||||
level, index int // position of node for information/logging only
|
||||
initial bool // first and last node
|
||||
root bool // whether the node is root to a smaller BMT
|
||||
isLeft bool // whether it is left side of the parent double segment
|
||||
unbalanced bool // indicates if a node has only the left segment
|
||||
parent *Node // BMT connections
|
||||
state int32 // atomic increment impl concurrent boolean toggle
|
||||
left, right []byte
|
||||
}
|
||||
|
||||
// NewNode constructor for segment hasher nodes in the BMT
|
||||
func NewNode(level, index int, parent *Node) *Node {
|
||||
return &Node{
|
||||
parent: parent,
|
||||
level: level,
|
||||
index: index,
|
||||
initial: index == 0,
|
||||
isLeft: index%2 == 0,
|
||||
}
|
||||
}
|
||||
|
||||
// TreePool provides a pool of Trees used as resources by Hasher
|
||||
// a Tree popped from the pool is guaranteed to have clean state
|
||||
// for hashing a new chunk
|
||||
// Hasher Reset releases the Tree to the pool
|
||||
type TreePool struct {
|
||||
lock sync.Mutex
|
||||
c chan *Tree
|
||||
hasher BaseHasher
|
||||
SegmentSize int
|
||||
SegmentCount int
|
||||
Capacity int
|
||||
count int
|
||||
}
|
||||
|
||||
// NewTreePool creates a Tree pool with hasher, segment size, segment count and capacity
|
||||
// on GetTree it reuses free Trees or creates a new one if size is not reached
|
||||
func NewTreePool(hasher BaseHasher, segmentCount, capacity int) *TreePool {
|
||||
return &TreePool{
|
||||
c: make(chan *Tree, capacity),
|
||||
hasher: hasher,
|
||||
SegmentSize: hasher().Size(),
|
||||
SegmentCount: segmentCount,
|
||||
Capacity: capacity,
|
||||
}
|
||||
}
|
||||
|
||||
// Drain drains the pool until it has no more than n resources
|
||||
func (self *TreePool) Drain(n int) {
|
||||
self.lock.Lock()
|
||||
defer self.lock.Unlock()
|
||||
for len(self.c) > n {
|
||||
<-self.c
|
||||
self.count--
|
||||
}
|
||||
}
|
||||
|
||||
// Reserve is blocking until it returns an available Tree
|
||||
// it reuses free Trees or creates a new one if size is not reached
|
||||
func (self *TreePool) Reserve() *Tree {
|
||||
self.lock.Lock()
|
||||
defer self.lock.Unlock()
|
||||
var t *Tree
|
||||
if self.count == self.Capacity {
|
||||
return <-self.c
|
||||
}
|
||||
select {
|
||||
case t = <-self.c:
|
||||
default:
|
||||
t = NewTree(self.hasher, self.SegmentSize, self.SegmentCount)
|
||||
self.count++
|
||||
}
|
||||
return t
|
||||
}
|
||||
|
||||
// Release gives back a Tree to the pool.
|
||||
// This Tree is guaranteed to be in reusable state
|
||||
// does not need locking
|
||||
func (self *TreePool) Release(t *Tree) {
|
||||
self.c <- t // can never fail but...
|
||||
}
|
||||
|
||||
// Tree is a reusable control structure representing a BMT
|
||||
// organised in a binary tree
|
||||
// Hasher uses a TreePool to pick one for each chunk hash
|
||||
// the Tree is 'locked' while not in the pool
|
||||
type Tree struct {
|
||||
leaves []*Node
|
||||
}
|
||||
|
||||
// Draw draws the BMT (badly)
|
||||
func (self *Tree) Draw(hash []byte, d int) string {
|
||||
var left, right []string
|
||||
var anc []*Node
|
||||
for i, n := range self.leaves {
|
||||
left = append(left, fmt.Sprintf("%v", hashstr(n.left)))
|
||||
if i%2 == 0 {
|
||||
anc = append(anc, n.parent)
|
||||
}
|
||||
right = append(right, fmt.Sprintf("%v", hashstr(n.right)))
|
||||
}
|
||||
anc = self.leaves
|
||||
var hashes [][]string
|
||||
for l := 0; len(anc) > 0; l++ {
|
||||
var nodes []*Node
|
||||
hash := []string{""}
|
||||
for i, n := range anc {
|
||||
hash = append(hash, fmt.Sprintf("%v|%v", hashstr(n.left), hashstr(n.right)))
|
||||
if i%2 == 0 && n.parent != nil {
|
||||
nodes = append(nodes, n.parent)
|
||||
}
|
||||
}
|
||||
hash = append(hash, "")
|
||||
hashes = append(hashes, hash)
|
||||
anc = nodes
|
||||
}
|
||||
hashes = append(hashes, []string{"", fmt.Sprintf("%v", hashstr(hash)), ""})
|
||||
total := 60
|
||||
del := " "
|
||||
var rows []string
|
||||
for i := len(hashes) - 1; i >= 0; i-- {
|
||||
var textlen int
|
||||
hash := hashes[i]
|
||||
for _, s := range hash {
|
||||
textlen += len(s)
|
||||
}
|
||||
if total < textlen {
|
||||
total = textlen + len(hash)
|
||||
}
|
||||
delsize := (total - textlen) / (len(hash) - 1)
|
||||
if delsize > len(del) {
|
||||
delsize = len(del)
|
||||
}
|
||||
row := fmt.Sprintf("%v: %v", len(hashes)-i-1, strings.Join(hash, del[:delsize]))
|
||||
rows = append(rows, row)
|
||||
|
||||
}
|
||||
rows = append(rows, strings.Join(left, " "))
|
||||
rows = append(rows, strings.Join(right, " "))
|
||||
return strings.Join(rows, "\n") + "\n"
|
||||
}
|
||||
|
||||
// NewTree initialises the Tree by building up the nodes of a BMT
|
||||
// segment size is stipulated to be the size of the hash
|
||||
// segmentCount needs to be positive integer and does not need to be
|
||||
// a power of two and can even be an odd number
|
||||
// segmentSize * segmentCount determines the maximum chunk size
|
||||
// hashed using the tree
|
||||
func NewTree(hasher BaseHasher, segmentSize, segmentCount int) *Tree {
|
||||
n := NewNode(0, 0, nil)
|
||||
n.root = true
|
||||
prevlevel := []*Node{n}
|
||||
// iterate over the levels and create 2^level nodes at each
|
||||
level := 1
|
||||
count := 2
|
||||
for d := 1; d <= depth(segmentCount); d++ {
|
||||
nodes := make([]*Node, count)
|
||||
for i := 0; i < len(nodes); i++ {
|
||||
parent := prevlevel[i/2]
|
||||
t := NewNode(level, i, parent)
|
||||
nodes[i] = t
|
||||
}
|
||||
prevlevel = nodes
|
||||
level++
|
||||
count *= 2
|
||||
}
|
||||
// the datanode level is the nodes on the last level, which become the leaves of the tree
|
||||
return &Tree{
|
||||
leaves: prevlevel,
|
||||
}
|
||||
}
|
||||
|
||||
// methods needed by hash.Hash
|
||||
|
||||
// Size returns the size
|
||||
func (self *Hasher) Size() int {
|
||||
return self.size
|
||||
}
|
||||
|
||||
// BlockSize returns the block size
|
||||
func (self *Hasher) BlockSize() int {
|
||||
return self.blocksize
|
||||
}
|
||||
|
||||
// Sum returns the hash of the buffer
|
||||
// hash.Hash interface Sum method appends the byte slice to the underlying
|
||||
// data before it calculates and returns the hash of the chunk
|
||||
func (self *Hasher) Sum(b []byte) (r []byte) {
|
||||
t := self.bmt
|
||||
i := self.cur
|
||||
n := t.leaves[i]
|
||||
j := i
|
||||
// must run strictly before all nodes calculate
|
||||
// datanodes are guaranteed to have a parent
|
||||
if len(self.segment) > self.size && i > 0 && n.parent != nil {
|
||||
n = n.parent
|
||||
} else {
|
||||
i *= 2
|
||||
}
|
||||
d := self.finalise(n, i)
|
||||
self.writeSegment(j, self.segment, d)
|
||||
c := <-self.result
|
||||
self.releaseTree()
|
||||
|
||||
// sha3(length + BMT(pure_chunk))
|
||||
if self.blockLength == nil {
|
||||
return c
|
||||
}
|
||||
res := self.pool.hasher()
|
||||
res.Reset()
|
||||
res.Write(self.blockLength)
|
||||
res.Write(c)
|
||||
return res.Sum(nil)
|
||||
}
|
||||
|
||||
// Hasher implements the SwarmHash interface
|
||||
|
||||
// Hash waits for the hasher result and returns it
|
||||
// caller must call this on a BMT Hasher being written to
|
||||
func (self *Hasher) Hash() []byte {
|
||||
return <-self.result
|
||||
}
|
||||
|
||||
// Hasher implements the io.Writer interface
|
||||
|
||||
// Write fills the buffer to hash
|
||||
// with every full segment completed it launches a hasher go routine
|
||||
// that shoots up the BMT
|
||||
func (self *Hasher) Write(b []byte) (int, error) {
|
||||
l := len(b)
|
||||
if l <= 0 {
|
||||
return 0, nil
|
||||
}
|
||||
s := self.segment
|
||||
i := self.cur
|
||||
count := (self.count + 1) / 2
|
||||
need := self.count*self.size - self.cur*2*self.size
|
||||
size := self.size
|
||||
if need > size {
|
||||
size *= 2
|
||||
}
|
||||
if l < need {
|
||||
need = l
|
||||
}
|
||||
// calculate missing bit to complete current open segment
|
||||
rest := size - len(s)
|
||||
if need < rest {
|
||||
rest = need
|
||||
}
|
||||
s = append(s, b[:rest]...)
|
||||
need -= rest
|
||||
// read full segments and the last possibly partial segment
|
||||
for need > 0 && i < count-1 {
|
||||
// push all finished chunks we read
|
||||
self.writeSegment(i, s, self.depth)
|
||||
need -= size
|
||||
if need < 0 {
|
||||
size += need
|
||||
}
|
||||
s = b[rest : rest+size]
|
||||
rest += size
|
||||
i++
|
||||
}
|
||||
self.segment = s
|
||||
self.cur = i
|
||||
// otherwise, we can assume len(s) == 0, so all buffer is read and chunk is not yet full
|
||||
return l, nil
|
||||
}
|
||||
|
||||
// Hasher implements the io.ReaderFrom interface
|
||||
|
||||
// ReadFrom reads from io.Reader and appends to the data to hash using Write
|
||||
// it reads until the chunk to hash reaches maximum length or the reader reaches EOF
|
||||
// caller must Reset the hasher prior to call
|
||||
func (self *Hasher) ReadFrom(r io.Reader) (m int64, err error) {
|
||||
bufsize := self.size*self.count - self.size*self.cur - len(self.segment)
|
||||
buf := make([]byte, bufsize)
|
||||
var read int
|
||||
for {
|
||||
var n int
|
||||
n, err = r.Read(buf)
|
||||
read += n
|
||||
if err == io.EOF || read == len(buf) {
|
||||
hash := self.Sum(buf[:n])
|
||||
if read == len(buf) {
|
||||
err = NewEOC(hash)
|
||||
}
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
n, err = self.Write(buf[:n])
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
}
|
||||
return int64(read), err
|
||||
}
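A hypothetical consumer of ReadFrom could look like the sketch below; it relies only on the contract described above, namely that a full chunk is signalled by a *EOC error carrying the chunk hash, while any other error is passed through. The 4096-byte chunk size follows from 128 segments of 32 bytes; the helper name and import paths are illustrative assumptions.

```go
package main

import (
	"bytes"
	"fmt"
	"io"

	"github.com/ethereum/go-ethereum/bmt"
	"github.com/ethereum/go-ethereum/crypto/sha3"
)

// hashOneChunk reads from r until either the chunk is full (signalled by the
// *bmt.EOC error, which carries the chunk hash) or the reader is exhausted.
func hashOneChunk(h *bmt.Hasher, r io.Reader) ([]byte, error) {
	h.Reset() // ReadFrom requires a prior Reset
	_, err := h.ReadFrom(r)
	if eoc, ok := err.(*bmt.EOC); ok {
		return eoc.Hash, nil // end of chunk is not a real error
	}
	return nil, err
}

func main() {
	pool := bmt.NewTreePool(sha3.NewKeccak256, 128, 1)
	defer pool.Drain(0)
	h := bmt.New(pool)

	data := bytes.Repeat([]byte{0x42}, 4096) // exactly one full chunk
	hash, err := hashOneChunk(h, bytes.NewReader(data))
	fmt.Printf("hash=%x err=%v\n", hash, err)
}
```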
|
||||
|
||||
// Reset needs to be called before writing to the hasher
|
||||
func (self *Hasher) Reset() {
|
||||
self.getTree()
|
||||
self.blockLength = nil
|
||||
}
|
||||
|
||||
// Hasher implements the SwarmHash interface
|
||||
|
||||
// ResetWithLength needs to be called before writing to the hasher
|
||||
// the argument is supposed to be the byte slice binary representation of
|
||||
// the length of the data subsumed under the hash
|
||||
func (self *Hasher) ResetWithLength(l []byte) {
|
||||
self.Reset()
|
||||
self.blockLength = l
|
||||
|
||||
}
|
||||
|
||||
// releaseTree gives the Tree back to the pool whereby it unlocks
|
||||
// it resets tree, segment and index
|
||||
func (self *Hasher) releaseTree() {
|
||||
if self.bmt != nil {
|
||||
n := self.bmt.leaves[self.cur]
|
||||
for ; n != nil; n = n.parent {
|
||||
n.unbalanced = false
|
||||
if n.parent != nil {
|
||||
n.root = false
|
||||
}
|
||||
}
|
||||
self.pool.Release(self.bmt)
|
||||
self.bmt = nil
|
||||
|
||||
}
|
||||
self.cur = 0
|
||||
self.segment = nil
|
||||
}
|
||||
|
||||
func (self *Hasher) writeSegment(i int, s []byte, d int) {
|
||||
h := self.pool.hasher()
|
||||
n := self.bmt.leaves[i]
|
||||
|
||||
if len(s) > self.size && n.parent != nil {
|
||||
go func() {
|
||||
h.Reset()
|
||||
h.Write(s)
|
||||
s = h.Sum(nil)
|
||||
|
||||
if n.root {
|
||||
self.result <- s
|
||||
return
|
||||
}
|
||||
self.run(n.parent, h, d, n.index, s)
|
||||
}()
|
||||
return
|
||||
}
|
||||
go self.run(n, h, d, i*2, s)
|
||||
}
|
||||
|
||||
func (self *Hasher) run(n *Node, h hash.Hash, d int, i int, s []byte) {
|
||||
isLeft := i%2 == 0
|
||||
for {
|
||||
if isLeft {
|
||||
n.left = s
|
||||
} else {
|
||||
n.right = s
|
||||
}
|
||||
if !n.unbalanced && n.toggle() {
|
||||
return
|
||||
}
|
||||
if !n.unbalanced || !isLeft || i == 0 && d == 0 {
|
||||
h.Reset()
|
||||
h.Write(n.left)
|
||||
h.Write(n.right)
|
||||
s = h.Sum(nil)
|
||||
|
||||
} else {
|
||||
s = append(n.left, n.right...)
|
||||
}
|
||||
|
||||
self.hash = s
|
||||
if n.root {
|
||||
self.result <- s
|
||||
return
|
||||
}
|
||||
|
||||
isLeft = n.isLeft
|
||||
n = n.parent
|
||||
i++
|
||||
}
|
||||
}
|
||||
|
||||
// getTree obtains a BMT resource by reserving one from the pool
|
||||
func (self *Hasher) getTree() *Tree {
|
||||
if self.bmt != nil {
|
||||
return self.bmt
|
||||
}
|
||||
t := self.pool.Reserve()
|
||||
self.bmt = t
|
||||
return t
|
||||
}
|
||||
|
||||
// atomic bool toggle implementing a concurrent reusable 2-state object
|
||||
// atomic.AddInt32 with %2 implements the atomic bool toggle
|
||||
// it returns true if the toggler just put it in the active/waiting state
|
||||
func (self *Node) toggle() bool {
|
||||
return atomic.AddInt32(&self.state, 1)%2 == 1
|
||||
}
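The same AddInt32-modulo-2 trick can be shown in isolation. The sketch below is not part of the package; it only demonstrates that of two writers flipping the state once each, exactly one observes true (first to arrive) and the other observes false and can go on to hash the pair, which is how sibling writes meet at a Node.

```go
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

type toggler struct{ state int32 }

// toggle returns true for the first arrival, false for the second,
// mirroring Node.toggle above.
func (t *toggler) toggle() bool {
	return atomic.AddInt32(&t.state, 1)%2 == 1
}

func main() {
	var tg toggler
	var wg sync.WaitGroup
	for i := 0; i < 2; i++ {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			if tg.toggle() {
				fmt.Printf("writer %d arrived first, waits for its sibling\n", id)
			} else {
				fmt.Printf("writer %d arrived second, hashes the pair\n", id)
			}
		}(i)
	}
	wg.Wait()
}
```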
|
||||
|
||||
func hashstr(b []byte) string {
|
||||
end := len(b)
|
||||
if end > 4 {
|
||||
end = 4
|
||||
}
|
||||
return fmt.Sprintf("%x", b[:end])
|
||||
}
|
||||
|
||||
func depth(n int) (d int) {
|
||||
for l := (n - 1) / 2; l > 0; l /= 2 {
|
||||
d++
|
||||
}
|
||||
return d
|
||||
}
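For orientation, the helper above counts how many times (n-1)/2 can be halved before it reaches zero; the Tree constructor at the top of the file uses depth(segmentCount) as the number of node levels it builds, the last of which becomes the leaves. A throwaway check (the depth function is copied verbatim so the snippet is self-contained):

```go
package main

import "fmt"

// depth is copied verbatim from the package, for illustration only.
func depth(n int) (d int) {
	for l := (n - 1) / 2; l > 0; l /= 2 {
		d++
	}
	return d
}

func main() {
	for _, n := range []int{1, 2, 4, 8, 32, 128} {
		fmt.Printf("depth(%d) = %d\n", n, depth(n))
	}
	// prints 0, 0, 1, 2, 4, 6
}
```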
|
||||
|
||||
// finalise follows the zigzag path on the tree belonging
|
||||
// to the final datasegment
|
||||
func (self *Hasher) finalise(n *Node, i int) (d int) {
|
||||
isLeft := i%2 == 0
|
||||
for {
|
||||
// when the final segment's path is going via left segments
|
||||
// the incoming data is pushed to the parent upon pulling the left
|
||||
// we do not need to toggle the state since this condition is
|
||||
// detectable
|
||||
n.unbalanced = isLeft
|
||||
n.right = nil
|
||||
if n.initial {
|
||||
n.root = true
|
||||
return d
|
||||
}
|
||||
isLeft = n.isLeft
|
||||
n = n.parent
|
||||
d++
|
||||
}
|
||||
}
|
||||
|
||||
// EOC (end of chunk) implements the error interface
|
||||
type EOC struct {
|
||||
Hash []byte // read the hash of the chunk off the error
|
||||
}
|
||||
|
||||
// Error returns the error string
|
||||
func (self *EOC) Error() string {
|
||||
return fmt.Sprintf("hasher limit reached, chunk hash: %x", self.Hash)
|
||||
}
|
||||
|
||||
// NewEOC creates new end of chunk error with the hash
|
||||
func NewEOC(hash []byte) *EOC {
|
||||
return &EOC{hash}
|
||||
}
|
85
bmt/bmt_r.go
Normal file
@ -0,0 +1,85 @@
|
||||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
// simple nonconcurrent reference implementation for hashsize segment based
|
||||
// Binary Merkle tree hash on arbitrary but fixed maximum chunksize
|
||||
//
|
||||
// This implementation does not take advantage of any parallelism and uses
|
||||
// far more memory than necessary, but it is easy to see that it is correct.
|
||||
// It can be used for generating test cases for optimized implementations.
|
||||
// see the testHasherCorrectness function in bmt_test.go
|
||||
package bmt
|
||||
|
||||
import (
|
||||
"hash"
|
||||
)
|
||||
|
||||
// RefHasher is the non-optimized easy to read reference implementation of BMT
|
||||
type RefHasher struct {
|
||||
span int
|
||||
section int
|
||||
cap int
|
||||
h hash.Hash
|
||||
}
|
||||
|
||||
// NewRefHasher returns a new RefHasher
|
||||
func NewRefHasher(hasher BaseHasher, count int) *RefHasher {
|
||||
h := hasher()
|
||||
hashsize := h.Size()
|
||||
maxsize := hashsize * count
|
||||
c := 2
|
||||
for ; c < count; c *= 2 {
|
||||
}
|
||||
if c > 2 {
|
||||
c /= 2
|
||||
}
|
||||
return &RefHasher{
|
||||
section: 2 * hashsize,
|
||||
span: c * hashsize,
|
||||
cap: maxsize,
|
||||
h: h,
|
||||
}
|
||||
}
|
||||
|
||||
// Hash returns the BMT hash of the byte slice
|
||||
// implements the SwarmHash interface
|
||||
func (rh *RefHasher) Hash(d []byte) []byte {
|
||||
if len(d) > rh.cap {
|
||||
d = d[:rh.cap]
|
||||
}
|
||||
|
||||
return rh.hash(d, rh.span)
|
||||
}
|
||||
|
||||
func (rh *RefHasher) hash(d []byte, s int) []byte {
|
||||
l := len(d)
|
||||
left := d
|
||||
var right []byte
|
||||
if l > rh.section {
|
||||
for ; s >= l; s /= 2 {
|
||||
}
|
||||
left = rh.hash(d[:s], s)
|
||||
right = d[s:]
|
||||
if l-s > rh.section/2 {
|
||||
right = rh.hash(right, s)
|
||||
}
|
||||
}
|
||||
defer rh.h.Reset()
|
||||
rh.h.Write(left)
|
||||
rh.h.Write(right)
|
||||
h := rh.h.Sum(nil)
|
||||
return h
|
||||
}
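Mirroring the testHasherCorrectness helper in the test file below, a consumer could cross-check the optimized Hasher against this reference implementation roughly as follows (segment count 128 and the pooled constructors as in the tests; the import path for the bmt package is an assumption):

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/ethereum/go-ethereum/bmt"
	"github.com/ethereum/go-ethereum/crypto/sha3"
)

func main() {
	data := []byte("some chunk data, well under the 4096 byte maximum")

	// reference result from the non-concurrent implementation
	exp := bmt.NewRefHasher(sha3.NewKeccak256, 128).Hash(data)

	// optimized, pooled hasher
	pool := bmt.NewTreePool(sha3.NewKeccak256, 128, 1)
	defer pool.Drain(0)
	h := bmt.New(pool)
	h.Reset()
	h.Write(data)
	got := h.Sum(nil)

	fmt.Println("hashes match:", bytes.Equal(got, exp))
}
```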
|
481
bmt/bmt_test.go
Normal file
@ -0,0 +1,481 @@
|
||||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package bmt
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
crand "crypto/rand"
|
||||
"fmt"
|
||||
"hash"
|
||||
"io"
|
||||
"math/rand"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/crypto/sha3"
|
||||
)
|
||||
|
||||
const (
|
||||
maxproccnt = 8
|
||||
)
|
||||
|
||||
// TestRefHasher tests that the RefHasher computes the expected BMT hash for
|
||||
// all data lengths between 0 and 256 bytes
|
||||
func TestRefHasher(t *testing.T) {
|
||||
hashFunc := sha3.NewKeccak256
|
||||
|
||||
sha3 := func(data ...[]byte) []byte {
|
||||
h := hashFunc()
|
||||
for _, v := range data {
|
||||
h.Write(v)
|
||||
}
|
||||
return h.Sum(nil)
|
||||
}
|
||||
|
||||
// the test struct is used to specify the expected BMT hash for data
|
||||
// lengths between "from" and "to"
|
||||
type test struct {
|
||||
from int64
|
||||
to int64
|
||||
expected func([]byte) []byte
|
||||
}
|
||||
|
||||
var tests []*test
|
||||
|
||||
// all lengths in [0,64] should be:
|
||||
//
|
||||
// sha3(data)
|
||||
//
|
||||
tests = append(tests, &test{
|
||||
from: 0,
|
||||
to: 64,
|
||||
expected: func(data []byte) []byte {
|
||||
return sha3(data)
|
||||
},
|
||||
})
|
||||
|
||||
// all lengths in [65,96] should be:
|
||||
//
|
||||
// sha3(
|
||||
// sha3(data[:64])
|
||||
// data[64:]
|
||||
// )
|
||||
//
|
||||
tests = append(tests, &test{
|
||||
from: 65,
|
||||
to: 96,
|
||||
expected: func(data []byte) []byte {
|
||||
return sha3(sha3(data[:64]), data[64:])
|
||||
},
|
||||
})
|
||||
|
||||
// all lengths in [97,128] should be:
|
||||
//
|
||||
// sha3(
|
||||
// sha3(data[:64])
|
||||
// sha3(data[64:])
|
||||
// )
|
||||
//
|
||||
tests = append(tests, &test{
|
||||
from: 97,
|
||||
to: 128,
|
||||
expected: func(data []byte) []byte {
|
||||
return sha3(sha3(data[:64]), sha3(data[64:]))
|
||||
},
|
||||
})
|
||||
|
||||
// all lengths in [129,160] should be:
|
||||
//
|
||||
// sha3(
|
||||
// sha3(
|
||||
// sha3(data[:64])
|
||||
// sha3(data[64:128])
|
||||
// )
|
||||
// data[128:]
|
||||
// )
|
||||
//
|
||||
tests = append(tests, &test{
|
||||
from: 129,
|
||||
to: 160,
|
||||
expected: func(data []byte) []byte {
|
||||
return sha3(sha3(sha3(data[:64]), sha3(data[64:128])), data[128:])
|
||||
},
|
||||
})
|
||||
|
||||
// all lengths in [161,192] should be:
|
||||
//
|
||||
// sha3(
|
||||
// sha3(
|
||||
// sha3(data[:64])
|
||||
// sha3(data[64:128])
|
||||
// )
|
||||
// sha3(data[128:])
|
||||
// )
|
||||
//
|
||||
tests = append(tests, &test{
|
||||
from: 161,
|
||||
to: 192,
|
||||
expected: func(data []byte) []byte {
|
||||
return sha3(sha3(sha3(data[:64]), sha3(data[64:128])), sha3(data[128:]))
|
||||
},
|
||||
})
|
||||
|
||||
// all lengths in [193,224] should be:
|
||||
//
|
||||
// sha3(
|
||||
// sha3(
|
||||
// sha3(data[:64])
|
||||
// sha3(data[64:128])
|
||||
// )
|
||||
// sha3(
|
||||
// sha3(data[128:192])
|
||||
// data[192:]
|
||||
// )
|
||||
// )
|
||||
//
|
||||
tests = append(tests, &test{
|
||||
from: 193,
|
||||
to: 224,
|
||||
expected: func(data []byte) []byte {
|
||||
return sha3(sha3(sha3(data[:64]), sha3(data[64:128])), sha3(sha3(data[128:192]), data[192:]))
|
||||
},
|
||||
})
|
||||
|
||||
// all lengths in [225,256] should be:
|
||||
//
|
||||
// sha3(
|
||||
// sha3(
|
||||
// sha3(data[:64])
|
||||
// sha3(data[64:128])
|
||||
// )
|
||||
// sha3(
|
||||
// sha3(data[128:192])
|
||||
// sha3(data[192:])
|
||||
// )
|
||||
// )
|
||||
//
|
||||
tests = append(tests, &test{
|
||||
from: 225,
|
||||
to: 256,
|
||||
expected: func(data []byte) []byte {
|
||||
return sha3(sha3(sha3(data[:64]), sha3(data[64:128])), sha3(sha3(data[128:192]), sha3(data[192:])))
|
||||
},
|
||||
})
|
||||
|
||||
// run the tests
|
||||
for _, x := range tests {
|
||||
for length := x.from; length <= x.to; length++ {
|
||||
t.Run(fmt.Sprintf("%d_bytes", length), func(t *testing.T) {
|
||||
data := make([]byte, length)
|
||||
if _, err := io.ReadFull(crand.Reader, data); err != nil && err != io.EOF {
|
||||
t.Fatal(err)
|
||||
}
|
||||
expected := x.expected(data)
|
||||
actual := NewRefHasher(hashFunc, 128).Hash(data)
|
||||
if !bytes.Equal(actual, expected) {
|
||||
t.Fatalf("expected %x, got %x", expected, actual)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func testDataReader(l int) (r io.Reader) {
|
||||
return io.LimitReader(crand.Reader, int64(l))
|
||||
}
|
||||
|
||||
func TestHasherCorrectness(t *testing.T) {
|
||||
err := testHasher(testBaseHasher)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func testHasher(f func(BaseHasher, []byte, int, int) error) error {
|
||||
tdata := testDataReader(4128)
|
||||
data := make([]byte, 4128)
|
||||
tdata.Read(data)
|
||||
hasher := sha3.NewKeccak256
|
||||
size := hasher().Size()
|
||||
counts := []int{1, 2, 3, 4, 5, 8, 16, 32, 64, 128}
|
||||
|
||||
var err error
|
||||
for _, count := range counts {
|
||||
max := count * size
|
||||
incr := 1
|
||||
for n := 0; n <= max+incr; n += incr {
|
||||
err = f(hasher, data, n, count)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func TestHasherReuseWithoutRelease(t *testing.T) {
|
||||
testHasherReuse(1, t)
|
||||
}
|
||||
|
||||
func TestHasherReuseWithRelease(t *testing.T) {
|
||||
testHasherReuse(maxproccnt, t)
|
||||
}
|
||||
|
||||
func testHasherReuse(i int, t *testing.T) {
|
||||
hasher := sha3.NewKeccak256
|
||||
pool := NewTreePool(hasher, 128, i)
|
||||
defer pool.Drain(0)
|
||||
bmt := New(pool)
|
||||
|
||||
for i := 0; i < 500; i++ {
|
||||
n := rand.Intn(4096)
|
||||
tdata := testDataReader(n)
|
||||
data := make([]byte, n)
|
||||
tdata.Read(data)
|
||||
|
||||
err := testHasherCorrectness(bmt, hasher, data, n, 128)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestHasherConcurrency(t *testing.T) {
|
||||
hasher := sha3.NewKeccak256
|
||||
pool := NewTreePool(hasher, 128, maxproccnt)
|
||||
defer pool.Drain(0)
|
||||
wg := sync.WaitGroup{}
|
||||
cycles := 100
|
||||
wg.Add(maxproccnt * cycles)
|
||||
errc := make(chan error)
|
||||
|
||||
for p := 0; p < maxproccnt; p++ {
|
||||
for i := 0; i < cycles; i++ {
|
||||
go func() {
|
||||
bmt := New(pool)
|
||||
n := rand.Intn(4096)
|
||||
tdata := testDataReader(n)
|
||||
data := make([]byte, n)
|
||||
tdata.Read(data)
|
||||
err := testHasherCorrectness(bmt, hasher, data, n, 128)
|
||||
wg.Done()
|
||||
if err != nil {
|
||||
errc <- err
|
||||
}
|
||||
}()
|
||||
}
|
||||
}
|
||||
go func() {
|
||||
wg.Wait()
|
||||
close(errc)
|
||||
}()
|
||||
var err error
|
||||
select {
|
||||
case <-time.NewTimer(5 * time.Second).C:
|
||||
err = fmt.Errorf("timed out")
|
||||
case err = <-errc:
|
||||
}
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func testBaseHasher(hasher BaseHasher, d []byte, n, count int) error {
|
||||
pool := NewTreePool(hasher, count, 1)
|
||||
defer pool.Drain(0)
|
||||
bmt := New(pool)
|
||||
return testHasherCorrectness(bmt, hasher, d, n, count)
|
||||
}
|
||||
|
||||
func testHasherCorrectness(bmt hash.Hash, hasher BaseHasher, d []byte, n, count int) (err error) {
|
||||
data := d[:n]
|
||||
rbmt := NewRefHasher(hasher, count)
|
||||
exp := rbmt.Hash(data)
|
||||
timeout := time.NewTimer(time.Second)
|
||||
c := make(chan error)
|
||||
|
||||
go func() {
|
||||
bmt.Reset()
|
||||
bmt.Write(data)
|
||||
got := bmt.Sum(nil)
|
||||
if !bytes.Equal(got, exp) {
|
||||
c <- fmt.Errorf("wrong hash: expected %x, got %x", exp, got)
|
||||
}
|
||||
close(c)
|
||||
}()
|
||||
select {
|
||||
case <-timeout.C:
|
||||
err = fmt.Errorf("BMT hash calculation timed out")
|
||||
case err = <-c:
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func BenchmarkSHA3_4k(t *testing.B) { benchmarkSHA3(4096, t) }
|
||||
func BenchmarkSHA3_2k(t *testing.B) { benchmarkSHA3(4096/2, t) }
|
||||
func BenchmarkSHA3_1k(t *testing.B) { benchmarkSHA3(4096/4, t) }
|
||||
func BenchmarkSHA3_512b(t *testing.B) { benchmarkSHA3(4096/8, t) }
|
||||
func BenchmarkSHA3_256b(t *testing.B) { benchmarkSHA3(4096/16, t) }
|
||||
func BenchmarkSHA3_128b(t *testing.B) { benchmarkSHA3(4096/32, t) }
|
||||
|
||||
func BenchmarkBMTBaseline_4k(t *testing.B) { benchmarkBMTBaseline(4096, t) }
|
||||
func BenchmarkBMTBaseline_2k(t *testing.B) { benchmarkBMTBaseline(4096/2, t) }
|
||||
func BenchmarkBMTBaseline_1k(t *testing.B) { benchmarkBMTBaseline(4096/4, t) }
|
||||
func BenchmarkBMTBaseline_512b(t *testing.B) { benchmarkBMTBaseline(4096/8, t) }
|
||||
func BenchmarkBMTBaseline_256b(t *testing.B) { benchmarkBMTBaseline(4096/16, t) }
|
||||
func BenchmarkBMTBaseline_128b(t *testing.B) { benchmarkBMTBaseline(4096/32, t) }
|
||||
|
||||
func BenchmarkRefHasher_4k(t *testing.B) { benchmarkRefHasher(4096, t) }
|
||||
func BenchmarkRefHasher_2k(t *testing.B) { benchmarkRefHasher(4096/2, t) }
|
||||
func BenchmarkRefHasher_1k(t *testing.B) { benchmarkRefHasher(4096/4, t) }
|
||||
func BenchmarkRefHasher_512b(t *testing.B) { benchmarkRefHasher(4096/8, t) }
|
||||
func BenchmarkRefHasher_256b(t *testing.B) { benchmarkRefHasher(4096/16, t) }
|
||||
func BenchmarkRefHasher_128b(t *testing.B) { benchmarkRefHasher(4096/32, t) }
|
||||
|
||||
func BenchmarkHasher_4k(t *testing.B) { benchmarkHasher(4096, t) }
|
||||
func BenchmarkHasher_2k(t *testing.B) { benchmarkHasher(4096/2, t) }
|
||||
func BenchmarkHasher_1k(t *testing.B) { benchmarkHasher(4096/4, t) }
|
||||
func BenchmarkHasher_512b(t *testing.B) { benchmarkHasher(4096/8, t) }
|
||||
func BenchmarkHasher_256b(t *testing.B) { benchmarkHasher(4096/16, t) }
|
||||
func BenchmarkHasher_128b(t *testing.B) { benchmarkHasher(4096/32, t) }
|
||||
|
||||
func BenchmarkHasherNoReuse_4k(t *testing.B) { benchmarkHasherReuse(1, 4096, t) }
|
||||
func BenchmarkHasherNoReuse_2k(t *testing.B) { benchmarkHasherReuse(1, 4096/2, t) }
|
||||
func BenchmarkHasherNoReuse_1k(t *testing.B) { benchmarkHasherReuse(1, 4096/4, t) }
|
||||
func BenchmarkHasherNoReuse_512b(t *testing.B) { benchmarkHasherReuse(1, 4096/8, t) }
|
||||
func BenchmarkHasherNoReuse_256b(t *testing.B) { benchmarkHasherReuse(1, 4096/16, t) }
|
||||
func BenchmarkHasherNoReuse_128b(t *testing.B) { benchmarkHasherReuse(1, 4096/32, t) }
|
||||
|
||||
func BenchmarkHasherReuse_4k(t *testing.B) { benchmarkHasherReuse(16, 4096, t) }
|
||||
func BenchmarkHasherReuse_2k(t *testing.B) { benchmarkHasherReuse(16, 4096/2, t) }
|
||||
func BenchmarkHasherReuse_1k(t *testing.B) { benchmarkHasherReuse(16, 4096/4, t) }
|
||||
func BenchmarkHasherReuse_512b(t *testing.B) { benchmarkHasherReuse(16, 4096/8, t) }
|
||||
func BenchmarkHasherReuse_256b(t *testing.B) { benchmarkHasherReuse(16, 4096/16, t) }
|
||||
func BenchmarkHasherReuse_128b(t *testing.B) { benchmarkHasherReuse(16, 4096/32, t) }
|
||||
|
||||
// benchmarks the minimum hashing time for a balanced (for simplicity) BMT
|
||||
// by doing count/segmentsize parallel hashings of 2*segmentsize bytes
|
||||
// doing it on maxproccnt goroutines, each reusing the base hasher
|
||||
// the premise is that this is the minimum computation needed for a BMT
|
||||
// therefore this serves as a theoretical optimum for concurrent implementations
|
||||
func benchmarkBMTBaseline(n int, t *testing.B) {
|
||||
tdata := testDataReader(64)
|
||||
data := make([]byte, 64)
|
||||
tdata.Read(data)
|
||||
hasher := sha3.NewKeccak256
|
||||
|
||||
t.ReportAllocs()
|
||||
t.ResetTimer()
|
||||
for i := 0; i < t.N; i++ {
|
||||
count := int32((n-1)/hasher().Size() + 1)
|
||||
wg := sync.WaitGroup{}
|
||||
wg.Add(maxproccnt)
|
||||
var i int32
|
||||
for j := 0; j < maxproccnt; j++ {
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
h := hasher()
|
||||
for atomic.AddInt32(&i, 1) < count {
|
||||
h.Reset()
|
||||
h.Write(data)
|
||||
h.Sum(nil)
|
||||
}
|
||||
}()
|
||||
}
|
||||
wg.Wait()
|
||||
}
|
||||
}
|
||||
|
||||
func benchmarkHasher(n int, t *testing.B) {
|
||||
tdata := testDataReader(n)
|
||||
data := make([]byte, n)
|
||||
tdata.Read(data)
|
||||
|
||||
size := 1
|
||||
hasher := sha3.NewKeccak256
|
||||
segmentCount := 128
|
||||
pool := NewTreePool(hasher, segmentCount, size)
|
||||
bmt := New(pool)
|
||||
|
||||
t.ReportAllocs()
|
||||
t.ResetTimer()
|
||||
for i := 0; i < t.N; i++ {
|
||||
bmt.Reset()
|
||||
bmt.Write(data)
|
||||
bmt.Sum(nil)
|
||||
}
|
||||
}
|
||||
|
||||
func benchmarkHasherReuse(poolsize, n int, t *testing.B) {
|
||||
tdata := testDataReader(n)
|
||||
data := make([]byte, n)
|
||||
tdata.Read(data)
|
||||
|
||||
hasher := sha3.NewKeccak256
|
||||
segmentCount := 128
|
||||
pool := NewTreePool(hasher, segmentCount, poolsize)
|
||||
cycles := 200
|
||||
|
||||
t.ReportAllocs()
|
||||
t.ResetTimer()
|
||||
for i := 0; i < t.N; i++ {
|
||||
wg := sync.WaitGroup{}
|
||||
wg.Add(cycles)
|
||||
for j := 0; j < cycles; j++ {
|
||||
bmt := New(pool)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
bmt.Reset()
|
||||
bmt.Write(data)
|
||||
bmt.Sum(nil)
|
||||
}()
|
||||
}
|
||||
wg.Wait()
|
||||
}
|
||||
}
|
||||
|
||||
func benchmarkSHA3(n int, t *testing.B) {
|
||||
data := make([]byte, n)
|
||||
tdata := testDataReader(n)
|
||||
tdata.Read(data)
|
||||
hasher := sha3.NewKeccak256
|
||||
h := hasher()
|
||||
|
||||
t.ReportAllocs()
|
||||
t.ResetTimer()
|
||||
for i := 0; i < t.N; i++ {
|
||||
h.Reset()
|
||||
h.Write(data)
|
||||
h.Sum(nil)
|
||||
}
|
||||
}
|
||||
|
||||
func benchmarkRefHasher(n int, t *testing.B) {
|
||||
data := make([]byte, n)
|
||||
tdata := testDataReader(n)
|
||||
tdata.Read(data)
|
||||
hasher := sha3.NewKeccak256
|
||||
rbmt := NewRefHasher(hasher, 128)
|
||||
|
||||
t.ReportAllocs()
|
||||
t.ResetTimer()
|
||||
for i := 0; i < t.N; i++ {
|
||||
rbmt.Hash(data)
|
||||
}
|
||||
}
|
@ -21,18 +21,18 @@ variable which Travis CI makes available to certain builds.
|
||||
We want to build go-ethereum with the most recent version of Go, irrespective of the Go
|
||||
version that is available in the main Ubuntu repository. In order to make this possible,
|
||||
our PPA depends on the ~gophers/ubuntu/archive PPA. Our source package build-depends on
|
||||
golang-1.8, which is co-installable alongside the regular golang package. PPA dependencies
|
||||
golang-1.9, which is co-installable alongside the regular golang package. PPA dependencies
|
||||
can be edited at https://launchpad.net/%7Eethereum/+archive/ubuntu/ethereum/+edit-dependencies
|
||||
|
||||
## Building Packages Locally (for testing)
|
||||
|
||||
You need to run Ubuntu to do test packaging.
|
||||
|
||||
Add the gophers PPA and install Go 1.8 and Debian packaging tools:
|
||||
Add the gophers PPA and install Go 1.9 and Debian packaging tools:
|
||||
|
||||
$ sudo apt-add-repository ppa:gophers/ubuntu/archive
|
||||
$ sudo apt-get update
|
||||
$ sudo apt-get install build-essential golang-1.8 devscripts debhelper
|
||||
$ sudo apt-get install build-essential golang-1.9 devscripts debhelper
|
||||
|
||||
Create the source packages:
|
||||
|
||||
|
109
build/ci.go
@ -19,12 +19,13 @@
|
||||
/*
|
||||
The ci command is called from Continuous Integration scripts.
|
||||
|
||||
Usage: go run ci.go <command> <command flags/arguments>
|
||||
Usage: go run build/ci.go <command> <command flags/arguments>
|
||||
|
||||
Available commands are:
|
||||
|
||||
install [ -arch architecture ] [ packages... ] -- builds packages and executables
|
||||
test [ -coverage ] [ -misspell ] [ packages... ] -- runs the tests
|
||||
install [ -arch architecture ] [ -cc compiler ] [ packages... ] -- builds packages and executables
|
||||
test [ -coverage ] [ packages... ] -- runs the tests
|
||||
lint -- runs certain pre-selected linters
|
||||
archive [ -arch architecture ] [ -type zip|tar ] [ -signer key-envvar ] [ -upload dest ] -- archives build artefacts
|
||||
importkeys -- imports signing keys from env
|
||||
debsrc [ -signer key-id ] [ -upload dest ] -- creates a debian source package
|
||||
@ -119,7 +120,9 @@ var (
|
||||
// Distros for which packages are created.
|
||||
// Note: vivid is unsupported because there is no golang-1.6 package for it.
|
||||
// Note: wily is unsupported because it was officially deprecated on launchpad.
|
||||
debDistros = []string{"trusty", "xenial", "yakkety", "zesty"}
|
||||
// Note: yakkety is unsupported because it was officially deprecated on launchpad.
|
||||
// Note: zesty is unsupported because it was officially deprecated on launchpad.
|
||||
debDistros = []string{"trusty", "xenial", "artful", "bionic"}
|
||||
)
|
||||
|
||||
var GOBIN, _ = filepath.Abs(filepath.Join("build", "bin"))
|
||||
@ -145,6 +148,8 @@ func main() {
|
||||
doInstall(os.Args[2:])
|
||||
case "test":
|
||||
doTest(os.Args[2:])
|
||||
case "lint":
|
||||
doLint(os.Args[2:])
|
||||
case "archive":
|
||||
doArchive(os.Args[2:])
|
||||
case "debsrc":
|
||||
@ -169,17 +174,24 @@ func main() {
|
||||
func doInstall(cmdline []string) {
|
||||
var (
|
||||
arch = flag.String("arch", "", "Architecture to cross build for")
|
||||
cc = flag.String("cc", "", "C compiler to cross build with")
|
||||
)
|
||||
flag.CommandLine.Parse(cmdline)
|
||||
env := build.Env()
|
||||
|
||||
// Check Go version. People regularly open issues about compilation
|
||||
// failure with outdated Go. This should save them the trouble.
|
||||
if runtime.Version() < "go1.7" && !strings.Contains(runtime.Version(), "devel") {
|
||||
log.Println("You have Go version", runtime.Version())
|
||||
log.Println("go-ethereum requires at least Go version 1.7 and cannot")
|
||||
log.Println("be compiled with an earlier version. Please upgrade your Go installation.")
|
||||
os.Exit(1)
|
||||
if !strings.Contains(runtime.Version(), "devel") {
|
||||
// Figure out the minor version number since we can't textually compare (1.10 < 1.7)
|
||||
var minor int
|
||||
fmt.Sscanf(strings.TrimPrefix(runtime.Version(), "go1."), "%d", &minor)
|
||||
|
||||
if minor < 7 {
|
||||
log.Println("You have Go version", runtime.Version())
|
||||
log.Println("go-ethereum requires at least Go version 1.7 and cannot")
|
||||
log.Println("be compiled with an earlier version. Please upgrade your Go installation.")
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
// Compile packages given as arguments, or everything if there are no arguments.
|
||||
packages := []string{"./..."}
|
||||
@ -195,7 +207,7 @@ func doInstall(cmdline []string) {
|
||||
build.MustRun(goinstall)
|
||||
return
|
||||
}
|
||||
// If we are cross compiling to ARMv5 ARMv6 or ARMv7, clean any prvious builds
|
||||
// If we are cross compiling to ARMv5 ARMv6 or ARMv7, clean any previous builds
|
||||
if *arch == "arm" {
|
||||
os.RemoveAll(filepath.Join(runtime.GOROOT(), "pkg", runtime.GOOS+"_arm"))
|
||||
for _, path := range filepath.SplitList(build.GOPATH()) {
|
||||
@ -203,7 +215,7 @@ func doInstall(cmdline []string) {
|
||||
}
|
||||
}
|
||||
// Seems we are cross compiling, work around forbidden GOBIN
|
||||
goinstall := goToolArch(*arch, "install", buildFlags(env)...)
|
||||
goinstall := goToolArch(*arch, *cc, "install", buildFlags(env)...)
|
||||
goinstall.Args = append(goinstall.Args, "-v")
|
||||
goinstall.Args = append(goinstall.Args, []string{"-buildmode", "archive"}...)
|
||||
goinstall.Args = append(goinstall.Args, packages...)
|
||||
@ -217,7 +229,7 @@ func doInstall(cmdline []string) {
|
||||
}
|
||||
for name := range pkgs {
|
||||
if name == "main" {
|
||||
gobuild := goToolArch(*arch, "build", buildFlags(env)...)
|
||||
gobuild := goToolArch(*arch, *cc, "build", buildFlags(env)...)
|
||||
gobuild.Args = append(gobuild.Args, "-v")
|
||||
gobuild.Args = append(gobuild.Args, []string{"-o", executablePath(cmd.Name())}...)
|
||||
gobuild.Args = append(gobuild.Args, "."+string(filepath.Separator)+filepath.Join("cmd", cmd.Name()))
|
||||
@ -245,18 +257,18 @@ func buildFlags(env build.Environment) (flags []string) {
|
||||
}
|
||||
|
||||
func goTool(subcmd string, args ...string) *exec.Cmd {
|
||||
return goToolArch(runtime.GOARCH, subcmd, args...)
|
||||
return goToolArch(runtime.GOARCH, os.Getenv("CC"), subcmd, args...)
|
||||
}
|
||||
|
||||
func goToolArch(arch string, subcmd string, args ...string) *exec.Cmd {
|
||||
gocmd := filepath.Join(runtime.GOROOT(), "bin", "go")
|
||||
cmd := exec.Command(gocmd, subcmd)
|
||||
cmd.Args = append(cmd.Args, args...)
|
||||
|
||||
func goToolArch(arch string, cc string, subcmd string, args ...string) *exec.Cmd {
|
||||
cmd := build.GoTool(subcmd, args...)
|
||||
if subcmd == "build" || subcmd == "install" || subcmd == "test" {
|
||||
// Go CGO has a Windows linker error prior to 1.8 (https://github.com/golang/go/issues/8756).
|
||||
// Work around issue by allowing multiple definitions for <1.8 builds.
|
||||
if runtime.GOOS == "windows" && runtime.Version() < "go1.8" {
|
||||
var minor int
|
||||
fmt.Sscanf(strings.TrimPrefix(runtime.Version(), "go1."), "%d", &minor)
|
||||
|
||||
if runtime.GOOS == "windows" && minor < 8 {
|
||||
cmd.Args = append(cmd.Args, []string{"-ldflags", "-extldflags -Wl,--allow-multiple-definition"}...)
|
||||
}
|
||||
}
|
||||
@ -267,6 +279,9 @@ func goToolArch(arch string, subcmd string, args ...string) *exec.Cmd {
|
||||
cmd.Env = append(cmd.Env, "CGO_ENABLED=1")
|
||||
cmd.Env = append(cmd.Env, "GOARCH="+arch)
|
||||
}
|
||||
if cc != "" {
|
||||
cmd.Env = append(cmd.Env, "CC="+cc)
|
||||
}
|
||||
for _, e := range os.Environ() {
|
||||
if strings.HasPrefix(e, "GOPATH=") || strings.HasPrefix(e, "GOBIN=") {
|
||||
continue
|
||||
@ -282,7 +297,6 @@ func goToolArch(arch string, subcmd string, args ...string) *exec.Cmd {
|
||||
|
||||
func doTest(cmdline []string) {
|
||||
var (
|
||||
misspell = flag.Bool("misspell", false, "Whether to run the spell checker")
|
||||
coverage = flag.Bool("coverage", false, "Whether to record code coverage")
|
||||
)
|
||||
flag.CommandLine.Parse(cmdline)
|
||||
@ -296,10 +310,7 @@ func doTest(cmdline []string) {
|
||||
|
||||
// Run analysis tools before the tests.
|
||||
build.MustRun(goTool("vet", packages...))
|
||||
if *misspell {
|
||||
// TODO(karalabe): Reenable after false detection is fixed: https://github.com/client9/misspell/issues/105
|
||||
// spellcheck(packages)
|
||||
}
|
||||
|
||||
// Run the actual tests.
|
||||
gotest := goTool("test", buildFlags(env)...)
|
||||
// Test a single package at a time. CI builders are slow
|
||||
@ -308,35 +319,39 @@ func doTest(cmdline []string) {
|
||||
if *coverage {
|
||||
gotest.Args = append(gotest.Args, "-covermode=atomic", "-cover")
|
||||
}
|
||||
|
||||
gotest.Args = append(gotest.Args, packages...)
|
||||
build.MustRun(gotest)
|
||||
}
|
||||
|
||||
// spellcheck runs the client9/misspell spellchecker package on all Go, Cgo and
|
||||
// test files in the requested packages.
|
||||
func spellcheck(packages []string) {
|
||||
// Ensure the spellchecker is available
|
||||
build.MustRun(goTool("get", "github.com/client9/misspell/cmd/misspell"))
|
||||
// runs gometalinter on requested packages
|
||||
func doLint(cmdline []string) {
|
||||
flag.CommandLine.Parse(cmdline)
|
||||
|
||||
// Windows chokes on long argument lists, check packages individually
|
||||
for _, pkg := range packages {
|
||||
// The spell checker doesn't work on packages, gather all .go files for it
|
||||
out, err := goTool("list", "-f", "{{.Dir}}{{range .GoFiles}}\n{{.}}{{end}}{{range .CgoFiles}}\n{{.}}{{end}}{{range .TestGoFiles}}\n{{.}}{{end}}", pkg).CombinedOutput()
|
||||
if err != nil {
|
||||
log.Fatalf("source file listing failed: %v\n%s", err, string(out))
|
||||
}
|
||||
// Retrieve the folder and assemble the source list
|
||||
lines := strings.Split(string(out), "\n")
|
||||
root := lines[0]
|
||||
packages := []string{"./..."}
|
||||
if len(flag.CommandLine.Args()) > 0 {
|
||||
packages = flag.CommandLine.Args()
|
||||
}
|
||||
// Get metalinter and install all supported linters
|
||||
build.MustRun(goTool("get", "gopkg.in/alecthomas/gometalinter.v2"))
|
||||
build.MustRunCommand(filepath.Join(GOBIN, "gometalinter.v2"), "--install")
|
||||
|
||||
sources := make([]string, 0, len(lines)-1)
|
||||
for _, line := range lines[1:] {
|
||||
if line = strings.TrimSpace(line); line != "" {
|
||||
sources = append(sources, filepath.Join(root, line))
|
||||
}
|
||||
}
|
||||
// Run the spell checker for this particular package
|
||||
build.MustRunCommand(filepath.Join(GOBIN, "misspell"), append([]string{"-error"}, sources...)...)
|
||||
// Run fast linters batched together
|
||||
configs := []string{
|
||||
"--vendor",
|
||||
"--disable-all",
|
||||
"--enable=vet",
|
||||
"--enable=gofmt",
|
||||
"--enable=misspell",
|
||||
"--enable=goconst",
|
||||
"--min-occurrences=6", // for goconst
|
||||
}
|
||||
build.MustRunCommand(filepath.Join(GOBIN, "gometalinter.v2"), append(configs, packages...)...)
|
||||
|
||||
// Run slow linters one by one
|
||||
for _, linter := range []string{"unconvert", "gosimple"} {
|
||||
configs = []string{"--vendor", "--deadline=10m", "--disable-all", "--enable=" + linter}
|
||||
build.MustRunCommand(filepath.Join(GOBIN, "gometalinter.v2"), append(configs, packages...)...)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -2,7 +2,7 @@ Source: {{.Name}}
|
||||
Section: science
|
||||
Priority: extra
|
||||
Maintainer: {{.Author}}
|
||||
Build-Depends: debhelper (>= 8.0.0), golang-1.8
|
||||
Build-Depends: debhelper (>= 8.0.0), golang-1.9
|
||||
Standards-Version: 3.9.5
|
||||
Homepage: https://ethereum.org
|
||||
Vcs-Git: git://github.com/ethereum/go-ethereum.git
|
||||
|
@ -5,7 +5,7 @@
|
||||
#export DH_VERBOSE=1
|
||||
|
||||
override_dh_auto_build:
|
||||
build/env.sh /usr/lib/go-1.8/bin/go run build/ci.go install -git-commit={{.Env.Commit}} -git-branch={{.Env.Branch}} -git-tag={{.Env.Tag}} -buildnum={{.Env.Buildnum}} -pull-request={{.Env.IsPullRequest}}
|
||||
build/env.sh /usr/lib/go-1.9/bin/go run build/ci.go install -git-commit={{.Env.Commit}} -git-branch={{.Env.Branch}} -git-tag={{.Env.Tag}} -buildnum={{.Env.Buildnum}} -pull-request={{.Env.IsPullRequest}}
|
||||
|
||||
override_dh_auto_test:
|
||||
|
||||
|
@ -55,10 +55,9 @@ var (
|
||||
"crypto/sha3/",
|
||||
"internal/jsre/deps",
|
||||
"log/",
|
||||
"common/bitutil/bitutil",
|
||||
// don't license generated files
|
||||
"contracts/chequebook/contract/",
|
||||
"contracts/ens/contract/",
|
||||
"contracts/release/contract.go",
|
||||
"contracts/chequebook/contract/code.go",
|
||||
}
|
||||
|
||||
// paths with this prefix are licensed as GPL. all other files are LGPL.
|
||||
|
@ -21,6 +21,7 @@ import (
|
||||
"crypto/ecdsa"
|
||||
"flag"
|
||||
"fmt"
|
||||
"net"
|
||||
"os"
|
||||
|
||||
"github.com/ethereum/go-ethereum/cmd/utils"
|
||||
@ -96,12 +97,37 @@ func main() {
|
||||
}
|
||||
}
|
||||
|
||||
addr, err := net.ResolveUDPAddr("udp", *listenAddr)
|
||||
if err != nil {
|
||||
utils.Fatalf("-ResolveUDPAddr: %v", err)
|
||||
}
|
||||
conn, err := net.ListenUDP("udp", addr)
|
||||
if err != nil {
|
||||
utils.Fatalf("-ListenUDP: %v", err)
|
||||
}
|
||||
|
||||
realaddr := conn.LocalAddr().(*net.UDPAddr)
|
||||
if natm != nil {
|
||||
if !realaddr.IP.IsLoopback() {
|
||||
go nat.Map(natm, nil, "udp", realaddr.Port, realaddr.Port, "ethereum discovery")
|
||||
}
|
||||
// TODO: react to external IP changes over time.
|
||||
if ext, err := natm.ExternalIP(); err == nil {
|
||||
realaddr = &net.UDPAddr{IP: ext, Port: realaddr.Port}
|
||||
}
|
||||
}
|
||||
|
||||
if *runv5 {
|
||||
if _, err := discv5.ListenUDP(nodeKey, *listenAddr, natm, "", restrictList); err != nil {
|
||||
if _, err := discv5.ListenUDP(nodeKey, conn, realaddr, "", restrictList); err != nil {
|
||||
utils.Fatalf("%v", err)
|
||||
}
|
||||
} else {
|
||||
if _, err := discover.ListenUDP(nodeKey, *listenAddr, natm, "", restrictList); err != nil {
|
||||
cfg := discover.Config{
|
||||
PrivateKey: nodeKey,
|
||||
AnnounceAddr: realaddr,
|
||||
NetRestrict: restrictList,
|
||||
}
|
||||
if _, err := discover.ListenUDP(conn, cfg); err != nil {
|
||||
utils.Fatalf("%v", err)
|
||||
}
|
||||
}
|
||||
|
41
cmd/ethkey/README.md
Normal file
@ -0,0 +1,41 @@
|
||||
ethkey
|
||||
======
|
||||
|
||||
ethkey is a simple command-line tool for working with Ethereum keyfiles.
|
||||
|
||||
|
||||
# Usage
|
||||
|
||||
### `ethkey generate`
|
||||
|
||||
Generate a new keyfile.
|
||||
If you want to use an existing private key to use in the keyfile, it can be
|
||||
specified by setting `--privatekey` with the location of the file containing the
|
||||
private key.
|
||||
|
||||
|
||||
### `ethkey inspect <keyfile>`
|
||||
|
||||
Print various information about the keyfile.
|
||||
Private key information can be printed by using the `--private` flag;
|
||||
make sure to use this feature with great caution!
|
||||
|
||||
|
||||
### `ethkey sign <keyfile> <message/file>`
|
||||
|
||||
Sign the message with a keyfile.
|
||||
It is possible to refer to a file containing the message.
|
||||
|
||||
|
||||
### `ethkey verify <address> <signature> <message/file>`
|
||||
|
||||
Verify the signature of the message.
|
||||
It is possible to refer to a file containing the message.
|
||||
|
||||
|
||||
## Passphrases
|
||||
|
||||
For every command that uses a keyfile, you will be prompted to provide the
|
||||
passphrase for decrypting the keyfile. To avoid this message, it is possible
|
||||
to pass the passphrase by using the `--passphrase` flag pointing to a file that
|
||||
contains the passphrase.
|
118
cmd/ethkey/generate.go
Normal file
@ -0,0 +1,118 @@
|
||||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of go-ethereum.
|
||||
//
|
||||
// go-ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// go-ethereum is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"crypto/ecdsa"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/ethereum/go-ethereum/accounts/keystore"
|
||||
"github.com/ethereum/go-ethereum/cmd/utils"
|
||||
"github.com/ethereum/go-ethereum/crypto"
|
||||
"github.com/pborman/uuid"
|
||||
"gopkg.in/urfave/cli.v1"
|
||||
)
|
||||
|
||||
type outputGenerate struct {
|
||||
Address string
|
||||
AddressEIP55 string
|
||||
}
|
||||
|
||||
var commandGenerate = cli.Command{
|
||||
Name: "generate",
|
||||
Usage: "generate new keyfile",
|
||||
ArgsUsage: "[ <keyfile> ]",
|
||||
Description: `
|
||||
Generate a new keyfile.
|
||||
|
||||
If you want to encrypt an existing private key, it can be specified by setting
|
||||
--privatekey with the location of the file containing the private key.
|
||||
`,
|
||||
Flags: []cli.Flag{
|
||||
passphraseFlag,
|
||||
jsonFlag,
|
||||
cli.StringFlag{
|
||||
Name: "privatekey",
|
||||
Usage: "file containing a raw private key to encrypt",
|
||||
},
|
||||
},
|
||||
Action: func(ctx *cli.Context) error {
|
||||
// Check if keyfile path given and make sure it doesn't already exist.
|
||||
keyfilepath := ctx.Args().First()
|
||||
if keyfilepath == "" {
|
||||
keyfilepath = defaultKeyfileName
|
||||
}
|
||||
if _, err := os.Stat(keyfilepath); err == nil {
|
||||
utils.Fatalf("Keyfile already exists at %s.", keyfilepath)
|
||||
} else if !os.IsNotExist(err) {
|
||||
utils.Fatalf("Error checking if keyfile exists: %v", err)
|
||||
}
|
||||
|
||||
var privateKey *ecdsa.PrivateKey
|
||||
var err error
|
||||
if file := ctx.String("privatekey"); file != "" {
|
||||
// Load private key from file.
|
||||
privateKey, err = crypto.LoadECDSA(file)
|
||||
if err != nil {
|
||||
utils.Fatalf("Can't load private key: %v", err)
|
||||
}
|
||||
} else {
|
||||
// If not loaded, generate random.
|
||||
privateKey, err = crypto.GenerateKey()
|
||||
if err != nil {
|
||||
utils.Fatalf("Failed to generate random private key: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Create the keyfile object with a random UUID.
|
||||
id := uuid.NewRandom()
|
||||
key := &keystore.Key{
|
||||
Id: id,
|
||||
Address: crypto.PubkeyToAddress(privateKey.PublicKey),
|
||||
PrivateKey: privateKey,
|
||||
}
|
||||
|
||||
// Encrypt key with passphrase.
|
||||
passphrase := getPassPhrase(ctx, true)
|
||||
keyjson, err := keystore.EncryptKey(key, passphrase, keystore.StandardScryptN, keystore.StandardScryptP)
|
||||
if err != nil {
|
||||
utils.Fatalf("Error encrypting key: %v", err)
|
||||
}
|
||||
|
||||
// Store the file to disk.
|
||||
if err := os.MkdirAll(filepath.Dir(keyfilepath), 0700); err != nil {
|
||||
utils.Fatalf("Could not create directory %s", filepath.Dir(keyfilepath))
|
||||
}
|
||||
if err := ioutil.WriteFile(keyfilepath, keyjson, 0600); err != nil {
|
||||
utils.Fatalf("Failed to write keyfile to %s: %v", keyfilepath, err)
|
||||
}
|
||||
|
||||
// Output some information.
|
||||
out := outputGenerate{
|
||||
Address: key.Address.Hex(),
|
||||
}
|
||||
if ctx.Bool(jsonFlag.Name) {
|
||||
mustPrintJSON(out)
|
||||
} else {
|
||||
fmt.Println("Address:", out.Address)
|
||||
}
|
||||
return nil
|
||||
},
|
||||
}
|
91
cmd/ethkey/inspect.go
Normal file
@ -0,0 +1,91 @@
|
||||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of go-ethereum.
|
||||
//
|
||||
// go-ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// go-ethereum is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
|
||||
"github.com/ethereum/go-ethereum/accounts/keystore"
|
||||
"github.com/ethereum/go-ethereum/cmd/utils"
|
||||
"github.com/ethereum/go-ethereum/crypto"
|
||||
"gopkg.in/urfave/cli.v1"
|
||||
)
|
||||
|
||||
type outputInspect struct {
|
||||
Address string
|
||||
PublicKey string
|
||||
PrivateKey string
|
||||
}
|
||||
|
||||
var commandInspect = cli.Command{
|
||||
Name: "inspect",
|
||||
Usage: "inspect a keyfile",
|
||||
ArgsUsage: "<keyfile>",
|
||||
Description: `
|
||||
Print various information about the keyfile.
|
||||
|
||||
Private key information can be printed by using the --private flag;
|
||||
make sure to use this feature with great caution!`,
|
||||
Flags: []cli.Flag{
|
||||
passphraseFlag,
|
||||
jsonFlag,
|
||||
cli.BoolFlag{
|
||||
Name: "private",
|
||||
Usage: "include the private key in the output",
|
||||
},
|
||||
},
|
||||
Action: func(ctx *cli.Context) error {
|
||||
keyfilepath := ctx.Args().First()
|
||||
|
||||
// Read key from file.
|
||||
keyjson, err := ioutil.ReadFile(keyfilepath)
|
||||
if err != nil {
|
||||
utils.Fatalf("Failed to read the keyfile at '%s': %v", keyfilepath, err)
|
||||
}
|
||||
|
||||
// Decrypt key with passphrase.
|
||||
passphrase := getPassPhrase(ctx, false)
|
||||
key, err := keystore.DecryptKey(keyjson, passphrase)
|
||||
if err != nil {
|
||||
utils.Fatalf("Error decrypting key: %v", err)
|
||||
}
|
||||
|
||||
// Output all relevant information we can retrieve.
|
||||
showPrivate := ctx.Bool("private")
|
||||
out := outputInspect{
|
||||
Address: key.Address.Hex(),
|
||||
PublicKey: hex.EncodeToString(
|
||||
crypto.FromECDSAPub(&key.PrivateKey.PublicKey)),
|
||||
}
|
||||
if showPrivate {
|
||||
out.PrivateKey = hex.EncodeToString(crypto.FromECDSA(key.PrivateKey))
|
||||
}
|
||||
|
||||
if ctx.Bool(jsonFlag.Name) {
|
||||
mustPrintJSON(out)
|
||||
} else {
|
||||
fmt.Println("Address: ", out.Address)
|
||||
fmt.Println("Public key: ", out.PublicKey)
|
||||
if showPrivate {
|
||||
fmt.Println("Private key: ", out.PrivateKey)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
},
|
||||
}
|
67
cmd/ethkey/main.go
Normal file
@ -0,0 +1,67 @@
|
||||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of go-ethereum.
|
||||
//
|
||||
// go-ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// go-ethereum is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"github.com/ethereum/go-ethereum/cmd/utils"
|
||||
"gopkg.in/urfave/cli.v1"
|
||||
)
|
||||
|
||||
const (
|
||||
defaultKeyfileName = "keyfile.json"
|
||||
)
|
||||
|
||||
// Git SHA1 commit hash of the release (set via linker flags)
|
||||
var gitCommit = ""
|
||||
|
||||
var app *cli.App
|
||||
|
||||
func init() {
|
||||
app = utils.NewApp(gitCommit, "an Ethereum key manager")
|
||||
app.Commands = []cli.Command{
|
||||
commandGenerate,
|
||||
commandInspect,
|
||||
commandSignMessage,
|
||||
commandVerifyMessage,
|
||||
}
|
||||
}
|
||||
|
||||
// Commonly used command line flags.
|
||||
var (
|
||||
passphraseFlag = cli.StringFlag{
|
||||
Name: "passwordfile",
|
||||
Usage: "the file that contains the passphrase for the keyfile",
|
||||
}
|
||||
jsonFlag = cli.BoolFlag{
|
||||
Name: "json",
|
||||
Usage: "output JSON instead of human-readable format",
|
||||
}
|
||||
messageFlag = cli.StringFlag{
|
||||
Name: "message",
|
||||
Usage: "the file that contains the message to sign/verify",
|
||||
}
|
||||
)
|
||||
|
||||
func main() {
|
||||
if err := app.Run(os.Args); err != nil {
|
||||
fmt.Fprintln(os.Stderr, err)
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
159
cmd/ethkey/message.go
Normal file
@ -0,0 +1,159 @@
|
||||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of go-ethereum.
|
||||
//
|
||||
// go-ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// go-ethereum is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
|
||||
"github.com/ethereum/go-ethereum/accounts/keystore"
|
||||
"github.com/ethereum/go-ethereum/cmd/utils"
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/crypto"
|
||||
"gopkg.in/urfave/cli.v1"
|
||||
)
|
||||
|
||||
type outputSign struct {
|
||||
Signature string
|
||||
}
|
||||
|
||||
var msgfileFlag = cli.StringFlag{
|
||||
Name: "msgfile",
|
||||
Usage: "file containing the message to sign/verify",
|
||||
}
|
||||
|
||||
var commandSignMessage = cli.Command{
|
||||
Name: "signmessage",
|
||||
Usage: "sign a message",
|
||||
ArgsUsage: "<keyfile> <message>",
|
||||
Description: `
|
||||
Sign the message with a keyfile.
|
||||
|
||||
To sign a message contained in a file, use the --msgfile flag.
|
||||
`,
|
||||
Flags: []cli.Flag{
|
||||
passphraseFlag,
|
||||
jsonFlag,
|
||||
msgfileFlag,
|
||||
},
|
||||
Action: func(ctx *cli.Context) error {
|
||||
message := getMessage(ctx, 1)
|
||||
|
||||
// Load the keyfile.
|
||||
keyfilepath := ctx.Args().First()
|
||||
keyjson, err := ioutil.ReadFile(keyfilepath)
|
||||
if err != nil {
|
||||
utils.Fatalf("Failed to read the keyfile at '%s': %v", keyfilepath, err)
|
||||
}
|
||||
|
||||
// Decrypt key with passphrase.
|
||||
passphrase := getPassPhrase(ctx, false)
|
||||
key, err := keystore.DecryptKey(keyjson, passphrase)
|
||||
if err != nil {
|
||||
utils.Fatalf("Error decrypting key: %v", err)
|
||||
}
|
||||
|
||||
signature, err := crypto.Sign(signHash(message), key.PrivateKey)
|
||||
if err != nil {
|
||||
utils.Fatalf("Failed to sign message: %v", err)
|
||||
}
|
||||
out := outputSign{Signature: hex.EncodeToString(signature)}
|
||||
if ctx.Bool(jsonFlag.Name) {
|
||||
mustPrintJSON(out)
|
||||
} else {
|
||||
fmt.Println("Signature:", out.Signature)
|
||||
}
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
type outputVerify struct {
|
||||
Success bool
|
||||
RecoveredAddress string
|
||||
RecoveredPublicKey string
|
||||
}
|
||||
|
||||
var commandVerifyMessage = cli.Command{
|
||||
Name: "verifymessage",
|
||||
Usage: "verify the signature of a signed message",
|
||||
ArgsUsage: "<address> <signature> <message>",
|
||||
Description: `
|
||||
Verify the signature of the message.
|
||||
It is possible to refer to a file containing the message.`,
|
||||
Flags: []cli.Flag{
|
||||
jsonFlag,
|
||||
msgfileFlag,
|
||||
},
|
||||
Action: func(ctx *cli.Context) error {
|
||||
addressStr := ctx.Args().First()
|
||||
signatureHex := ctx.Args().Get(1)
|
||||
message := getMessage(ctx, 2)
|
||||
|
||||
if !common.IsHexAddress(addressStr) {
|
||||
utils.Fatalf("Invalid address: %s", addressStr)
|
||||
}
|
||||
address := common.HexToAddress(addressStr)
|
||||
signature, err := hex.DecodeString(signatureHex)
|
||||
if err != nil {
|
||||
utils.Fatalf("Signature encoding is not hexadecimal: %v", err)
|
||||
}
|
||||
|
||||
recoveredPubkey, err := crypto.SigToPub(signHash(message), signature)
|
||||
if err != nil || recoveredPubkey == nil {
|
||||
utils.Fatalf("Signature verification failed: %v", err)
|
||||
}
|
||||
recoveredPubkeyBytes := crypto.FromECDSAPub(recoveredPubkey)
|
||||
recoveredAddress := crypto.PubkeyToAddress(*recoveredPubkey)
|
||||
success := address == recoveredAddress
|
||||
|
||||
out := outputVerify{
|
||||
Success: success,
|
||||
RecoveredPublicKey: hex.EncodeToString(recoveredPubkeyBytes),
|
||||
RecoveredAddress: recoveredAddress.Hex(),
|
||||
}
|
||||
if ctx.Bool(jsonFlag.Name) {
|
||||
mustPrintJSON(out)
|
||||
} else {
|
||||
if out.Success {
|
||||
fmt.Println("Signature verification successful!")
|
||||
} else {
|
||||
fmt.Println("Signature verification failed!")
|
||||
}
|
||||
fmt.Println("Recovered public key:", out.RecoveredPublicKey)
|
||||
fmt.Println("Recovered address:", out.RecoveredAddress)
|
||||
}
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
func getMessage(ctx *cli.Context, msgarg int) []byte {
|
||||
if file := ctx.String("msgfile"); file != "" {
|
||||
if len(ctx.Args()) > msgarg {
|
||||
utils.Fatalf("Can't use --msgfile and message argument at the same time.")
|
||||
}
|
||||
msg, err := ioutil.ReadFile(file)
|
||||
if err != nil {
|
||||
utils.Fatalf("Can't read message file: %v", err)
|
||||
}
|
||||
return msg
|
||||
} else if len(ctx.Args()) == msgarg+1 {
|
||||
return []byte(ctx.Args().Get(msgarg))
|
||||
}
|
||||
utils.Fatalf("Invalid number of arguments: want %d, got %d", msgarg+1, len(ctx.Args()))
|
||||
return nil
|
||||
}
|
70
cmd/ethkey/message_test.go
Normal file
@ -0,0 +1,70 @@
|
||||
// Copyright 2018 The go-ethereum Authors
|
||||
// This file is part of go-ethereum.
|
||||
//
|
||||
// go-ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// go-ethereum is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestMessageSignVerify(t *testing.T) {
|
||||
tmpdir, err := ioutil.TempDir("", "ethkey-test")
|
||||
if err != nil {
|
||||
t.Fatal("Can't create temporary directory:", err)
|
||||
}
|
||||
defer os.RemoveAll(tmpdir)
|
||||
|
||||
keyfile := filepath.Join(tmpdir, "the-keyfile")
|
||||
message := "test message"
|
||||
|
||||
// Create the key.
|
||||
generate := runEthkey(t, "generate", keyfile)
|
||||
generate.Expect(`
|
||||
!! Unsupported terminal, password will be echoed.
|
||||
Passphrase: {{.InputLine "foobar"}}
|
||||
Repeat passphrase: {{.InputLine "foobar"}}
|
||||
`)
|
||||
_, matches := generate.ExpectRegexp(`Address: (0x[0-9a-fA-F]{40})\n`)
|
||||
address := matches[1]
|
||||
generate.ExpectExit()
|
||||
|
||||
// Sign a message.
|
||||
sign := runEthkey(t, "signmessage", keyfile, message)
|
||||
sign.Expect(`
|
||||
!! Unsupported terminal, password will be echoed.
|
||||
Passphrase: {{.InputLine "foobar"}}
|
||||
`)
|
||||
_, matches = sign.ExpectRegexp(`Signature: ([0-9a-f]+)\n`)
|
||||
signature := matches[1]
|
||||
sign.ExpectExit()
|
||||
|
||||
// Verify the message.
|
||||
verify := runEthkey(t, "verifymessage", address, signature, message)
|
||||
_, matches = verify.ExpectRegexp(`
|
||||
Signature verification successful!
|
||||
Recovered public key: [0-9a-f]+
|
||||
Recovered address: (0x[0-9a-fA-F]{40})
|
||||
`)
|
||||
recovered := matches[1]
|
||||
verify.ExpectExit()
|
||||
|
||||
if recovered != address {
|
||||
t.Error("recovered address doesn't match generated key")
|
||||
}
|
||||
}
|
54
cmd/ethkey/run_test.go
Normal file
@ -0,0 +1,54 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.

package main

import (
    "fmt"
    "os"
    "testing"

    "github.com/docker/docker/pkg/reexec"
    "github.com/ethereum/go-ethereum/internal/cmdtest"
)

type testEthkey struct {
    *cmdtest.TestCmd
}

// spawns ethkey with the given command line args.
func runEthkey(t *testing.T, args ...string) *testEthkey {
    tt := new(testEthkey)
    tt.TestCmd = cmdtest.NewTestCmd(t, tt)
    tt.Run("ethkey-test", args...)
    return tt
}

func TestMain(m *testing.M) {
    // Run the app if we've been exec'd as "ethkey-test" in runEthkey.
    reexec.Register("ethkey-test", func() {
        if err := app.Run(os.Args); err != nil {
            fmt.Fprintln(os.Stderr, err)
            os.Exit(1)
        }
        os.Exit(0)
    })
    // check if we have been reexec'd
    if reexec.Init() {
        return
    }
    os.Exit(m.Run())
}
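The harness above works through re-execution: the test binary registers itself under the alias "ethkey-test", and cmdtest then spawns that same binary as a subprocess so the real cli.App handles the arguments. A minimal sketch of the underlying reexec pattern in isolation, outside go-ethereum's helpers (the alias "mytool" and the printed message are illustrative only):

package main

import (
    "fmt"
    "os"

    "github.com/docker/docker/pkg/reexec"
)

func main() {
    // Register the alias before Init; when the process is started under this
    // name, the registered function runs instead of the normal main body.
    reexec.Register("mytool", func() {
        fmt.Println("running as the re-executed tool")
        os.Exit(0)
    })
    if reexec.Init() {
        return // we were re-exec'd under a registered alias
    }
    // Normal path: spawn ourselves again under the alias and wait for it.
    cmd := reexec.Command("mytool")
    cmd.Stdout, cmd.Stderr = os.Stdout, os.Stderr
    if err := cmd.Run(); err != nil {
        fmt.Fprintln(os.Stderr, err)
    }
}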
83 cmd/ethkey/utils.go Normal file
@@ -0,0 +1,83 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.

package main

import (
    "encoding/json"
    "fmt"
    "io/ioutil"
    "strings"

    "github.com/ethereum/go-ethereum/cmd/utils"
    "github.com/ethereum/go-ethereum/console"
    "github.com/ethereum/go-ethereum/crypto"
    "gopkg.in/urfave/cli.v1"
)

// getPassPhrase obtains a passphrase given by the user. It first checks the
// --passphrase command line flag and ultimately prompts the user for a
// passphrase.
func getPassPhrase(ctx *cli.Context, confirmation bool) string {
    // Look for the --passphrase flag.
    passphraseFile := ctx.String(passphraseFlag.Name)
    if passphraseFile != "" {
        content, err := ioutil.ReadFile(passphraseFile)
        if err != nil {
            utils.Fatalf("Failed to read passphrase file '%s': %v",
                passphraseFile, err)
        }
        return strings.TrimRight(string(content), "\r\n")
    }

    // Otherwise prompt the user for the passphrase.
    passphrase, err := console.Stdin.PromptPassword("Passphrase: ")
    if err != nil {
        utils.Fatalf("Failed to read passphrase: %v", err)
    }
    if confirmation {
        confirm, err := console.Stdin.PromptPassword("Repeat passphrase: ")
        if err != nil {
            utils.Fatalf("Failed to read passphrase confirmation: %v", err)
        }
        if passphrase != confirm {
            utils.Fatalf("Passphrases do not match")
        }
    }
    return passphrase
}

// signHash is a helper function that calculates a hash for the given message
// that can be safely used to calculate a signature from.
//
// The hash is calculated as
//   keccak256("\x19Ethereum Signed Message:\n"${message length}${message}).
//
// This gives context to the signed message and prevents signing of transactions.
func signHash(data []byte) []byte {
    msg := fmt.Sprintf("\x19Ethereum Signed Message:\n%d%s", len(data), data)
    return crypto.Keccak256([]byte(msg))
}

// mustPrintJSON prints the JSON encoding of the given object and
// exits the program with an error message when the marshaling fails.
func mustPrintJSON(jsonObject interface{}) {
    str, err := json.MarshalIndent(jsonObject, "", "  ")
    if err != nil {
        utils.Fatalf("Failed to marshal JSON object: %v", err)
    }
    fmt.Println(string(str))
}
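For context, the prefix hash produced by signHash above is exactly what the signmessage and verifymessage commands operate on. A minimal sketch of that round trip using go-ethereum's crypto package (the same package and imports as above are assumed; the key here is generated on the fly purely for illustration):

// signRoundTrip signs a message over signHash and checks that the recovered
// address matches the signing key, mirroring signmessage/verifymessage.
func signRoundTrip(message string) (bool, error) {
    key, err := crypto.GenerateKey() // throwaway key, illustration only
    if err != nil {
        return false, err
    }
    sig, err := crypto.Sign(signHash([]byte(message)), key) // 65-byte [R || S || V]
    if err != nil {
        return false, err
    }
    pub, err := crypto.SigToPub(signHash([]byte(message)), sig)
    if err != nil {
        return false, err
    }
    // The recovered address must match the signing key's address.
    return crypto.PubkeyToAddress(*pub) == crypto.PubkeyToAddress(key.PublicKey), nil
}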
@@ -46,8 +46,5 @@ func disasmCmd(ctx *cli.Context) error {

    code := strings.TrimSpace(string(in[:]))
    fmt.Printf("%v\n", code)
    if err = asm.PrintDisassembled(code); err != nil {
        return err
    }
    return nil
    return asm.PrintDisassembled(code)
}

@@ -1,24 +1,25 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
// This file is part of go-ethereum.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
// GNU General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.

package main

import (
    "encoding/json"
    "io"
    "math/big"
    "time"

    "github.com/ethereum/go-ethereum/common"
@@ -35,12 +36,16 @@ func NewJSONLogger(cfg *vm.LogConfig, writer io.Writer) *JSONLogger {
    return &JSONLogger{json.NewEncoder(writer), cfg}
}

func (l *JSONLogger) CaptureStart(from common.Address, to common.Address, create bool, input []byte, gas uint64, value *big.Int) error {
    return nil
}

// CaptureState outputs state information on the logger.
func (l *JSONLogger) CaptureState(env *vm.EVM, pc uint64, op vm.OpCode, gas, cost uint64, memory *vm.Memory, stack *vm.Stack, contract *vm.Contract, depth int, err error) error {
    log := vm.StructLog{
        Pc:         pc,
        Op:         op,
        Gas:        gas + cost,
        Gas:        gas,
        GasCost:    cost,
        MemorySize: memory.Len(),
        Storage:    nil,
@@ -56,12 +61,21 @@ func (l *JSONLogger) CaptureState(env *vm.EVM, pc uint64, op vm.OpCode, gas, cos
    return l.encoder.Encode(log)
}

// CaptureFault outputs state information on the logger.
func (l *JSONLogger) CaptureFault(env *vm.EVM, pc uint64, op vm.OpCode, gas, cost uint64, memory *vm.Memory, stack *vm.Stack, contract *vm.Contract, depth int, err error) error {
    return nil
}

// CaptureEnd is triggered at end of execution.
func (l *JSONLogger) CaptureEnd(output []byte, gasUsed uint64, t time.Duration) error {
func (l *JSONLogger) CaptureEnd(output []byte, gasUsed uint64, t time.Duration, err error) error {
    type endLog struct {
        Output  string              `json:"output"`
        GasUsed math.HexOrDecimal64 `json:"gasUsed"`
        Time    time.Duration       `json:"time"`
        Err     string              `json:"error,omitempty"`
    }
    return l.encoder.Encode(endLog{common.Bytes2Hex(output), math.HexOrDecimal64(gasUsed), t})
    if err != nil {
        return l.encoder.Encode(endLog{common.Bytes2Hex(output), math.HexOrDecimal64(gasUsed), t, err.Error()})
    }
    return l.encoder.Encode(endLog{common.Bytes2Hex(output), math.HexOrDecimal64(gasUsed), t, ""})
}
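The CaptureEnd change above means the machine-readable trace now ends with the VM error, if any, alongside the output and gas figures. A rough sketch of the final JSON line this produces, using plain types instead of math.HexOrDecimal64 for brevity, so gasUsed appears here as a decimal rather than a hex string (encoding/json and time imports assumed):

// Illustrative only: mirrors the endLog struct above with simplified types.
type endLogExample struct {
    Output  string        `json:"output"`
    GasUsed uint64        `json:"gasUsed"`
    Time    time.Duration `json:"time"`
    Err     string        `json:"error,omitempty"`
}

func exampleEndLog() ([]byte, error) {
    // A failed execution would serialize roughly as:
    //   {"output":"","gasUsed":21000,"time":1200000,"error":"out of gas"}
    return json.Marshal(endLogExample{Output: "", GasUsed: 21000, Time: 1200 * time.Microsecond, Err: "out of gas"})
}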
@@ -53,7 +53,7 @@ var (
    }
    CodeFileFlag = cli.StringFlag{
        Name:  "codefile",
        Usage: "file containing EVM code",
        Usage: "File containing EVM code. If '-' is specified, code is read from stdin ",
    }
    GasFlag = cli.Uint64Flag{
        Name:  "gas",
@@ -102,6 +102,10 @@ var (
        Name:  "sender",
        Usage: "The transaction origin",
    }
    ReceiverFlag = cli.StringFlag{
        Name:  "receiver",
        Usage: "The transaction receiver (execution context)",
    }
    DisableMemoryFlag = cli.BoolFlag{
        Name:  "nomemory",
        Usage: "disable memory output",
@@ -131,6 +135,7 @@ func init() {
        GenesisFlag,
        MachineFlag,
        SenderFlag,
        ReceiverFlag,
        DisableMemoryFlag,
        DisableStackFlag,
    }
@@ -138,6 +143,7 @@ func init() {
        compileCommand,
        disasmCommand,
        runCommand,
        stateTestCommand,
    }
}

@@ -84,6 +84,7 @@ func runCmd(ctx *cli.Context) error {
        statedb     *state.StateDB
        chainConfig *params.ChainConfig
        sender      = common.StringToAddress("sender")
        receiver    = common.StringToAddress("receiver")
    )
    if ctx.GlobalBool(MachineFlag.Name) {
        tracer = NewJSONLogger(logconfig, os.Stdout)
@@ -95,7 +96,9 @@ func runCmd(ctx *cli.Context) error {
    }
    if ctx.GlobalString(GenesisFlag.Name) != "" {
        gen := readGenesis(ctx.GlobalString(GenesisFlag.Name))
        _, statedb = gen.ToBlock()
        db, _ := ethdb.NewMemDatabase()
        genesis := gen.ToBlock(db)
        statedb, _ = state.New(genesis.Root(), state.NewDatabase(db))
        chainConfig = gen.Config
    } else {
        db, _ := ethdb.NewMemDatabase()
@@ -104,46 +107,52 @@ func runCmd(ctx *cli.Context) error {
    if ctx.GlobalString(SenderFlag.Name) != "" {
        sender = common.HexToAddress(ctx.GlobalString(SenderFlag.Name))
    }

    statedb.CreateAccount(sender)

    if ctx.GlobalString(ReceiverFlag.Name) != "" {
        receiver = common.HexToAddress(ctx.GlobalString(ReceiverFlag.Name))
    }

    var (
        code []byte
        ret  []byte
        err  error
    )
    if fn := ctx.Args().First(); len(fn) > 0 {
    // The '--code' or '--codefile' flag overrides code in state
    if ctx.GlobalString(CodeFileFlag.Name) != "" {
        var hexcode []byte
        var err error
        // If - is specified, it means that code comes from stdin
        if ctx.GlobalString(CodeFileFlag.Name) == "-" {
            //Try reading from stdin
            if hexcode, err = ioutil.ReadAll(os.Stdin); err != nil {
                fmt.Printf("Could not load code from stdin: %v\n", err)
                os.Exit(1)
            }
        } else {
            // Codefile with hex assembly
            if hexcode, err = ioutil.ReadFile(ctx.GlobalString(CodeFileFlag.Name)); err != nil {
                fmt.Printf("Could not load code from file: %v\n", err)
                os.Exit(1)
            }
        }
        code = common.Hex2Bytes(string(bytes.TrimRight(hexcode, "\n")))

    } else if ctx.GlobalString(CodeFlag.Name) != "" {
        code = common.Hex2Bytes(ctx.GlobalString(CodeFlag.Name))
    } else if fn := ctx.Args().First(); len(fn) > 0 {
        // EASM-file to compile
        src, err := ioutil.ReadFile(fn)
        if err != nil {
            return err
        }

        bin, err := compiler.Compile(fn, src, false)
        if err != nil {
            return err
        }
        code = common.Hex2Bytes(bin)
    } else if ctx.GlobalString(CodeFlag.Name) != "" {
        code = common.Hex2Bytes(ctx.GlobalString(CodeFlag.Name))
    } else {
        var hexcode []byte
        if ctx.GlobalString(CodeFileFlag.Name) != "" {
            var err error
            hexcode, err = ioutil.ReadFile(ctx.GlobalString(CodeFileFlag.Name))
            if err != nil {
                fmt.Printf("Could not load code from file: %v\n", err)
                os.Exit(1)
            }
        } else {
            var err error
            hexcode, err = ioutil.ReadAll(os.Stdin)
            if err != nil {
                fmt.Printf("Could not load code from stdin: %v\n", err)
                os.Exit(1)
            }
        }
        code = common.Hex2Bytes(string(bytes.TrimRight(hexcode, "\n")))
    }

    initialGas := ctx.GlobalUint64(GasFlag.Name)
    runtimeConfig := runtime.Config{
        Origin: sender,
@@ -180,9 +189,9 @@ func runCmd(ctx *cli.Context) error {
        input := append(code, common.Hex2Bytes(ctx.GlobalString(InputFlag.Name))...)
        ret, _, leftOverGas, err = runtime.Create(input, &runtimeConfig)
    } else {
        receiver := common.StringToAddress("receiver")
        statedb.SetCode(receiver, code)

        if len(code) > 0 {
            statedb.SetCode(receiver, code)
        }
        ret, leftOverGas, err = runtime.Call(receiver, common.Hex2Bytes(ctx.GlobalString(InputFlag.Name)), &runtimeConfig)
    }
    execTime := time.Since(tstart)
@@ -227,13 +236,13 @@ Gas used: %d
`, execTime, mem.HeapObjects, mem.Alloc, mem.TotalAlloc, mem.NumGC, initialGas-leftOverGas)
    }
    if tracer != nil {
        tracer.CaptureEnd(ret, initialGas-leftOverGas, execTime)
        tracer.CaptureEnd(ret, initialGas-leftOverGas, execTime, err)
    } else {
        fmt.Printf("0x%x\n", ret)
        if err != nil {
            fmt.Printf(" error: %v\n", err)
        }
    }

    if err != nil {
        fmt.Printf(" error: %v\n", err)
    }
    return nil
}
125 cmd/evm/staterunner.go Normal file
@@ -0,0 +1,125 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.

package main

import (
    "encoding/json"
    "errors"
    "fmt"
    "io/ioutil"
    "os"

    "github.com/ethereum/go-ethereum/core/state"
    "github.com/ethereum/go-ethereum/core/vm"
    "github.com/ethereum/go-ethereum/log"
    "github.com/ethereum/go-ethereum/tests"

    cli "gopkg.in/urfave/cli.v1"
)

var stateTestCommand = cli.Command{
    Action:    stateTestCmd,
    Name:      "statetest",
    Usage:     "executes the given state tests",
    ArgsUsage: "<file>",
}

type StatetestResult struct {
    Name  string      `json:"name"`
    Pass  bool        `json:"pass"`
    Fork  string      `json:"fork"`
    Error string      `json:"error,omitempty"`
    State *state.Dump `json:"state,omitempty"`
}

func stateTestCmd(ctx *cli.Context) error {
    if len(ctx.Args().First()) == 0 {
        return errors.New("path-to-test argument required")
    }
    // Configure the go-ethereum logger
    glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, log.TerminalFormat(false)))
    glogger.Verbosity(log.Lvl(ctx.GlobalInt(VerbosityFlag.Name)))
    log.Root().SetHandler(glogger)

    // Configure the EVM logger
    config := &vm.LogConfig{
        DisableMemory: ctx.GlobalBool(DisableMemoryFlag.Name),
        DisableStack:  ctx.GlobalBool(DisableStackFlag.Name),
    }
    var (
        tracer   vm.Tracer
        debugger *vm.StructLogger
    )
    switch {
    case ctx.GlobalBool(MachineFlag.Name):
        tracer = NewJSONLogger(config, os.Stderr)

    case ctx.GlobalBool(DebugFlag.Name):
        debugger = vm.NewStructLogger(config)
        tracer = debugger

    default:
        debugger = vm.NewStructLogger(config)
    }
    // Load the test content from the input file
    src, err := ioutil.ReadFile(ctx.Args().First())
    if err != nil {
        return err
    }
    var tests map[string]tests.StateTest
    if err = json.Unmarshal(src, &tests); err != nil {
        return err
    }
    // Iterate over all the tests, run them and aggregate the results
    cfg := vm.Config{
        Tracer: tracer,
        Debug:  ctx.GlobalBool(DebugFlag.Name) || ctx.GlobalBool(MachineFlag.Name),
    }
    results := make([]StatetestResult, 0, len(tests))
    for key, test := range tests {
        for _, st := range test.Subtests() {
            // Run the test and aggregate the result
            result := &StatetestResult{Name: key, Fork: st.Fork, Pass: true}
            state, err := test.Run(st, cfg)
            if err != nil {
                // Test failed, mark as so and dump any state to aid debugging
                result.Pass, result.Error = false, err.Error()
                if ctx.GlobalBool(DumpFlag.Name) && state != nil {
                    dump := state.RawDump()
                    result.State = &dump
                }
            }
            // print state root for evmlab tracing (already committed above, so no need to delete objects again)
            if ctx.GlobalBool(MachineFlag.Name) && state != nil {
                fmt.Fprintf(os.Stderr, "{\"stateRoot\": \"%x\"}\n", state.IntermediateRoot(false))
            }

            results = append(results, *result)

            // Print any structured logs collected
            if ctx.GlobalBool(DebugFlag.Name) {
                if debugger != nil {
                    fmt.Fprintln(os.Stderr, "#### TRACE ####")
                    vm.WriteTrace(os.Stderr, debugger.StructLogs())
                }
            }
        }
    }
    out, _ := json.MarshalIndent(results, "", "  ")
    fmt.Println(string(out))
    return nil
}
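As a usage note, statetest prints the aggregated results as a JSON array of StatetestResult values on stdout (and, with --machine, a JSON trace plus a stateRoot object on stderr). A small sketch of consuming that output from Go, assuming the same package and imports as above and a placeholder file name result.json:

// readStatetestResults loads the JSON emitted by "evm statetest" and reports failures.
func readStatetestResults(path string) ([]StatetestResult, error) {
    raw, err := ioutil.ReadFile(path) // e.g. produced via: evm statetest tests.json > result.json
    if err != nil {
        return nil, err
    }
    var results []StatetestResult
    if err := json.Unmarshal(raw, &results); err != nil {
        return nil, err
    }
    for _, r := range results {
        if !r.Pass {
            fmt.Printf("%s (%s) failed: %s\n", r.Name, r.Fork, r.Error)
        }
    }
    return results, nil
}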
@ -18,11 +18,13 @@
|
||||
package main
|
||||
|
||||
//go:generate go-bindata -nometadata -o website.go faucet.html
|
||||
//go:generate gofmt -w -s website.go
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"flag"
|
||||
"fmt"
|
||||
"html/template"
|
||||
@ -33,6 +35,7 @@ import (
|
||||
"net/url"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
@ -80,7 +83,8 @@ var (
|
||||
captchaToken = flag.String("captcha.token", "", "Recaptcha site key to authenticate client side")
|
||||
captchaSecret = flag.String("captcha.secret", "", "Recaptcha secret key to authenticate server side")
|
||||
|
||||
logFlag = flag.Int("loglevel", 3, "Log level to use for Ethereum and the faucet")
|
||||
noauthFlag = flag.Bool("noauth", false, "Enables funding requests without authentication")
|
||||
logFlag = flag.Int("loglevel", 3, "Log level to use for Ethereum and the faucet")
|
||||
)
|
||||
|
||||
var (
|
||||
@ -129,6 +133,7 @@ func main() {
|
||||
"Amounts": amounts,
|
||||
"Periods": periods,
|
||||
"Recaptcha": *captchaToken,
|
||||
"NoAuth": *noauthFlag,
|
||||
})
|
||||
if err != nil {
|
||||
log.Crit("Failed to render the faucet template", "err", err)
|
||||
@ -181,10 +186,10 @@ func main() {
|
||||
|
||||
// request represents an accepted funding request.
|
||||
type request struct {
|
||||
Username string `json:"username"` // GitHub user for displaying an avatar
|
||||
Account common.Address `json:"account"` // Ethereum address being funded
|
||||
Time time.Time `json:"time"` // Timestamp when te request was accepted
|
||||
Tx *types.Transaction `json:"tx"` // Transaction funding the account
|
||||
Avatar string `json:"avatar"` // Avatar URL to make the UI nicer
|
||||
Account common.Address `json:"account"` // Ethereum address being funded
|
||||
Time time.Time `json:"time"` // Timestamp when the request was accepted
|
||||
Tx *types.Transaction `json:"tx"` // Transaction funding the account
|
||||
}
|
||||
|
||||
// faucet represents a crypto faucet backed by an Ethereum light client.
|
||||
@ -218,7 +223,6 @@ func newFaucet(genesis *core.Genesis, port int, enodes []*discv5.Node, network u
|
||||
NoDiscovery: true,
|
||||
DiscoveryV5: true,
|
||||
ListenAddr: fmt.Sprintf(":%d", port),
|
||||
DiscoveryV5Addr: fmt.Sprintf(":%d", port+1),
|
||||
MaxPeers: 25,
|
||||
BootstrapNodesV5: enodes,
|
||||
},
|
||||
@ -299,6 +303,8 @@ func (f *faucet) webHandler(w http.ResponseWriter, r *http.Request) {
|
||||
// apiHandler handles requests for Ether grants and transaction statuses.
|
||||
func (f *faucet) apiHandler(conn *websocket.Conn) {
|
||||
// Start tracking the connection and drop at the end
|
||||
defer conn.Close()
|
||||
|
||||
f.lock.Lock()
|
||||
f.conns = append(f.conns, conn)
|
||||
f.lock.Unlock()
|
||||
@ -313,25 +319,50 @@ func (f *faucet) apiHandler(conn *websocket.Conn) {
|
||||
}
|
||||
f.lock.Unlock()
|
||||
}()
|
||||
// Send a few initial stats to the client
|
||||
balance, _ := f.client.BalanceAt(context.Background(), f.account.Address, nil)
|
||||
nonce, _ := f.client.NonceAt(context.Background(), f.account.Address, nil)
|
||||
// Gather the initial stats from the network to report
|
||||
var (
|
||||
head *types.Header
|
||||
balance *big.Int
|
||||
nonce uint64
|
||||
err error
|
||||
)
|
||||
for {
|
||||
// Attempt to retrieve the stats, may error on no faucet connectivity
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
|
||||
head, err = f.client.HeaderByNumber(ctx, nil)
|
||||
if err == nil {
|
||||
balance, err = f.client.BalanceAt(ctx, f.account.Address, head.Number)
|
||||
if err == nil {
|
||||
nonce, err = f.client.NonceAt(ctx, f.account.Address, nil)
|
||||
}
|
||||
}
|
||||
cancel()
|
||||
|
||||
websocket.JSON.Send(conn, map[string]interface{}{
|
||||
// If stats retrieval failed, wait a bit and retry
|
||||
if err != nil {
|
||||
if err = sendError(conn, errors.New("Faucet offline: "+err.Error())); err != nil {
|
||||
log.Warn("Failed to send faucet error to client", "err", err)
|
||||
return
|
||||
}
|
||||
time.Sleep(3 * time.Second)
|
||||
continue
|
||||
}
|
||||
// Initial stats reported successfully, proceed with user interaction
|
||||
break
|
||||
}
|
||||
// Send over the initial stats and the latest header
|
||||
if err = send(conn, map[string]interface{}{
|
||||
"funds": balance.Div(balance, ether),
|
||||
"funded": nonce,
|
||||
"peers": f.stack.Server().PeerCount(),
|
||||
"requests": f.reqs,
|
||||
})
|
||||
// Send the initial block to the client
|
||||
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
|
||||
header, err := f.client.HeaderByNumber(ctx, nil)
|
||||
cancel()
|
||||
|
||||
if err != nil {
|
||||
log.Error("Failed to retrieve latest header", "err", err)
|
||||
} else {
|
||||
websocket.JSON.Send(conn, header)
|
||||
}, 3*time.Second); err != nil {
|
||||
log.Warn("Failed to send initial stats to client", "err", err)
|
||||
return
|
||||
}
|
||||
if err = send(conn, head, 3*time.Second); err != nil {
|
||||
log.Warn("Failed to send initial header to client", "err", err)
|
||||
return
|
||||
}
|
||||
// Keep reading requests from the websocket until the connection breaks
|
||||
for {
|
||||
@ -341,18 +372,25 @@ func (f *faucet) apiHandler(conn *websocket.Conn) {
|
||||
Tier uint `json:"tier"`
|
||||
Captcha string `json:"captcha"`
|
||||
}
|
||||
if err := websocket.JSON.Receive(conn, &msg); err != nil {
|
||||
if err = websocket.JSON.Receive(conn, &msg); err != nil {
|
||||
return
|
||||
}
|
||||
if !strings.HasPrefix(msg.URL, "https://gist.github.com/") {
|
||||
websocket.JSON.Send(conn, map[string]string{"error": "URL doesn't link to GitHub Gists"})
|
||||
if !*noauthFlag && !strings.HasPrefix(msg.URL, "https://gist.github.com/") && !strings.HasPrefix(msg.URL, "https://twitter.com/") &&
|
||||
!strings.HasPrefix(msg.URL, "https://plus.google.com/") && !strings.HasPrefix(msg.URL, "https://www.facebook.com/") {
|
||||
if err = sendError(conn, errors.New("URL doesn't link to supported services")); err != nil {
|
||||
log.Warn("Failed to send URL error to client", "err", err)
|
||||
return
|
||||
}
|
||||
continue
|
||||
}
|
||||
if msg.Tier >= uint(*tiersFlag) {
|
||||
websocket.JSON.Send(conn, map[string]string{"error": "Invalid funding tier requested"})
|
||||
if err = sendError(conn, errors.New("Invalid funding tier requested")); err != nil {
|
||||
log.Warn("Failed to send tier error to client", "err", err)
|
||||
return
|
||||
}
|
||||
continue
|
||||
}
|
||||
log.Info("Faucet funds requested", "gist", msg.URL, "tier", msg.Tier)
|
||||
log.Info("Faucet funds requested", "url", msg.URL, "tier", msg.Tier)
|
||||
|
||||
// If captcha verifications are enabled, make sure we're not dealing with a robot
|
||||
if *captchaToken != "" {
|
||||
@ -362,7 +400,10 @@ func (f *faucet) apiHandler(conn *websocket.Conn) {
|
||||
|
||||
res, err := http.PostForm("https://www.google.com/recaptcha/api/siteverify", form)
|
||||
if err != nil {
|
||||
websocket.JSON.Send(conn, map[string]string{"error": err.Error()})
|
||||
if err = sendError(conn, err); err != nil {
|
||||
log.Warn("Failed to send captcha post error to client", "err", err)
|
||||
return
|
||||
}
|
||||
continue
|
||||
}
|
||||
var result struct {
|
||||
@ -372,108 +413,108 @@ func (f *faucet) apiHandler(conn *websocket.Conn) {
|
||||
err = json.NewDecoder(res.Body).Decode(&result)
|
||||
res.Body.Close()
|
||||
if err != nil {
|
||||
websocket.JSON.Send(conn, map[string]string{"error": err.Error()})
|
||||
if err = sendError(conn, err); err != nil {
|
||||
log.Warn("Failed to send captcha decode error to client", "err", err)
|
||||
return
|
||||
}
|
||||
continue
|
||||
}
|
||||
if !result.Success {
|
||||
log.Warn("Captcha verification failed", "err", string(result.Errors))
|
||||
websocket.JSON.Send(conn, map[string]string{"error": "Beep-bop, you're a robot!"})
|
||||
if err = sendError(conn, errors.New("Beep-bop, you're a robot!")); err != nil {
|
||||
log.Warn("Failed to send captcha failure to client", "err", err)
|
||||
return
|
||||
}
|
||||
continue
|
||||
}
|
||||
}
|
||||
// Retrieve the gist from the GitHub Gist APIs
|
||||
parts := strings.Split(msg.URL, "/")
|
||||
req, _ := http.NewRequest("GET", "https://api.github.com/gists/"+parts[len(parts)-1], nil)
|
||||
if *githubUser != "" {
|
||||
req.SetBasicAuth(*githubUser, *githubToken)
|
||||
}
|
||||
res, err := http.DefaultClient.Do(req)
|
||||
if err != nil {
|
||||
websocket.JSON.Send(conn, map[string]string{"error": err.Error()})
|
||||
continue
|
||||
}
|
||||
var gist struct {
|
||||
Owner struct {
|
||||
Login string `json:"login"`
|
||||
} `json:"owner"`
|
||||
Files map[string]struct {
|
||||
Content string `json:"content"`
|
||||
} `json:"files"`
|
||||
}
|
||||
err = json.NewDecoder(res.Body).Decode(&gist)
|
||||
res.Body.Close()
|
||||
if err != nil {
|
||||
websocket.JSON.Send(conn, map[string]string{"error": err.Error()})
|
||||
continue
|
||||
}
|
||||
if gist.Owner.Login == "" {
|
||||
websocket.JSON.Send(conn, map[string]string{"error": "Anonymous Gists not allowed"})
|
||||
continue
|
||||
}
|
||||
// Iterate over all the files and look for Ethereum addresses
|
||||
var address common.Address
|
||||
for _, file := range gist.Files {
|
||||
if len(file.Content) == 2+common.AddressLength*2 {
|
||||
address = common.HexToAddress(file.Content)
|
||||
// Retrieve the Ethereum address to fund, the requesting user and a profile picture
|
||||
var (
|
||||
username string
|
||||
avatar string
|
||||
address common.Address
|
||||
)
|
||||
switch {
|
||||
case strings.HasPrefix(msg.URL, "https://gist.github.com/"):
|
||||
if err = sendError(conn, errors.New("GitHub authentication discontinued at the official request of GitHub")); err != nil {
|
||||
log.Warn("Failed to send GitHub deprecation to client", "err", err)
|
||||
return
|
||||
}
|
||||
continue
|
||||
case strings.HasPrefix(msg.URL, "https://twitter.com/"):
|
||||
username, avatar, address, err = authTwitter(msg.URL)
|
||||
case strings.HasPrefix(msg.URL, "https://plus.google.com/"):
|
||||
username, avatar, address, err = authGooglePlus(msg.URL)
|
||||
case strings.HasPrefix(msg.URL, "https://www.facebook.com/"):
|
||||
username, avatar, address, err = authFacebook(msg.URL)
|
||||
case *noauthFlag:
|
||||
username, avatar, address, err = authNoAuth(msg.URL)
|
||||
default:
|
||||
err = errors.New("Something funky happened, please open an issue at https://github.com/ethereum/go-ethereum/issues")
|
||||
}
|
||||
if address == (common.Address{}) {
|
||||
websocket.JSON.Send(conn, map[string]string{"error": "No Ethereum address found to fund"})
|
||||
if err != nil {
|
||||
if err = sendError(conn, err); err != nil {
|
||||
log.Warn("Failed to send prefix error to client", "err", err)
|
||||
return
|
||||
}
|
||||
continue
|
||||
}
|
||||
// Validate the user's existence since the API is unhelpful here
|
||||
if res, err = http.Head("https://github.com/" + gist.Owner.Login); err != nil {
|
||||
websocket.JSON.Send(conn, map[string]string{"error": err.Error()})
|
||||
continue
|
||||
}
|
||||
res.Body.Close()
|
||||
log.Info("Faucet request valid", "url", msg.URL, "tier", msg.Tier, "user", username, "address", address)
|
||||
|
||||
if res.StatusCode != 200 {
|
||||
websocket.JSON.Send(conn, map[string]string{"error": "Invalid user... boom!"})
|
||||
continue
|
||||
}
|
||||
// Ensure the user didn't request funds too recently
|
||||
f.lock.Lock()
|
||||
var (
|
||||
fund bool
|
||||
timeout time.Time
|
||||
)
|
||||
if timeout = f.timeouts[gist.Owner.Login]; time.Now().After(timeout) {
|
||||
if timeout = f.timeouts[username]; time.Now().After(timeout) {
|
||||
// User wasn't funded recently, create the funding transaction
|
||||
amount := new(big.Int).Mul(big.NewInt(int64(*payoutFlag)), ether)
|
||||
amount = new(big.Int).Mul(amount, new(big.Int).Exp(big.NewInt(5), big.NewInt(int64(msg.Tier)), nil))
|
||||
amount = new(big.Int).Div(amount, new(big.Int).Exp(big.NewInt(2), big.NewInt(int64(msg.Tier)), nil))
|
||||
|
||||
tx := types.NewTransaction(f.nonce+uint64(len(f.reqs)), address, amount, big.NewInt(21000), f.price, nil)
|
||||
tx := types.NewTransaction(f.nonce+uint64(len(f.reqs)), address, amount, 21000, f.price, nil)
|
||||
signed, err := f.keystore.SignTx(f.account, tx, f.config.ChainId)
|
||||
if err != nil {
|
||||
websocket.JSON.Send(conn, map[string]string{"error": err.Error()})
|
||||
f.lock.Unlock()
|
||||
if err = sendError(conn, err); err != nil {
|
||||
log.Warn("Failed to send transaction creation error to client", "err", err)
|
||||
return
|
||||
}
|
||||
continue
|
||||
}
|
||||
// Submit the transaction and mark as funded if successful
|
||||
if err := f.client.SendTransaction(context.Background(), signed); err != nil {
|
||||
websocket.JSON.Send(conn, map[string]string{"error": err.Error()})
|
||||
f.lock.Unlock()
|
||||
if err = sendError(conn, err); err != nil {
|
||||
log.Warn("Failed to send transaction transmission error to client", "err", err)
|
||||
return
|
||||
}
|
||||
continue
|
||||
}
|
||||
f.reqs = append(f.reqs, &request{
|
||||
Username: gist.Owner.Login,
|
||||
Account: address,
|
||||
Time: time.Now(),
|
||||
Tx: signed,
|
||||
Avatar: avatar,
|
||||
Account: address,
|
||||
Time: time.Now(),
|
||||
Tx: signed,
|
||||
})
|
||||
f.timeouts[gist.Owner.Login] = time.Now().Add(time.Duration(*minutesFlag*int(math.Pow(3, float64(msg.Tier)))) * time.Minute)
|
||||
f.timeouts[username] = time.Now().Add(time.Duration(*minutesFlag*int(math.Pow(3, float64(msg.Tier)))) * time.Minute)
|
||||
fund = true
|
||||
}
|
||||
f.lock.Unlock()
|
||||
|
||||
// Send an error if too frequent funding, otherwise a success
|
||||
if !fund {
|
||||
websocket.JSON.Send(conn, map[string]string{"error": fmt.Sprintf("%s left until next allowance", common.PrettyDuration(timeout.Sub(time.Now())))})
|
||||
if err = sendError(conn, fmt.Errorf("%s left until next allowance", common.PrettyDuration(timeout.Sub(time.Now())))); err != nil { // nolint: gosimple
|
||||
log.Warn("Failed to send funding error to client", "err", err)
|
||||
return
|
||||
}
|
||||
continue
|
||||
}
|
||||
websocket.JSON.Send(conn, map[string]string{"success": fmt.Sprintf("Funding request accepted for %s into %s", gist.Owner.Login, address.Hex())})
|
||||
if err = sendSuccess(conn, fmt.Sprintf("Funding request accepted for %s into %s", username, address.Hex())); err != nil {
|
||||
log.Warn("Failed to send funding success to client", "err", err)
|
||||
return
|
||||
}
|
||||
select {
|
||||
case f.update <- struct{}{}:
|
||||
default:
|
||||
@ -496,11 +537,31 @@ func (f *faucet) loop() {
|
||||
select {
|
||||
case head := <-heads:
|
||||
// New chain head arrived, query the current stats and stream to clients
|
||||
balance, _ := f.client.BalanceAt(context.Background(), f.account.Address, nil)
|
||||
balance = new(big.Int).Div(balance, ether)
|
||||
var (
|
||||
balance *big.Int
|
||||
nonce uint64
|
||||
price *big.Int
|
||||
err error
|
||||
)
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
|
||||
balance, err = f.client.BalanceAt(ctx, f.account.Address, head.Number)
|
||||
if err == nil {
|
||||
nonce, err = f.client.NonceAt(ctx, f.account.Address, nil)
|
||||
if err == nil {
|
||||
price, err = f.client.SuggestGasPrice(ctx)
|
||||
}
|
||||
}
|
||||
cancel()
|
||||
|
||||
price, _ := f.client.SuggestGasPrice(context.Background())
|
||||
nonce, _ := f.client.NonceAt(context.Background(), f.account.Address, nil)
|
||||
// If querying the data failed, try for the next block
|
||||
if err != nil {
|
||||
log.Warn("Failed to update faucet state", "block", head.Number, "hash", head.Hash(), "err", err)
|
||||
continue
|
||||
} else {
|
||||
log.Info("Updated faucet state", "block", head.Number, "hash", head.Hash(), "balance", balance, "nonce", nonce, "price", price)
|
||||
}
|
||||
// Faucet state retrieved, update locally and send to clients
|
||||
balance = new(big.Int).Div(balance, ether)
|
||||
|
||||
f.lock.Lock()
|
||||
f.price, f.nonce = price, nonce
|
||||
@ -511,17 +572,17 @@ func (f *faucet) loop() {
|
||||
|
||||
f.lock.RLock()
|
||||
for _, conn := range f.conns {
|
||||
if err := websocket.JSON.Send(conn, map[string]interface{}{
|
||||
if err := send(conn, map[string]interface{}{
|
||||
"funds": balance,
|
||||
"funded": f.nonce,
|
||||
"peers": f.stack.Server().PeerCount(),
|
||||
"requests": f.reqs,
|
||||
}); err != nil {
|
||||
}, time.Second); err != nil {
|
||||
log.Warn("Failed to send stats to client", "err", err)
|
||||
conn.Close()
|
||||
continue
|
||||
}
|
||||
if err := websocket.JSON.Send(conn, head); err != nil {
|
||||
if err := send(conn, head, time.Second); err != nil {
|
||||
log.Warn("Failed to send header to client", "err", err)
|
||||
conn.Close()
|
||||
}
|
||||
@ -532,7 +593,7 @@ func (f *faucet) loop() {
|
||||
// Pending requests updated, stream to clients
|
||||
f.lock.RLock()
|
||||
for _, conn := range f.conns {
|
||||
if err := websocket.JSON.Send(conn, map[string]interface{}{"requests": f.reqs}); err != nil {
|
||||
if err := send(conn, map[string]interface{}{"requests": f.reqs}, time.Second); err != nil {
|
||||
log.Warn("Failed to send requests to client", "err", err)
|
||||
conn.Close()
|
||||
}
|
||||
@ -541,3 +602,191 @@ func (f *faucet) loop() {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// send transmits a data packet to the remote end of the websocket, also
// setting a write deadline to prevent waiting forever on the node.
func send(conn *websocket.Conn, value interface{}, timeout time.Duration) error {
    if timeout == 0 {
        timeout = 60 * time.Second
    }
    conn.SetWriteDeadline(time.Now().Add(timeout))
    return websocket.JSON.Send(conn, value)
}

// sendError transmits an error to the remote end of the websocket, also setting
// the write deadline to 1 second to prevent waiting forever.
func sendError(conn *websocket.Conn, err error) error {
    return send(conn, map[string]string{"error": err.Error()}, time.Second)
}

// sendSuccess transmits a success message to the remote end of the websocket, also
// setting the write deadline to 1 second to prevent waiting forever.
func sendSuccess(conn *websocket.Conn, msg string) error {
    return send(conn, map[string]string{"success": msg}, time.Second)
}
|
||||
|
||||
// authGitHub tries to authenticate a faucet request using GitHub gists, returning
|
||||
// the username, avatar URL and Ethereum address to fund on success.
|
||||
func authGitHub(url string) (string, string, common.Address, error) {
|
||||
// Retrieve the gist from the GitHub Gist APIs
|
||||
parts := strings.Split(url, "/")
|
||||
req, _ := http.NewRequest("GET", "https://api.github.com/gists/"+parts[len(parts)-1], nil)
|
||||
if *githubUser != "" {
|
||||
req.SetBasicAuth(*githubUser, *githubToken)
|
||||
}
|
||||
res, err := http.DefaultClient.Do(req)
|
||||
if err != nil {
|
||||
return "", "", common.Address{}, err
|
||||
}
|
||||
var gist struct {
|
||||
Owner struct {
|
||||
Login string `json:"login"`
|
||||
} `json:"owner"`
|
||||
Files map[string]struct {
|
||||
Content string `json:"content"`
|
||||
} `json:"files"`
|
||||
}
|
||||
err = json.NewDecoder(res.Body).Decode(&gist)
|
||||
res.Body.Close()
|
||||
if err != nil {
|
||||
return "", "", common.Address{}, err
|
||||
}
|
||||
if gist.Owner.Login == "" {
|
||||
return "", "", common.Address{}, errors.New("Anonymous Gists not allowed")
|
||||
}
|
||||
// Iterate over all the files and look for Ethereum addresses
|
||||
var address common.Address
|
||||
for _, file := range gist.Files {
|
||||
content := strings.TrimSpace(file.Content)
|
||||
if len(content) == 2+common.AddressLength*2 {
|
||||
address = common.HexToAddress(content)
|
||||
}
|
||||
}
|
||||
if address == (common.Address{}) {
|
||||
return "", "", common.Address{}, errors.New("No Ethereum address found to fund")
|
||||
}
|
||||
// Validate the user's existence since the API is unhelpful here
|
||||
if res, err = http.Head("https://github.com/" + gist.Owner.Login); err != nil {
|
||||
return "", "", common.Address{}, err
|
||||
}
|
||||
res.Body.Close()
|
||||
|
||||
if res.StatusCode != 200 {
|
||||
return "", "", common.Address{}, errors.New("Invalid user... boom!")
|
||||
}
|
||||
// Everything passed validation, return the gathered infos
|
||||
return gist.Owner.Login + "@github", fmt.Sprintf("https://github.com/%s.png?size=64", gist.Owner.Login), address, nil
|
||||
}
|
||||
|
||||
// authTwitter tries to authenticate a faucet request using Twitter posts, returning
|
||||
// the username, avatar URL and Ethereum address to fund on success.
|
||||
func authTwitter(url string) (string, string, common.Address, error) {
|
||||
// Ensure the user specified a meaningful URL, no fancy nonsense
|
||||
parts := strings.Split(url, "/")
|
||||
if len(parts) < 4 || parts[len(parts)-2] != "status" {
|
||||
return "", "", common.Address{}, errors.New("Invalid Twitter status URL")
|
||||
}
|
||||
username := parts[len(parts)-3]
|
||||
|
||||
// Twitter's API isn't really friendly with direct links. Still, we don't
|
||||
// want to ask for read permissions from users, so just load the public posts and
|
||||
// scrape it for the Ethereum address and profile URL.
|
||||
res, err := http.Get(url)
|
||||
if err != nil {
|
||||
return "", "", common.Address{}, err
|
||||
}
|
||||
defer res.Body.Close()
|
||||
|
||||
body, err := ioutil.ReadAll(res.Body)
|
||||
if err != nil {
|
||||
return "", "", common.Address{}, err
|
||||
}
|
||||
address := common.HexToAddress(string(regexp.MustCompile("0x[0-9a-fA-F]{40}").Find(body)))
|
||||
if address == (common.Address{}) {
|
||||
return "", "", common.Address{}, errors.New("No Ethereum address found to fund")
|
||||
}
|
||||
var avatar string
|
||||
if parts = regexp.MustCompile("src=\"([^\"]+twimg.com/profile_images[^\"]+)\"").FindStringSubmatch(string(body)); len(parts) == 2 {
|
||||
avatar = parts[1]
|
||||
}
|
||||
return username + "@twitter", avatar, address, nil
|
||||
}
|
||||
|
||||
// authGooglePlus tries to authenticate a faucet request using GooglePlus posts,
|
||||
// returning the username, avatar URL and Ethereum address to fund on success.
|
||||
func authGooglePlus(url string) (string, string, common.Address, error) {
|
||||
// Ensure the user specified a meaningful URL, no fancy nonsense
|
||||
parts := strings.Split(url, "/")
|
||||
if len(parts) < 4 || parts[len(parts)-2] != "posts" {
|
||||
return "", "", common.Address{}, errors.New("Invalid Google+ post URL")
|
||||
}
|
||||
username := parts[len(parts)-3]
|
||||
|
||||
// Google's API isn't really friendly with direct links. Still, we don't
|
||||
// want to ask for read permissions from users, so just load the public posts and
|
||||
// scrape it for the Ethereum address and profile URL.
|
||||
res, err := http.Get(url)
|
||||
if err != nil {
|
||||
return "", "", common.Address{}, err
|
||||
}
|
||||
defer res.Body.Close()
|
||||
|
||||
body, err := ioutil.ReadAll(res.Body)
|
||||
if err != nil {
|
||||
return "", "", common.Address{}, err
|
||||
}
|
||||
address := common.HexToAddress(string(regexp.MustCompile("0x[0-9a-fA-F]{40}").Find(body)))
|
||||
if address == (common.Address{}) {
|
||||
return "", "", common.Address{}, errors.New("No Ethereum address found to fund")
|
||||
}
|
||||
var avatar string
|
||||
if parts = regexp.MustCompile("src=\"([^\"]+googleusercontent.com[^\"]+photo.jpg)\"").FindStringSubmatch(string(body)); len(parts) == 2 {
|
||||
avatar = parts[1]
|
||||
}
|
||||
return username + "@google+", avatar, address, nil
|
||||
}
|
||||
|
||||
// authFacebook tries to authenticate a faucet request using Facebook posts,
|
||||
// returning the username, avatar URL and Ethereum address to fund on success.
|
||||
func authFacebook(url string) (string, string, common.Address, error) {
|
||||
// Ensure the user specified a meaningful URL, no fancy nonsense
|
||||
parts := strings.Split(url, "/")
|
||||
if len(parts) < 4 || parts[len(parts)-2] != "posts" {
|
||||
return "", "", common.Address{}, errors.New("Invalid Facebook post URL")
|
||||
}
|
||||
username := parts[len(parts)-3]
|
||||
|
||||
// Facebook's Graph API isn't really friendly with direct links. Still, we don't
|
||||
// want to ask for read permissions from users, so just load the public posts and
|
||||
// scrape it for the Ethereum address and profile URL.
|
||||
res, err := http.Get(url)
|
||||
if err != nil {
|
||||
return "", "", common.Address{}, err
|
||||
}
|
||||
defer res.Body.Close()
|
||||
|
||||
body, err := ioutil.ReadAll(res.Body)
|
||||
if err != nil {
|
||||
return "", "", common.Address{}, err
|
||||
}
|
||||
address := common.HexToAddress(string(regexp.MustCompile("0x[0-9a-fA-F]{40}").Find(body)))
|
||||
if address == (common.Address{}) {
|
||||
return "", "", common.Address{}, errors.New("No Ethereum address found to fund")
|
||||
}
|
||||
var avatar string
|
||||
if parts = regexp.MustCompile("src=\"([^\"]+fbcdn.net[^\"]+)\"").FindStringSubmatch(string(body)); len(parts) == 2 {
|
||||
avatar = parts[1]
|
||||
}
|
||||
return username + "@facebook", avatar, address, nil
|
||||
}
|
||||
|
||||
// authNoAuth tries to interpret a faucet request as a plain Ethereum address,
|
||||
// without actually performing any remote authentication. This mode is prone to
|
||||
// Byzantine attack, so only ever use for truly private networks.
|
||||
func authNoAuth(url string) (string, string, common.Address, error) {
|
||||
address := common.HexToAddress(regexp.MustCompile("0x[0-9a-fA-F]{40}").FindString(url))
|
||||
if address == (common.Address{}) {
|
||||
return "", "", common.Address{}, errors.New("No Ethereum address found to fund")
|
||||
}
|
||||
return address.Hex() + "@noauth", "", address, nil
|
||||
}
|
||||
|
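The three social-network authenticators above share the same scraping idea: fetch the public post and pull the first 0x-prefixed, 40-hex-digit address out of the HTML. A stripped-down sketch of just that step, assuming the same imports as the faucet code above (the URL handling and avatar lookup are omitted):

// scrapeAddress fetches a public page and returns the first Ethereum address
// embedded in it, or an error when none is present.
func scrapeAddress(url string) (common.Address, error) {
    res, err := http.Get(url)
    if err != nil {
        return common.Address{}, err
    }
    defer res.Body.Close()

    body, err := ioutil.ReadAll(res.Body)
    if err != nil {
        return common.Address{}, err
    }
    match := regexp.MustCompile("0x[0-9a-fA-F]{40}").Find(body)
    if match == nil {
        return common.Address{}, errors.New("no Ethereum address found to fund")
    }
    return common.HexToAddress(string(match)), nil
}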
@ -5,7 +5,7 @@
|
||||
<meta http-equiv="X-UA-Compatible" content="IE=edge">
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1">
|
||||
|
||||
<title>{{.Network}}: GitHub Faucet</title>
|
||||
<title>{{.Network}}: Authenticated Faucet</title>
|
||||
|
||||
<link href="https://cdnjs.cloudflare.com/ajax/libs/twitter-bootstrap/3.3.7/css/bootstrap.min.css" rel="stylesheet" />
|
||||
<link href="https://cdnjs.cloudflare.com/ajax/libs/font-awesome/4.7.0/css/font-awesome.min.css" rel="stylesheet" />
|
||||
@ -43,13 +43,13 @@
|
||||
<div class="container">
|
||||
<div class="row" style="margin-bottom: 16px;">
|
||||
<div class="col-lg-12">
|
||||
<h1 style="text-align: center;"><i class="fa fa-bath" aria-hidden="true"></i> {{.Network}} GitHub Authenticated Faucet <i class="fa fa-github-alt" aria-hidden="true"></i></h1>
|
||||
<h1 style="text-align: center;"><i class="fa fa-bath" aria-hidden="true"></i> {{.Network}} Authenticated Faucet</h1>
|
||||
</div>
|
||||
</div>
|
||||
<div class="row">
|
||||
<div class="col-lg-8 col-lg-offset-2">
|
||||
<div class="input-group">
|
||||
<input id="gist" type="text" class="form-control" placeholder="GitHub Gist URL containing your Ethereum address...">
|
||||
<input id="url" name="url" type="text" class="form-control" placeholder="Social network URL containing your Ethereum address...">
|
||||
<span class="input-group-btn">
|
||||
<button class="btn btn-default dropdown-toggle" type="button" data-toggle="dropdown" aria-haspopup="true" aria-expanded="false">Give me Ether <i class="fa fa-caret-down" aria-hidden="true"></i></button>
|
||||
<ul class="dropdown-menu dropdown-menu-right">{{range $idx, $amount := .Amounts}}
|
||||
@ -80,8 +80,23 @@
|
||||
<div class="row" style="margin-top: 32px;">
|
||||
<div class="col-lg-12">
|
||||
<h3>How does this work?</h3>
|
||||
<p>This Ether faucet is running on the {{.Network}} network. To prevent malicious actors from exhausting all available funds or accumulating enough Ether to mount long running spam attacks, requests are tied to GitHub accounts. Anyone having a GitHub account may request funds within the permitted limits.</p>
|
||||
<p>To request funds, simply create a <a href="https://gist.github.com/" target="_about:blank">GitHub Gist</a> with your Ethereum address pasted into the contents (the file name doesn't matter), copy paste the gists URL into the above input box and fire away! You can track the current pending requests below the input field to see how much you have to wait until your turn comes.</p>
|
||||
<p>This Ether faucet is running on the {{.Network}} network. To prevent malicious actors from exhausting all available funds or accumulating enough Ether to mount long running spam attacks, requests are tied to common 3rd party social network accounts. Anyone having a Twitter, Google+ or Facebook account may request funds within the permitted limits.</p>
|
||||
<dl class="dl-horizontal">
|
||||
<dt style="width: auto; margin-left: 40px;"><i class="fa fa-twitter" aria-hidden="true" style="font-size: 36px;"></i></dt>
|
||||
<dd style="margin-left: 88px; margin-bottom: 10px;"></i> To request funds via Twitter, make a <a href="https://twitter.com/intent/tweet?text=Requesting%20faucet%20funds%20into%200x0000000000000000000000000000000000000000%20on%20the%20%23{{.Network}}%20%23Ethereum%20test%20network." target="_about:blank">tweet</a> with your Ethereum address pasted into the contents (surrounding text doesn't matter).<br/>Copy-paste the <a href="https://support.twitter.com/articles/80586" target="_about:blank">tweets URL</a> into the above input box and fire away!</dd>
|
||||
|
||||
<dt style="width: auto; margin-left: 40px;"><i class="fa fa-google-plus-official" aria-hidden="true" style="font-size: 36px;"></i></dt>
|
||||
<dd style="margin-left: 88px; margin-bottom: 10px;"></i> To request funds via Google Plus, publish a new <strong>public</strong> post with your Ethereum address embedded into the content (surrounding text doesn't matter).<br/>Copy-paste the posts URL into the above input box and fire away!</dd>
|
||||
|
||||
<dt style="width: auto; margin-left: 40px;"><i class="fa fa-facebook" aria-hidden="true" style="font-size: 36px;"></i></dt>
|
||||
<dd style="margin-left: 88px; margin-bottom: 10px;"></i> To request funds via Facebook, publish a new <strong>public</strong> post with your Ethereum address embedded into the content (surrounding text doesn't matter).<br/>Copy-paste the <a href="https://www.facebook.com/help/community/question/?id=282662498552845" target="_about:blank">posts URL</a> into the above input box and fire away!</dd>
|
||||
|
||||
{{if .NoAuth}}
|
||||
<dt class="text-danger" style="width: auto; margin-left: 40px;"><i class="fa fa-unlock-alt" aria-hidden="true" style="font-size: 36px;"></i></dt>
|
||||
<dd class="text-danger" style="margin-left: 88px; margin-bottom: 10px;"></i> To request funds <strong>without authentication</strong>, simply copy-paste your Ethereum address into the above input box (surrounding text doesn't matter) and fire away.<br/>This mode is susceptible to Byzantine attacks. Only use for debugging or private networks!</dd>
|
||||
{{end}}
|
||||
</dl>
|
||||
<p>You can track the current pending requests below the input field to see how much you have to wait until your turn comes.</p>
|
||||
{{if .Recaptcha}}<em>The faucet is running invisible reCaptcha protection against bots.</em>{{end}}
|
||||
</div>
|
||||
</div>
|
||||
@ -93,20 +108,27 @@
|
||||
var attempt = 0;
|
||||
var server;
|
||||
var tier = 0;
|
||||
var requests = [];
|
||||
|
||||
// Define a function that creates closures to drop old requests
|
||||
var dropper = function(hash) {
|
||||
return function() {
|
||||
for (var i=0; i<requests.length; i++) {
|
||||
if (requests[i].tx.hash == hash) {
|
||||
requests.splice(i, 1);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
// Define the function that submits a gist url to the server
|
||||
var submit = function({{if .Recaptcha}}captcha{{end}}) {
|
||||
server.send(JSON.stringify({url: $("#gist")[0].value, tier: tier{{if .Recaptcha}}, captcha: captcha{{end}}}));{{if .Recaptcha}}
|
||||
server.send(JSON.stringify({url: $("#url")[0].value, tier: tier{{if .Recaptcha}}, captcha: captcha{{end}}}));{{if .Recaptcha}}
|
||||
grecaptcha.reset();{{end}}
|
||||
};
|
||||
// Define a method to reconnect upon server loss
|
||||
var reconnect = function() {
|
||||
if (attempt % 2 == 0) {
|
||||
server = new WebSocket("wss://" + location.host + "/api");
|
||||
} else {
|
||||
server = new WebSocket("ws://" + location.host + "/api");
|
||||
}
|
||||
attempt++;
|
||||
server = new WebSocket(((window.location.protocol === "https:") ? "wss://" : "ws://") + window.location.host + "/api");
|
||||
|
||||
server.onmessage = function(event) {
|
||||
var msg = JSON.parse(event.data);
|
||||
@ -127,21 +149,85 @@
|
||||
$("#block").text(parseInt(msg.number, 16));
|
||||
}
|
||||
if (msg.error !== undefined) {
|
||||
noty({layout: 'topCenter', text: msg.error, type: 'error'});
|
||||
noty({layout: 'topCenter', text: msg.error, type: 'error', timeout: 5000, progressBar: true});
|
||||
}
|
||||
if (msg.success !== undefined) {
|
||||
noty({layout: 'topCenter', text: msg.success, type: 'success'});
|
||||
noty({layout: 'topCenter', text: msg.success, type: 'success', timeout: 5000, progressBar: true});
|
||||
}
|
||||
if (msg.requests !== undefined && msg.requests !== null) {
|
||||
// Mark all previous requests missing as done
|
||||
for (var i=0; i<requests.length; i++) {
|
||||
if (msg.requests.length > 0 && msg.requests[0].tx.hash == requests[i].tx.hash) {
|
||||
break;
|
||||
}
|
||||
if (requests[i].time != "") {
|
||||
requests[i].time = "";
|
||||
setTimeout(dropper(requests[i].tx.hash), 3000);
|
||||
}
|
||||
}
|
||||
// Append any new requests into our local collection
|
||||
var common = -1;
|
||||
if (requests.length > 0) {
|
||||
for (var i=0; i<msg.requests.length; i++) {
|
||||
if (requests[requests.length-1].tx.hash == msg.requests[i].tx.hash) {
|
||||
common = i;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
for (var i=common+1; i<msg.requests.length; i++) {
|
||||
requests.push(msg.requests[i]);
|
||||
}
|
||||
// Iterate over our entire local collection and re-render the funding table
|
||||
var content = "";
|
||||
for (var i=0; i<msg.requests.length; i++) {
|
||||
content += "<tr><td><div style=\"background: url('https://github.com/" + msg.requests[i].username + ".png?size=64'); background-size: cover; width:32px; height: 32px; border-radius: 4px;\"></div></td><td><pre>" + msg.requests[i].account + "</pre></td><td style=\"width: 100%; text-align: center; vertical-align: middle;\">" + moment.duration(moment(msg.requests[i].time).unix()-moment().unix(), 'seconds').humanize(true) + "</td></tr>";
|
||||
for (var i=0; i<requests.length; i++) {
|
||||
var done = requests[i].time == "";
|
||||
var elapsed = moment().unix()-moment(requests[i].time).unix();
|
||||
|
||||
content += "<tr id='" + requests[i].tx.hash + "'>";
|
||||
content += " <td><div style=\"background: url('" + requests[i].avatar + "'); background-size: cover; width:32px; height: 32px; border-radius: 4px;\"></div></td>";
|
||||
content += " <td><pre>" + requests[i].account + "</pre></td>";
|
||||
content += " <td style=\"width: 100%; text-align: center; vertical-align: middle;\">";
|
||||
if (done) {
|
||||
content += " funded";
|
||||
} else {
|
||||
content += " <span id='time-" + i + "' class='timer'>" + moment.duration(-elapsed, 'seconds').humanize(true) + "</span>";
|
||||
}
|
||||
content += " <div class='progress' style='height: 4px; margin: 0;'>";
|
||||
if (done) {
|
||||
content += " <div class='progress-bar progress-bar-success' role='progressbar' aria-valuenow='30' style='width:100%;'></div>";
|
||||
} else if (elapsed > 30) {
|
||||
content += " <div class='progress-bar progress-bar-danger progress-bar-striped active' role='progressbar' aria-valuenow='30' style='width:100%;'></div>";
|
||||
} else {
|
||||
content += " <div class='progress-bar progress-bar-striped active' role='progressbar' aria-valuenow='" + elapsed + "' style='width:" + (elapsed * 100 / 30) + "%;'></div>";
|
||||
}
|
||||
content += " </div>";
|
||||
content += " </td>";
|
||||
content += "</tr>";
|
||||
}
|
||||
$("#requests").html("<tbody>" + content + "</tbody>");
|
||||
}
|
||||
}
|
||||
server.onclose = function() { setTimeout(reconnect, 3000); };
|
||||
}
|
||||
// Start a UI updater to push the progress bars forward until they are done
|
||||
setInterval(function() {
|
||||
$('.progress-bar').each(function() {
|
||||
var progress = Number($(this).attr('aria-valuenow')) + 1;
|
||||
if (progress < 30) {
|
||||
$(this).attr('aria-valuenow', progress);
|
||||
$(this).css('width', (progress * 100 / 30) + '%');
|
||||
} else if (progress == 30) {
|
||||
$(this).css('width', '100%');
|
||||
$(this).addClass("progress-bar-danger");
|
||||
}
|
||||
})
|
||||
$('.timer').each(function() {
|
||||
var index = Number($(this).attr('id').substring(5));
|
||||
$(this).html(moment.duration(moment(requests[index].time).unix()-moment().unix(), 'seconds').humanize(true));
|
||||
})
|
||||
}, 1000);
|
||||
|
||||
// Establish a websocket connection to the API server
|
||||
reconnect();
|
||||
</script>{{if .Recaptcha}}
|
||||
|
File diff suppressed because one or more lines are too long
@@ -291,15 +291,28 @@ func ambiguousAddrRecovery(ks *keystore.KeyStore, err *keystore.AmbiguousAddrErr

// accountCreate creates a new account into the keystore defined by the CLI flags.
func accountCreate(ctx *cli.Context) error {
    stack, _ := makeConfigNode(ctx)
    cfg := gethConfig{Node: defaultNodeConfig()}
    // Load config file.
    if file := ctx.GlobalString(configFileFlag.Name); file != "" {
        if err := loadConfig(file, &cfg); err != nil {
            utils.Fatalf("%v", err)
        }
    }
    utils.SetNodeConfig(ctx, &cfg.Node)
    scryptN, scryptP, keydir, err := cfg.Node.AccountConfig()

    if err != nil {
        utils.Fatalf("Failed to read configuration: %v", err)
    }

    password := getPassPhrase("Your new account is locked with a password. Please give a password. Do not forget this password.", true, 0, utils.MakePasswordList(ctx))

    ks := stack.AccountManager().Backends(keystore.KeyStoreType)[0].(*keystore.KeyStore)
    account, err := ks.NewAccount(password)
    address, err := keystore.StoreKey(keydir, password, scryptN, scryptP)

    if err != nil {
        utils.Fatalf("Failed to create account: %v", err)
    }
    fmt.Printf("Address: {%x}\n", account.Address)
    fmt.Printf("Address: {%x}\n", address)
    return nil
}

@@ -134,7 +134,7 @@ Fatal: could not decrypt key with given passphrase
func TestUnlockFlag(t *testing.T) {
    datadir := tmpDatadirWithKeystore(t)
    geth := runGeth(t,
        "--datadir", datadir, "--nat", "none", "--nodiscover", "--dev",
        "--datadir", datadir, "--nat", "none", "--nodiscover", "--maxpeers", "0", "--port", "0",
        "--unlock", "f466859ead1932d743d622cb74fc058882e8648a",
        "js", "testdata/empty.js")
    geth.Expect(`
@@ -146,7 +146,7 @@ Passphrase: {{.InputLine "foobar"}}

    wantMessages := []string{
        "Unlocked account",
        "=0xf466859ead1932d743d622cb74fc058882e8648a",
        "=0xf466859eAD1932D743d622CB74FC058882E8648A",
    }
    for _, m := range wantMessages {
        if !strings.Contains(geth.StderrText(), m) {
@@ -158,7 +158,7 @@ Passphrase: {{.InputLine "foobar"}}
func TestUnlockFlagWrongPassword(t *testing.T) {
    datadir := tmpDatadirWithKeystore(t)
    geth := runGeth(t,
        "--datadir", datadir, "--nat", "none", "--nodiscover", "--dev",
        "--datadir", datadir, "--nat", "none", "--nodiscover", "--maxpeers", "0", "--port", "0",
        "--unlock", "f466859ead1932d743d622cb74fc058882e8648a")
    defer geth.ExpectExit()
    geth.Expect(`
@@ -177,7 +177,7 @@ Fatal: Failed to unlock account f466859ead1932d743d622cb74fc058882e8648a (could
func TestUnlockFlagMultiIndex(t *testing.T) {
    datadir := tmpDatadirWithKeystore(t)
    geth := runGeth(t,
        "--datadir", datadir, "--nat", "none", "--nodiscover", "--dev",
        "--datadir", datadir, "--nat", "none", "--nodiscover", "--maxpeers", "0", "--port", "0",
        "--unlock", "0,2",
        "js", "testdata/empty.js")
    geth.Expect(`
@@ -191,8 +191,8 @@ Passphrase: {{.InputLine "foobar"}}

    wantMessages := []string{
        "Unlocked account",
        "=0x7ef5a6135f1fd6a02593eedc869c6d41d934aef8",
        "=0x289d485d9771714cce91d3393d764e1311907acc",
        "=0x7EF5A6135f1FD6a02593eEdC869c6D41D934aef8",
        "=0x289d485D9771714CCe91D3393D764E1311907ACc",
    }
    for _, m := range wantMessages {
        if !strings.Contains(geth.StderrText(), m) {
@@ -204,15 +204,15 @@ Passphrase: {{.InputLine "foobar"}}
func TestUnlockFlagPasswordFile(t *testing.T) {
    datadir := tmpDatadirWithKeystore(t)
    geth := runGeth(t,
        "--datadir", datadir, "--nat", "none", "--nodiscover", "--dev",
        "--datadir", datadir, "--nat", "none", "--nodiscover", "--maxpeers", "0", "--port", "0",
        "--password", "testdata/passwords.txt", "--unlock", "0,2",
        "js", "testdata/empty.js")
    geth.ExpectExit()

    wantMessages := []string{
        "Unlocked account",
        "=0x7ef5a6135f1fd6a02593eedc869c6d41d934aef8",
        "=0x289d485d9771714cce91d3393d764e1311907acc",
        "=0x7EF5A6135f1FD6a02593eEdC869c6D41D934aef8",
        "=0x289d485D9771714CCe91D3393D764E1311907ACc",
    }
    for _, m := range wantMessages {
        if !strings.Contains(geth.StderrText(), m) {
@@ -224,7 +224,7 @@ func TestUnlockFlagPasswordFile(t *testing.T) {
func TestUnlockFlagPasswordFileWrongPassword(t *testing.T) {
    datadir := tmpDatadirWithKeystore(t)
    geth := runGeth(t,
        "--datadir", datadir, "--nat", "none", "--nodiscover", "--dev",
        "--datadir", datadir, "--nat", "none", "--nodiscover", "--maxpeers", "0", "--port", "0",
        "--password", "testdata/wrong-passwords.txt", "--unlock", "0,2")
    defer geth.ExpectExit()
    geth.Expect(`
@@ -235,7 +235,7 @@ Fatal: Failed to unlock account 0 (could not decrypt key with given passphrase)
func TestUnlockFlagAmbiguous(t *testing.T) {
    store := filepath.Join("..", "..", "accounts", "keystore", "testdata", "dupes")
|
||||
geth := runGeth(t,
|
||||
"--keystore", store, "--nat", "none", "--nodiscover", "--dev",
|
||||
"--keystore", store, "--nat", "none", "--nodiscover", "--maxpeers", "0", "--port", "0",
|
||||
"--unlock", "f466859ead1932d743d622cb74fc058882e8648a",
|
||||
"js", "testdata/empty.js")
|
||||
defer geth.ExpectExit()
|
||||
@ -261,7 +261,7 @@ In order to avoid this warning, you need to remove the following duplicate key f
|
||||
|
||||
wantMessages := []string{
|
||||
"Unlocked account",
|
||||
"=0xf466859ead1932d743d622cb74fc058882e8648a",
|
||||
"=0xf466859eAD1932D743d622CB74FC058882E8648A",
|
||||
}
|
||||
for _, m := range wantMessages {
|
||||
if !strings.Contains(geth.StderrText(), m) {
|
||||
@ -273,7 +273,7 @@ In order to avoid this warning, you need to remove the following duplicate key f
|
||||
func TestUnlockFlagAmbiguousWrongPassword(t *testing.T) {
|
||||
store := filepath.Join("..", "..", "accounts", "keystore", "testdata", "dupes")
|
||||
geth := runGeth(t,
|
||||
"--keystore", store, "--nat", "none", "--nodiscover", "--dev",
|
||||
"--keystore", store, "--nat", "none", "--nodiscover", "--maxpeers", "0", "--port", "0",
|
||||
"--unlock", "f466859ead1932d743d622cb74fc058882e8648a")
|
||||
defer geth.ExpectExit()
|
||||
|
||||
|
@ -31,7 +31,9 @@ import (
|
||||
"github.com/ethereum/go-ethereum/core"
|
||||
"github.com/ethereum/go-ethereum/core/state"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/eth/downloader"
|
||||
"github.com/ethereum/go-ethereum/ethdb"
|
||||
"github.com/ethereum/go-ethereum/event"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"github.com/ethereum/go-ethereum/trie"
|
||||
"github.com/syndtr/goleveldb/leveldb/util"
|
||||
@ -71,7 +73,7 @@ It expects the genesis file as argument.`,
|
||||
The import command imports blocks from an RLP-encoded form. The form can be one file
|
||||
with several RLP-encoded blocks, or several files can be used.
|
||||
|
||||
If only one file is used, import error will result in failure. If several files are used,
|
||||
If only one file is used, import error will result in failure. If several files are used,
|
||||
processing will proceed even if an individual RLP-file import failure occurs.`,
|
||||
}
|
||||
exportCommand = cli.Command{
|
||||
@ -90,6 +92,23 @@ Requires a first argument of the file to write to.
|
||||
Optional second and third arguments control the first and
|
||||
last block to write. In this mode, the file will be appended
|
||||
if already existing.`,
|
||||
}
|
||||
copydbCommand = cli.Command{
|
||||
Action: utils.MigrateFlags(copyDb),
|
||||
Name: "copydb",
|
||||
Usage: "Create a local chain from a target chaindata folder",
|
||||
ArgsUsage: "<sourceChaindataDir>",
|
||||
Flags: []cli.Flag{
|
||||
utils.DataDirFlag,
|
||||
utils.CacheFlag,
|
||||
utils.SyncModeFlag,
|
||||
utils.FakePoWFlag,
|
||||
utils.TestnetFlag,
|
||||
utils.RinkebyFlag,
|
||||
},
|
||||
Category: "BLOCKCHAIN COMMANDS",
|
||||
Description: `
|
||||
The first argument must be the directory containing the blockchain to download from`,
|
||||
}
|
||||
removedbCommand = cli.Command{
|
||||
Action: utils.MigrateFlags(removeDB),
|
||||
@ -183,7 +202,7 @@ func importChain(ctx *cli.Context) error {
|
||||
|
||||
if len(ctx.Args()) == 1 {
|
||||
if err := utils.ImportChain(chain, ctx.Args().First()); err != nil {
|
||||
utils.Fatalf("Import error: %v", err)
|
||||
log.Error("Import error", "err", err)
|
||||
}
|
||||
} else {
|
||||
for _, arg := range ctx.Args() {
|
||||
@ -192,7 +211,7 @@ func importChain(ctx *cli.Context) error {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
chain.Stop()
|
||||
fmt.Printf("Import done in %v.\n\n", time.Since(start))
|
||||
|
||||
// Output pre-compaction stats mostly to see the import trashing
|
||||
@ -268,6 +287,54 @@ func exportChain(ctx *cli.Context) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func copyDb(ctx *cli.Context) error {
|
||||
// Ensure we have a source chain directory to copy
|
||||
if len(ctx.Args()) != 1 {
|
||||
utils.Fatalf("Source chaindata directory path argument missing")
|
||||
}
|
||||
// Initialize a new chain for the running node to sync into
|
||||
stack := makeFullNode(ctx)
|
||||
chain, chainDb := utils.MakeChain(ctx, stack)
|
||||
|
||||
syncmode := *utils.GlobalTextMarshaler(ctx, utils.SyncModeFlag.Name).(*downloader.SyncMode)
|
||||
dl := downloader.New(syncmode, chainDb, new(event.TypeMux), chain, nil, nil)
|
||||
|
||||
// Create a source peer to satisfy downloader requests from
|
||||
db, err := ethdb.NewLDBDatabase(ctx.Args().First(), ctx.GlobalInt(utils.CacheFlag.Name), 256)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
hc, err := core.NewHeaderChain(db, chain.Config(), chain.Engine(), func() bool { return false })
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
peer := downloader.NewFakePeer("local", db, hc, dl)
|
||||
if err = dl.RegisterPeer("local", 63, peer); err != nil {
|
||||
return err
|
||||
}
|
||||
// Synchronise with the simulated peer
|
||||
start := time.Now()
|
||||
|
||||
currentHeader := hc.CurrentHeader()
|
||||
if err = dl.Synchronise("local", currentHeader.Hash(), hc.GetTd(currentHeader.Hash(), currentHeader.Number.Uint64()), syncmode); err != nil {
|
||||
return err
|
||||
}
|
||||
for dl.Synchronising() {
|
||||
time.Sleep(10 * time.Millisecond)
|
||||
}
|
||||
fmt.Printf("Database copy done in %v\n", time.Since(start))
|
||||
|
||||
// Compact the entire database to remove any sync overhead
|
||||
start = time.Now()
|
||||
fmt.Println("Compacting entire database...")
|
||||
if err = chainDb.(*ethdb.LDBDatabase).LDB().CompactRange(util.Range{}); err != nil {
|
||||
utils.Fatalf("Compaction failed: %v", err)
|
||||
}
|
||||
fmt.Printf("Compaction done in %v.\n\n", time.Since(start))
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func removeDB(ctx *cli.Context) error {
|
||||
stack, _ := makeConfigNode(ctx)
|
||||
|
||||
|
@ -18,7 +18,6 @@ package main
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"encoding/hex"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
@ -29,7 +28,7 @@ import (
|
||||
cli "gopkg.in/urfave/cli.v1"
|
||||
|
||||
"github.com/ethereum/go-ethereum/cmd/utils"
|
||||
"github.com/ethereum/go-ethereum/contracts/release"
|
||||
"github.com/ethereum/go-ethereum/dashboard"
|
||||
"github.com/ethereum/go-ethereum/eth"
|
||||
"github.com/ethereum/go-ethereum/node"
|
||||
"github.com/ethereum/go-ethereum/params"
|
||||
@ -76,10 +75,11 @@ type ethstatsConfig struct {
|
||||
}
|
||||
|
||||
type gethConfig struct {
|
||||
Eth eth.Config
|
||||
Shh whisper.Config
|
||||
Node node.Config
|
||||
Ethstats ethstatsConfig
|
||||
Eth eth.Config
|
||||
Shh whisper.Config
|
||||
Node node.Config
|
||||
Ethstats ethstatsConfig
|
||||
Dashboard dashboard.Config
|
||||
}
|
||||
|
||||
func loadConfig(file string, cfg *gethConfig) error {
|
||||
@ -110,9 +110,10 @@ func defaultNodeConfig() node.Config {
|
||||
func makeConfigNode(ctx *cli.Context) (*node.Node, gethConfig) {
|
||||
// Load defaults.
|
||||
cfg := gethConfig{
|
||||
Eth: eth.DefaultConfig,
|
||||
Shh: whisper.DefaultConfig,
|
||||
Node: defaultNodeConfig(),
|
||||
Eth: eth.DefaultConfig,
|
||||
Shh: whisper.DefaultConfig,
|
||||
Node: defaultNodeConfig(),
|
||||
Dashboard: dashboard.DefaultConfig,
|
||||
}
|
||||
|
||||
// Load config file.
|
||||
@ -134,6 +135,7 @@ func makeConfigNode(ctx *cli.Context) (*node.Node, gethConfig) {
|
||||
}
|
||||
|
||||
utils.SetShhConfig(ctx, stack, &cfg.Shh)
|
||||
utils.SetDashboardConfig(ctx, &cfg.Dashboard)
|
||||
|
||||
return stack, cfg
|
||||
}
|
||||
@ -153,9 +155,12 @@ func makeFullNode(ctx *cli.Context) *node.Node {
|
||||
|
||||
utils.RegisterEthService(stack, &cfg.Eth)
|
||||
|
||||
if ctx.GlobalBool(utils.DashboardEnabledFlag.Name) {
|
||||
utils.RegisterDashboardService(stack, &cfg.Dashboard, gitCommit)
|
||||
}
|
||||
// Whisper must be explicitly enabled by specifying at least 1 whisper flag or in dev mode
|
||||
shhEnabled := enableWhisper(ctx)
|
||||
shhAutoEnabled := !ctx.GlobalIsSet(utils.WhisperEnabledFlag.Name) && ctx.GlobalIsSet(utils.DevModeFlag.Name)
|
||||
shhAutoEnabled := !ctx.GlobalIsSet(utils.WhisperEnabledFlag.Name) && ctx.GlobalIsSet(utils.DeveloperFlag.Name)
|
||||
if shhEnabled || shhAutoEnabled {
|
||||
if ctx.GlobalIsSet(utils.WhisperMaxMessageSizeFlag.Name) {
|
||||
cfg.Shh.MaxMessageSize = uint32(ctx.Int(utils.WhisperMaxMessageSizeFlag.Name))
|
||||
@ -170,21 +175,6 @@ func makeFullNode(ctx *cli.Context) *node.Node {
|
||||
if cfg.Ethstats.URL != "" {
|
||||
utils.RegisterEthStatsService(stack, cfg.Ethstats.URL)
|
||||
}
|
||||
|
||||
// Add the release oracle service so it boots along with node.
|
||||
if err := stack.Register(func(ctx *node.ServiceContext) (node.Service, error) {
|
||||
config := release.Config{
|
||||
Oracle: relOracle,
|
||||
Major: uint32(params.VersionMajor),
|
||||
Minor: uint32(params.VersionMinor),
|
||||
Patch: uint32(params.VersionPatch),
|
||||
}
|
||||
commit, _ := hex.DecodeString(gitCommit)
|
||||
copy(config.Commit[:], commit)
|
||||
return release.NewReleaseService(ctx, config)
|
||||
}); err != nil {
|
||||
utils.Fatalf("Failed to register the Geth release oracle service: %v", err)
|
||||
}
|
||||
return stack
|
||||
}
|
||||
|
||||
|
@ -17,8 +17,10 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"os/signal"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/ethereum/go-ethereum/cmd/utils"
|
||||
@ -112,7 +114,22 @@ func localConsole(ctx *cli.Context) error {
|
||||
// console to it.
|
||||
func remoteConsole(ctx *cli.Context) error {
|
||||
// Attach to a remotely running geth instance and start the JavaScript console
|
||||
client, err := dialRPC(ctx.Args().First())
|
||||
endpoint := ctx.Args().First()
|
||||
if endpoint == "" {
|
||||
path := node.DefaultDataDir()
|
||||
if ctx.GlobalIsSet(utils.DataDirFlag.Name) {
|
||||
path = ctx.GlobalString(utils.DataDirFlag.Name)
|
||||
}
|
||||
if path != "" {
|
||||
if ctx.GlobalBool(utils.TestnetFlag.Name) {
|
||||
path = filepath.Join(path, "testnet")
|
||||
} else if ctx.GlobalBool(utils.RinkebyFlag.Name) {
|
||||
path = filepath.Join(path, "rinkeby")
|
||||
}
|
||||
}
|
||||
endpoint = fmt.Sprintf("%s/geth.ipc", path)
|
||||
}
|
||||
client, err := dialRPC(endpoint)
|
||||
if err != nil {
|
||||
utils.Fatalf("Unable to attach to remote geth: %v", err)
|
||||
}
|
||||
|
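A minimal sketch of attaching to that default IPC endpoint from Go — it assumes the mainnet data directory (no testnet/rinkeby suffix) and that rpc.Dial treats a bare filesystem path as an IPC socket, as it does for geth.ipc:

```go
package main

import (
	"fmt"
	"log"
	"path/filepath"

	"github.com/ethereum/go-ethereum/node"
	"github.com/ethereum/go-ethereum/rpc"
)

func main() {
	// Same default the hunk above falls back to when no endpoint is given.
	endpoint := filepath.Join(node.DefaultDataDir(), "geth.ipc")

	client, err := rpc.Dial(endpoint)
	if err != nil {
		log.Fatalf("Unable to attach to geth: %v", err)
	}
	defer client.Close()

	// Simple sanity call over the attached connection.
	var version string
	if err := client.Call(&version, "web3_clientVersion"); err != nil {
		log.Fatalf("RPC call failed: %v", err)
	}
	fmt.Println("Attached to", version)
}
```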
@ -21,6 +21,7 @@ import (
|
||||
"fmt"
|
||||
"os"
|
||||
"runtime"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
@ -60,6 +61,11 @@ var (
|
||||
utils.DataDirFlag,
|
||||
utils.KeyStoreDirFlag,
|
||||
utils.NoUSBFlag,
|
||||
utils.DashboardEnabledFlag,
|
||||
utils.DashboardAddrFlag,
|
||||
utils.DashboardPortFlag,
|
||||
utils.DashboardRefreshFlag,
|
||||
utils.DashboardAssetsFlag,
|
||||
utils.EthashCacheDirFlag,
|
||||
utils.EthashCachesInMemoryFlag,
|
||||
utils.EthashCachesOnDiskFlag,
|
||||
@ -67,6 +73,8 @@ var (
|
||||
utils.EthashDatasetsInMemoryFlag,
|
||||
utils.EthashDatasetsOnDiskFlag,
|
||||
utils.TxPoolNoLocalsFlag,
|
||||
utils.TxPoolJournalFlag,
|
||||
utils.TxPoolRejournalFlag,
|
||||
utils.TxPoolPriceLimitFlag,
|
||||
utils.TxPoolPriceBumpFlag,
|
||||
utils.TxPoolAccountSlotsFlag,
|
||||
@ -77,10 +85,13 @@ var (
|
||||
utils.FastSyncFlag,
|
||||
utils.LightModeFlag,
|
||||
utils.SyncModeFlag,
|
||||
utils.GCModeFlag,
|
||||
utils.LightServFlag,
|
||||
utils.LightPeersFlag,
|
||||
utils.LightKDFFlag,
|
||||
utils.CacheFlag,
|
||||
utils.CacheDatabaseFlag,
|
||||
utils.CacheGCFlag,
|
||||
utils.TrieCacheGenFlag,
|
||||
utils.ListenPortFlag,
|
||||
utils.MaxPeersFlag,
|
||||
@ -96,12 +107,14 @@ var (
|
||||
utils.NetrestrictFlag,
|
||||
utils.NodeKeyFileFlag,
|
||||
utils.NodeKeyHexFlag,
|
||||
utils.DevModeFlag,
|
||||
utils.DeveloperFlag,
|
||||
utils.DeveloperPeriodFlag,
|
||||
utils.TestnetFlag,
|
||||
utils.RinkebyFlag,
|
||||
utils.VMEnableDebugFlag,
|
||||
utils.NetworkIdFlag,
|
||||
utils.RPCCORSDomainFlag,
|
||||
utils.RPCVirtualHostsFlag,
|
||||
utils.EthStatsURLFlag,
|
||||
utils.MetricsEnabledFlag,
|
||||
utils.FakePoWFlag,
|
||||
@ -143,6 +156,7 @@ func init() {
|
||||
initCommand,
|
||||
importCommand,
|
||||
exportCommand,
|
||||
copydbCommand,
|
||||
removedbCommand,
|
||||
dumpCommand,
|
||||
// See monitorcmd.go:
|
||||
@ -155,6 +169,7 @@ func init() {
|
||||
attachCommand,
|
||||
javascriptCommand,
|
||||
// See misccmd.go:
|
||||
makecacheCommand,
|
||||
makedagCommand,
|
||||
versionCommand,
|
||||
bugCommand,
|
||||
@ -162,6 +177,7 @@ func init() {
|
||||
// See config.go
|
||||
dumpConfigCommand,
|
||||
}
|
||||
sort.Sort(cli.CommandsByName(app.Commands))
|
||||
|
||||
app.Flags = append(app.Flags, nodeFlags...)
|
||||
app.Flags = append(app.Flags, rpcFlags...)
|
||||
@ -234,35 +250,44 @@ func startNode(ctx *cli.Context, stack *node.Node) {
|
||||
}
|
||||
stateReader := ethclient.NewClient(rpcClient)
|
||||
|
||||
// Open and self derive any wallets already attached
|
||||
// Open any wallets already attached
|
||||
for _, wallet := range stack.AccountManager().Wallets() {
|
||||
if err := wallet.Open(""); err != nil {
|
||||
log.Warn("Failed to open wallet", "url", wallet.URL(), "err", err)
|
||||
} else {
|
||||
wallet.SelfDerive(accounts.DefaultBaseDerivationPath, stateReader)
|
||||
}
|
||||
}
|
||||
// Listen for wallet event till termination
|
||||
for event := range events {
|
||||
if event.Arrive {
|
||||
switch event.Kind {
|
||||
case accounts.WalletArrived:
|
||||
if err := event.Wallet.Open(""); err != nil {
|
||||
log.Warn("New wallet appeared, failed to open", "url", event.Wallet.URL(), "err", err)
|
||||
}
|
||||
case accounts.WalletOpened:
|
||||
status, _ := event.Wallet.Status()
|
||||
log.Info("New wallet appeared", "url", event.Wallet.URL(), "status", status)
|
||||
|
||||
if event.Wallet.URL().Scheme == "ledger" {
|
||||
event.Wallet.SelfDerive(accounts.DefaultLedgerBaseDerivationPath, stateReader)
|
||||
} else {
|
||||
log.Info("New wallet appeared", "url", event.Wallet.URL(), "status", event.Wallet.Status())
|
||||
event.Wallet.SelfDerive(accounts.DefaultBaseDerivationPath, stateReader)
|
||||
}
|
||||
} else {
|
||||
|
||||
case accounts.WalletDropped:
|
||||
log.Info("Old wallet dropped", "url", event.Wallet.URL())
|
||||
event.Wallet.Close()
|
||||
}
|
||||
}
|
||||
}()
|
||||
// Start auxiliary services if enabled
|
||||
if ctx.GlobalBool(utils.MiningEnabledFlag.Name) {
|
||||
if ctx.GlobalBool(utils.MiningEnabledFlag.Name) || ctx.GlobalBool(utils.DeveloperFlag.Name) {
|
||||
// Mining only makes sense if a full Ethereum node is running
|
||||
if ctx.GlobalBool(utils.LightModeFlag.Name) || ctx.GlobalString(utils.SyncModeFlag.Name) == "light" {
|
||||
utils.Fatalf("Light clients do not support mining")
|
||||
}
|
||||
var ethereum *eth.Ethereum
|
||||
if err := stack.Service(ðereum); err != nil {
|
||||
utils.Fatalf("ethereum service not running: %v", err)
|
||||
utils.Fatalf("Ethereum service not running: %v", err)
|
||||
}
|
||||
// Use a reduced number of threads if requested
|
||||
if threads := ctx.GlobalInt(utils.MinerThreadsFlag.Name); threads > 0 {
|
||||
|
@ -18,9 +18,7 @@ package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
@ -33,14 +31,27 @@ import (
|
||||
)
|
||||
|
||||
var (
|
||||
makedagCommand = cli.Command{
|
||||
Action: utils.MigrateFlags(makedag),
|
||||
Name: "makedag",
|
||||
Usage: "Generate ethash DAG (for testing)",
|
||||
makecacheCommand = cli.Command{
|
||||
Action: utils.MigrateFlags(makecache),
|
||||
Name: "makecache",
|
||||
Usage: "Generate ethash verification cache (for testing)",
|
||||
ArgsUsage: "<blockNum> <outputDir>",
|
||||
Category: "MISCELLANEOUS COMMANDS",
|
||||
Description: `
|
||||
The makedag command generates an ethash DAG in /tmp/dag.
|
||||
The makecache command generates an ethash cache in <outputDir>.
|
||||
|
||||
This command exists to support the system testing project.
|
||||
Regular users do not need to execute it.
|
||||
`,
|
||||
}
|
||||
makedagCommand = cli.Command{
|
||||
Action: utils.MigrateFlags(makedag),
|
||||
Name: "makedag",
|
||||
Usage: "Generate ethash mining DAG (for testing)",
|
||||
ArgsUsage: "<blockNum> <outputDir>",
|
||||
Category: "MISCELLANEOUS COMMANDS",
|
||||
Description: `
|
||||
The makedag command generates an ethash DAG in <outputDir>.
|
||||
|
||||
This command exists to support the system testing project.
|
||||
Regular users do not need to execute it.
|
||||
@ -65,33 +76,33 @@ The output of this command is supposed to be machine-readable.
|
||||
}
|
||||
)
|
||||
|
||||
// makecache generates an ethash verification cache into the provided folder.
|
||||
func makecache(ctx *cli.Context) error {
|
||||
args := ctx.Args()
|
||||
if len(args) != 2 {
|
||||
utils.Fatalf(`Usage: geth makecache <block number> <outputdir>`)
|
||||
}
|
||||
block, err := strconv.ParseUint(args[0], 0, 64)
|
||||
if err != nil {
|
||||
utils.Fatalf("Invalid block number: %v", err)
|
||||
}
|
||||
ethash.MakeCache(block, args[1])
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// makedag generates an ethash mining DAG into the provided folder.
|
||||
func makedag(ctx *cli.Context) error {
|
||||
args := ctx.Args()
|
||||
wrongArgs := func() {
|
||||
if len(args) != 2 {
|
||||
utils.Fatalf(`Usage: geth makedag <block number> <outputdir>`)
|
||||
}
|
||||
switch {
|
||||
case len(args) == 2:
|
||||
blockNum, err := strconv.ParseUint(args[0], 0, 64)
|
||||
dir := args[1]
|
||||
if err != nil {
|
||||
wrongArgs()
|
||||
} else {
|
||||
dir = filepath.Clean(dir)
|
||||
// seems to require a trailing slash
|
||||
if !strings.HasSuffix(dir, "/") {
|
||||
dir = dir + "/"
|
||||
}
|
||||
_, err = ioutil.ReadDir(dir)
|
||||
if err != nil {
|
||||
utils.Fatalf("Can't find dir")
|
||||
}
|
||||
fmt.Println("making DAG, this could take awhile...")
|
||||
ethash.MakeDataset(blockNum, dir)
|
||||
}
|
||||
default:
|
||||
wrongArgs()
|
||||
block, err := strconv.ParseUint(args[0], 0, 64)
|
||||
if err != nil {
|
||||
utils.Fatalf("Invalid block number: %v", err)
|
||||
}
|
||||
ethash.MakeDataset(block, args[1])
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
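Both commands are thin wrappers around the ethash package; a minimal sketch of generating the same artifacts programmatically (the block number and output directory are made up):

```go
package main

import (
	"log"
	"os"

	"github.com/ethereum/go-ethereum/consensus/ethash"
)

func main() {
	// Hypothetical <blockNum> and <outputDir>, mirroring the CLI arguments above.
	block := uint64(4370000)
	dir := "/tmp/ethash"

	if err := os.MkdirAll(dir, 0755); err != nil {
		log.Fatalf("Cannot create output directory: %v", err)
	}
	// MakeCache writes the verification cache for the epoch containing `block`.
	ethash.MakeCache(block, dir)
	// MakeDataset writes the full mining DAG for the same epoch (much larger).
	ethash.MakeDataset(block, dir)
}
```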
@ -123,7 +134,6 @@ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with geth. If not, see <http://www.gnu.org/licenses/>.
|
||||
`)
|
||||
along with geth. If not, see <http://www.gnu.org/licenses/>.`)
|
||||
return nil
|
||||
}
|
||||
|
@ -22,6 +22,8 @@ import (
|
||||
"io"
|
||||
"sort"
|
||||
|
||||
"strings"
|
||||
|
||||
"github.com/ethereum/go-ethereum/cmd/utils"
|
||||
"github.com/ethereum/go-ethereum/internal/debug"
|
||||
"gopkg.in/urfave/cli.v1"
|
||||
@ -72,8 +74,8 @@ var AppHelpFlagGroups = []flagGroup{
|
||||
utils.NetworkIdFlag,
|
||||
utils.TestnetFlag,
|
||||
utils.RinkebyFlag,
|
||||
utils.DevModeFlag,
|
||||
utils.SyncModeFlag,
|
||||
utils.GCModeFlag,
|
||||
utils.EthStatsURLFlag,
|
||||
utils.IdentityFlag,
|
||||
utils.LightServFlag,
|
||||
@ -81,6 +83,12 @@ var AppHelpFlagGroups = []flagGroup{
|
||||
utils.LightKDFFlag,
|
||||
},
|
||||
},
|
||||
{Name: "DEVELOPER CHAIN",
|
||||
Flags: []cli.Flag{
|
||||
utils.DeveloperFlag,
|
||||
utils.DeveloperPeriodFlag,
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "ETHASH",
|
||||
Flags: []cli.Flag{
|
||||
@ -92,10 +100,22 @@ var AppHelpFlagGroups = []flagGroup{
|
||||
utils.EthashDatasetsOnDiskFlag,
|
||||
},
|
||||
},
|
||||
//{
|
||||
// Name: "DASHBOARD",
|
||||
// Flags: []cli.Flag{
|
||||
// utils.DashboardEnabledFlag,
|
||||
// utils.DashboardAddrFlag,
|
||||
// utils.DashboardPortFlag,
|
||||
// utils.DashboardRefreshFlag,
|
||||
// utils.DashboardAssetsFlag,
|
||||
// },
|
||||
//},
|
||||
{
|
||||
Name: "TRANSACTION POOL",
|
||||
Flags: []cli.Flag{
|
||||
utils.TxPoolNoLocalsFlag,
|
||||
utils.TxPoolJournalFlag,
|
||||
utils.TxPoolRejournalFlag,
|
||||
utils.TxPoolPriceLimitFlag,
|
||||
utils.TxPoolPriceBumpFlag,
|
||||
utils.TxPoolAccountSlotsFlag,
|
||||
@ -109,6 +129,8 @@ var AppHelpFlagGroups = []flagGroup{
|
||||
Name: "PERFORMANCE TUNING",
|
||||
Flags: []cli.Flag{
|
||||
utils.CacheFlag,
|
||||
utils.CacheDatabaseFlag,
|
||||
utils.CacheGCFlag,
|
||||
utils.TrieCacheGenFlag,
|
||||
},
|
||||
},
|
||||
@ -134,6 +156,7 @@ var AppHelpFlagGroups = []flagGroup{
|
||||
utils.IPCDisabledFlag,
|
||||
utils.IPCPathFlag,
|
||||
utils.RPCCORSDomainFlag,
|
||||
utils.RPCVirtualHostsFlag,
|
||||
utils.JSpathFlag,
|
||||
utils.ExecFlag,
|
||||
utils.PreloadJSFlag,
|
||||
@ -261,6 +284,9 @@ func init() {
|
||||
uncategorized := []cli.Flag{}
|
||||
for _, flag := range data.(*cli.App).Flags {
|
||||
if _, ok := categorized[flag.String()]; !ok {
|
||||
if strings.HasPrefix(flag.GetName(), "dashboard") {
|
||||
continue
|
||||
}
|
||||
uncategorized = append(uncategorized, flag)
|
||||
}
|
||||
}
|
||||
|
430
cmd/p2psim/main.go
Normal file
@ -0,0 +1,430 @@
|
||||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of go-ethereum.
|
||||
//
|
||||
// go-ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// go-ethereum is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
// p2psim provides a command-line client for a simulation HTTP API.
|
||||
//
|
||||
// Here is an example of creating a 2 node network with the first node
|
||||
// connected to the second:
|
||||
//
|
||||
// $ p2psim node create
|
||||
// Created node01
|
||||
//
|
||||
// $ p2psim node start node01
|
||||
// Started node01
|
||||
//
|
||||
// $ p2psim node create
|
||||
// Created node02
|
||||
//
|
||||
// $ p2psim node start node02
|
||||
// Started node02
|
||||
//
|
||||
// $ p2psim node connect node01 node02
|
||||
// Connected node01 to node02
|
||||
//
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"strings"
|
||||
"text/tabwriter"
|
||||
|
||||
"github.com/ethereum/go-ethereum/crypto"
|
||||
"github.com/ethereum/go-ethereum/p2p"
|
||||
"github.com/ethereum/go-ethereum/p2p/discover"
|
||||
"github.com/ethereum/go-ethereum/p2p/simulations"
|
||||
"github.com/ethereum/go-ethereum/p2p/simulations/adapters"
|
||||
"github.com/ethereum/go-ethereum/rpc"
|
||||
"gopkg.in/urfave/cli.v1"
|
||||
)
|
||||
|
||||
var client *simulations.Client
|
||||
|
||||
func main() {
|
||||
app := cli.NewApp()
|
||||
app.Usage = "devp2p simulation command-line client"
|
||||
app.Flags = []cli.Flag{
|
||||
cli.StringFlag{
|
||||
Name: "api",
|
||||
Value: "http://localhost:8888",
|
||||
Usage: "simulation API URL",
|
||||
EnvVar: "P2PSIM_API_URL",
|
||||
},
|
||||
}
|
||||
app.Before = func(ctx *cli.Context) error {
|
||||
client = simulations.NewClient(ctx.GlobalString("api"))
|
||||
return nil
|
||||
}
|
||||
app.Commands = []cli.Command{
|
||||
{
|
||||
Name: "show",
|
||||
Usage: "show network information",
|
||||
Action: showNetwork,
|
||||
},
|
||||
{
|
||||
Name: "events",
|
||||
Usage: "stream network events",
|
||||
Action: streamNetwork,
|
||||
Flags: []cli.Flag{
|
||||
cli.BoolFlag{
|
||||
Name: "current",
|
||||
Usage: "get existing nodes and conns first",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "filter",
|
||||
Value: "",
|
||||
Usage: "message filter",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "snapshot",
|
||||
Usage: "create a network snapshot to stdout",
|
||||
Action: createSnapshot,
|
||||
},
|
||||
{
|
||||
Name: "load",
|
||||
Usage: "load a network snapshot from stdin",
|
||||
Action: loadSnapshot,
|
||||
},
|
||||
{
|
||||
Name: "node",
|
||||
Usage: "manage simulation nodes",
|
||||
Action: listNodes,
|
||||
Subcommands: []cli.Command{
|
||||
{
|
||||
Name: "list",
|
||||
Usage: "list nodes",
|
||||
Action: listNodes,
|
||||
},
|
||||
{
|
||||
Name: "create",
|
||||
Usage: "create a node",
|
||||
Action: createNode,
|
||||
Flags: []cli.Flag{
|
||||
cli.StringFlag{
|
||||
Name: "name",
|
||||
Value: "",
|
||||
Usage: "node name",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "services",
|
||||
Value: "",
|
||||
Usage: "node services (comma separated)",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "key",
|
||||
Value: "",
|
||||
Usage: "node private key (hex encoded)",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "show",
|
||||
ArgsUsage: "<node>",
|
||||
Usage: "show node information",
|
||||
Action: showNode,
|
||||
},
|
||||
{
|
||||
Name: "start",
|
||||
ArgsUsage: "<node>",
|
||||
Usage: "start a node",
|
||||
Action: startNode,
|
||||
},
|
||||
{
|
||||
Name: "stop",
|
||||
ArgsUsage: "<node>",
|
||||
Usage: "stop a node",
|
||||
Action: stopNode,
|
||||
},
|
||||
{
|
||||
Name: "connect",
|
||||
ArgsUsage: "<node> <peer>",
|
||||
Usage: "connect a node to a peer node",
|
||||
Action: connectNode,
|
||||
},
|
||||
{
|
||||
Name: "disconnect",
|
||||
ArgsUsage: "<node> <peer>",
|
||||
Usage: "disconnect a node from a peer node",
|
||||
Action: disconnectNode,
|
||||
},
|
||||
{
|
||||
Name: "rpc",
|
||||
ArgsUsage: "<node> <method> [<args>]",
|
||||
Usage: "call a node RPC method",
|
||||
Action: rpcNode,
|
||||
Flags: []cli.Flag{
|
||||
cli.BoolFlag{
|
||||
Name: "subscribe",
|
||||
Usage: "method is a subscription",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
app.Run(os.Args)
|
||||
}
|
||||
|
||||
func showNetwork(ctx *cli.Context) error {
|
||||
if len(ctx.Args()) != 0 {
|
||||
return cli.ShowCommandHelp(ctx, ctx.Command.Name)
|
||||
}
|
||||
network, err := client.GetNetwork()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
w := tabwriter.NewWriter(ctx.App.Writer, 1, 2, 2, ' ', 0)
|
||||
defer w.Flush()
|
||||
fmt.Fprintf(w, "NODES\t%d\n", len(network.Nodes))
|
||||
fmt.Fprintf(w, "CONNS\t%d\n", len(network.Conns))
|
||||
return nil
|
||||
}
|
||||
|
||||
func streamNetwork(ctx *cli.Context) error {
|
||||
if len(ctx.Args()) != 0 {
|
||||
return cli.ShowCommandHelp(ctx, ctx.Command.Name)
|
||||
}
|
||||
events := make(chan *simulations.Event)
|
||||
sub, err := client.SubscribeNetwork(events, simulations.SubscribeOpts{
|
||||
Current: ctx.Bool("current"),
|
||||
Filter: ctx.String("filter"),
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer sub.Unsubscribe()
|
||||
enc := json.NewEncoder(ctx.App.Writer)
|
||||
for {
|
||||
select {
|
||||
case event := <-events:
|
||||
if err := enc.Encode(event); err != nil {
|
||||
return err
|
||||
}
|
||||
case err := <-sub.Err():
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func createSnapshot(ctx *cli.Context) error {
|
||||
if len(ctx.Args()) != 0 {
|
||||
return cli.ShowCommandHelp(ctx, ctx.Command.Name)
|
||||
}
|
||||
snap, err := client.CreateSnapshot()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return json.NewEncoder(os.Stdout).Encode(snap)
|
||||
}
|
||||
|
||||
func loadSnapshot(ctx *cli.Context) error {
|
||||
if len(ctx.Args()) != 0 {
|
||||
return cli.ShowCommandHelp(ctx, ctx.Command.Name)
|
||||
}
|
||||
snap := &simulations.Snapshot{}
|
||||
if err := json.NewDecoder(os.Stdin).Decode(snap); err != nil {
|
||||
return err
|
||||
}
|
||||
return client.LoadSnapshot(snap)
|
||||
}
|
||||
|
||||
func listNodes(ctx *cli.Context) error {
|
||||
if len(ctx.Args()) != 0 {
|
||||
return cli.ShowCommandHelp(ctx, ctx.Command.Name)
|
||||
}
|
||||
nodes, err := client.GetNodes()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
w := tabwriter.NewWriter(ctx.App.Writer, 1, 2, 2, ' ', 0)
|
||||
defer w.Flush()
|
||||
fmt.Fprintf(w, "NAME\tPROTOCOLS\tID\n")
|
||||
for _, node := range nodes {
|
||||
fmt.Fprintf(w, "%s\t%s\t%s\n", node.Name, strings.Join(protocolList(node), ","), node.ID)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func protocolList(node *p2p.NodeInfo) []string {
|
||||
protos := make([]string, 0, len(node.Protocols))
|
||||
for name := range node.Protocols {
|
||||
protos = append(protos, name)
|
||||
}
|
||||
return protos
|
||||
}
|
||||
|
||||
func createNode(ctx *cli.Context) error {
|
||||
if len(ctx.Args()) != 0 {
|
||||
return cli.ShowCommandHelp(ctx, ctx.Command.Name)
|
||||
}
|
||||
config := &adapters.NodeConfig{
|
||||
Name: ctx.String("name"),
|
||||
}
|
||||
if key := ctx.String("key"); key != "" {
|
||||
privKey, err := crypto.HexToECDSA(key)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
config.ID = discover.PubkeyID(&privKey.PublicKey)
|
||||
config.PrivateKey = privKey
|
||||
}
|
||||
if services := ctx.String("services"); services != "" {
|
||||
config.Services = strings.Split(services, ",")
|
||||
}
|
||||
node, err := client.CreateNode(config)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
fmt.Fprintln(ctx.App.Writer, "Created", node.Name)
|
||||
return nil
|
||||
}
|
||||
|
||||
func showNode(ctx *cli.Context) error {
|
||||
args := ctx.Args()
|
||||
if len(args) != 1 {
|
||||
return cli.ShowCommandHelp(ctx, ctx.Command.Name)
|
||||
}
|
||||
nodeName := args[0]
|
||||
node, err := client.GetNode(nodeName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
w := tabwriter.NewWriter(ctx.App.Writer, 1, 2, 2, ' ', 0)
|
||||
defer w.Flush()
|
||||
fmt.Fprintf(w, "NAME\t%s\n", node.Name)
|
||||
fmt.Fprintf(w, "PROTOCOLS\t%s\n", strings.Join(protocolList(node), ","))
|
||||
fmt.Fprintf(w, "ID\t%s\n", node.ID)
|
||||
fmt.Fprintf(w, "ENODE\t%s\n", node.Enode)
|
||||
for name, proto := range node.Protocols {
|
||||
fmt.Fprintln(w)
|
||||
fmt.Fprintf(w, "--- PROTOCOL INFO: %s\n", name)
|
||||
fmt.Fprintf(w, "%v\n", proto)
|
||||
fmt.Fprintf(w, "---\n")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func startNode(ctx *cli.Context) error {
|
||||
args := ctx.Args()
|
||||
if len(args) != 1 {
|
||||
return cli.ShowCommandHelp(ctx, ctx.Command.Name)
|
||||
}
|
||||
nodeName := args[0]
|
||||
if err := client.StartNode(nodeName); err != nil {
|
||||
return err
|
||||
}
|
||||
fmt.Fprintln(ctx.App.Writer, "Started", nodeName)
|
||||
return nil
|
||||
}
|
||||
|
||||
func stopNode(ctx *cli.Context) error {
|
||||
args := ctx.Args()
|
||||
if len(args) != 1 {
|
||||
return cli.ShowCommandHelp(ctx, ctx.Command.Name)
|
||||
}
|
||||
nodeName := args[0]
|
||||
if err := client.StopNode(nodeName); err != nil {
|
||||
return err
|
||||
}
|
||||
fmt.Fprintln(ctx.App.Writer, "Stopped", nodeName)
|
||||
return nil
|
||||
}
|
||||
|
||||
func connectNode(ctx *cli.Context) error {
|
||||
args := ctx.Args()
|
||||
if len(args) != 2 {
|
||||
return cli.ShowCommandHelp(ctx, ctx.Command.Name)
|
||||
}
|
||||
nodeName := args[0]
|
||||
peerName := args[1]
|
||||
if err := client.ConnectNode(nodeName, peerName); err != nil {
|
||||
return err
|
||||
}
|
||||
fmt.Fprintln(ctx.App.Writer, "Connected", nodeName, "to", peerName)
|
||||
return nil
|
||||
}
|
||||
|
||||
func disconnectNode(ctx *cli.Context) error {
|
||||
args := ctx.Args()
|
||||
if len(args) != 2 {
|
||||
return cli.ShowCommandHelp(ctx, ctx.Command.Name)
|
||||
}
|
||||
nodeName := args[0]
|
||||
peerName := args[1]
|
||||
if err := client.DisconnectNode(nodeName, peerName); err != nil {
|
||||
return err
|
||||
}
|
||||
fmt.Fprintln(ctx.App.Writer, "Disconnected", nodeName, "from", peerName)
|
||||
return nil
|
||||
}
|
||||
|
||||
func rpcNode(ctx *cli.Context) error {
|
||||
args := ctx.Args()
|
||||
if len(args) < 2 {
|
||||
return cli.ShowCommandHelp(ctx, ctx.Command.Name)
|
||||
}
|
||||
nodeName := args[0]
|
||||
method := args[1]
|
||||
rpcClient, err := client.RPCClient(context.Background(), nodeName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if ctx.Bool("subscribe") {
|
||||
return rpcSubscribe(rpcClient, ctx.App.Writer, method, args[3:]...)
|
||||
}
|
||||
var result interface{}
|
||||
params := make([]interface{}, len(args[3:]))
|
||||
for i, v := range args[3:] {
|
||||
params[i] = v
|
||||
}
|
||||
if err := rpcClient.Call(&result, method, params...); err != nil {
|
||||
return err
|
||||
}
|
||||
return json.NewEncoder(ctx.App.Writer).Encode(result)
|
||||
}
|
||||
|
||||
func rpcSubscribe(client *rpc.Client, out io.Writer, method string, args ...string) error {
|
||||
parts := strings.SplitN(method, "_", 2)
|
||||
namespace := parts[0]
|
||||
method = parts[1]
|
||||
ch := make(chan interface{})
|
||||
subArgs := make([]interface{}, len(args)+1)
|
||||
subArgs[0] = method
|
||||
for i, v := range args {
|
||||
subArgs[i+1] = v
|
||||
}
|
||||
sub, err := client.Subscribe(context.Background(), namespace, ch, subArgs...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer sub.Unsubscribe()
|
||||
enc := json.NewEncoder(out)
|
||||
for {
|
||||
select {
|
||||
case v := <-ch:
|
||||
if err := enc.Encode(v); err != nil {
|
||||
return err
|
||||
}
|
||||
case err := <-sub.Err():
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
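The p2psim commands above are thin wrappers over the simulations HTTP client; a minimal sketch of driving the same create/start/connect flow from Go, assuming a simulation API is listening on the default http://localhost:8888:

```go
package main

import (
	"log"

	"github.com/ethereum/go-ethereum/p2p/simulations"
	"github.com/ethereum/go-ethereum/p2p/simulations/adapters"
)

func main() {
	client := simulations.NewClient("http://localhost:8888")

	// Create and start two nodes, mirroring `p2psim node create` / `node start`.
	var names []string
	for _, name := range []string{"node01", "node02"} {
		node, err := client.CreateNode(&adapters.NodeConfig{Name: name})
		if err != nil {
			log.Fatalf("create %s: %v", name, err)
		}
		if err := client.StartNode(node.Name); err != nil {
			log.Fatalf("start %s: %v", name, err)
		}
		names = append(names, node.Name)
	}
	// Connect the first node to the second, as `p2psim node connect` does.
	if err := client.ConnectNode(names[0], names[1]); err != nil {
		log.Fatalf("connect: %v", err)
	}
	log.Printf("Connected %s to %s", names[0], names[1])
}
```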
379
cmd/puppeth/genesis.go
Normal file
@ -0,0 +1,379 @@
|
||||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of go-ethereum.
|
||||
//
|
||||
// go-ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// go-ethereum is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"math"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/ethereum/go-ethereum/consensus/ethash"
|
||||
"github.com/ethereum/go-ethereum/core"
|
||||
"github.com/ethereum/go-ethereum/params"
|
||||
)
|
||||
|
||||
// cppEthereumGenesisSpec represents the genesis specification format used by the
|
||||
// C++ Ethereum implementation.
|
||||
type cppEthereumGenesisSpec struct {
|
||||
SealEngine string `json:"sealEngine"`
|
||||
Params struct {
|
||||
AccountStartNonce hexutil.Uint64 `json:"accountStartNonce"`
|
||||
HomesteadForkBlock hexutil.Uint64 `json:"homesteadForkBlock"`
|
||||
EIP150ForkBlock hexutil.Uint64 `json:"EIP150ForkBlock"`
|
||||
EIP158ForkBlock hexutil.Uint64 `json:"EIP158ForkBlock"`
|
||||
ByzantiumForkBlock hexutil.Uint64 `json:"byzantiumForkBlock"`
|
||||
ConstantinopleForkBlock hexutil.Uint64 `json:"constantinopleForkBlock"`
|
||||
NetworkID hexutil.Uint64 `json:"networkID"`
|
||||
ChainID hexutil.Uint64 `json:"chainID"`
|
||||
MaximumExtraDataSize hexutil.Uint64 `json:"maximumExtraDataSize"`
|
||||
MinGasLimit hexutil.Uint64 `json:"minGasLimit"`
|
||||
MaxGasLimit hexutil.Uint64 `json:"maxGasLimit"`
|
||||
GasLimitBoundDivisor hexutil.Uint64 `json:"gasLimitBoundDivisor"`
|
||||
MinimumDifficulty *hexutil.Big `json:"minimumDifficulty"`
|
||||
DifficultyBoundDivisor *hexutil.Big `json:"difficultyBoundDivisor"`
|
||||
DurationLimit *hexutil.Big `json:"durationLimit"`
|
||||
BlockReward *hexutil.Big `json:"blockReward"`
|
||||
} `json:"params"`
|
||||
|
||||
Genesis struct {
|
||||
Nonce hexutil.Bytes `json:"nonce"`
|
||||
Difficulty *hexutil.Big `json:"difficulty"`
|
||||
MixHash common.Hash `json:"mixHash"`
|
||||
Author common.Address `json:"author"`
|
||||
Timestamp hexutil.Uint64 `json:"timestamp"`
|
||||
ParentHash common.Hash `json:"parentHash"`
|
||||
ExtraData hexutil.Bytes `json:"extraData"`
|
||||
GasLimit hexutil.Uint64 `json:"gasLimit"`
|
||||
} `json:"genesis"`
|
||||
|
||||
Accounts map[common.Address]*cppEthereumGenesisSpecAccount `json:"accounts"`
|
||||
}
|
||||
|
||||
// cppEthereumGenesisSpecAccount is the prefunded genesis account and/or precompiled
|
||||
// contract definition.
|
||||
type cppEthereumGenesisSpecAccount struct {
|
||||
Balance *hexutil.Big `json:"balance"`
|
||||
Nonce uint64 `json:"nonce,omitempty"`
|
||||
Precompiled *cppEthereumGenesisSpecBuiltin `json:"precompiled,omitempty"`
|
||||
}
|
||||
|
||||
// cppEthereumGenesisSpecBuiltin is the precompiled contract definition.
|
||||
type cppEthereumGenesisSpecBuiltin struct {
|
||||
Name string `json:"name,omitempty"`
|
||||
StartingBlock hexutil.Uint64 `json:"startingBlock,omitempty"`
|
||||
Linear *cppEthereumGenesisSpecLinearPricing `json:"linear,omitempty"`
|
||||
}
|
||||
|
||||
type cppEthereumGenesisSpecLinearPricing struct {
|
||||
Base uint64 `json:"base"`
|
||||
Word uint64 `json:"word"`
|
||||
}
|
||||
|
||||
// newCppEthereumGenesisSpec converts a go-ethereum genesis block into a Parity specific
|
||||
// chain specification format.
|
||||
func newCppEthereumGenesisSpec(network string, genesis *core.Genesis) (*cppEthereumGenesisSpec, error) {
|
||||
// Only ethash is currently supported between go-ethereum and cpp-ethereum
|
||||
if genesis.Config.Ethash == nil {
|
||||
return nil, errors.New("unsupported consensus engine")
|
||||
}
|
||||
// Reconstruct the chain spec in Parity's format
|
||||
spec := &cppEthereumGenesisSpec{
|
||||
SealEngine: "Ethash",
|
||||
}
|
||||
spec.Params.AccountStartNonce = 0
|
||||
spec.Params.HomesteadForkBlock = (hexutil.Uint64)(genesis.Config.HomesteadBlock.Uint64())
|
||||
spec.Params.EIP150ForkBlock = (hexutil.Uint64)(genesis.Config.EIP150Block.Uint64())
|
||||
spec.Params.EIP158ForkBlock = (hexutil.Uint64)(genesis.Config.EIP158Block.Uint64())
|
||||
spec.Params.ByzantiumForkBlock = (hexutil.Uint64)(genesis.Config.ByzantiumBlock.Uint64())
|
||||
spec.Params.ConstantinopleForkBlock = (hexutil.Uint64)(math.MaxUint64)
|
||||
|
||||
spec.Params.NetworkID = (hexutil.Uint64)(genesis.Config.ChainId.Uint64())
|
||||
spec.Params.ChainID = (hexutil.Uint64)(genesis.Config.ChainId.Uint64())
|
||||
|
||||
spec.Params.MaximumExtraDataSize = (hexutil.Uint64)(params.MaximumExtraDataSize)
|
||||
spec.Params.MinGasLimit = (hexutil.Uint64)(params.MinGasLimit)
|
||||
spec.Params.MaxGasLimit = (hexutil.Uint64)(math.MaxUint64)
|
||||
spec.Params.MinimumDifficulty = (*hexutil.Big)(params.MinimumDifficulty)
|
||||
spec.Params.DifficultyBoundDivisor = (*hexutil.Big)(params.DifficultyBoundDivisor)
|
||||
spec.Params.GasLimitBoundDivisor = (hexutil.Uint64)(params.GasLimitBoundDivisor)
|
||||
spec.Params.DurationLimit = (*hexutil.Big)(params.DurationLimit)
|
||||
spec.Params.BlockReward = (*hexutil.Big)(ethash.FrontierBlockReward)
|
||||
|
||||
spec.Genesis.Nonce = (hexutil.Bytes)(make([]byte, 8))
|
||||
binary.LittleEndian.PutUint64(spec.Genesis.Nonce[:], genesis.Nonce)
|
||||
|
||||
spec.Genesis.MixHash = genesis.Mixhash
|
||||
spec.Genesis.Difficulty = (*hexutil.Big)(genesis.Difficulty)
|
||||
spec.Genesis.Author = genesis.Coinbase
|
||||
spec.Genesis.Timestamp = (hexutil.Uint64)(genesis.Timestamp)
|
||||
spec.Genesis.ParentHash = genesis.ParentHash
|
||||
spec.Genesis.ExtraData = (hexutil.Bytes)(genesis.ExtraData)
|
||||
spec.Genesis.GasLimit = (hexutil.Uint64)(genesis.GasLimit)
|
||||
|
||||
spec.Accounts = make(map[common.Address]*cppEthereumGenesisSpecAccount)
|
||||
for address, account := range genesis.Alloc {
|
||||
spec.Accounts[address] = &cppEthereumGenesisSpecAccount{
|
||||
Balance: (*hexutil.Big)(account.Balance),
|
||||
Nonce: account.Nonce,
|
||||
}
|
||||
}
|
||||
spec.Accounts[common.BytesToAddress([]byte{1})].Precompiled = &cppEthereumGenesisSpecBuiltin{
|
||||
Name: "ecrecover", Linear: &cppEthereumGenesisSpecLinearPricing{Base: 3000},
|
||||
}
|
||||
spec.Accounts[common.BytesToAddress([]byte{2})].Precompiled = &cppEthereumGenesisSpecBuiltin{
|
||||
Name: "sha256", Linear: &cppEthereumGenesisSpecLinearPricing{Base: 60, Word: 12},
|
||||
}
|
||||
spec.Accounts[common.BytesToAddress([]byte{3})].Precompiled = &cppEthereumGenesisSpecBuiltin{
|
||||
Name: "ripemd160", Linear: &cppEthereumGenesisSpecLinearPricing{Base: 600, Word: 120},
|
||||
}
|
||||
spec.Accounts[common.BytesToAddress([]byte{4})].Precompiled = &cppEthereumGenesisSpecBuiltin{
|
||||
Name: "identity", Linear: &cppEthereumGenesisSpecLinearPricing{Base: 15, Word: 3},
|
||||
}
|
||||
if genesis.Config.ByzantiumBlock != nil {
|
||||
spec.Accounts[common.BytesToAddress([]byte{5})].Precompiled = &cppEthereumGenesisSpecBuiltin{
|
||||
Name: "modexp", StartingBlock: (hexutil.Uint64)(genesis.Config.ByzantiumBlock.Uint64()),
|
||||
}
|
||||
spec.Accounts[common.BytesToAddress([]byte{6})].Precompiled = &cppEthereumGenesisSpecBuiltin{
|
||||
Name: "alt_bn128_G1_add", StartingBlock: (hexutil.Uint64)(genesis.Config.ByzantiumBlock.Uint64()), Linear: &cppEthereumGenesisSpecLinearPricing{Base: 500},
|
||||
}
|
||||
spec.Accounts[common.BytesToAddress([]byte{7})].Precompiled = &cppEthereumGenesisSpecBuiltin{
|
||||
Name: "alt_bn128_G1_mul", StartingBlock: (hexutil.Uint64)(genesis.Config.ByzantiumBlock.Uint64()), Linear: &cppEthereumGenesisSpecLinearPricing{Base: 40000},
|
||||
}
|
||||
spec.Accounts[common.BytesToAddress([]byte{8})].Precompiled = &cppEthereumGenesisSpecBuiltin{
|
||||
Name: "alt_bn128_pairing_product", StartingBlock: (hexutil.Uint64)(genesis.Config.ByzantiumBlock.Uint64()),
|
||||
}
|
||||
}
|
||||
return spec, nil
|
||||
}
|
||||
|
||||
// parityChainSpec is the chain specification format used by Parity.
|
||||
type parityChainSpec struct {
|
||||
Name string `json:"name"`
|
||||
Engine struct {
|
||||
Ethash struct {
|
||||
Params struct {
|
||||
MinimumDifficulty *hexutil.Big `json:"minimumDifficulty"`
|
||||
DifficultyBoundDivisor *hexutil.Big `json:"difficultyBoundDivisor"`
|
||||
GasLimitBoundDivisor hexutil.Uint64 `json:"gasLimitBoundDivisor"`
|
||||
DurationLimit *hexutil.Big `json:"durationLimit"`
|
||||
BlockReward *hexutil.Big `json:"blockReward"`
|
||||
HomesteadTransition uint64 `json:"homesteadTransition"`
|
||||
EIP150Transition uint64 `json:"eip150Transition"`
|
||||
EIP160Transition uint64 `json:"eip160Transition"`
|
||||
EIP161abcTransition uint64 `json:"eip161abcTransition"`
|
||||
EIP161dTransition uint64 `json:"eip161dTransition"`
|
||||
EIP649Reward *hexutil.Big `json:"eip649Reward"`
|
||||
EIP100bTransition uint64 `json:"eip100bTransition"`
|
||||
EIP649Transition uint64 `json:"eip649Transition"`
|
||||
} `json:"params"`
|
||||
} `json:"Ethash"`
|
||||
} `json:"engine"`
|
||||
|
||||
Params struct {
|
||||
MaximumExtraDataSize hexutil.Uint64 `json:"maximumExtraDataSize"`
|
||||
MinGasLimit hexutil.Uint64 `json:"minGasLimit"`
|
||||
NetworkID hexutil.Uint64 `json:"networkID"`
|
||||
MaxCodeSize uint64 `json:"maxCodeSize"`
|
||||
EIP155Transition uint64 `json:"eip155Transition"`
|
||||
EIP98Transition uint64 `json:"eip98Transition"`
|
||||
EIP86Transition uint64 `json:"eip86Transition"`
|
||||
EIP140Transition uint64 `json:"eip140Transition"`
|
||||
EIP211Transition uint64 `json:"eip211Transition"`
|
||||
EIP214Transition uint64 `json:"eip214Transition"`
|
||||
EIP658Transition uint64 `json:"eip658Transition"`
|
||||
} `json:"params"`
|
||||
|
||||
Genesis struct {
|
||||
Seal struct {
|
||||
Ethereum struct {
|
||||
Nonce hexutil.Bytes `json:"nonce"`
|
||||
MixHash hexutil.Bytes `json:"mixHash"`
|
||||
} `json:"ethereum"`
|
||||
} `json:"seal"`
|
||||
|
||||
Difficulty *hexutil.Big `json:"difficulty"`
|
||||
Author common.Address `json:"author"`
|
||||
Timestamp hexutil.Uint64 `json:"timestamp"`
|
||||
ParentHash common.Hash `json:"parentHash"`
|
||||
ExtraData hexutil.Bytes `json:"extraData"`
|
||||
GasLimit hexutil.Uint64 `json:"gasLimit"`
|
||||
} `json:"genesis"`
|
||||
|
||||
Nodes []string `json:"nodes"`
|
||||
Accounts map[common.Address]*parityChainSpecAccount `json:"accounts"`
|
||||
}
|
||||
|
||||
// parityChainSpecAccount is the prefunded genesis account and/or precompiled
|
||||
// contract definition.
|
||||
type parityChainSpecAccount struct {
|
||||
Balance *hexutil.Big `json:"balance"`
|
||||
Nonce uint64 `json:"nonce,omitempty"`
|
||||
Builtin *parityChainSpecBuiltin `json:"builtin,omitempty"`
|
||||
}
|
||||
|
||||
// parityChainSpecBuiltin is the precompiled contract definition.
|
||||
type parityChainSpecBuiltin struct {
|
||||
Name string `json:"name,omitempty"`
|
||||
ActivateAt uint64 `json:"activate_at,omitempty"`
|
||||
Pricing *parityChainSpecPricing `json:"pricing,omitempty"`
|
||||
}
|
||||
|
||||
// parityChainSpecPricing represents the different pricing models that builtin
|
||||
// contracts might advertise using.
|
||||
type parityChainSpecPricing struct {
|
||||
Linear *parityChainSpecLinearPricing `json:"linear,omitempty"`
|
||||
ModExp *parityChainSpecModExpPricing `json:"modexp,omitempty"`
|
||||
AltBnPairing *parityChainSpecAltBnPairingPricing `json:"alt_bn128_pairing,omitempty"`
|
||||
}
|
||||
|
||||
type parityChainSpecLinearPricing struct {
|
||||
Base uint64 `json:"base"`
|
||||
Word uint64 `json:"word"`
|
||||
}
|
||||
|
||||
type parityChainSpecModExpPricing struct {
|
||||
Divisor uint64 `json:"divisor"`
|
||||
}
|
||||
|
||||
type parityChainSpecAltBnPairingPricing struct {
|
||||
Base uint64 `json:"base"`
|
||||
Pair uint64 `json:"pair"`
|
||||
}
|
||||
|
||||
// newParityChainSpec converts a go-ethereum genesis block into a Parity specific
|
||||
// chain specification format.
|
||||
func newParityChainSpec(network string, genesis *core.Genesis, bootnodes []string) (*parityChainSpec, error) {
|
||||
// Only ethash is currently supported between go-ethereum and Parity
|
||||
if genesis.Config.Ethash == nil {
|
||||
return nil, errors.New("unsupported consensus engine")
|
||||
}
|
||||
// Reconstruct the chain spec in Parity's format
|
||||
spec := &parityChainSpec{
|
||||
Name: network,
|
||||
Nodes: bootnodes,
|
||||
}
|
||||
spec.Engine.Ethash.Params.MinimumDifficulty = (*hexutil.Big)(params.MinimumDifficulty)
|
||||
spec.Engine.Ethash.Params.DifficultyBoundDivisor = (*hexutil.Big)(params.DifficultyBoundDivisor)
|
||||
spec.Engine.Ethash.Params.GasLimitBoundDivisor = (hexutil.Uint64)(params.GasLimitBoundDivisor)
|
||||
spec.Engine.Ethash.Params.DurationLimit = (*hexutil.Big)(params.DurationLimit)
|
||||
spec.Engine.Ethash.Params.BlockReward = (*hexutil.Big)(ethash.FrontierBlockReward)
|
||||
spec.Engine.Ethash.Params.HomesteadTransition = genesis.Config.HomesteadBlock.Uint64()
|
||||
spec.Engine.Ethash.Params.EIP150Transition = genesis.Config.EIP150Block.Uint64()
|
||||
spec.Engine.Ethash.Params.EIP160Transition = genesis.Config.EIP155Block.Uint64()
|
||||
spec.Engine.Ethash.Params.EIP161abcTransition = genesis.Config.EIP158Block.Uint64()
|
||||
spec.Engine.Ethash.Params.EIP161dTransition = genesis.Config.EIP158Block.Uint64()
|
||||
spec.Engine.Ethash.Params.EIP649Reward = (*hexutil.Big)(ethash.ByzantiumBlockReward)
|
||||
spec.Engine.Ethash.Params.EIP100bTransition = genesis.Config.ByzantiumBlock.Uint64()
|
||||
spec.Engine.Ethash.Params.EIP649Transition = genesis.Config.ByzantiumBlock.Uint64()
|
||||
|
||||
spec.Params.MaximumExtraDataSize = (hexutil.Uint64)(params.MaximumExtraDataSize)
|
||||
spec.Params.MinGasLimit = (hexutil.Uint64)(params.MinGasLimit)
|
||||
spec.Params.NetworkID = (hexutil.Uint64)(genesis.Config.ChainId.Uint64())
|
||||
spec.Params.MaxCodeSize = params.MaxCodeSize
|
||||
spec.Params.EIP155Transition = genesis.Config.EIP155Block.Uint64()
|
||||
spec.Params.EIP98Transition = math.MaxUint64
|
||||
spec.Params.EIP86Transition = math.MaxUint64
|
||||
spec.Params.EIP140Transition = genesis.Config.ByzantiumBlock.Uint64()
|
||||
spec.Params.EIP211Transition = genesis.Config.ByzantiumBlock.Uint64()
|
||||
spec.Params.EIP214Transition = genesis.Config.ByzantiumBlock.Uint64()
|
||||
spec.Params.EIP658Transition = genesis.Config.ByzantiumBlock.Uint64()
|
||||
|
||||
spec.Genesis.Seal.Ethereum.Nonce = (hexutil.Bytes)(make([]byte, 8))
|
||||
binary.LittleEndian.PutUint64(spec.Genesis.Seal.Ethereum.Nonce[:], genesis.Nonce)
|
||||
|
||||
spec.Genesis.Seal.Ethereum.MixHash = (hexutil.Bytes)(genesis.Mixhash[:])
|
||||
spec.Genesis.Difficulty = (*hexutil.Big)(genesis.Difficulty)
|
||||
spec.Genesis.Author = genesis.Coinbase
|
||||
spec.Genesis.Timestamp = (hexutil.Uint64)(genesis.Timestamp)
|
||||
spec.Genesis.ParentHash = genesis.ParentHash
|
||||
spec.Genesis.ExtraData = (hexutil.Bytes)(genesis.ExtraData)
|
||||
spec.Genesis.GasLimit = (hexutil.Uint64)(genesis.GasLimit)
|
||||
|
||||
spec.Accounts = make(map[common.Address]*parityChainSpecAccount)
|
||||
for address, account := range genesis.Alloc {
|
||||
spec.Accounts[address] = &parityChainSpecAccount{
|
||||
Balance: (*hexutil.Big)(account.Balance),
|
||||
Nonce: account.Nonce,
|
||||
}
|
||||
}
|
||||
spec.Accounts[common.BytesToAddress([]byte{1})].Builtin = &parityChainSpecBuiltin{
|
||||
Name: "ecrecover", Pricing: &parityChainSpecPricing{Linear: &parityChainSpecLinearPricing{Base: 3000}},
|
||||
}
|
||||
spec.Accounts[common.BytesToAddress([]byte{2})].Builtin = &parityChainSpecBuiltin{
|
||||
Name: "sha256", Pricing: &parityChainSpecPricing{Linear: &parityChainSpecLinearPricing{Base: 60, Word: 12}},
|
||||
}
|
||||
spec.Accounts[common.BytesToAddress([]byte{3})].Builtin = &parityChainSpecBuiltin{
|
||||
Name: "ripemd160", Pricing: &parityChainSpecPricing{Linear: &parityChainSpecLinearPricing{Base: 600, Word: 120}},
|
||||
}
|
||||
spec.Accounts[common.BytesToAddress([]byte{4})].Builtin = &parityChainSpecBuiltin{
|
||||
Name: "identity", Pricing: &parityChainSpecPricing{Linear: &parityChainSpecLinearPricing{Base: 15, Word: 3}},
|
||||
}
|
||||
if genesis.Config.ByzantiumBlock != nil {
|
||||
spec.Accounts[common.BytesToAddress([]byte{5})].Builtin = &parityChainSpecBuiltin{
|
||||
Name: "modexp", ActivateAt: genesis.Config.ByzantiumBlock.Uint64(), Pricing: &parityChainSpecPricing{ModExp: &parityChainSpecModExpPricing{Divisor: 20}},
|
||||
}
|
||||
spec.Accounts[common.BytesToAddress([]byte{6})].Builtin = &parityChainSpecBuiltin{
|
||||
Name: "alt_bn128_add", ActivateAt: genesis.Config.ByzantiumBlock.Uint64(), Pricing: &parityChainSpecPricing{Linear: &parityChainSpecLinearPricing{Base: 500}},
|
||||
}
|
||||
spec.Accounts[common.BytesToAddress([]byte{7})].Builtin = &parityChainSpecBuiltin{
|
||||
Name: "alt_bn128_mul", ActivateAt: genesis.Config.ByzantiumBlock.Uint64(), Pricing: &parityChainSpecPricing{Linear: &parityChainSpecLinearPricing{Base: 40000}},
|
||||
}
|
||||
spec.Accounts[common.BytesToAddress([]byte{8})].Builtin = &parityChainSpecBuiltin{
|
||||
Name: "alt_bn128_pairing", ActivateAt: genesis.Config.ByzantiumBlock.Uint64(), Pricing: &parityChainSpecPricing{AltBnPairing: &parityChainSpecAltBnPairingPricing{Base: 100000, Pair: 80000}},
|
||||
}
|
||||
}
|
||||
return spec, nil
|
||||
}
|
||||
|
||||
// pyEthereumGenesisSpec represents the genesis specification format used by the
|
||||
// Python Ethereum implementation.
|
||||
type pyEthereumGenesisSpec struct {
|
||||
Nonce hexutil.Bytes `json:"nonce"`
|
||||
Timestamp hexutil.Uint64 `json:"timestamp"`
|
||||
ExtraData hexutil.Bytes `json:"extraData"`
|
||||
GasLimit hexutil.Uint64 `json:"gasLimit"`
|
||||
Difficulty *hexutil.Big `json:"difficulty"`
|
||||
Mixhash common.Hash `json:"mixhash"`
|
||||
Coinbase common.Address `json:"coinbase"`
|
||||
Alloc core.GenesisAlloc `json:"alloc"`
|
||||
ParentHash common.Hash `json:"parentHash"`
|
||||
}
|
||||
|
||||
// newPyEthereumGenesisSpec converts a go-ethereum genesis block into a Parity specific
|
||||
// chain specification format.
|
||||
func newPyEthereumGenesisSpec(network string, genesis *core.Genesis) (*pyEthereumGenesisSpec, error) {
|
||||
// Only ethash is currently supported between go-ethereum and pyethereum
|
||||
if genesis.Config.Ethash == nil {
|
||||
return nil, errors.New("unsupported consensus engine")
|
||||
}
|
||||
spec := &pyEthereumGenesisSpec{
|
||||
Timestamp: (hexutil.Uint64)(genesis.Timestamp),
|
||||
ExtraData: genesis.ExtraData,
|
||||
GasLimit: (hexutil.Uint64)(genesis.GasLimit),
|
||||
Difficulty: (*hexutil.Big)(genesis.Difficulty),
|
||||
Mixhash: genesis.Mixhash,
|
||||
Coinbase: genesis.Coinbase,
|
||||
Alloc: genesis.Alloc,
|
||||
ParentHash: genesis.ParentHash,
|
||||
}
|
||||
spec.Nonce = (hexutil.Bytes)(make([]byte, 8))
|
||||
binary.LittleEndian.PutUint64(spec.Nonce[:], genesis.Nonce)
|
||||
|
||||
return spec, nil
|
||||
}
|
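Since these converters are unexported helpers of puppeth's main package, a minimal in-package sketch of how one might be driven — it assumes core.DefaultGenesisBlock() (the ethash-based mainnet genesis) and a hypothetical output file name:

```go
// genesis_export.go — hypothetical helper sitting next to genesis.go in package main.
package main

import (
	"encoding/json"
	"io/ioutil"

	"github.com/ethereum/go-ethereum/core"
)

// exportParitySpec converts an ethash-based genesis into Parity's chain spec
// format and writes it to disk (sketch only, minimal error handling).
func exportParitySpec() error {
	genesis := core.DefaultGenesisBlock() // mainnet genesis, satisfies the ethash check

	spec, err := newParityChainSpec("mainnet", genesis, nil /* bootnodes */)
	if err != nil {
		return err
	}
	out, err := json.MarshalIndent(spec, "", "  ")
	if err != nil {
		return err
	}
	return ioutil.WriteFile("mainnet-parity.json", out, 0644)
}
```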
File diff suppressed because one or more lines are too long
@ -21,6 +21,7 @@ import (
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
"text/template"
|
||||
|
||||

@@ -30,21 +31,9 @@ import (
// ethstatsDockerfile is the Dockerfile required to build an ethstats backend
// and associated monitoring site.
var ethstatsDockerfile = `
FROM mhart/alpine-node:latest
FROM puppeth/ethstats:latest

RUN \
  apk add --update git && \
  git clone --depth=1 https://github.com/karalabe/eth-netstats && \
  apk del git && rm -rf /var/cache/apk/* && \
  \
  cd /eth-netstats && npm install && npm install -g grunt-cli && grunt

WORKDIR /eth-netstats
EXPOSE 3000

RUN echo 'module.exports = {trusted: [{{.Trusted}}], banned: []};' > lib/utils/config.js

CMD ["npm", "start"]
RUN echo 'module.exports = {trusted: [{{.Trusted}}], banned: [{{.Banned}}], reserved: ["yournode"]};' > lib/utils/config.js
`
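As an aside, the {{.Trusted}} and {{.Banned}} placeholders above are plain text/template fields. The self-contained sketch below shows how the rendered config.js line comes out; the inline template string is only a stand-in for the ethstatsDockerfile variable, and the sample addresses are made up.

package main

import (
	"os"
	"strings"
	"text/template"
)

func main() {
	// Stand-in for the config.js line from ethstatsDockerfile above.
	const line = `RUN echo 'module.exports = {trusted: [{{.Trusted}}], banned: [{{.Banned}}], reserved: ["yournode"]};' > lib/utils/config.js`

	// deployEthstats quotes each address before joining, so the rendered
	// JavaScript ends up with proper string literals.
	trusted := []string{`"node-one"`, `"node-two"`}
	banned := []string{`"10.0.0.66"`}

	tmpl := template.Must(template.New("config").Parse(line))
	tmpl.Execute(os.Stdout, map[string]interface{}{
		"Trusted": strings.Join(trusted, ", "),
		"Banned":  strings.Join(banned, ", "),
	})
}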

// ethstatsComposefile is the docker-compose.yml file required to deploy and
@@ -59,25 +48,37 @@ services:
      - "{{.Port}}:3000"{{end}}
    environment:
      - WS_SECRET={{.Secret}}{{if .VHost}}
      - VIRTUAL_HOST={{.VHost}}{{end}}
      - VIRTUAL_HOST={{.VHost}}{{end}}{{if .Banned}}
      - BANNED={{.Banned}}{{end}}
    logging:
      driver: "json-file"
      options:
        max-size: "1m"
        max-file: "10"
    restart: always
`

// deployEthstats deploys a new ethstats container to a remote machine via SSH,
// docker and docker-compose. If an instance with the specified network name
// already exists there, it will be overwritten!
func deployEthstats(client *sshClient, network string, port int, secret string, vhost string, trusted []string) ([]byte, error) {
func deployEthstats(client *sshClient, network string, port int, secret string, vhost string, trusted []string, banned []string, nocache bool) ([]byte, error) {
	// Generate the content to upload to the server
	workdir := fmt.Sprintf("%d", rand.Int63())
	files := make(map[string][]byte)

	trustedLabels := make([]string, len(trusted))
	for i, address := range trusted {
		trusted[i] = fmt.Sprintf("\"%s\"", address)
		trustedLabels[i] = fmt.Sprintf("\"%s\"", address)
	}
	bannedLabels := make([]string, len(banned))
	for i, address := range banned {
		bannedLabels[i] = fmt.Sprintf("\"%s\"", address)
	}

	dockerfile := new(bytes.Buffer)
	template.Must(template.New("").Parse(ethstatsDockerfile)).Execute(dockerfile, map[string]interface{}{
		"Trusted": strings.Join(trusted, ", "),
		"Trusted": strings.Join(trustedLabels, ", "),
		"Banned":  strings.Join(bannedLabels, ", "),
	})
	files[filepath.Join(workdir, "Dockerfile")] = dockerfile.Bytes()

@@ -87,6 +88,7 @@ func deployEthstats(client *sshClient, network string, port int, secret string,
		"Port":   port,
		"Secret": secret,
		"VHost":  vhost,
		"Banned": strings.Join(banned, ","),
	})
	files[filepath.Join(workdir, "docker-compose.yaml")] = composefile.Bytes()

@@ -97,7 +99,10 @@ func deployEthstats(client *sshClient, network string, port int, secret string,
	defer client.Run("rm -rf " + workdir)

	// Build and deploy the ethstats service
	return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s up -d --build", workdir, network))
	if nocache {
		return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s build --pull --no-cache && docker-compose -p %s up -d --force-recreate", workdir, network, network))
	}
	return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s up -d --build --force-recreate", workdir, network))
}
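The switch above from writing quoted strings back into the trusted parameter to a separate trustedLabels slice avoids mutating the caller's data: a slice argument shares its backing array with the caller. A self-contained sketch of that pitfall follows; the function and variable names are illustrative only.

package main

import "fmt"

// quoteInPlace mimics the behaviour removed by this diff: it rewrites the
// caller's slice elements while quoting them.
func quoteInPlace(addrs []string) {
	for i, a := range addrs {
		addrs[i] = fmt.Sprintf("%q", a)
	}
}

func main() {
	nodes := []string{"enode://aa...@10.0.0.1:30303"}
	quoteInPlace(nodes)
	// The caller's slice now contains the quoted copy, which would break any
	// later use of the raw address.
	fmt.Println(nodes[0])
}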

// ethstatsInfos is returned from an ethstats status check to allow reporting
@@ -107,11 +112,18 @@ type ethstatsInfos struct {
	port   int
	secret string
	config string
	banned []string
}

// String implements the stringer interface.
func (info *ethstatsInfos) String() string {
	return fmt.Sprintf("host=%s, port=%d, secret=%s", info.host, info.port, info.secret)
// Report converts the typed struct into a plain string->string map, containing
// most - but not all - fields for reporting to the user.
func (info *ethstatsInfos) Report() map[string]string {
	return map[string]string{
		"Website address":       info.host,
		"Website listener port": strconv.Itoa(info.port),
		"Login secret":          info.secret,
		"Banned addresses":      fmt.Sprintf("%v", info.banned),
	}
}

// checkEthstats does a health-check against an ethstats server to verify whether
@@ -145,6 +157,9 @@ func checkEthstats(client *sshClient, network string) (*ethstatsInfos, error) {
	if port != 80 && port != 443 {
		config += fmt.Sprintf(":%d", port)
	}
	// Retrieve the IP blacklist
	banned := strings.Split(infos.envvars["BANNED"], ",")

	// Run a sanity check to see if the port is reachable
	if err = checkPort(host, port); err != nil {
		log.Warn("Ethstats service seems unreachable", "server", host, "port", port, "err", err)
@@ -155,5 +170,6 @@ func checkEthstats(client *sshClient, network string) (*ethstatsInfos, error) {
		port:   port,
		secret: secret,
		config: config,
		banned: banned,
	}, nil
}
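One detail worth noting about the banned-list parsing above: strings.Split on an empty BANNED variable yields a one-element slice containing the empty string, not an empty slice, so any caller that wants a truly empty list has to filter it. The quick standalone illustration below uses Go's standard library; the filtering variant is illustrative, not taken from puppeth.

package main

import (
	"fmt"
	"strings"
)

func main() {
	// What the parsing above produces when BANNED is unset or empty.
	banned := strings.Split("", ",")
	fmt.Println(len(banned), banned) // 1 [""], not 0 []

	// A defensive variant that drops empty entries.
	var cleaned []string
	for _, item := range banned {
		if item = strings.TrimSpace(item); item != "" {
			cleaned = append(cleaned, item)
		}
	}
	fmt.Println(len(cleaned), cleaned) // 0 []
}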
211
cmd/puppeth/module_explorer.go
Normal file
@@ -0,0 +1,211 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.

package main

import (
	"bytes"
	"fmt"
	"html/template"
	"math/rand"
	"path/filepath"
	"strconv"
	"strings"

	"github.com/ethereum/go-ethereum/log"
)

// explorerDockerfile is the Dockerfile required to run a block explorer.
var explorerDockerfile = `
FROM puppeth/explorer:latest

ADD ethstats.json /ethstats.json
ADD chain.json /chain.json

RUN \
  echo '(cd ../eth-net-intelligence-api && pm2 start /ethstats.json)' > explorer.sh && \
  echo '(cd ../etherchain-light && npm start &)' >> explorer.sh && \
  echo '/parity/parity --chain=/chain.json --port={{.NodePort}} --tracing=on --fat-db=on --pruning=archive' >> explorer.sh

ENTRYPOINT ["/bin/sh", "explorer.sh"]
`

// explorerEthstats is the configuration file for the ethstats javascript client.
var explorerEthstats = `[
  {
    "name"             : "node-app",
    "script"           : "app.js",
    "log_date_format"  : "YYYY-MM-DD HH:mm Z",
    "merge_logs"       : false,
    "watch"            : false,
    "max_restarts"     : 10,
    "exec_interpreter" : "node",
    "exec_mode"        : "fork_mode",
    "env":
    {
      "NODE_ENV"        : "production",
      "RPC_HOST"        : "localhost",
      "RPC_PORT"        : "8545",
      "LISTENING_PORT"  : "{{.Port}}",
      "INSTANCE_NAME"   : "{{.Name}}",
      "CONTACT_DETAILS" : "",
      "WS_SERVER"       : "{{.Host}}",
      "WS_SECRET"       : "{{.Secret}}",
      "VERBOSITY"       : 2
    }
  }
]`

// explorerComposefile is the docker-compose.yml file required to deploy and
// maintain a block explorer.
var explorerComposefile = `
version: '2'
services:
  explorer:
    build: .
    image: {{.Network}}/explorer
    ports:
      - "{{.NodePort}}:{{.NodePort}}"
      - "{{.NodePort}}:{{.NodePort}}/udp"{{if not .VHost}}
      - "{{.WebPort}}:3000"{{end}}
    volumes:
      - {{.Datadir}}:/root/.local/share/io.parity.ethereum
    environment:
      - NODE_PORT={{.NodePort}}/tcp
      - STATS={{.Ethstats}}{{if .VHost}}
      - VIRTUAL_HOST={{.VHost}}
      - VIRTUAL_PORT=3000{{end}}
    logging:
      driver: "json-file"
      options:
        max-size: "1m"
        max-file: "10"
    restart: always
`

// deployExplorer deploys a new block explorer container to a remote machine via
// SSH, docker and docker-compose. If an instance with the specified network name
// already exists there, it will be overwritten!
func deployExplorer(client *sshClient, network string, chainspec []byte, config *explorerInfos, nocache bool) ([]byte, error) {
	// Generate the content to upload to the server
	workdir := fmt.Sprintf("%d", rand.Int63())
	files := make(map[string][]byte)

	dockerfile := new(bytes.Buffer)
	template.Must(template.New("").Parse(explorerDockerfile)).Execute(dockerfile, map[string]interface{}{
		"NodePort": config.nodePort,
	})
	files[filepath.Join(workdir, "Dockerfile")] = dockerfile.Bytes()

	ethstats := new(bytes.Buffer)
	template.Must(template.New("").Parse(explorerEthstats)).Execute(ethstats, map[string]interface{}{
		"Port":   config.nodePort,
		"Name":   config.ethstats[:strings.Index(config.ethstats, ":")],
		"Secret": config.ethstats[strings.Index(config.ethstats, ":")+1 : strings.Index(config.ethstats, "@")],
		"Host":   config.ethstats[strings.Index(config.ethstats, "@")+1:],
	})
	files[filepath.Join(workdir, "ethstats.json")] = ethstats.Bytes()

	composefile := new(bytes.Buffer)
	template.Must(template.New("").Parse(explorerComposefile)).Execute(composefile, map[string]interface{}{
		"Datadir":  config.datadir,
		"Network":  network,
		"NodePort": config.nodePort,
		"VHost":    config.webHost,
		"WebPort":  config.webPort,
		"Ethstats": config.ethstats[:strings.Index(config.ethstats, ":")],
	})
	files[filepath.Join(workdir, "docker-compose.yaml")] = composefile.Bytes()

	files[filepath.Join(workdir, "chain.json")] = chainspec

	// Upload the deployment files to the remote server (and clean up afterwards)
	if out, err := client.Upload(files); err != nil {
		return out, err
	}
	defer client.Run("rm -rf " + workdir)

	// Build and deploy the explorer service
	if nocache {
		return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s build --pull --no-cache && docker-compose -p %s up -d --force-recreate", workdir, network, network))
	}
	return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s up -d --build --force-recreate", workdir, network))
}
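The ethstats credential slicing in deployExplorer assumes the configured string has the shape "name:secret@host". Below is an illustrative standalone version of the same index arithmetic; parseEthstats is a hypothetical name, and like the original it performs no validation, so a malformed string would panic.

package main

import (
	"fmt"
	"strings"
)

// parseEthstats mirrors the slicing done in deployExplorer: it assumes the
// credential string looks like "name:secret@host".
func parseEthstats(cred string) (name, secret, host string) {
	name = cred[:strings.Index(cred, ":")]
	secret = cred[strings.Index(cred, ":")+1 : strings.Index(cred, "@")]
	host = cred[strings.Index(cred, "@")+1:]
	return
}

func main() {
	name, secret, host := parseEthstats("explorer:s3cr3t@stats.example.org:80")
	fmt.Println(name, secret, host) // explorer s3cr3t stats.example.org:80
}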

// explorerInfos is returned from a block explorer status check to allow reporting
// various configuration parameters.
type explorerInfos struct {
	datadir  string
	ethstats string
	nodePort int
	webHost  string
	webPort  int
}

// Report converts the typed struct into a plain string->string map, containing
// most - but not all - fields for reporting to the user.
func (info *explorerInfos) Report() map[string]string {
	report := map[string]string{
		"Data directory":          info.datadir,
		"Node listener port ":     strconv.Itoa(info.nodePort),
		"Ethstats username":       info.ethstats,
		"Website address ":        info.webHost,
		"Website listener port ":  strconv.Itoa(info.webPort),
	}
	return report
}

// checkExplorer does a health-check against a block explorer server to verify
// whether it's running, and if yes, whether it's responsive.
func checkExplorer(client *sshClient, network string) (*explorerInfos, error) {
	// Inspect a possible block explorer container on the host
	infos, err := inspectContainer(client, fmt.Sprintf("%s_explorer_1", network))
	if err != nil {
		return nil, err
	}
	if !infos.running {
		return nil, ErrServiceOffline
	}
	// Resolve the port from the host, or the reverse proxy
	webPort := infos.portmap["3000/tcp"]
	if webPort == 0 {
		if proxy, _ := checkNginx(client, network); proxy != nil {
			webPort = proxy.port
		}
	}
	if webPort == 0 {
		return nil, ErrNotExposed
	}
	// Resolve the host from the reverse-proxy and the config values
	host := infos.envvars["VIRTUAL_HOST"]
	if host == "" {
		host = client.server
	}
	// Run a sanity check to see if the devp2p port is reachable
	nodePort := infos.portmap[infos.envvars["NODE_PORT"]]
	if err = checkPort(client.server, nodePort); err != nil {
		log.Warn("Explorer devp2p port seems unreachable", "server", client.server, "port", nodePort, "err", err)
	}
	// Assemble and return the useful infos
	stats := &explorerInfos{
		datadir:  infos.volumes["/root/.local/share/io.parity.ethereum"],
		nodePort: nodePort,
		webHost:  host,
		webPort:  webPort,
		ethstats: infos.envvars["STATS"],
	}
	return stats, nil
}
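Both health checks above rely on checkPort(host, port), which is defined elsewhere in puppeth. The standalone approximation below only assumes that "reachable" means a TCP dial succeeds within a short timeout; it is a sketch of the idea, not puppeth's actual implementation.

package main

import (
	"fmt"
	"net"
	"strconv"
	"time"
)

// reachable approximates what a checkPort-style probe needs to establish:
// whether a TCP connection to host:port can be opened within a short timeout.
func reachable(host string, port int) error {
	conn, err := net.DialTimeout("tcp", net.JoinHostPort(host, strconv.Itoa(port)), 3*time.Second)
	if err != nil {
		return err
	}
	return conn.Close()
}

func main() {
	if err := reachable("example.org", 443); err != nil {
		fmt.Println("port seems unreachable:", err)
		return
	}
	fmt.Println("port reachable")
}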
Some files were not shown because too many files have changed in this diff