Compare commits
674 Commits
Commit list (674 commits): only the abbreviated SHA1 column of the compare view was captured; the author and date columns are empty. The listing begins with 4e13a09c50 and ends with 2a75fe3308.
.github/CODEOWNERS (vendored): 21 changed lines

@@ -2,6 +2,7 @@
# Each line is a file pattern followed by one or more owners.

accounts/usbwallet @karalabe
accounts/abi @gballet
consensus @karalabe
core/ @karalabe @holiman
eth/ @karalabe
@@ -9,24 +10,4 @@ les/ @zsfelfoldi
light/ @zsfelfoldi
mobile/ @karalabe
p2p/ @fjl @zsfelfoldi
swarm/bmt @zelig
swarm/dev @lmars
swarm/fuse @jmozah @holisticode
swarm/grafana_dashboards @nonsense
swarm/metrics @nonsense @holisticode
swarm/multihash @nolash
swarm/network/bitvector @zelig @janos @gbalint
swarm/network/priorityqueue @zelig @janos @gbalint
swarm/network/simulations @zelig
swarm/network/stream @janos @zelig @gbalint @holisticode @justelad
swarm/network/stream/intervals @janos
swarm/network/stream/testing @zelig
swarm/pot @zelig
swarm/pss @nolash @zelig @nonsense
swarm/services @zelig
swarm/state @justelad
swarm/storage/encryption @gbalint @zelig @nagydani
swarm/storage/mock @janos
swarm/storage/mru @nolash
swarm/testutil @lmars
whisper/ @gballet @gluk256
.github/CONTRIBUTING.md (vendored): 46 changed lines

@@ -1,16 +1,40 @@
# Contributing

Thank you for considering to help out with the source code! We welcome
contributions from anyone on the internet, and are grateful for even the
smallest of fixes!

If you'd like to contribute to go-ethereum, please fork, fix, commit and send a
pull request for the maintainers to review and merge into the main code base. If
you wish to submit more complex changes though, please check up with the core
devs first on [our gitter channel](https://gitter.im/ethereum/go-ethereum) to
ensure those changes are in line with the general philosophy of the project
and/or get some early feedback which can make both your efforts much lighter as
well as our review and merge procedures quick and simple.

## Coding guidelines

Please make sure your contributions adhere to our coding guidelines:

* Code must adhere to the official Go
[formatting](https://golang.org/doc/effective_go.html#formatting) guidelines
(i.e. uses [gofmt](https://golang.org/cmd/gofmt/)).
* Code must be documented adhering to the official Go
[commentary](https://golang.org/doc/effective_go.html#commentary) guidelines.
* Pull requests need to be based on and opened against the `master` branch.
* Commit messages should be prefixed with the package(s) they modify.
* E.g. "eth, rpc: make trace configs optional"

## Can I have feature X

Before you do a feature request please check and make sure that it isn't possible
through some other means. The JavaScript enabled console is a powerful feature
in the right hands. Please check our [Wiki page](https://github.com/ethereum/go-ethereum/wiki) for more info
Before you submit a feature request, please check and make sure that it isn't
possible through some other means. The JavaScript-enabled console is a powerful
feature in the right hands. Please check our
[Wiki page](https://github.com/ethereum/go-ethereum/wiki) for more info
and help.

## Contributing
## Configuration, dependencies, and tests

If you'd like to contribute to go-ethereum please fork, fix, commit and
send a pull request. Commits which do not comply with the coding standards
are ignored (use gofmt!).

See [Developers' Guide](https://github.com/ethereum/go-ethereum/wiki/Developers'-Guide)
for more details on configuring your environment, testing, and
dependency management.
Please see the [Developers' Guide](https://github.com/ethereum/go-ethereum/wiki/Developers'-Guide)
for more details on configuring your environment, managing project dependencies
and testing procedures.
.github/no-response.yml (vendored): 6 changed lines

@@ -1,11 +1,11 @@
# Number of days of inactivity before an Issue is closed for lack of response
daysUntilClose: 30
# Label requiring a response
responseRequiredLabel: more-information-needed
responseRequiredLabel: "need:more-information"
# Comment to post when closing an Issue for lack of response. Set to `false` to disable
closeComment: >
This issue has been automatically closed because there has been no response
to our request for more information from the original author. With only the
information that is currently in the issue, we don't have enough information
to take action. Please reach out if you have or find the answers we need so
that we can investigate further.
to take action. Please reach out if you have more relevant information or
answers to our questions so that we can investigate further.
.github/stale.yml (vendored): 2 changed lines

@@ -7,7 +7,7 @@ exemptLabels:
- pinned
- security
# Label to use when marking an issue as stale
staleLabel: stale
staleLabel: "status:inactive"
# Comment to post when marking an issue as stale. Set to `false` to disable
markComment: >
This issue has been automatically marked as stale because it has not had
.travis.yml: 86 changed lines

@@ -4,21 +4,21 @@ sudo: false
matrix:
include:
- os: linux
dist: trusty
dist: xenial
sudo: required
go: 1.9.x
go: 1.10.x
script:
- sudo modprobe fuse
- sudo chmod 666 /dev/fuse
- sudo chown root:$USER /etc/fuse.conf
- go run build/ci.go install
- go run build/ci.go test -coverage $TEST_PACKAGES
- sudo modprobe fuse
- sudo chmod 666 /dev/fuse
- sudo chown root:$USER /etc/fuse.conf
- go run build/ci.go install
- go run build/ci.go test -coverage $TEST_PACKAGES

# These are the latest Go versions.
- os: linux
dist: trusty
dist: xenial
sudo: required
go: 1.10.x
go: 1.11.x
script:
- sudo modprobe fuse
- sudo chmod 666 /dev/fuse
@@ -27,18 +27,24 @@ matrix:
- go run build/ci.go test -coverage $TEST_PACKAGES

- os: osx
go: 1.10.x
go: 1.11.x
script:
- echo "Increase the maximum number of open file descriptors on macOS"
- NOFILE=20480
- sudo sysctl -w kern.maxfiles=$NOFILE
- sudo sysctl -w kern.maxfilesperproc=$NOFILE
- sudo launchctl limit maxfiles $NOFILE $NOFILE
- sudo launchctl limit maxfiles
- ulimit -S -n $NOFILE
- ulimit -n
- unset -f cd # workaround for https://github.com/travis-ci/travis-ci/issues/8703
- brew update
- brew cask install osxfuse
- go run build/ci.go install
- go run build/ci.go test -coverage $TEST_PACKAGES

# This builder only tests code linters on latest version of Go
- os: linux
dist: trusty
go: 1.10.x
dist: xenial
go: 1.11.x
env:
- lint
git:
@@ -47,9 +53,10 @@ matrix:
- go run build/ci.go lint

# This builder does the Ubuntu PPA upload
- os: linux
dist: trusty
go: 1.10.x
- if: type = push
os: linux
dist: xenial
go: 1.11.x
env:
- ubuntu-ppa
git:
@@ -61,14 +68,18 @@ matrix:
- debhelper
- dput
- fakeroot
- python-bzrlib
- python-paramiko
script:
- go run build/ci.go debsrc -signer "Go Ethereum Linux Builder <geth-ci@ethereum.org>" -upload ppa:ethereum/ethereum
- echo '|1|7SiYPr9xl3uctzovOTj4gMwAC1M=|t6ReES75Bo/PxlOPJ6/GsGbTrM0= ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA0aKz5UTUndYgIGG7dQBV+HaeuEZJ2xPHo2DS2iSKvUL4xNMSAY4UguNW+pX56nAQmZKIZZ8MaEvSj6zMEDiq6HFfn5JcTlM80UwlnyKe8B8p7Nk06PPQLrnmQt5fh0HmEcZx+JU9TZsfCHPnX7MNz4ELfZE6cFsclClrKim3BHUIGq//t93DllB+h4O9LHjEUsQ1Sr63irDLSutkLJD6RXchjROXkNirlcNVHH/jwLWR5RcYilNX7S5bIkK8NlWPjsn/8Ua5O7I9/YoE97PpO6i73DTGLh5H9JN/SITwCKBkgSDWUt61uPK3Y11Gty7o2lWsBjhBUm2Y38CBsoGmBw==' >> ~/.ssh/known_hosts
- go run build/ci.go debsrc -upload ethereum/ethereum -sftp-user geth-ci -signer "Go Ethereum Linux Builder <geth-ci@ethereum.org>"

# This builder does the Linux Azure uploads
- os: linux
dist: trusty
- if: type = push
os: linux
dist: xenial
sudo: required
go: 1.10.x
go: 1.11.x
env:
- azure-linux
git:
@@ -98,11 +109,12 @@ matrix:
- go run build/ci.go archive -arch arm64 -type tar -signer LINUX_SIGNING_KEY -upload gethstore/builds

# This builder does the Linux Azure MIPS xgo uploads
- os: linux
dist: trusty
- if: type = push
os: linux
dist: xenial
services:
- docker
go: 1.10.x
go: 1.11.x
env:
- azure-linux-mips
git:
@@ -125,8 +137,9 @@ matrix:
- go run build/ci.go archive -arch mips64le -type tar -signer LINUX_SIGNING_KEY -upload gethstore/builds

# This builder does the Android Maven and Azure uploads
- os: linux
dist: trusty
- if: type = push
os: linux
dist: xenial
addons:
apt:
packages:
@@ -146,7 +159,7 @@ matrix:
git:
submodules: false # avoid cloning ethereum/tests
before_install:
- curl https://storage.googleapis.com/golang/go1.10.3.linux-amd64.tar.gz | tar -xz
- curl https://storage.googleapis.com/golang/go1.11.5.linux-amd64.tar.gz | tar -xz
- export PATH=`pwd`/go/bin:$PATH
- export GOROOT=`pwd`/go
- export GOPATH=$HOME/go
@@ -162,8 +175,9 @@ matrix:
- go run build/ci.go aar -signer ANDROID_SIGNING_KEY -deploy https://oss.sonatype.org -upload gethstore/builds

# This builder does the OSX Azure, iOS CocoaPods and iOS Azure uploads
- os: osx
go: 1.10.x
- if: type = push
os: osx
go: 1.11.x
env:
- azure-osx
- azure-ios
@@ -190,19 +204,13 @@ matrix:
- go run build/ci.go xcode -signer IOS_SIGNING_KEY -deploy trunk -upload gethstore/builds

# This builder does the Azure archive purges to avoid accumulating junk
- os: linux
dist: trusty
go: 1.10.x
- if: type = cron
os: linux
dist: xenial
go: 1.11.x
env:
- azure-purge
git:
submodules: false # avoid cloning ethereum/tests
script:
- go run build/ci.go purge -store gethstore/builds -days 14

notifications:
webhooks:
urls:
- https://webhooks.gitter.im/e/e09ccdce1048c5e03445
on_success: change
on_failure: always
@@ -1,5 +1,5 @@
# Build Geth in a stock Go builder container
FROM golang:1.10-alpine as builder
FROM golang:1.11-alpine as builder

RUN apk add --no-cache make gcc musl-dev linux-headers

@@ -1,5 +1,5 @@
# Build Geth in a stock Go builder container
FROM golang:1.10-alpine as builder
FROM golang:1.11-alpine as builder

RUN apk add --no-cache make gcc musl-dev linux-headers
Makefile: 3 changed lines

@@ -57,6 +57,9 @@ devtools:
@type "solc" 2> /dev/null || echo 'Please install solc'
@type "protoc" 2> /dev/null || echo 'Please install protoc'

swarm-devtools:
env GOBIN= go install ./cmd/swarm/mimegen

# Cross Compilation Targets (xgo)

geth-cross: geth-linux geth-darwin geth-windows geth-android geth-ios
README.md: 10 changed lines

@@ -7,7 +7,7 @@ https://camo.githubusercontent.com/915b7be44ada53c290eb157634330494ebe3e30a/6874
)](https://godoc.org/github.com/ethereum/go-ethereum)
[](https://goreportcard.com/report/github.com/ethereum/go-ethereum)
[](https://travis-ci.org/ethereum/go-ethereum)
[](https://gitter.im/ethereum/go-ethereum?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge)
[](https://discord.gg/nthXNEv)

Automated builds are available for stable releases and the unstable master branch.
Binary archives are published at https://geth.ethereum.org/downloads/.
@@ -18,7 +18,7 @@ For prerequisites and detailed build instructions please read the
[Installation Instructions](https://github.com/ethereum/go-ethereum/wiki/Building-Ethereum)
on the wiki.

Building geth requires both a Go (version 1.7 or later) and a C compiler.
Building geth requires both a Go (version 1.9 or later) and a C compiler.
You can install them using your favourite package manager.
Once the dependencies are installed, run

@@ -34,7 +34,7 @@ The go-ethereum project comes with several wrappers/executables found in the `cm

| Command | Description |
|:----------:|-------------|
| **`geth`** | Our main Ethereum CLI client. It is the entry point into the Ethereum network (main-, test- or private net), capable of running as a full node (default) archive node (retaining all historical state) or a light node (retrieving data live). It can be used by other processes as a gateway into the Ethereum network via JSON RPC endpoints exposed on top of HTTP, WebSocket and/or IPC transports. `geth --help` and the [CLI Wiki page](https://github.com/ethereum/go-ethereum/wiki/Command-Line-Options) for command line options. |
| **`geth`** | Our main Ethereum CLI client. It is the entry point into the Ethereum network (main-, test- or private net), capable of running as a full node (default), archive node (retaining all historical state) or a light node (retrieving data live). It can be used by other processes as a gateway into the Ethereum network via JSON RPC endpoints exposed on top of HTTP, WebSocket and/or IPC transports. `geth --help` and the [CLI Wiki page](https://github.com/ethereum/go-ethereum/wiki/Command-Line-Options) for command line options. |
| `abigen` | Source code generator to convert Ethereum contract definitions into easy to use, compile-time type-safe Go packages. It operates on plain [Ethereum contract ABIs](https://github.com/ethereum/wiki/wiki/Ethereum-Contract-ABI) with expanded functionality if the contract bytecode is also available. However it also accepts Solidity source files, making development much more streamlined. Please see our [Native DApps](https://github.com/ethereum/go-ethereum/wiki/Native-DApps:-Go-bindings-to-Ethereum-contracts) wiki page for details. |
| `bootnode` | Stripped down version of our Ethereum client implementation that only takes part in the network node discovery protocol, but does not run any of the higher level application protocols. It can be used as a lightweight bootstrap node to aid in finding peers in private networks. |
| `evm` | Developer utility version of the EVM (Ethereum Virtual Machine) that is capable of running bytecode snippets within a configurable environment and execution mode. Its purpose is to allow isolated, fine-grained debugging of EVM opcodes (e.g. `evm --code 60ff60ff --debug`). |
@@ -69,7 +69,7 @@ This command will:
* Start up Geth's built-in interactive [JavaScript console](https://github.com/ethereum/go-ethereum/wiki/JavaScript-Console),
(via the trailing `console` subcommand) through which you can invoke all official [`web3` methods](https://github.com/ethereum/wiki/wiki/JavaScript-API)
as well as Geth's own [management APIs](https://github.com/ethereum/go-ethereum/wiki/Management-APIs).
This too is optional and if you leave it out you can always attach to an already running Geth instance
This tool is optional and if you leave it out you can always attach to an already running Geth instance
with `geth attach`.

### Full node on the Ethereum test network

@@ -168,7 +168,7 @@ HTTP based JSON-RPC API options:
* `--ipcpath` Filename for IPC socket/pipe within the datadir (explicit paths escape it)

You'll need to use your own programming environments' capabilities (libraries, tools, etc) to connect
via HTTP, WS or IPC to a Geth node configured with the above flags and you'll need to speak [JSON-RPC](http://www.jsonrpc.org/specification)
via HTTP, WS or IPC to a Geth node configured with the above flags and you'll need to speak [JSON-RPC](https://www.jsonrpc.org/specification)
on all transports. You can reuse the same connection for multiple requests!

**Note: Please understand the security implications of opening up an HTTP/WS based transport before
@@ -58,13 +58,11 @@ func (abi ABI) Pack(name string, args ...interface{}) ([]byte, error) {
return nil, err
}
return arguments, nil

}
method, exist := abi.Methods[name]
if !exist {
return nil, fmt.Errorf("method '%s' not found", name)
}

arguments, err := method.Inputs.Pack(args...)
if err != nil {
return nil, err
@@ -82,7 +80,7 @@ func (abi ABI) Unpack(v interface{}, name string, output []byte) (err error) {
// we need to decide whether we're calling a method or an event
if method, ok := abi.Methods[name]; ok {
if len(output)%32 != 0 {
return fmt.Errorf("abi: improperly formatted output")
return fmt.Errorf("abi: improperly formatted output: %s - Bytes: [%+v]", string(output), output)
}
return method.Outputs.Unpack(v, output)
} else if event, ok := abi.Events[name]; ok {
@@ -137,6 +135,9 @@ func (abi *ABI) UnmarshalJSON(data []byte) error {
// MethodById looks up a method by the 4-byte id
// returns nil if none found
func (abi *ABI) MethodById(sigdata []byte) (*Method, error) {
if len(sigdata) < 4 {
return nil, fmt.Errorf("data too short (% bytes) for abi method lookup", len(sigdata))
}
for _, method := range abi.Methods {
if bytes.Equal(method.Id(), sigdata[:4]) {
return &method, nil
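The MethodById hunk above adds a length guard before slicing the 4-byte selector. Below is a minimal sketch of how a caller might exercise it, assuming the accounts/abi package from this branch; the single-method contract JSON and the variable names are illustrative only, not part of the diff.

```go
package main

import (
	"fmt"
	"strings"

	"github.com/ethereum/go-ethereum/accounts/abi"
)

func main() {
	// Hypothetical one-function ABI, used only to demonstrate the lookup.
	const def = `[{"type":"function","name":"transfer","inputs":[{"name":"to","type":"address"},{"name":"value","type":"uint256"}]}]`
	parsed, err := abi.JSON(strings.NewReader(def))
	if err != nil {
		panic(err)
	}

	// In practice the selector is the first four bytes of transaction calldata;
	// here we take it straight from the parsed method.
	selector := parsed.Methods["transfer"].Id()

	method, err := parsed.MethodById(selector[:4])
	if err != nil {
		panic(err)
	}
	fmt.Println("resolved method:", method.Sig())

	// With the new guard, inputs shorter than four bytes return an error
	// instead of panicking on the slice expression.
	if _, err := parsed.MethodById([]byte{0x01}); err != nil {
		fmt.Println("short input rejected:", err)
	}
}
```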
@@ -22,11 +22,10 @@ import (
"fmt"
"log"
"math/big"
"reflect"
"strings"
"testing"

"reflect"

"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
)
@@ -52,11 +51,14 @@ const jsondata2 = `
{ "type" : "function", "name" : "slice", "constant" : false, "inputs" : [ { "name" : "inputs", "type" : "uint32[2]" } ] },
{ "type" : "function", "name" : "slice256", "constant" : false, "inputs" : [ { "name" : "inputs", "type" : "uint256[2]" } ] },
{ "type" : "function", "name" : "sliceAddress", "constant" : false, "inputs" : [ { "name" : "inputs", "type" : "address[]" } ] },
{ "type" : "function", "name" : "sliceMultiAddress", "constant" : false, "inputs" : [ { "name" : "a", "type" : "address[]" }, { "name" : "b", "type" : "address[]" } ] }
{ "type" : "function", "name" : "sliceMultiAddress", "constant" : false, "inputs" : [ { "name" : "a", "type" : "address[]" }, { "name" : "b", "type" : "address[]" } ] },
{ "type" : "function", "name" : "nestedArray", "constant" : false, "inputs" : [ { "name" : "a", "type" : "uint256[2][2]" }, { "name" : "b", "type" : "address[]" } ] },
{ "type" : "function", "name" : "nestedArray2", "constant" : false, "inputs" : [ { "name" : "a", "type" : "uint8[][2]" } ] },
{ "type" : "function", "name" : "nestedSlice", "constant" : false, "inputs" : [ { "name" : "a", "type" : "uint8[][]" } ] }
]`

func TestReader(t *testing.T) {
Uint256, _ := NewType("uint256")
Uint256, _ := NewType("uint256", nil)
exp := ABI{
Methods: map[string]Method{
"balance": {
@@ -177,7 +179,7 @@ func TestTestSlice(t *testing.T) {
}

func TestMethodSignature(t *testing.T) {
String, _ := NewType("string")
String, _ := NewType("string", nil)
m := Method{"foo", false, []Argument{{"bar", String, false}, {"baz", String, false}}, nil}
exp := "foo(string,string)"
if m.Sig() != exp {
@@ -189,12 +191,31 @@ func TestMethodSignature(t *testing.T) {
t.Errorf("expected ids to match %x != %x", m.Id(), idexp)
}

uintt, _ := NewType("uint256")
uintt, _ := NewType("uint256", nil)
m = Method{"foo", false, []Argument{{"bar", uintt, false}}, nil}
exp = "foo(uint256)"
if m.Sig() != exp {
t.Error("signature mismatch", exp, "!=", m.Sig())
}

// Method with tuple arguments
s, _ := NewType("tuple", []ArgumentMarshaling{
{Name: "a", Type: "int256"},
{Name: "b", Type: "int256[]"},
{Name: "c", Type: "tuple[]", Components: []ArgumentMarshaling{
{Name: "x", Type: "int256"},
{Name: "y", Type: "int256"},
}},
{Name: "d", Type: "tuple[2]", Components: []ArgumentMarshaling{
{Name: "x", Type: "int256"},
{Name: "y", Type: "int256"},
}},
})
m = Method{"foo", false, []Argument{{"s", s, false}, {"bar", String, false}}, nil}
exp = "foo((int256,int256[],(int256,int256)[],(int256,int256)[2]),string)"
if m.Sig() != exp {
t.Error("signature mismatch", exp, "!=", m.Sig())
}
}

func TestMultiPack(t *testing.T) {
@@ -564,11 +585,13 @@ func TestBareEvents(t *testing.T) {
const definition = `[
{ "type" : "event", "name" : "balance" },
{ "type" : "event", "name" : "anon", "anonymous" : true},
{ "type" : "event", "name" : "args", "inputs" : [{ "indexed":false, "name":"arg0", "type":"uint256" }, { "indexed":true, "name":"arg1", "type":"address" }] }
{ "type" : "event", "name" : "args", "inputs" : [{ "indexed":false, "name":"arg0", "type":"uint256" }, { "indexed":true, "name":"arg1", "type":"address" }] },
{ "type" : "event", "name" : "tuple", "inputs" : [{ "indexed":false, "name":"t", "type":"tuple", "components":[{"name":"a", "type":"uint256"}] }, { "indexed":true, "name":"arg1", "type":"address" }] }
]`

arg0, _ := NewType("uint256")
arg1, _ := NewType("address")
arg0, _ := NewType("uint256", nil)
arg1, _ := NewType("address", nil)
tuple, _ := NewType("tuple", []ArgumentMarshaling{{Name: "a", Type: "uint256"}})

expectedEvents := map[string]struct {
Anonymous bool
@@ -580,6 +603,10 @@ func TestBareEvents(t *testing.T) {
{Name: "arg0", Type: arg0, Indexed: false},
{Name: "arg1", Type: arg1, Indexed: true},
}},
"tuple": {false, []Argument{
{Name: "t", Type: tuple, Indexed: false},
{Name: "arg1", Type: arg1, Indexed: true},
}},
}

abi, err := JSON(strings.NewReader(definition))
@@ -646,28 +673,24 @@ func TestUnpackEvent(t *testing.T) {
}

type ReceivedEvent struct {
Address common.Address
Amount *big.Int
Memo []byte
Sender common.Address
Amount *big.Int
Memo []byte
}
var ev ReceivedEvent

err = abi.Unpack(&ev, "received", data)
if err != nil {
t.Error(err)
} else {
t.Logf("len(data): %d; received event: %+v", len(data), ev)
}

type ReceivedAddrEvent struct {
Address common.Address
Sender common.Address
}
var receivedAddrEv ReceivedAddrEvent
err = abi.Unpack(&receivedAddrEv, "receivedAddr", data)
if err != nil {
t.Error(err)
} else {
t.Logf("len(data): %d; received event: %+v", len(data), receivedAddrEv)
}
}

@@ -711,5 +734,14 @@ func TestABI_MethodById(t *testing.T) {
t.Errorf("Method %v (id %v) not 'findable' by id in ABI", name, common.ToHex(m.Id()))
}
}

// Also test empty
if _, err := abi.MethodById([]byte{0x00}); err == nil {
t.Errorf("Expected error, too short to decode data")
}
if _, err := abi.MethodById([]byte{}); err == nil {
t.Errorf("Expected error, too short to decode data")
}
if _, err := abi.MethodById(nil); err == nil {
t.Errorf("Expected error, nil is short to decode data")
}
}
@@ -33,24 +33,27 @@ type Argument struct {

type Arguments []Argument

type ArgumentMarshaling struct {
Name string
Type string
Components []ArgumentMarshaling
Indexed bool
}

// UnmarshalJSON implements json.Unmarshaler interface
func (argument *Argument) UnmarshalJSON(data []byte) error {
var extarg struct {
Name string
Type string
Indexed bool
}
err := json.Unmarshal(data, &extarg)
var arg ArgumentMarshaling
err := json.Unmarshal(data, &arg)
if err != nil {
return fmt.Errorf("argument json err: %v", err)
}

argument.Type, err = NewType(extarg.Type)
argument.Type, err = NewType(arg.Type, arg.Components)
if err != nil {
return err
}
argument.Name = extarg.Name
argument.Indexed = extarg.Indexed
argument.Name = arg.Name
argument.Indexed = arg.Indexed

return nil
}
@@ -85,7 +88,6 @@ func (arguments Arguments) isTuple() bool {

// Unpack performs the operation hexdata -> Go format
func (arguments Arguments) Unpack(v interface{}, data []byte) error {

// make sure the passed value is arguments pointer
if reflect.Ptr != reflect.ValueOf(v).Kind() {
return fmt.Errorf("abi: Unpack(non-pointer %T)", v)
@@ -97,52 +99,134 @@ func (arguments Arguments) Unpack(v interface{}, data []byte) error {
if arguments.isTuple() {
return arguments.unpackTuple(v, marshalledValues)
}
return arguments.unpackAtomic(v, marshalledValues)
return arguments.unpackAtomic(v, marshalledValues[0])
}

func (arguments Arguments) unpackTuple(v interface{}, marshalledValues []interface{}) error {
// unpack sets the unmarshalled value to go format.
// Note the dst here must be settable.
func unpack(t *Type, dst interface{}, src interface{}) error {
var (
dstVal = reflect.ValueOf(dst).Elem()
srcVal = reflect.ValueOf(src)
)

if t.T != TupleTy && !((t.T == SliceTy || t.T == ArrayTy) && t.Elem.T == TupleTy) {
return set(dstVal, srcVal)
}

switch t.T {
case TupleTy:
if dstVal.Kind() != reflect.Struct {
return fmt.Errorf("abi: invalid dst value for unpack, want struct, got %s", dstVal.Kind())
}
fieldmap, err := mapArgNamesToStructFields(t.TupleRawNames, dstVal)
if err != nil {
return err
}
for i, elem := range t.TupleElems {
fname := fieldmap[t.TupleRawNames[i]]
field := dstVal.FieldByName(fname)
if !field.IsValid() {
return fmt.Errorf("abi: field %s can't found in the given value", t.TupleRawNames[i])
}
if err := unpack(elem, field.Addr().Interface(), srcVal.Field(i).Interface()); err != nil {
return err
}
}
return nil
case SliceTy:
if dstVal.Kind() != reflect.Slice {
return fmt.Errorf("abi: invalid dst value for unpack, want slice, got %s", dstVal.Kind())
}
slice := reflect.MakeSlice(dstVal.Type(), srcVal.Len(), srcVal.Len())
for i := 0; i < slice.Len(); i++ {
if err := unpack(t.Elem, slice.Index(i).Addr().Interface(), srcVal.Index(i).Interface()); err != nil {
return err
}
}
dstVal.Set(slice)
case ArrayTy:
if dstVal.Kind() != reflect.Array {
return fmt.Errorf("abi: invalid dst value for unpack, want array, got %s", dstVal.Kind())
}
array := reflect.New(dstVal.Type()).Elem()
for i := 0; i < array.Len(); i++ {
if err := unpack(t.Elem, array.Index(i).Addr().Interface(), srcVal.Index(i).Interface()); err != nil {
return err
}
}
dstVal.Set(array)
}
return nil
}

// unpackAtomic unpacks ( hexdata -> go ) a single value
func (arguments Arguments) unpackAtomic(v interface{}, marshalledValues interface{}) error {
if arguments.LengthNonIndexed() == 0 {
return nil
}
argument := arguments.NonIndexed()[0]
elem := reflect.ValueOf(v).Elem()

if elem.Kind() == reflect.Struct {
fieldmap, err := mapArgNamesToStructFields([]string{argument.Name}, elem)
if err != nil {
return err
}
field := elem.FieldByName(fieldmap[argument.Name])
if !field.IsValid() {
return fmt.Errorf("abi: field %s can't be found in the given value", argument.Name)
}
return unpack(&argument.Type, field.Addr().Interface(), marshalledValues)
}
return unpack(&argument.Type, elem.Addr().Interface(), marshalledValues)
}

// unpackTuple unpacks ( hexdata -> go ) a batch of values.
func (arguments Arguments) unpackTuple(v interface{}, marshalledValues []interface{}) error {
var (
value = reflect.ValueOf(v).Elem()
typ = value.Type()
kind = value.Kind()
)

if err := requireUnpackKind(value, typ, kind, arguments); err != nil {
return err
}

// If the interface is a struct, get of abi->struct_field mapping

var abi2struct map[string]string
if kind == reflect.Struct {
var err error
abi2struct, err = mapAbiToStructFields(arguments, value)
var (
argNames []string
err error
)
for _, arg := range arguments.NonIndexed() {
argNames = append(argNames, arg.Name)
}
abi2struct, err = mapArgNamesToStructFields(argNames, value)
if err != nil {
return err
}
}
for i, arg := range arguments.NonIndexed() {

reflectValue := reflect.ValueOf(marshalledValues[i])

switch kind {
case reflect.Struct:
if structField, ok := abi2struct[arg.Name]; ok {
if err := set(value.FieldByName(structField), reflectValue, arg); err != nil {
return err
}
field := value.FieldByName(abi2struct[arg.Name])
if !field.IsValid() {
return fmt.Errorf("abi: field %s can't be found in the given value", arg.Name)
}
if err := unpack(&arg.Type, field.Addr().Interface(), marshalledValues[i]); err != nil {
return err
}
case reflect.Slice, reflect.Array:
if value.Len() < i {
return fmt.Errorf("abi: insufficient number of arguments for unpack, want %d, got %d", len(arguments), value.Len())
}
v := value.Index(i)
if err := requireAssignable(v, reflectValue); err != nil {
if err := requireAssignable(v, reflect.ValueOf(marshalledValues[i])); err != nil {
return err
}

if err := set(v.Elem(), reflectValue, arg); err != nil {
if err := unpack(&arg.Type, v.Addr().Interface(), marshalledValues[i]); err != nil {
return err
}
default:
@@ -150,48 +234,7 @@ func (arguments Arguments) unpackTuple(v interface{}, marshalledValues []interfa
}
}
return nil
}

// unpackAtomic unpacks ( hexdata -> go ) a single value
func (arguments Arguments) unpackAtomic(v interface{}, marshalledValues []interface{}) error {
if len(marshalledValues) != 1 {
return fmt.Errorf("abi: wrong length, expected single value, got %d", len(marshalledValues))
}

elem := reflect.ValueOf(v).Elem()
kind := elem.Kind()
reflectValue := reflect.ValueOf(marshalledValues[0])

var abi2struct map[string]string
if kind == reflect.Struct {
var err error
if abi2struct, err = mapAbiToStructFields(arguments, elem); err != nil {
return err
}
arg := arguments.NonIndexed()[0]
if structField, ok := abi2struct[arg.Name]; ok {
return set(elem.FieldByName(structField), reflectValue, arg)
}
return nil
}

return set(elem, reflectValue, arguments.NonIndexed()[0])

}

// Computes the full size of an array;
// i.e. counting nested arrays, which count towards size for unpacking.
func getArraySize(arr *Type) int {
size := arr.Size
// Arrays can be nested, with each element being the same size
arr = arr.Elem
for arr.T == ArrayTy {
// Keep multiplying by elem.Size while the elem is an array.
size *= arr.Size
arr = arr.Elem
}
// Now we have the full array size, including its children.
return size
}

// UnpackValues can be used to unpack ABI-encoded hexdata according to the ABI-specification,
@@ -202,7 +245,7 @@ func (arguments Arguments) UnpackValues(data []byte) ([]interface{}, error) {
virtualArgs := 0
for index, arg := range arguments.NonIndexed() {
marshalledValue, err := toGoType((index+virtualArgs)*32, arg.Type, data)
if arg.Type.T == ArrayTy {
if arg.Type.T == ArrayTy && !isDynamicType(arg.Type) {
// If we have a static array, like [3]uint256, these are coded as
// just like uint256,uint256,uint256.
// This means that we need to add two 'virtual' arguments when
@@ -213,7 +256,11 @@ func (arguments Arguments) UnpackValues(data []byte) ([]interface{}, error) {
//
// Calculate the full array size to get the correct offset for the next argument.
// Decrement it by 1, as the normal index increment is still applied.
virtualArgs += getArraySize(&arg.Type) - 1
virtualArgs += getTypeSize(arg.Type)/32 - 1
} else if arg.Type.T == TupleTy && !isDynamicType(arg.Type) {
// If we have a static tuple, like (uint256, bool, uint256), these are
// coded as just like uint256,bool,uint256
virtualArgs += getTypeSize(arg.Type)/32 - 1
}
if err != nil {
return nil, err
@@ -243,11 +290,7 @@ func (arguments Arguments) Pack(args ...interface{}) ([]byte, error) {
// input offset is the bytes offset for packed output
inputOffset := 0
for _, abiArg := range abiArgs {
if abiArg.Type.T == ArrayTy {
inputOffset += 32 * abiArg.Type.Size
} else {
inputOffset += 32
}
inputOffset += getTypeSize(abiArg.Type)
}
var ret []byte
for i, a := range args {
@@ -257,14 +300,13 @@ func (arguments Arguments) Pack(args ...interface{}) ([]byte, error) {
if err != nil {
return nil, err
}
// check for a slice type (string, bytes, slice)
if input.Type.requiresLengthPrefix() {
// calculate the offset
offset := inputOffset + len(variableInput)
// check for dynamic types
if isDynamicType(input.Type) {
// set the offset
ret = append(ret, packNum(reflect.ValueOf(offset))...)
// Append the packed output to the variable input. The variable input
// will be appended at the end of the input.
ret = append(ret, packNum(reflect.ValueOf(inputOffset))...)
// calculate next offset
inputOffset += len(packed)
// append to variable input
variableInput = append(variableInput, packed...)
} else {
// append the packed value to the input
@@ -277,14 +319,13 @@ func (arguments Arguments) Pack(args ...interface{}) ([]byte, error) {
return ret, nil
}

// capitalise makes the first character of a string upper case, also removing any
// prefixing underscores from the variable names.
func capitalise(input string) string {
for len(input) > 0 && input[0] == '_' {
input = input[1:]
// ToCamelCase converts an under-score string to a camel-case string
func ToCamelCase(input string) string {
parts := strings.Split(input, "_")
for i, s := range parts {
if len(s) > 0 {
parts[i] = strings.ToUpper(s[:1]) + s[1:]
}
}
if len(input) == 0 {
return ""
}
return strings.ToUpper(input[:1]) + input[1:]
return strings.Join(parts, "")
}
@@ -65,11 +65,11 @@ type SimulatedBackend struct {

// NewSimulatedBackend creates a new binding backend using a simulated blockchain
// for testing purposes.
func NewSimulatedBackend(alloc core.GenesisAlloc) *SimulatedBackend {
func NewSimulatedBackend(alloc core.GenesisAlloc, gasLimit uint64) *SimulatedBackend {
database := ethdb.NewMemDatabase()
genesis := core.Genesis{Config: params.AllEthashProtocolChanges, Alloc: alloc}
genesis := core.Genesis{Config: params.AllEthashProtocolChanges, GasLimit: gasLimit, Alloc: alloc}
genesis.MustCommit(database)
blockchain, _ := core.NewBlockChain(database, nil, genesis.Config, ethash.NewFaker(), vm.Config{})
blockchain, _ := core.NewBlockChain(database, nil, genesis.Config, ethash.NewFaker(), vm.Config{}, nil)

backend := &SimulatedBackend{
database: database,
@@ -208,7 +208,7 @@ func (b *SimulatedBackend) PendingNonceAt(ctx context.Context, account common.Ad
}

// SuggestGasPrice implements ContractTransactor.SuggestGasPrice. Since the simulated
// chain doens't have miners, we just return a gas price of 1 for any call.
// chain doesn't have miners, we just return a gas price of 1 for any call.
func (b *SimulatedBackend) SuggestGasPrice(ctx context.Context) (*big.Int, error) {
return big.NewInt(1), nil
}
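With the change above, NewSimulatedBackend now takes an explicit genesis gas limit. A minimal sketch of constructing a backend with a funded throwaway key under the new signature; the 8,000,000 gas limit and the 1 ether balance are illustrative assumptions, not values from the diff.

```go
package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	// Generate a throwaway key and pre-fund its address in the genesis alloc.
	key, _ := crypto.GenerateKey()
	addr := crypto.PubkeyToAddress(key.PublicKey)

	backend := backends.NewSimulatedBackend(
		core.GenesisAlloc{
			addr: {Balance: big.NewInt(1000000000000000000)}, // 1 ether
		},
		8000000, // block gas limit, now passed explicitly
	)

	// Commit an empty block so the genesis state becomes the latest block.
	backend.Commit()
	fmt.Println("simulated backend ready for", addr.Hex())
}
```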
@@ -36,10 +36,10 @@ type SignerFn func(types.Signer, common.Address, *types.Transaction) (*types.Tra

// CallOpts is the collection of options to fine tune a contract call request.
type CallOpts struct {
Pending bool // Whether to operate on the pending state or the last known one
From common.Address // Optional the sender address, otherwise the first account is used

Context context.Context // Network context to support cancellation and timeouts (nil = no timeout)
Pending bool // Whether to operate on the pending state or the last known one
From common.Address // Optional the sender address, otherwise the first account is used
BlockNumber *big.Int // Optional the block number on which the call should be performed
Context context.Context // Network context to support cancellation and timeouts (nil = no timeout)
}

// TransactOpts is the collection of authorization data required to create a
@@ -148,10 +148,10 @@ func (c *BoundContract) Call(opts *CallOpts, result interface{}, method string,
}
}
} else {
output, err = c.caller.CallContract(ctx, msg, nil)
output, err = c.caller.CallContract(ctx, msg, opts.BlockNumber)
if err == nil && len(output) == 0 {
// Make sure we have a contract to operate on, and bail out otherwise.
if code, err = c.caller.CodeAt(ctx, c.address, nil); err != nil {
if code, err = c.caller.CodeAt(ctx, c.address, opts.BlockNumber); err != nil {
return err
} else if len(code) == 0 {
return ErrNoCode
accounts/abi/bind/base_test.go: 64 changed lines (new file)

@@ -0,0 +1,64 @@
package bind_test

import (
"context"
"math/big"
"testing"

ethereum "github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/accounts/abi"
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/common"
)

type mockCaller struct {
codeAtBlockNumber *big.Int
callContractBlockNumber *big.Int
}

func (mc *mockCaller) CodeAt(ctx context.Context, contract common.Address, blockNumber *big.Int) ([]byte, error) {
mc.codeAtBlockNumber = blockNumber
return []byte{1, 2, 3}, nil
}

func (mc *mockCaller) CallContract(ctx context.Context, call ethereum.CallMsg, blockNumber *big.Int) ([]byte, error) {
mc.callContractBlockNumber = blockNumber
return nil, nil
}

func TestPassingBlockNumber(t *testing.T) {

mc := &mockCaller{}

bc := bind.NewBoundContract(common.HexToAddress("0x0"), abi.ABI{
Methods: map[string]abi.Method{
"something": {
Name: "something",
Outputs: abi.Arguments{},
},
},
}, mc, nil, nil)
var ret string

blockNumber := big.NewInt(42)

bc.Call(&bind.CallOpts{BlockNumber: blockNumber}, &ret, "something")

if mc.callContractBlockNumber != blockNumber {
t.Fatalf("CallContract() was not passed the block number")
}

if mc.codeAtBlockNumber != blockNumber {
t.Fatalf("CodeAt() was not passed the block number")
}

bc.Call(&bind.CallOpts{}, &ret, "something")

if mc.callContractBlockNumber != nil {
t.Fatalf("CallContract() was passed a block number when it should not have been")
}

if mc.codeAtBlockNumber != nil {
t.Fatalf("CodeAt() was passed a block number when it should not have been")
}
}
@@ -23,13 +23,13 @@ package bind
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"go/format"
|
||||
"regexp"
|
||||
"strings"
|
||||
"text/template"
|
||||
"unicode"
|
||||
|
||||
"github.com/ethereum/go-ethereum/accounts/abi"
|
||||
"golang.org/x/tools/imports"
|
||||
)
|
||||
|
||||
// Lang is a target programming language selector to generate bindings for.
|
||||
@@ -145,9 +145,9 @@ func Bind(types []string, abis []string, bytecodes []string, pkg string, lang La
|
||||
if err := tmpl.Execute(buffer, data); err != nil {
|
||||
return "", err
|
||||
}
|
||||
// For Go bindings pass the code through goimports to clean it up and double check
|
||||
// For Go bindings pass the code through gofmt to clean it up
|
||||
if lang == LangGo {
|
||||
code, err := imports.Process(".", buffer.Bytes(), nil)
|
||||
code, err := format.Source(buffer.Bytes())
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("%v\n%s", err, buffer)
|
||||
}
|
||||
@@ -207,7 +207,7 @@ func bindTypeGo(kind abi.Type) string {
|
||||
|
||||
// The inner function of bindTypeGo, this finds the inner type of stringKind.
|
||||
// (Or just the type itself if it is not an array or slice)
|
||||
// The length of the matched part is returned, with the the translated type.
|
||||
// The length of the matched part is returned, with the translated type.
|
||||
func bindUnnestedTypeGo(stringKind string) (int, string) {
|
||||
|
||||
switch {
|
||||
@@ -255,7 +255,7 @@ func bindTypeJava(kind abi.Type) string {
|
||||
|
||||
// The inner function of bindTypeJava, this finds the inner type of stringKind.
|
||||
// (Or just the type itself if it is not an array or slice)
|
||||
// The length of the matched part is returned, with the the translated type.
|
||||
// The length of the matched part is returned, with the translated type.
|
||||
func bindUnnestedTypeJava(stringKind string) (int, string) {
|
||||
|
||||
switch {
|
||||
@@ -381,54 +381,23 @@ func namedTypeJava(javaKind string, solKind abi.Type) string {
// methodNormalizer is a name transformer that modifies Solidity method names to
// conform to target language naming conventions.
var methodNormalizer = map[Lang]func(string) string{
    LangGo:   capitalise,
    LangGo:   abi.ToCamelCase,
    LangJava: decapitalise,
}

// capitalise makes a camel-case string which starts with an upper case character.
func capitalise(input string) string {
    for len(input) > 0 && input[0] == '_' {
        input = input[1:]
    }
    if len(input) == 0 {
        return ""
    }
    return toCamelCase(strings.ToUpper(input[:1]) + input[1:])
    return abi.ToCamelCase(input)
}

// decapitalise makes a camel-case string which starts with a lower case character.
func decapitalise(input string) string {
    for len(input) > 0 && input[0] == '_' {
        input = input[1:]
    }
    if len(input) == 0 {
        return ""
        return input
    }
    return toCamelCase(strings.ToLower(input[:1]) + input[1:])
}

// toCamelCase converts an under-score string to a camel-case string
func toCamelCase(input string) string {
    toupper := false

    result := ""
    for k, v := range input {
        switch {
        case k == 0:
            result = strings.ToUpper(string(input[0]))

        case toupper:
            result += strings.ToUpper(string(v))
            toupper = false

        case v == '_':
            toupper = true

        default:
            result += string(v)
        }
    }
    return result
    goForm := abi.ToCamelCase(input)
    return strings.ToLower(goForm[:1]) + goForm[1:]
}

// structured checks whether a list of ABI data types has enough information to
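Both normalizers now lean on abi.ToCamelCase. A small sketch of the expected behaviour (the example input is illustrative, and the printed results reflect a reading of ToCamelCase's underscore splitting rather than output verified here):

package main

import (
    "fmt"
    "strings"

    "github.com/ethereum/go-ethereum/accounts/abi"
)

func main() {
    // Go target: underscored Solidity names become exported camel case.
    fmt.Println(abi.ToCamelCase("_balance_of")) // expected: "BalanceOf"

    // Java target: same camel casing, but the first rune is lowered,
    // mirroring the new decapitalise implementation above.
    goForm := abi.ToCamelCase("_balance_of")
    fmt.Println(strings.ToLower(goForm[:1]) + goForm[1:]) // expected: "balanceOf"
}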
@@ -64,6 +64,30 @@ const tmplSourceGo = `

package {{.Package}}

import (
    "math/big"
    "strings"

    ethereum "github.com/ethereum/go-ethereum"
    "github.com/ethereum/go-ethereum/accounts/abi"
    "github.com/ethereum/go-ethereum/accounts/abi/bind"
    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/core/types"
    "github.com/ethereum/go-ethereum/event"
)

// Reference imports to suppress errors if they are not otherwise used.
var (
    _ = big.NewInt
    _ = strings.NewReader
    _ = ethereum.NotFound
    _ = abi.U256
    _ = bind.Bind
    _ = common.Big1
    _ = types.BloomLookup
    _ = event.NewSubscription
)

{{range $contract := .Contracts}}
// {{.Type}}ABI is the input ABI used to generate the binding from.
const {{.Type}}ABI = "{{.InputABI}}"
@@ -53,9 +53,11 @@ var waitDeployedTests = map[string]struct {

func TestWaitDeployed(t *testing.T) {
    for name, test := range waitDeployedTests {
        backend := backends.NewSimulatedBackend(core.GenesisAlloc{
            crypto.PubkeyToAddress(testKey.PublicKey): {Balance: big.NewInt(10000000000)},
        })
        backend := backends.NewSimulatedBackend(
            core.GenesisAlloc{
                crypto.PubkeyToAddress(testKey.PublicKey): {Balance: big.NewInt(10000000000)},
            }, 10000000,
        )

        // Create the transaction.
        tx := types.NewContractCreation(0, big.NewInt(0), test.gas, big.NewInt(1), common.FromHex(test.code))
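NewSimulatedBackend now takes the block gas limit as an explicit second argument. A minimal construction sketch (the key generation and balance are arbitrary test values, not part of this change):

package main

import (
    "math/big"

    "github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
    "github.com/ethereum/go-ethereum/core"
    "github.com/ethereum/go-ethereum/crypto"
)

func newTestBackend() *backends.SimulatedBackend {
    key, _ := crypto.GenerateKey()
    alloc := core.GenesisAlloc{
        crypto.PubkeyToAddress(key.PublicKey): {Balance: big.NewInt(10000000000)},
    }
    // The second argument is the per-block gas limit, previously implicit.
    return backends.NewSimulatedBackend(alloc, 10000000)
}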
@@ -36,12 +36,12 @@ type Event struct {
func (e Event) String() string {
    inputs := make([]string, len(e.Inputs))
    for i, input := range e.Inputs {
        inputs[i] = fmt.Sprintf("%v %v", input.Name, input.Type)
        inputs[i] = fmt.Sprintf("%v %v", input.Type, input.Name)
        if input.Indexed {
            inputs[i] = fmt.Sprintf("%v indexed %v", input.Name, input.Type)
            inputs[i] = fmt.Sprintf("%v indexed %v", input.Type, input.Name)
        }
    }
    return fmt.Sprintf("e %v(%v)", e.Name, strings.Join(inputs, ", "))
    return fmt.Sprintf("event %v(%v)", e.Name, strings.Join(inputs, ", "))
}

// Id returns the canonical representation of the event's signature used by the
@@ -87,12 +87,12 @@ func TestEventId(t *testing.T) {
    }{
        {
            definition: `[
            { "type" : "event", "name" : "balance", "inputs": [{ "name" : "in", "type": "uint256" }] },
            { "type" : "event", "name" : "check", "inputs": [{ "name" : "t", "type": "address" }, { "name": "b", "type": "uint256" }] }
            { "type" : "event", "name" : "Balance", "inputs": [{ "name" : "in", "type": "uint256" }] },
            { "type" : "event", "name" : "Check", "inputs": [{ "name" : "t", "type": "address" }, { "name": "b", "type": "uint256" }] }
            ]`,
            expectations: map[string]common.Hash{
                "balance": crypto.Keccak256Hash([]byte("balance(uint256)")),
                "check":   crypto.Keccak256Hash([]byte("check(address,uint256)")),
                "Balance": crypto.Keccak256Hash([]byte("Balance(uint256)")),
                "Check":   crypto.Keccak256Hash([]byte("Check(address,uint256)")),
            },
        },
    }
@@ -111,6 +111,39 @@ func TestEventId(t *testing.T) {
    }
}

func TestEventString(t *testing.T) {
    var table = []struct {
        definition   string
        expectations map[string]string
    }{
        {
            definition: `[
            { "type" : "event", "name" : "Balance", "inputs": [{ "name" : "in", "type": "uint256" }] },
            { "type" : "event", "name" : "Check", "inputs": [{ "name" : "t", "type": "address" }, { "name": "b", "type": "uint256" }] },
            { "type" : "event", "name" : "Transfer", "inputs": [{ "name": "from", "type": "address", "indexed": true }, { "name": "to", "type": "address", "indexed": true }, { "name": "value", "type": "uint256" }] }
            ]`,
            expectations: map[string]string{
                "Balance":  "event Balance(uint256 in)",
                "Check":    "event Check(address t, uint256 b)",
                "Transfer": "event Transfer(address indexed from, address indexed to, uint256 value)",
            },
        },
    }

    for _, test := range table {
        abi, err := JSON(strings.NewReader(test.definition))
        if err != nil {
            t.Fatal(err)
        }

        for name, event := range abi.Events {
            if event.String() != test.expectations[name] {
                t.Errorf("expected string to be %s, got %s", test.expectations[name], event.String())
            }
        }
    }
}

// TestEventMultiValueWithArrayUnpack verifies that array fields will be counted after parsing array.
func TestEventMultiValueWithArrayUnpack(t *testing.T) {
    definition := `[{"name": "test", "type": "event", "inputs": [{"indexed": false, "name":"value1", "type":"uint8[2]"},{"indexed": false, "name":"value2", "type":"uint8"}]}]`
@@ -56,14 +56,14 @@ func (method Method) Sig() string {
func (method Method) String() string {
    inputs := make([]string, len(method.Inputs))
    for i, input := range method.Inputs {
        inputs[i] = fmt.Sprintf("%v %v", input.Name, input.Type)
        inputs[i] = fmt.Sprintf("%v %v", input.Type, input.Name)
    }
    outputs := make([]string, len(method.Outputs))
    for i, output := range method.Outputs {
        outputs[i] = output.Type.String()
        if len(output.Name) > 0 {
            outputs[i] = fmt.Sprintf("%v ", output.Name)
            outputs[i] += fmt.Sprintf(" %v", output.Name)
        }
        outputs[i] += output.Type.String()
    }
    constant := ""
    if method.Const {
accounts/abi/method_test.go (new file, 61 lines added)
@@ -0,0 +1,61 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package abi

import (
    "strings"
    "testing"
)

const methoddata = `
[
    { "type" : "function", "name" : "balance", "constant" : true },
    { "type" : "function", "name" : "send", "constant" : false, "inputs" : [ { "name" : "amount", "type" : "uint256" } ] },
    { "type" : "function", "name" : "transfer", "constant" : false, "inputs" : [ { "name" : "from", "type" : "address" }, { "name" : "to", "type" : "address" }, { "name" : "value", "type" : "uint256" } ], "outputs" : [ { "name" : "success", "type" : "bool" } ] }
]`

func TestMethodString(t *testing.T) {
    var table = []struct {
        method      string
        expectation string
    }{
        {
            method:      "balance",
            expectation: "function balance() constant returns()",
        },
        {
            method:      "send",
            expectation: "function send(uint256 amount) returns()",
        },
        {
            method:      "transfer",
            expectation: "function transfer(address from, address to, uint256 value) returns(bool success)",
        },
    }

    abi, err := JSON(strings.NewReader(methoddata))
    if err != nil {
        t.Fatal(err)
    }

    for _, test := range table {
        got := abi.Methods[test.method].String()
        if got != test.expectation {
            t.Errorf("expected string to be %s, got %s", test.expectation, got)
        }
    }
}
@@ -29,314 +29,601 @@ import (
|
||||
|
||||
func TestPack(t *testing.T) {
|
||||
for i, test := range []struct {
|
||||
typ string
|
||||
|
||||
input interface{}
|
||||
output []byte
|
||||
typ string
|
||||
components []ArgumentMarshaling
|
||||
input interface{}
|
||||
output []byte
|
||||
}{
|
||||
{
|
||||
"uint8",
|
||||
nil,
|
||||
uint8(2),
|
||||
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002"),
|
||||
},
|
||||
{
|
||||
"uint8[]",
|
||||
nil,
|
||||
[]uint8{1, 2},
|
||||
common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002"),
|
||||
},
|
||||
{
|
||||
"uint16",
|
||||
nil,
|
||||
uint16(2),
|
||||
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002"),
|
||||
},
|
||||
{
|
||||
"uint16[]",
|
||||
nil,
|
||||
[]uint16{1, 2},
|
||||
common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002"),
|
||||
},
|
||||
{
|
||||
"uint32",
|
||||
nil,
|
||||
uint32(2),
|
||||
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002"),
|
||||
},
|
||||
{
|
||||
"uint32[]",
|
||||
nil,
|
||||
[]uint32{1, 2},
|
||||
common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002"),
|
||||
},
|
||||
{
|
||||
"uint64",
|
||||
nil,
|
||||
uint64(2),
|
||||
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002"),
|
||||
},
|
||||
{
|
||||
"uint64[]",
|
||||
nil,
|
||||
[]uint64{1, 2},
|
||||
common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002"),
|
||||
},
|
||||
{
|
||||
"uint256",
|
||||
nil,
|
||||
big.NewInt(2),
|
||||
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002"),
|
||||
},
|
||||
{
|
||||
"uint256[]",
|
||||
nil,
|
||||
[]*big.Int{big.NewInt(1), big.NewInt(2)},
|
||||
common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002"),
|
||||
},
|
||||
{
|
||||
"int8",
|
||||
nil,
|
||||
int8(2),
|
||||
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002"),
|
||||
},
|
||||
{
|
||||
"int8[]",
|
||||
nil,
|
||||
[]int8{1, 2},
|
||||
common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002"),
|
||||
},
|
||||
{
|
||||
"int16",
|
||||
nil,
|
||||
int16(2),
|
||||
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002"),
|
||||
},
|
||||
{
|
||||
"int16[]",
|
||||
nil,
|
||||
[]int16{1, 2},
|
||||
common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002"),
|
||||
},
|
||||
{
|
||||
"int32",
|
||||
nil,
|
||||
int32(2),
|
||||
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002"),
|
||||
},
|
||||
{
|
||||
"int32[]",
|
||||
nil,
|
||||
[]int32{1, 2},
|
||||
common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002"),
|
||||
},
|
||||
{
|
||||
"int64",
|
||||
nil,
|
||||
int64(2),
|
||||
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002"),
|
||||
},
|
||||
{
|
||||
"int64[]",
|
||||
nil,
|
||||
[]int64{1, 2},
|
||||
common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002"),
|
||||
},
|
||||
{
|
||||
"int256",
|
||||
nil,
|
||||
big.NewInt(2),
|
||||
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002"),
|
||||
},
|
||||
{
|
||||
"int256[]",
|
||||
nil,
|
||||
[]*big.Int{big.NewInt(1), big.NewInt(2)},
|
||||
common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002"),
|
||||
},
|
||||
{
|
||||
"bytes1",
|
||||
nil,
|
||||
[1]byte{1},
|
||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
||||
},
|
||||
{
|
||||
"bytes2",
|
||||
nil,
|
||||
[2]byte{1},
|
||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
||||
},
|
||||
{
|
||||
"bytes3",
|
||||
nil,
|
||||
[3]byte{1},
|
||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
||||
},
|
||||
{
|
||||
"bytes4",
|
||||
nil,
|
||||
[4]byte{1},
|
||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
||||
},
|
||||
{
|
||||
"bytes5",
|
||||
nil,
|
||||
[5]byte{1},
|
||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
||||
},
|
||||
{
|
||||
"bytes6",
|
||||
nil,
|
||||
[6]byte{1},
|
||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
||||
},
|
||||
{
|
||||
"bytes7",
|
||||
nil,
|
||||
[7]byte{1},
|
||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
||||
},
|
||||
{
|
||||
"bytes8",
|
||||
nil,
|
||||
[8]byte{1},
|
||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
||||
},
|
||||
{
|
||||
"bytes9",
|
||||
nil,
|
||||
[9]byte{1},
|
||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
||||
},
|
||||
{
|
||||
"bytes10",
|
||||
nil,
|
||||
[10]byte{1},
|
||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
||||
},
|
||||
{
|
||||
"bytes11",
|
||||
nil,
|
||||
[11]byte{1},
|
||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
||||
},
|
||||
{
|
||||
"bytes12",
|
||||
nil,
|
||||
[12]byte{1},
|
||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
||||
},
|
||||
{
|
||||
"bytes13",
|
||||
nil,
|
||||
[13]byte{1},
|
||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
||||
},
|
||||
{
|
||||
"bytes14",
|
||||
nil,
|
||||
[14]byte{1},
|
||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
||||
},
|
||||
{
|
||||
"bytes15",
|
||||
nil,
|
||||
[15]byte{1},
|
||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
||||
},
|
||||
{
|
||||
"bytes16",
|
||||
nil,
|
||||
[16]byte{1},
|
||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
||||
},
|
||||
{
|
||||
"bytes17",
|
||||
nil,
|
||||
[17]byte{1},
|
||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
||||
},
|
||||
{
|
||||
"bytes18",
|
||||
nil,
|
||||
[18]byte{1},
|
||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
||||
},
|
||||
{
|
||||
"bytes19",
|
||||
nil,
|
||||
[19]byte{1},
|
||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
||||
},
|
||||
{
|
||||
"bytes20",
|
||||
nil,
|
||||
[20]byte{1},
|
||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
||||
},
|
||||
{
|
||||
"bytes21",
|
||||
nil,
|
||||
[21]byte{1},
|
||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
||||
},
|
||||
{
|
||||
"bytes22",
|
||||
nil,
|
||||
[22]byte{1},
|
||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
||||
},
|
||||
{
|
||||
"bytes23",
|
||||
nil,
|
||||
[23]byte{1},
|
||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
||||
},
|
||||
{
|
||||
"bytes24",
|
||||
[24]byte{1},
|
||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
||||
},
|
||||
{
|
||||
"bytes24",
|
||||
nil,
|
||||
[24]byte{1},
|
||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
||||
},
|
||||
{
|
||||
"bytes25",
|
||||
nil,
|
||||
[25]byte{1},
|
||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
||||
},
|
||||
{
|
||||
"bytes26",
|
||||
nil,
|
||||
[26]byte{1},
|
||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
||||
},
|
||||
{
|
||||
"bytes27",
|
||||
nil,
|
||||
[27]byte{1},
|
||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
||||
},
|
||||
{
|
||||
"bytes28",
|
||||
nil,
|
||||
[28]byte{1},
|
||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
||||
},
|
||||
{
|
||||
"bytes29",
|
||||
nil,
|
||||
[29]byte{1},
|
||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
||||
},
|
||||
{
|
||||
"bytes30",
|
||||
nil,
|
||||
[30]byte{1},
|
||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
||||
},
|
||||
{
|
||||
"bytes31",
|
||||
nil,
|
||||
[31]byte{1},
|
||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
||||
},
|
||||
{
|
||||
"bytes32",
|
||||
nil,
|
||||
[32]byte{1},
|
||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
||||
},
|
||||
{
|
||||
"uint32[2][3][4]",
|
||||
nil,
|
||||
[4][3][2]uint32{{{1, 2}, {3, 4}, {5, 6}}, {{7, 8}, {9, 10}, {11, 12}}, {{13, 14}, {15, 16}, {17, 18}}, {{19, 20}, {21, 22}, {23, 24}}},
|
||||
common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000003000000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000050000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000000700000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000009000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000b000000000000000000000000000000000000000000000000000000000000000c000000000000000000000000000000000000000000000000000000000000000d000000000000000000000000000000000000000000000000000000000000000e000000000000000000000000000000000000000000000000000000000000000f000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000110000000000000000000000000000000000000000000000000000000000000012000000000000000000000000000000000000000000000000000000000000001300000000000000000000000000000000000000000000000000000000000000140000000000000000000000000000000000000000000000000000000000000015000000000000000000000000000000000000000000000000000000000000001600000000000000000000000000000000000000000000000000000000000000170000000000000000000000000000000000000000000000000000000000000018"),
|
||||
},
|
||||
{
|
||||
"address[]",
|
||||
nil,
|
||||
[]common.Address{{1}, {2}},
|
||||
common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000200000000000000000000000001000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000"),
|
||||
},
|
||||
{
|
||||
"bytes32[]",
|
||||
nil,
|
||||
[]common.Hash{{1}, {2}},
|
||||
common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000201000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000"),
|
||||
},
|
||||
{
|
||||
"function",
|
||||
nil,
|
||||
[24]byte{1},
|
||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
||||
},
|
||||
{
|
||||
"string",
|
||||
nil,
|
||||
"foobar",
|
||||
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000006666f6f6261720000000000000000000000000000000000000000000000000000"),
|
||||
},
|
||||
{
|
||||
"string[]",
|
||||
nil,
|
||||
[]string{"hello", "foobar"},
|
||||
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002" + // len(array) = 2
|
||||
"0000000000000000000000000000000000000000000000000000000000000040" + // offset 64 to i = 0
|
||||
"0000000000000000000000000000000000000000000000000000000000000080" + // offset 128 to i = 1
|
||||
"0000000000000000000000000000000000000000000000000000000000000005" + // len(str[0]) = 5
|
||||
"68656c6c6f000000000000000000000000000000000000000000000000000000" + // str[0]
|
||||
"0000000000000000000000000000000000000000000000000000000000000006" + // len(str[1]) = 6
|
||||
"666f6f6261720000000000000000000000000000000000000000000000000000"), // str[1]
|
||||
},
|
||||
{
|
||||
"string[2]",
|
||||
nil,
|
||||
[]string{"hello", "foobar"},
|
||||
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000040" + // offset to i = 0
|
||||
"0000000000000000000000000000000000000000000000000000000000000080" + // offset to i = 1
|
||||
"0000000000000000000000000000000000000000000000000000000000000005" + // len(str[0]) = 5
|
||||
"68656c6c6f000000000000000000000000000000000000000000000000000000" + // str[0]
|
||||
"0000000000000000000000000000000000000000000000000000000000000006" + // len(str[1]) = 6
|
||||
"666f6f6261720000000000000000000000000000000000000000000000000000"), // str[1]
|
||||
},
|
||||
{
|
||||
"bytes32[][]",
|
||||
nil,
|
||||
[][]common.Hash{{{1}, {2}}, {{3}, {4}, {5}}},
|
||||
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002" + // len(array) = 2
|
||||
"0000000000000000000000000000000000000000000000000000000000000040" + // offset 64 to i = 0
|
||||
"00000000000000000000000000000000000000000000000000000000000000a0" + // offset 160 to i = 1
|
||||
"0000000000000000000000000000000000000000000000000000000000000002" + // len(array[0]) = 2
|
||||
"0100000000000000000000000000000000000000000000000000000000000000" + // array[0][0]
|
||||
"0200000000000000000000000000000000000000000000000000000000000000" + // array[0][1]
|
||||
"0000000000000000000000000000000000000000000000000000000000000003" + // len(array[1]) = 3
|
||||
"0300000000000000000000000000000000000000000000000000000000000000" + // array[1][0]
|
||||
"0400000000000000000000000000000000000000000000000000000000000000" + // array[1][1]
|
||||
"0500000000000000000000000000000000000000000000000000000000000000"), // array[1][2]
|
||||
},
|
||||
|
||||
{
|
||||
"bytes32[][2]",
|
||||
nil,
|
||||
[][]common.Hash{{{1}, {2}}, {{3}, {4}, {5}}},
|
||||
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000040" + // offset 64 to i = 0
|
||||
"00000000000000000000000000000000000000000000000000000000000000a0" + // offset 160 to i = 1
|
||||
"0000000000000000000000000000000000000000000000000000000000000002" + // len(array[0]) = 2
|
||||
"0100000000000000000000000000000000000000000000000000000000000000" + // array[0][0]
|
||||
"0200000000000000000000000000000000000000000000000000000000000000" + // array[0][1]
|
||||
"0000000000000000000000000000000000000000000000000000000000000003" + // len(array[1]) = 3
|
||||
"0300000000000000000000000000000000000000000000000000000000000000" + // array[1][0]
|
||||
"0400000000000000000000000000000000000000000000000000000000000000" + // array[1][1]
|
||||
"0500000000000000000000000000000000000000000000000000000000000000"), // array[1][2]
|
||||
},
|
||||
|
||||
{
|
||||
"bytes32[3][2]",
|
||||
nil,
|
||||
[][]common.Hash{{{1}, {2}, {3}}, {{3}, {4}, {5}}},
|
||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000" + // array[0][0]
|
||||
"0200000000000000000000000000000000000000000000000000000000000000" + // array[0][1]
|
||||
"0300000000000000000000000000000000000000000000000000000000000000" + // array[0][2]
|
||||
"0300000000000000000000000000000000000000000000000000000000000000" + // array[1][0]
|
||||
"0400000000000000000000000000000000000000000000000000000000000000" + // array[1][1]
|
||||
"0500000000000000000000000000000000000000000000000000000000000000"), // array[1][2]
|
||||
},
|
||||
{
|
||||
// static tuple
|
||||
"tuple",
|
||||
[]ArgumentMarshaling{
|
||||
{Name: "a", Type: "int64"},
|
||||
{Name: "b", Type: "int256"},
|
||||
{Name: "c", Type: "int256"},
|
||||
{Name: "d", Type: "bool"},
|
||||
{Name: "e", Type: "bytes32[3][2]"},
|
||||
},
|
||||
struct {
|
||||
A int64
|
||||
B *big.Int
|
||||
C *big.Int
|
||||
D bool
|
||||
E [][]common.Hash
|
||||
}{1, big.NewInt(1), big.NewInt(-1), true, [][]common.Hash{{{1}, {2}, {3}}, {{3}, {4}, {5}}}},
|
||||
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001" + // struct[a]
|
||||
"0000000000000000000000000000000000000000000000000000000000000001" + // struct[b]
|
||||
"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + // struct[c]
|
||||
"0000000000000000000000000000000000000000000000000000000000000001" + // struct[d]
|
||||
"0100000000000000000000000000000000000000000000000000000000000000" + // struct[e] array[0][0]
|
||||
"0200000000000000000000000000000000000000000000000000000000000000" + // struct[e] array[0][1]
|
||||
"0300000000000000000000000000000000000000000000000000000000000000" + // struct[e] array[0][2]
|
||||
"0300000000000000000000000000000000000000000000000000000000000000" + // struct[e] array[1][0]
|
||||
"0400000000000000000000000000000000000000000000000000000000000000" + // struct[e] array[1][1]
|
||||
"0500000000000000000000000000000000000000000000000000000000000000"), // struct[e] array[1][2]
|
||||
},
|
||||
{
|
||||
// dynamic tuple
|
||||
"tuple",
|
||||
[]ArgumentMarshaling{
|
||||
{Name: "a", Type: "string"},
|
||||
{Name: "b", Type: "int64"},
|
||||
{Name: "c", Type: "bytes"},
|
||||
{Name: "d", Type: "string[]"},
|
||||
{Name: "e", Type: "int256[]"},
|
||||
{Name: "f", Type: "address[]"},
|
||||
},
|
||||
struct {
|
||||
FieldA string `abi:"a"` // Test whether abi tag works
|
||||
FieldB int64 `abi:"b"`
|
||||
C []byte
|
||||
D []string
|
||||
E []*big.Int
|
||||
F []common.Address
|
||||
}{"foobar", 1, []byte{1}, []string{"foo", "bar"}, []*big.Int{big.NewInt(1), big.NewInt(-1)}, []common.Address{{1}, {2}}},
|
||||
common.Hex2Bytes("00000000000000000000000000000000000000000000000000000000000000c0" + // struct[a] offset
|
||||
"0000000000000000000000000000000000000000000000000000000000000001" + // struct[b]
|
||||
"0000000000000000000000000000000000000000000000000000000000000100" + // struct[c] offset
|
||||
"0000000000000000000000000000000000000000000000000000000000000140" + // struct[d] offset
|
||||
"0000000000000000000000000000000000000000000000000000000000000220" + // struct[e] offset
|
||||
"0000000000000000000000000000000000000000000000000000000000000280" + // struct[f] offset
|
||||
"0000000000000000000000000000000000000000000000000000000000000006" + // struct[a] length
|
||||
"666f6f6261720000000000000000000000000000000000000000000000000000" + // struct[a] "foobar"
|
||||
"0000000000000000000000000000000000000000000000000000000000000001" + // struct[c] length
|
||||
"0100000000000000000000000000000000000000000000000000000000000000" + // []byte{1}
|
||||
"0000000000000000000000000000000000000000000000000000000000000002" + // struct[d] length
|
||||
"0000000000000000000000000000000000000000000000000000000000000040" + // foo offset
|
||||
"0000000000000000000000000000000000000000000000000000000000000080" + // bar offset
|
||||
"0000000000000000000000000000000000000000000000000000000000000003" + // foo length
|
||||
"666f6f0000000000000000000000000000000000000000000000000000000000" + // foo
|
||||
"0000000000000000000000000000000000000000000000000000000000000003" + // bar offset
|
||||
"6261720000000000000000000000000000000000000000000000000000000000" + // bar
|
||||
"0000000000000000000000000000000000000000000000000000000000000002" + // struct[e] length
|
||||
"0000000000000000000000000000000000000000000000000000000000000001" + // 1
|
||||
"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + // -1
|
||||
"0000000000000000000000000000000000000000000000000000000000000002" + // struct[f] length
|
||||
"0000000000000000000000000100000000000000000000000000000000000000" + // common.Address{1}
|
||||
"0000000000000000000000000200000000000000000000000000000000000000"), // common.Address{2}
|
||||
},
|
||||
{
|
||||
// nested tuple
|
||||
"tuple",
|
||||
[]ArgumentMarshaling{
|
||||
{Name: "a", Type: "tuple", Components: []ArgumentMarshaling{{Name: "a", Type: "uint256"}, {Name: "b", Type: "uint256[]"}}},
|
||||
{Name: "b", Type: "int256[]"},
|
||||
},
|
||||
struct {
|
||||
A struct {
|
||||
FieldA *big.Int `abi:"a"`
|
||||
B []*big.Int
|
||||
}
|
||||
B []*big.Int
|
||||
}{
|
||||
A: struct {
|
||||
FieldA *big.Int `abi:"a"` // Test whether abi tag works for nested tuple
|
||||
B []*big.Int
|
||||
}{big.NewInt(1), []*big.Int{big.NewInt(1), big.NewInt(0)}},
|
||||
B: []*big.Int{big.NewInt(1), big.NewInt(0)}},
|
||||
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000040" + // a offset
|
||||
"00000000000000000000000000000000000000000000000000000000000000e0" + // b offset
|
||||
"0000000000000000000000000000000000000000000000000000000000000001" + // a.a value
|
||||
"0000000000000000000000000000000000000000000000000000000000000040" + // a.b offset
|
||||
"0000000000000000000000000000000000000000000000000000000000000002" + // a.b length
|
||||
"0000000000000000000000000000000000000000000000000000000000000001" + // a.b[0] value
|
||||
"0000000000000000000000000000000000000000000000000000000000000000" + // a.b[1] value
|
||||
"0000000000000000000000000000000000000000000000000000000000000002" + // b length
|
||||
"0000000000000000000000000000000000000000000000000000000000000001" + // b[0] value
|
||||
"0000000000000000000000000000000000000000000000000000000000000000"), // b[1] value
|
||||
},
|
||||
{
|
||||
// tuple slice
|
||||
"tuple[]",
|
||||
[]ArgumentMarshaling{
|
||||
{Name: "a", Type: "int256"},
|
||||
{Name: "b", Type: "int256[]"},
|
||||
},
|
||||
[]struct {
|
||||
A *big.Int
|
||||
B []*big.Int
|
||||
}{
|
||||
{big.NewInt(-1), []*big.Int{big.NewInt(1), big.NewInt(0)}},
|
||||
{big.NewInt(1), []*big.Int{big.NewInt(2), big.NewInt(-1)}},
|
||||
},
|
||||
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002" + // tuple length
|
||||
"0000000000000000000000000000000000000000000000000000000000000040" + // tuple[0] offset
|
||||
"00000000000000000000000000000000000000000000000000000000000000e0" + // tuple[1] offset
|
||||
"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + // tuple[0].A
|
||||
"0000000000000000000000000000000000000000000000000000000000000040" + // tuple[0].B offset
|
||||
"0000000000000000000000000000000000000000000000000000000000000002" + // tuple[0].B length
|
||||
"0000000000000000000000000000000000000000000000000000000000000001" + // tuple[0].B[0] value
|
||||
"0000000000000000000000000000000000000000000000000000000000000000" + // tuple[0].B[1] value
|
||||
"0000000000000000000000000000000000000000000000000000000000000001" + // tuple[1].A
|
||||
"0000000000000000000000000000000000000000000000000000000000000040" + // tuple[1].B offset
|
||||
"0000000000000000000000000000000000000000000000000000000000000002" + // tuple[1].B length
|
||||
"0000000000000000000000000000000000000000000000000000000000000002" + // tuple[1].B[0] value
|
||||
"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"), // tuple[1].B[1] value
|
||||
},
|
||||
{
|
||||
// static tuple array
|
||||
"tuple[2]",
|
||||
[]ArgumentMarshaling{
|
||||
{Name: "a", Type: "int256"},
|
||||
{Name: "b", Type: "int256"},
|
||||
},
|
||||
[2]struct {
|
||||
A *big.Int
|
||||
B *big.Int
|
||||
}{
|
||||
{big.NewInt(-1), big.NewInt(1)},
|
||||
{big.NewInt(1), big.NewInt(-1)},
|
||||
},
|
||||
common.Hex2Bytes("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + // tuple[0].a
|
||||
"0000000000000000000000000000000000000000000000000000000000000001" + // tuple[0].b
|
||||
"0000000000000000000000000000000000000000000000000000000000000001" + // tuple[1].a
|
||||
"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"), // tuple[1].b
|
||||
},
|
||||
{
|
||||
// dynamic tuple array
|
||||
"tuple[2]",
|
||||
[]ArgumentMarshaling{
|
||||
{Name: "a", Type: "int256[]"},
|
||||
},
|
||||
[2]struct {
|
||||
A []*big.Int
|
||||
}{
|
||||
{[]*big.Int{big.NewInt(-1), big.NewInt(1)}},
|
||||
{[]*big.Int{big.NewInt(1), big.NewInt(-1)}},
|
||||
},
|
||||
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000040" + // tuple[0] offset
|
||||
"00000000000000000000000000000000000000000000000000000000000000c0" + // tuple[1] offset
|
||||
"0000000000000000000000000000000000000000000000000000000000000020" + // tuple[0].A offset
|
||||
"0000000000000000000000000000000000000000000000000000000000000002" + // tuple[0].A length
|
||||
"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + // tuple[0].A[0]
|
||||
"0000000000000000000000000000000000000000000000000000000000000001" + // tuple[0].A[1]
|
||||
"0000000000000000000000000000000000000000000000000000000000000020" + // tuple[1].A offset
|
||||
"0000000000000000000000000000000000000000000000000000000000000002" + // tuple[1].A length
|
||||
"0000000000000000000000000000000000000000000000000000000000000001" + // tuple[1].A[0]
|
||||
"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"), // tuple[1].A[1]
|
||||
},
|
||||
} {
|
||||
typ, err := NewType(test.typ)
|
||||
typ, err := NewType(test.typ, test.components)
|
||||
if err != nil {
|
||||
t.Fatalf("%v failed. Unexpected parse error: %v", i, err)
|
||||
}
|
||||
|
||||
output, err := typ.pack(reflect.ValueOf(test.input))
|
||||
if err != nil {
|
||||
t.Fatalf("%v failed. Unexpected pack error: %v", i, err)
|
||||
}
|
||||
|
||||
if !bytes.Equal(output, test.output) {
|
||||
t.Errorf("%d failed. Expected bytes: '%x' Got: '%x'", i, test.output, output)
|
||||
t.Errorf("input %d for typ: %v failed. Expected bytes: '%x' Got: '%x'", i, typ.String(), test.output, output)
|
||||
}
|
||||
}
|
||||
}
|
||||
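Outside the test, the same tuple machinery is reachable through the public API: build the type from its components, then pack a matching Go struct. A hedged sketch (the field names and values are arbitrary; it relies only on the NewType and Arguments.Pack signatures visible in this change):

package main

import (
    "fmt"
    "math/big"

    "github.com/ethereum/go-ethereum/accounts/abi"
)

func main() {
    // Describe the tuple (uint256 a, bool b) via ArgumentMarshaling components.
    tuple, err := abi.NewType("tuple", []abi.ArgumentMarshaling{
        {Name: "a", Type: "uint256"},
        {Name: "b", Type: "bool"},
    })
    if err != nil {
        panic(err)
    }
    // Pack a struct whose exported fields map onto the tuple's raw names.
    packed, err := abi.Arguments{{Type: tuple}}.Pack(struct {
        A *big.Int
        B bool
    }{big.NewInt(1), true})
    if err != nil {
        panic(err)
    }
    fmt.Printf("%x\n", packed) // expected: two 32-byte words, each ending in ...01
}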
@@ -406,6 +693,59 @@ func TestMethodPack(t *testing.T) {
|
||||
if !bytes.Equal(packed, sig) {
|
||||
t.Errorf("expected %x got %x", sig, packed)
|
||||
}
|
||||
|
||||
a := [2][2]*big.Int{{big.NewInt(1), big.NewInt(1)}, {big.NewInt(2), big.NewInt(0)}}
|
||||
sig = abi.Methods["nestedArray"].Id()
|
||||
sig = append(sig, common.LeftPadBytes([]byte{1}, 32)...)
|
||||
sig = append(sig, common.LeftPadBytes([]byte{1}, 32)...)
|
||||
sig = append(sig, common.LeftPadBytes([]byte{2}, 32)...)
|
||||
sig = append(sig, common.LeftPadBytes([]byte{0}, 32)...)
|
||||
sig = append(sig, common.LeftPadBytes([]byte{0xa0}, 32)...)
|
||||
sig = append(sig, common.LeftPadBytes([]byte{2}, 32)...)
|
||||
sig = append(sig, common.LeftPadBytes(addrC[:], 32)...)
|
||||
sig = append(sig, common.LeftPadBytes(addrD[:], 32)...)
|
||||
packed, err = abi.Pack("nestedArray", a, []common.Address{addrC, addrD})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !bytes.Equal(packed, sig) {
|
||||
t.Errorf("expected %x got %x", sig, packed)
|
||||
}
|
||||
|
||||
sig = abi.Methods["nestedArray2"].Id()
|
||||
sig = append(sig, common.LeftPadBytes([]byte{0x20}, 32)...)
|
||||
sig = append(sig, common.LeftPadBytes([]byte{0x40}, 32)...)
|
||||
sig = append(sig, common.LeftPadBytes([]byte{0x80}, 32)...)
|
||||
sig = append(sig, common.LeftPadBytes([]byte{1}, 32)...)
|
||||
sig = append(sig, common.LeftPadBytes([]byte{1}, 32)...)
|
||||
sig = append(sig, common.LeftPadBytes([]byte{1}, 32)...)
|
||||
sig = append(sig, common.LeftPadBytes([]byte{1}, 32)...)
|
||||
packed, err = abi.Pack("nestedArray2", [2][]uint8{{1}, {1}})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !bytes.Equal(packed, sig) {
|
||||
t.Errorf("expected %x got %x", sig, packed)
|
||||
}
|
||||
|
||||
sig = abi.Methods["nestedSlice"].Id()
|
||||
sig = append(sig, common.LeftPadBytes([]byte{0x20}, 32)...)
|
||||
sig = append(sig, common.LeftPadBytes([]byte{0x02}, 32)...)
|
||||
sig = append(sig, common.LeftPadBytes([]byte{0x40}, 32)...)
|
||||
sig = append(sig, common.LeftPadBytes([]byte{0xa0}, 32)...)
|
||||
sig = append(sig, common.LeftPadBytes([]byte{2}, 32)...)
|
||||
sig = append(sig, common.LeftPadBytes([]byte{1}, 32)...)
|
||||
sig = append(sig, common.LeftPadBytes([]byte{2}, 32)...)
|
||||
sig = append(sig, common.LeftPadBytes([]byte{2}, 32)...)
|
||||
sig = append(sig, common.LeftPadBytes([]byte{1}, 32)...)
|
||||
sig = append(sig, common.LeftPadBytes([]byte{2}, 32)...)
|
||||
packed, err = abi.Pack("nestedSlice", [][]uint8{{1, 2}, {1, 2}})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !bytes.Equal(packed, sig) {
|
||||
t.Errorf("expected %x got %x", sig, packed)
|
||||
}
|
||||
}
|
||||
|
||||
func TestPackNumber(t *testing.T) {
|
||||
|
@@ -71,22 +71,36 @@ func mustArrayToByteSlice(value reflect.Value) reflect.Value {
|
||||
//
|
||||
// set is a bit more lenient when it comes to assignment and doesn't force an as
|
||||
// strict ruleset as bare `reflect` does.
|
||||
func set(dst, src reflect.Value, output Argument) error {
|
||||
dstType := dst.Type()
|
||||
srcType := src.Type()
|
||||
func set(dst, src reflect.Value) error {
|
||||
dstType, srcType := dst.Type(), src.Type()
|
||||
switch {
|
||||
case dstType.AssignableTo(srcType):
|
||||
dst.Set(src)
|
||||
case dstType.Kind() == reflect.Interface:
|
||||
return set(dst.Elem(), src)
|
||||
case dstType.Kind() == reflect.Ptr && dstType.Elem() != derefbigT:
|
||||
return set(dst.Elem(), src)
|
||||
case srcType.AssignableTo(dstType) && dst.CanSet():
|
||||
dst.Set(src)
|
||||
case dstType.Kind() == reflect.Ptr:
|
||||
return set(dst.Elem(), src, output)
|
||||
case dstType.Kind() == reflect.Slice && srcType.Kind() == reflect.Slice:
|
||||
return setSlice(dst, src)
|
||||
default:
|
||||
return fmt.Errorf("abi: cannot unmarshal %v in to %v", src.Type(), dst.Type())
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// setSlice attempts to assign src to dst when slices are not assignable by default
|
||||
// e.g. src: [][]byte -> dst: [][15]byte
|
||||
func setSlice(dst, src reflect.Value) error {
|
||||
slice := reflect.MakeSlice(dst.Type(), src.Len(), src.Len())
|
||||
for i := 0; i < src.Len(); i++ {
|
||||
v := src.Index(i)
|
||||
reflect.Copy(slice.Index(i), v)
|
||||
}
|
||||
|
||||
dst.Set(slice)
|
||||
return nil
|
||||
}
|
||||
|
||||
// requireAssignable assures that `dest` is a pointer and it's not an interface.
|
||||
func requireAssignable(dst, src reflect.Value) error {
|
||||
if dst.Kind() != reflect.Ptr && dst.Kind() != reflect.Interface {
|
||||
@@ -112,14 +126,14 @@ func requireUnpackKind(v reflect.Value, t reflect.Type, k reflect.Kind,
|
||||
return nil
|
||||
}
|
||||
|
||||
// mapAbiToStringField maps abi to struct fields.
|
||||
// mapArgNamesToStructFields maps a slice of argument names to struct fields.
|
||||
// first round: for each Exportable field that contains a `abi:""` tag
|
||||
// and this field name exists in the arguments, pair them together.
|
||||
// second round: for each argument field that has not been already linked,
|
||||
// and this field name exists in the given argument name list, pair them together.
|
||||
// second round: for each argument name that has not been already linked,
|
||||
// find what variable is expected to be mapped into, if it exists and has not been
|
||||
// used, pair them.
|
||||
func mapAbiToStructFields(args Arguments, value reflect.Value) (map[string]string, error) {
|
||||
|
||||
// Note this function assumes the given value is a struct value.
|
||||
func mapArgNamesToStructFields(argNames []string, value reflect.Value) (map[string]string, error) {
|
||||
typ := value.Type()
|
||||
|
||||
abi2struct := make(map[string]string)
|
||||
@@ -133,45 +147,39 @@ func mapAbiToStructFields(args Arguments, value reflect.Value) (map[string]strin
|
||||
if structFieldName[:1] != strings.ToUpper(structFieldName[:1]) {
|
||||
continue
|
||||
}
|
||||
|
||||
// skip fields that have no abi:"" tag.
|
||||
var ok bool
|
||||
var tagName string
|
||||
if tagName, ok = typ.Field(i).Tag.Lookup("abi"); !ok {
|
||||
continue
|
||||
}
|
||||
|
||||
// check if tag is empty.
|
||||
if tagName == "" {
|
||||
return nil, fmt.Errorf("struct: abi tag in '%s' is empty", structFieldName)
|
||||
}
|
||||
|
||||
// check which argument field matches with the abi tag.
|
||||
found := false
|
||||
for _, abiField := range args.NonIndexed() {
|
||||
if abiField.Name == tagName {
|
||||
if abi2struct[abiField.Name] != "" {
|
||||
for _, arg := range argNames {
|
||||
if arg == tagName {
|
||||
if abi2struct[arg] != "" {
|
||||
return nil, fmt.Errorf("struct: abi tag in '%s' already mapped", structFieldName)
|
||||
}
|
||||
// pair them
|
||||
abi2struct[abiField.Name] = structFieldName
|
||||
struct2abi[structFieldName] = abiField.Name
|
||||
abi2struct[arg] = structFieldName
|
||||
struct2abi[structFieldName] = arg
|
||||
found = true
|
||||
}
|
||||
}
|
||||
|
||||
// check if this tag has been mapped.
|
||||
if !found {
|
||||
return nil, fmt.Errorf("struct: abi tag '%s' defined but not found in abi", tagName)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// second round ~~~
|
||||
for _, arg := range args {
|
||||
for _, argName := range argNames {
|
||||
|
||||
abiFieldName := arg.Name
|
||||
structFieldName := capitalise(abiFieldName)
|
||||
structFieldName := ToCamelCase(argName)
|
||||
|
||||
if structFieldName == "" {
|
||||
return nil, fmt.Errorf("abi: purely underscored output cannot unpack to struct")
|
||||
@@ -181,11 +189,11 @@ func mapAbiToStructFields(args Arguments, value reflect.Value) (map[string]strin
|
||||
// struct field with the same field name. If so, raise an error:
|
||||
// abi: [ { "name": "value" } ]
|
||||
// struct { Value *big.Int , Value1 *big.Int `abi:"value"`}
|
||||
if abi2struct[abiFieldName] != "" {
|
||||
if abi2struct[abiFieldName] != structFieldName &&
|
||||
if abi2struct[argName] != "" {
|
||||
if abi2struct[argName] != structFieldName &&
|
||||
struct2abi[structFieldName] == "" &&
|
||||
value.FieldByName(structFieldName).IsValid() {
|
||||
return nil, fmt.Errorf("abi: multiple variables maps to the same abi field '%s'", abiFieldName)
|
||||
return nil, fmt.Errorf("abi: multiple variables maps to the same abi field '%s'", argName)
|
||||
}
|
||||
continue
|
||||
}
|
||||
@@ -197,16 +205,14 @@ func mapAbiToStructFields(args Arguments, value reflect.Value) (map[string]strin
|
||||
|
||||
if value.FieldByName(structFieldName).IsValid() {
|
||||
// pair them
|
||||
abi2struct[abiFieldName] = structFieldName
|
||||
struct2abi[structFieldName] = abiFieldName
|
||||
abi2struct[argName] = structFieldName
|
||||
struct2abi[structFieldName] = argName
|
||||
} else {
|
||||
// not paired, but annotate as used, to detect cases like
|
||||
// abi : [ { "name": "value" }, { "name": "_value" } ]
|
||||
// struct { Value *big.Int }
|
||||
struct2abi[structFieldName] = abiFieldName
|
||||
struct2abi[structFieldName] = argName
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
return abi2struct, nil
|
||||
}
|
||||
|
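The new setSlice helper above covers the case where element layouts match but the slice types are not directly assignable. A standalone sketch of the same reflect trick (the [][15]byte target shape is just for illustration, mirroring the comment's example):

package main

import (
    "fmt"
    "reflect"
)

// copySlice mirrors setSlice: element-wise reflect.Copy lets a [][]byte source
// populate a [][15]byte destination even though the slice types differ.
func copySlice(dst, src reflect.Value) {
    out := reflect.MakeSlice(dst.Type(), src.Len(), src.Len())
    for i := 0; i < src.Len(); i++ {
        reflect.Copy(out.Index(i), src.Index(i))
    }
    dst.Set(out)
}

func main() {
    src := [][]byte{[]byte("hello"), []byte("world")}
    var dst [][15]byte
    copySlice(reflect.ValueOf(&dst).Elem(), reflect.ValueOf(src))
    fmt.Println(dst[0]) // bytes of "hello" followed by zero padding
}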
accounts/abi/reflect_test.go (new file, 191 lines added)
@@ -0,0 +1,191 @@
|
||||
// Copyright 2019 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package abi
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
)
|
||||
|
||||
type reflectTest struct {
|
||||
name string
|
||||
args []string
|
||||
struc interface{}
|
||||
want map[string]string
|
||||
err string
|
||||
}
|
||||
|
||||
var reflectTests = []reflectTest{
|
||||
{
|
||||
name: "OneToOneCorrespondance",
|
||||
args: []string{"fieldA"},
|
||||
struc: struct {
|
||||
FieldA int `abi:"fieldA"`
|
||||
}{},
|
||||
want: map[string]string{
|
||||
"fieldA": "FieldA",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "MissingFieldsInStruct",
|
||||
args: []string{"fieldA", "fieldB"},
|
||||
struc: struct {
|
||||
FieldA int `abi:"fieldA"`
|
||||
}{},
|
||||
want: map[string]string{
|
||||
"fieldA": "FieldA",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "MoreFieldsInStructThanArgs",
|
||||
args: []string{"fieldA"},
|
||||
struc: struct {
|
||||
FieldA int `abi:"fieldA"`
|
||||
FieldB int
|
||||
}{},
|
||||
want: map[string]string{
|
||||
"fieldA": "FieldA",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "MissingFieldInArgs",
|
||||
args: []string{"fieldA"},
|
||||
struc: struct {
|
||||
FieldA int `abi:"fieldA"`
|
||||
FieldB int `abi:"fieldB"`
|
||||
}{},
|
||||
err: "struct: abi tag 'fieldB' defined but not found in abi",
|
||||
},
|
||||
{
|
||||
name: "NoAbiDescriptor",
|
||||
args: []string{"fieldA"},
|
||||
struc: struct {
|
||||
FieldA int
|
||||
}{},
|
||||
want: map[string]string{
|
||||
"fieldA": "FieldA",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "NoArgs",
|
||||
args: []string{},
|
||||
struc: struct {
|
||||
FieldA int `abi:"fieldA"`
|
||||
}{},
|
||||
err: "struct: abi tag 'fieldA' defined but not found in abi",
|
||||
},
|
||||
{
|
||||
name: "DifferentName",
|
||||
args: []string{"fieldB"},
|
||||
struc: struct {
|
||||
FieldA int `abi:"fieldB"`
|
||||
}{},
|
||||
want: map[string]string{
|
||||
"fieldB": "FieldA",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "DifferentName",
|
||||
args: []string{"fieldB"},
|
||||
struc: struct {
|
||||
FieldA int `abi:"fieldB"`
|
||||
}{},
|
||||
want: map[string]string{
|
||||
"fieldB": "FieldA",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "MultipleFields",
|
||||
args: []string{"fieldA", "fieldB"},
|
||||
struc: struct {
|
||||
FieldA int `abi:"fieldA"`
|
||||
FieldB int `abi:"fieldB"`
|
||||
}{},
|
||||
want: map[string]string{
|
||||
"fieldA": "FieldA",
|
||||
"fieldB": "FieldB",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "MultipleFieldsABIMissing",
|
||||
args: []string{"fieldA", "fieldB"},
|
||||
struc: struct {
|
||||
FieldA int `abi:"fieldA"`
|
||||
FieldB int
|
||||
}{},
|
||||
want: map[string]string{
|
||||
"fieldA": "FieldA",
|
||||
"fieldB": "FieldB",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "NameConflict",
|
||||
args: []string{"fieldB"},
|
||||
struc: struct {
|
||||
FieldA int `abi:"fieldB"`
|
||||
FieldB int
|
||||
}{},
|
||||
err: "abi: multiple variables maps to the same abi field 'fieldB'",
|
||||
},
|
||||
{
|
||||
name: "Underscored",
|
||||
args: []string{"_"},
|
||||
struc: struct {
|
||||
FieldA int
|
||||
}{},
|
||||
err: "abi: purely underscored output cannot unpack to struct",
|
||||
},
|
||||
{
|
||||
name: "DoubleMapping",
|
||||
args: []string{"fieldB", "fieldC", "fieldA"},
|
||||
struc: struct {
|
||||
FieldA int `abi:"fieldC"`
|
||||
FieldB int
|
||||
}{},
|
||||
err: "abi: multiple outputs mapping to the same struct field 'FieldA'",
|
||||
},
|
||||
{
|
||||
name: "AlreadyMapped",
|
||||
args: []string{"fieldB", "fieldB"},
|
||||
struc: struct {
|
||||
FieldB int `abi:"fieldB"`
|
||||
}{},
|
||||
err: "struct: abi tag in 'FieldB' already mapped",
|
||||
},
|
||||
}
|
||||
|
||||
func TestReflectNameToStruct(t *testing.T) {
|
||||
for _, test := range reflectTests {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
m, err := mapArgNamesToStructFields(test.args, reflect.ValueOf(test.struc))
|
||||
if len(test.err) > 0 {
|
||||
if err == nil || err.Error() != test.err {
|
||||
t.Fatalf("Invalid error: expected %v, got %v", test.err, err)
|
||||
}
|
||||
} else {
|
||||
if err != nil {
|
||||
t.Fatalf("Unexpected error: %v", err)
|
||||
}
|
||||
for fname := range test.want {
|
||||
if m[fname] != test.want[fname] {
|
||||
t.Fatalf("Incorrect value for field %s: expected %v, got %v", fname, test.want[fname], m[fname])
|
||||
}
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
@@ -17,6 +17,7 @@
|
||||
package abi
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"regexp"
|
||||
@@ -32,6 +33,7 @@ const (
|
||||
StringTy
|
||||
SliceTy
|
||||
ArrayTy
|
||||
TupleTy
|
||||
AddressTy
|
||||
FixedBytesTy
|
||||
BytesTy
|
||||
@@ -43,13 +45,16 @@ const (
|
||||
// Type is the reflection of the supported argument type
|
||||
type Type struct {
|
||||
Elem *Type
|
||||
|
||||
Kind reflect.Kind
|
||||
Type reflect.Type
|
||||
Size int
|
||||
T byte // Our own type checking
|
||||
|
||||
stringKind string // holds the unparsed string for deriving signatures
|
||||
|
||||
// Tuple relative fields
|
||||
TupleElems []*Type // Type information of all tuple fields
|
||||
TupleRawNames []string // Raw field name of all tuple fields
|
||||
}
|
||||
|
||||
var (
|
||||
@@ -58,7 +63,7 @@ var (
|
||||
)
|
||||
|
||||
// NewType creates a new reflection type of abi type given in t.
|
||||
func NewType(t string) (typ Type, err error) {
|
||||
func NewType(t string, components []ArgumentMarshaling) (typ Type, err error) {
|
||||
// check that array brackets are equal if they exist
|
||||
if strings.Count(t, "[") != strings.Count(t, "]") {
|
||||
return Type{}, fmt.Errorf("invalid arg type in abi")
|
||||
@@ -71,7 +76,7 @@ func NewType(t string) (typ Type, err error) {
|
||||
if strings.Count(t, "[") != 0 {
|
||||
i := strings.LastIndex(t, "[")
|
||||
// recursively embed the type
|
||||
embeddedType, err := NewType(t[:i])
|
||||
embeddedType, err := NewType(t[:i], components)
|
||||
if err != nil {
|
||||
return Type{}, err
|
||||
}
|
||||
@@ -87,6 +92,9 @@ func NewType(t string) (typ Type, err error) {
|
||||
typ.Kind = reflect.Slice
|
||||
typ.Elem = &embeddedType
|
||||
typ.Type = reflect.SliceOf(embeddedType.Type)
|
||||
if embeddedType.T == TupleTy {
|
||||
typ.stringKind = embeddedType.stringKind + sliced
|
||||
}
|
||||
} else if len(intz) == 1 {
|
||||
// is a array
|
||||
typ.T = ArrayTy
|
||||
@@ -97,13 +105,21 @@ func NewType(t string) (typ Type, err error) {
|
||||
return Type{}, fmt.Errorf("abi: error parsing variable size: %v", err)
|
||||
}
|
||||
typ.Type = reflect.ArrayOf(typ.Size, embeddedType.Type)
|
||||
if embeddedType.T == TupleTy {
|
||||
typ.stringKind = embeddedType.stringKind + sliced
|
||||
}
|
||||
} else {
|
||||
return Type{}, fmt.Errorf("invalid formatting of array type")
|
||||
}
|
||||
return typ, err
|
||||
}
|
||||
// parse the type and size of the abi-type.
|
||||
parsedType := typeRegex.FindAllStringSubmatch(t, -1)[0]
|
||||
matches := typeRegex.FindAllStringSubmatch(t, -1)
|
||||
if len(matches) == 0 {
|
||||
return Type{}, fmt.Errorf("invalid type '%v'", t)
|
||||
}
|
||||
parsedType := matches[0]
|
||||
|
||||
// varSize is the size of the variable
|
||||
var varSize int
|
||||
if len(parsedType[3]) > 0 {
|
||||
@@ -153,6 +169,40 @@ func NewType(t string) (typ Type, err error) {
|
||||
typ.Size = varSize
|
||||
typ.Type = reflect.ArrayOf(varSize, reflect.TypeOf(byte(0)))
|
||||
}
|
||||
case "tuple":
|
||||
var (
|
||||
fields []reflect.StructField
|
||||
elems []*Type
|
||||
names []string
|
||||
expression string // canonical parameter expression
|
||||
)
|
||||
expression += "("
|
||||
for idx, c := range components {
|
||||
cType, err := NewType(c.Type, c.Components)
|
||||
if err != nil {
|
||||
return Type{}, err
|
||||
}
|
||||
if ToCamelCase(c.Name) == "" {
|
||||
return Type{}, errors.New("abi: purely anonymous or underscored field is not supported")
|
||||
}
|
||||
fields = append(fields, reflect.StructField{
|
||||
Name: ToCamelCase(c.Name), // reflect.StructOf will panic for any exported field.
|
||||
Type: cType.Type,
|
||||
})
|
||||
elems = append(elems, &cType)
|
||||
names = append(names, c.Name)
|
||||
expression += cType.stringKind
|
||||
if idx != len(components)-1 {
|
||||
expression += ","
|
||||
}
|
||||
}
|
||||
expression += ")"
|
||||
typ.Kind = reflect.Struct
|
||||
typ.Type = reflect.StructOf(fields)
|
||||
typ.TupleElems = elems
|
||||
typ.TupleRawNames = names
|
||||
typ.T = TupleTy
|
||||
typ.stringKind = expression
|
||||
case "function":
|
||||
typ.Kind = reflect.Array
|
||||
typ.T = FunctionTy
|
||||
@@ -173,28 +223,82 @@ func (t Type) String() (out string) {
func (t Type) pack(v reflect.Value) ([]byte, error) {
// dereference pointer first if it's a pointer
v = indirect(v)

if err := typeCheck(t, v); err != nil {
return nil, err
}

if t.T == SliceTy || t.T == ArrayTy {
var packed []byte
switch t.T {
case SliceTy, ArrayTy:
var ret []byte

if t.requiresLengthPrefix() {
// append length
ret = append(ret, packNum(reflect.ValueOf(v.Len()))...)
}

// calculate offset if any
offset := 0
offsetReq := isDynamicType(*t.Elem)
if offsetReq {
offset = getTypeSize(*t.Elem) * v.Len()
}
var tail []byte
for i := 0; i < v.Len(); i++ {
val, err := t.Elem.pack(v.Index(i))
if err != nil {
return nil, err
}
packed = append(packed, val...)
if !offsetReq {
ret = append(ret, val...)
continue
}
ret = append(ret, packNum(reflect.ValueOf(offset))...)
offset += len(val)
tail = append(tail, val...)
}
if t.T == SliceTy {
return packBytesSlice(packed, v.Len()), nil
} else if t.T == ArrayTy {
return packed, nil
return append(ret, tail...), nil
case TupleTy:
// (T1,...,Tk) for k >= 0 and any types T1, …, Tk
// enc(X) = head(X(1)) ... head(X(k)) tail(X(1)) ... tail(X(k))
// where X = (X(1), ..., X(k)) and head and tail are defined for Ti being a static
// type as
// head(X(i)) = enc(X(i)) and tail(X(i)) = "" (the empty string)
// and as
// head(X(i)) = enc(len(head(X(1)) ... head(X(k)) tail(X(1)) ... tail(X(i-1))))
// tail(X(i)) = enc(X(i))
// otherwise, i.e. if Ti is a dynamic type.
fieldmap, err := mapArgNamesToStructFields(t.TupleRawNames, v)
if err != nil {
return nil, err
}
// Calculate prefix occupied size.
offset := 0
for _, elem := range t.TupleElems {
offset += getTypeSize(*elem)
}
var ret, tail []byte
for i, elem := range t.TupleElems {
field := v.FieldByName(fieldmap[t.TupleRawNames[i]])
if !field.IsValid() {
return nil, fmt.Errorf("field %s for tuple not found in the given struct", t.TupleRawNames[i])
}
val, err := elem.pack(field)
if err != nil {
return nil, err
}
if isDynamicType(*elem) {
ret = append(ret, packNum(reflect.ValueOf(offset))...)
tail = append(tail, val...)
offset += len(val)
} else {
ret = append(ret, val...)
}
}
return append(ret, tail...), nil

default:
return packElement(t, v), nil
}
return packElement(t, v), nil
}
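For reference alongside the spec comment in the tuple case above, here is a minimal standalone sketch (not part of the diff) that hand-builds the head/tail encoding of a hypothetical tuple (uint256,string) with values (1, "abc") using only the standard library; names and values are made up for illustration, and the real packing is done by Type.pack.

package main

import (
	"encoding/hex"
	"fmt"
	"math/big"
)

// word left-pads a number to a 32-byte ABI word.
func word(n *big.Int) []byte {
	return n.FillBytes(make([]byte, 32))
}

func main() {
	// head(X1) = enc(1); head(X2) = byte offset of tail(X2), counted from the
	// start of the tuple encoding (two 32-byte head slots -> 0x40).
	head := append(word(big.NewInt(1)), word(big.NewInt(64))...)

	// tail(X2) = enc("abc") = length word followed by the right-padded bytes.
	tail := append(word(big.NewInt(3)), append([]byte("abc"), make([]byte, 29)...)...)

	fmt.Println(hex.EncodeToString(append(head, tail...)))
}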

// requiresLengthPrefix returns whether the type requires any sort of length
@@ -202,3 +306,47 @@ func (t Type) pack(v reflect.Value) ([]byte, error) {
func (t Type) requiresLengthPrefix() bool {
return t.T == StringTy || t.T == BytesTy || t.T == SliceTy
}

// isDynamicType returns true if the type is dynamic.
// The following types are called “dynamic”:
// * bytes
// * string
// * T[] for any T
// * T[k] for any dynamic T and any k >= 0
// * (T1,...,Tk) if Ti is dynamic for some 1 <= i <= k
func isDynamicType(t Type) bool {
if t.T == TupleTy {
for _, elem := range t.TupleElems {
if isDynamicType(*elem) {
return true
}
}
return false
}
return t.T == StringTy || t.T == BytesTy || t.T == SliceTy || (t.T == ArrayTy && isDynamicType(*t.Elem))
}
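To make the classification above concrete, a small standalone sketch (illustration only, not the package API) restates the same rules over plain type strings for a few examples; it deliberately ignores tuples and other corner cases that isDynamicType handles.

package main

import (
	"fmt"
	"strings"
)

// dynamicByString mirrors the rules above purely on the type string; this is a
// simplification for the example and not a replacement for isDynamicType.
func dynamicByString(s string) bool {
	switch {
	case s == "bytes" || s == "string":
		return true
	case strings.HasSuffix(s, "[]"):
		return true // T[] for any T
	case strings.HasSuffix(s, "]"):
		// T[k] is dynamic only if T itself is dynamic.
		return dynamicByString(s[:strings.LastIndex(s, "[")])
	default:
		return false
	}
}

func main() {
	for _, s := range []string{"uint256", "bytes32", "string", "uint256[]", "bytes32[2]", "string[2]"} {
		fmt.Printf("%-12s dynamic=%v\n", s, dynamicByString(s))
	}
}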

// getTypeSize returns the size that this type needs to occupy.
// We distinguish static and dynamic types. Static types are encoded in-place
// and dynamic types are encoded at a separately allocated location after the
// current block.
// So for a static variable, the size returned represents the size that the
// variable actually occupies.
// For a dynamic variable, the returned size is fixed 32 bytes, which is used
// to store the location reference for actual value storage.
func getTypeSize(t Type) int {
if t.T == ArrayTy && !isDynamicType(*t.Elem) {
// Recursively calculate type size if it is a nested array
if t.Elem.T == ArrayTy {
return t.Size * getTypeSize(*t.Elem)
}
return t.Size * 32
} else if t.T == TupleTy && !isDynamicType(t) {
total := 0
for _, elem := range t.TupleElems {
total += getTypeSize(*elem)
}
return total
}
return 32
}
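A worked instance of the sizing rule above (plain arithmetic, illustration only): every member of the static tuple (uint256,bytes32[2],address) is static, so its in-place size is the sum of its members, whereas any dynamic member would contribute only a single 32-byte offset slot.

package main

import "fmt"

func main() {
	uint256Size := 32     // static elementary type: one word
	bytes32Arr2 := 2 * 32 // static array: element count times element size
	addressSize := 32     // addresses are padded to a full word
	fmt.Println("head size of (uint256,bytes32[2],address):", uint256Size+bytes32Arr2+addressSize) // 128
}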
@@ -32,72 +32,75 @@ type typeWithoutStringer Type
// Tests that all allowed types get recognized by the type parser.
func TestTypeRegexp(t *testing.T) {
tests := []struct {
blob string
kind Type
blob string
components []ArgumentMarshaling
kind Type
}{
{"bool", Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}},
|
||||
{"bool[]", Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]bool(nil)), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[]"}},
|
||||
{"bool[2]", Type{Size: 2, Kind: reflect.Array, T: ArrayTy, Type: reflect.TypeOf([2]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[2]"}},
|
||||
{"bool[2][]", Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([][2]bool{}), Elem: &Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[2]"}, stringKind: "bool[2][]"}},
|
||||
{"bool[][]", Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([][]bool{}), Elem: &Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[]"}, stringKind: "bool[][]"}},
|
||||
{"bool[][2]", Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2][]bool{}), Elem: &Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[]"}, stringKind: "bool[][2]"}},
|
||||
{"bool[2][2]", Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2][2]bool{}), Elem: &Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[2]"}, stringKind: "bool[2][2]"}},
|
||||
{"bool[2][][2]", Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2][][2]bool{}), Elem: &Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([][2]bool{}), Elem: &Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[2]"}, stringKind: "bool[2][]"}, stringKind: "bool[2][][2]"}},
|
||||
{"bool[2][2][2]", Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2][2][2]bool{}), Elem: &Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2][2]bool{}), Elem: &Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[2]"}, stringKind: "bool[2][2]"}, stringKind: "bool[2][2][2]"}},
|
||||
{"bool[][][]", Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([][][]bool{}), Elem: &Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([][]bool{}), Elem: &Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[]"}, stringKind: "bool[][]"}, stringKind: "bool[][][]"}},
|
||||
{"bool[][2][]", Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([][2][]bool{}), Elem: &Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2][]bool{}), Elem: &Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[]"}, stringKind: "bool[][2]"}, stringKind: "bool[][2][]"}},
|
||||
{"int8", Type{Kind: reflect.Int8, Type: int8T, Size: 8, T: IntTy, stringKind: "int8"}},
|
||||
{"int16", Type{Kind: reflect.Int16, Type: int16T, Size: 16, T: IntTy, stringKind: "int16"}},
|
||||
{"int32", Type{Kind: reflect.Int32, Type: int32T, Size: 32, T: IntTy, stringKind: "int32"}},
|
||||
{"int64", Type{Kind: reflect.Int64, Type: int64T, Size: 64, T: IntTy, stringKind: "int64"}},
|
||||
{"int256", Type{Kind: reflect.Ptr, Type: bigT, Size: 256, T: IntTy, stringKind: "int256"}},
|
||||
{"int8[]", Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]int8{}), Elem: &Type{Kind: reflect.Int8, Type: int8T, Size: 8, T: IntTy, stringKind: "int8"}, stringKind: "int8[]"}},
|
||||
{"int8[2]", Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]int8{}), Elem: &Type{Kind: reflect.Int8, Type: int8T, Size: 8, T: IntTy, stringKind: "int8"}, stringKind: "int8[2]"}},
|
||||
{"int16[]", Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]int16{}), Elem: &Type{Kind: reflect.Int16, Type: int16T, Size: 16, T: IntTy, stringKind: "int16"}, stringKind: "int16[]"}},
|
||||
{"int16[2]", Type{Size: 2, Kind: reflect.Array, T: ArrayTy, Type: reflect.TypeOf([2]int16{}), Elem: &Type{Kind: reflect.Int16, Type: int16T, Size: 16, T: IntTy, stringKind: "int16"}, stringKind: "int16[2]"}},
|
||||
{"int32[]", Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]int32{}), Elem: &Type{Kind: reflect.Int32, Type: int32T, Size: 32, T: IntTy, stringKind: "int32"}, stringKind: "int32[]"}},
|
||||
{"int32[2]", Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]int32{}), Elem: &Type{Kind: reflect.Int32, Type: int32T, Size: 32, T: IntTy, stringKind: "int32"}, stringKind: "int32[2]"}},
|
||||
{"int64[]", Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]int64{}), Elem: &Type{Kind: reflect.Int64, Type: int64T, Size: 64, T: IntTy, stringKind: "int64"}, stringKind: "int64[]"}},
|
||||
{"int64[2]", Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]int64{}), Elem: &Type{Kind: reflect.Int64, Type: int64T, Size: 64, T: IntTy, stringKind: "int64"}, stringKind: "int64[2]"}},
|
||||
{"int256[]", Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]*big.Int{}), Elem: &Type{Kind: reflect.Ptr, Type: bigT, Size: 256, T: IntTy, stringKind: "int256"}, stringKind: "int256[]"}},
|
||||
{"int256[2]", Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]*big.Int{}), Elem: &Type{Kind: reflect.Ptr, Type: bigT, Size: 256, T: IntTy, stringKind: "int256"}, stringKind: "int256[2]"}},
|
||||
{"uint8", Type{Kind: reflect.Uint8, Type: uint8T, Size: 8, T: UintTy, stringKind: "uint8"}},
|
||||
{"uint16", Type{Kind: reflect.Uint16, Type: uint16T, Size: 16, T: UintTy, stringKind: "uint16"}},
|
||||
{"uint32", Type{Kind: reflect.Uint32, Type: uint32T, Size: 32, T: UintTy, stringKind: "uint32"}},
|
||||
{"uint64", Type{Kind: reflect.Uint64, Type: uint64T, Size: 64, T: UintTy, stringKind: "uint64"}},
|
||||
{"uint256", Type{Kind: reflect.Ptr, Type: bigT, Size: 256, T: UintTy, stringKind: "uint256"}},
|
||||
{"uint8[]", Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]uint8{}), Elem: &Type{Kind: reflect.Uint8, Type: uint8T, Size: 8, T: UintTy, stringKind: "uint8"}, stringKind: "uint8[]"}},
|
||||
{"uint8[2]", Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]uint8{}), Elem: &Type{Kind: reflect.Uint8, Type: uint8T, Size: 8, T: UintTy, stringKind: "uint8"}, stringKind: "uint8[2]"}},
|
||||
{"uint16[]", Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([]uint16{}), Elem: &Type{Kind: reflect.Uint16, Type: uint16T, Size: 16, T: UintTy, stringKind: "uint16"}, stringKind: "uint16[]"}},
|
||||
{"uint16[2]", Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]uint16{}), Elem: &Type{Kind: reflect.Uint16, Type: uint16T, Size: 16, T: UintTy, stringKind: "uint16"}, stringKind: "uint16[2]"}},
|
||||
{"uint32[]", Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([]uint32{}), Elem: &Type{Kind: reflect.Uint32, Type: uint32T, Size: 32, T: UintTy, stringKind: "uint32"}, stringKind: "uint32[]"}},
|
||||
{"uint32[2]", Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]uint32{}), Elem: &Type{Kind: reflect.Uint32, Type: uint32T, Size: 32, T: UintTy, stringKind: "uint32"}, stringKind: "uint32[2]"}},
|
||||
{"uint64[]", Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([]uint64{}), Elem: &Type{Kind: reflect.Uint64, Type: uint64T, Size: 64, T: UintTy, stringKind: "uint64"}, stringKind: "uint64[]"}},
|
||||
{"uint64[2]", Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]uint64{}), Elem: &Type{Kind: reflect.Uint64, Type: uint64T, Size: 64, T: UintTy, stringKind: "uint64"}, stringKind: "uint64[2]"}},
|
||||
{"uint256[]", Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([]*big.Int{}), Elem: &Type{Kind: reflect.Ptr, Type: bigT, Size: 256, T: UintTy, stringKind: "uint256"}, stringKind: "uint256[]"}},
|
||||
{"uint256[2]", Type{Kind: reflect.Array, T: ArrayTy, Type: reflect.TypeOf([2]*big.Int{}), Size: 2, Elem: &Type{Kind: reflect.Ptr, Type: bigT, Size: 256, T: UintTy, stringKind: "uint256"}, stringKind: "uint256[2]"}},
|
||||
{"bytes32", Type{Kind: reflect.Array, T: FixedBytesTy, Size: 32, Type: reflect.TypeOf([32]byte{}), stringKind: "bytes32"}},
|
||||
{"bytes[]", Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([][]byte{}), Elem: &Type{Kind: reflect.Slice, Type: reflect.TypeOf([]byte{}), T: BytesTy, stringKind: "bytes"}, stringKind: "bytes[]"}},
|
||||
{"bytes[2]", Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2][]byte{}), Elem: &Type{T: BytesTy, Type: reflect.TypeOf([]byte{}), Kind: reflect.Slice, stringKind: "bytes"}, stringKind: "bytes[2]"}},
|
||||
{"bytes32[]", Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([][32]byte{}), Elem: &Type{Kind: reflect.Array, Type: reflect.TypeOf([32]byte{}), T: FixedBytesTy, Size: 32, stringKind: "bytes32"}, stringKind: "bytes32[]"}},
|
||||
{"bytes32[2]", Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2][32]byte{}), Elem: &Type{Kind: reflect.Array, T: FixedBytesTy, Size: 32, Type: reflect.TypeOf([32]byte{}), stringKind: "bytes32"}, stringKind: "bytes32[2]"}},
|
||||
{"string", Type{Kind: reflect.String, T: StringTy, Type: reflect.TypeOf(""), stringKind: "string"}},
|
||||
{"string[]", Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([]string{}), Elem: &Type{Kind: reflect.String, Type: reflect.TypeOf(""), T: StringTy, stringKind: "string"}, stringKind: "string[]"}},
|
||||
{"string[2]", Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]string{}), Elem: &Type{Kind: reflect.String, T: StringTy, Type: reflect.TypeOf(""), stringKind: "string"}, stringKind: "string[2]"}},
|
||||
{"address", Type{Kind: reflect.Array, Type: addressT, Size: 20, T: AddressTy, stringKind: "address"}},
|
||||
{"address[]", Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([]common.Address{}), Elem: &Type{Kind: reflect.Array, Type: addressT, Size: 20, T: AddressTy, stringKind: "address"}, stringKind: "address[]"}},
|
||||
{"address[2]", Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]common.Address{}), Elem: &Type{Kind: reflect.Array, Type: addressT, Size: 20, T: AddressTy, stringKind: "address"}, stringKind: "address[2]"}},
|
||||
{"bool", nil, Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}},
|
||||
{"bool[]", nil, Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]bool(nil)), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[]"}},
|
||||
{"bool[2]", nil, Type{Size: 2, Kind: reflect.Array, T: ArrayTy, Type: reflect.TypeOf([2]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[2]"}},
|
||||
{"bool[2][]", nil, Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([][2]bool{}), Elem: &Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[2]"}, stringKind: "bool[2][]"}},
|
||||
{"bool[][]", nil, Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([][]bool{}), Elem: &Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[]"}, stringKind: "bool[][]"}},
|
||||
{"bool[][2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2][]bool{}), Elem: &Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[]"}, stringKind: "bool[][2]"}},
|
||||
{"bool[2][2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2][2]bool{}), Elem: &Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[2]"}, stringKind: "bool[2][2]"}},
|
||||
{"bool[2][][2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2][][2]bool{}), Elem: &Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([][2]bool{}), Elem: &Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[2]"}, stringKind: "bool[2][]"}, stringKind: "bool[2][][2]"}},
|
||||
{"bool[2][2][2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2][2][2]bool{}), Elem: &Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2][2]bool{}), Elem: &Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[2]"}, stringKind: "bool[2][2]"}, stringKind: "bool[2][2][2]"}},
|
||||
{"bool[][][]", nil, Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([][][]bool{}), Elem: &Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([][]bool{}), Elem: &Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[]"}, stringKind: "bool[][]"}, stringKind: "bool[][][]"}},
|
||||
{"bool[][2][]", nil, Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([][2][]bool{}), Elem: &Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2][]bool{}), Elem: &Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[]"}, stringKind: "bool[][2]"}, stringKind: "bool[][2][]"}},
|
||||
{"int8", nil, Type{Kind: reflect.Int8, Type: int8T, Size: 8, T: IntTy, stringKind: "int8"}},
|
||||
{"int16", nil, Type{Kind: reflect.Int16, Type: int16T, Size: 16, T: IntTy, stringKind: "int16"}},
|
||||
{"int32", nil, Type{Kind: reflect.Int32, Type: int32T, Size: 32, T: IntTy, stringKind: "int32"}},
|
||||
{"int64", nil, Type{Kind: reflect.Int64, Type: int64T, Size: 64, T: IntTy, stringKind: "int64"}},
|
||||
{"int256", nil, Type{Kind: reflect.Ptr, Type: bigT, Size: 256, T: IntTy, stringKind: "int256"}},
|
||||
{"int8[]", nil, Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]int8{}), Elem: &Type{Kind: reflect.Int8, Type: int8T, Size: 8, T: IntTy, stringKind: "int8"}, stringKind: "int8[]"}},
|
||||
{"int8[2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]int8{}), Elem: &Type{Kind: reflect.Int8, Type: int8T, Size: 8, T: IntTy, stringKind: "int8"}, stringKind: "int8[2]"}},
|
||||
{"int16[]", nil, Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]int16{}), Elem: &Type{Kind: reflect.Int16, Type: int16T, Size: 16, T: IntTy, stringKind: "int16"}, stringKind: "int16[]"}},
|
||||
{"int16[2]", nil, Type{Size: 2, Kind: reflect.Array, T: ArrayTy, Type: reflect.TypeOf([2]int16{}), Elem: &Type{Kind: reflect.Int16, Type: int16T, Size: 16, T: IntTy, stringKind: "int16"}, stringKind: "int16[2]"}},
|
||||
{"int32[]", nil, Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]int32{}), Elem: &Type{Kind: reflect.Int32, Type: int32T, Size: 32, T: IntTy, stringKind: "int32"}, stringKind: "int32[]"}},
|
||||
{"int32[2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]int32{}), Elem: &Type{Kind: reflect.Int32, Type: int32T, Size: 32, T: IntTy, stringKind: "int32"}, stringKind: "int32[2]"}},
|
||||
{"int64[]", nil, Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]int64{}), Elem: &Type{Kind: reflect.Int64, Type: int64T, Size: 64, T: IntTy, stringKind: "int64"}, stringKind: "int64[]"}},
|
||||
{"int64[2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]int64{}), Elem: &Type{Kind: reflect.Int64, Type: int64T, Size: 64, T: IntTy, stringKind: "int64"}, stringKind: "int64[2]"}},
|
||||
{"int256[]", nil, Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]*big.Int{}), Elem: &Type{Kind: reflect.Ptr, Type: bigT, Size: 256, T: IntTy, stringKind: "int256"}, stringKind: "int256[]"}},
|
||||
{"int256[2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]*big.Int{}), Elem: &Type{Kind: reflect.Ptr, Type: bigT, Size: 256, T: IntTy, stringKind: "int256"}, stringKind: "int256[2]"}},
|
||||
{"uint8", nil, Type{Kind: reflect.Uint8, Type: uint8T, Size: 8, T: UintTy, stringKind: "uint8"}},
|
||||
{"uint16", nil, Type{Kind: reflect.Uint16, Type: uint16T, Size: 16, T: UintTy, stringKind: "uint16"}},
|
||||
{"uint32", nil, Type{Kind: reflect.Uint32, Type: uint32T, Size: 32, T: UintTy, stringKind: "uint32"}},
|
||||
{"uint64", nil, Type{Kind: reflect.Uint64, Type: uint64T, Size: 64, T: UintTy, stringKind: "uint64"}},
|
||||
{"uint256", nil, Type{Kind: reflect.Ptr, Type: bigT, Size: 256, T: UintTy, stringKind: "uint256"}},
|
||||
{"uint8[]", nil, Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]uint8{}), Elem: &Type{Kind: reflect.Uint8, Type: uint8T, Size: 8, T: UintTy, stringKind: "uint8"}, stringKind: "uint8[]"}},
|
||||
{"uint8[2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]uint8{}), Elem: &Type{Kind: reflect.Uint8, Type: uint8T, Size: 8, T: UintTy, stringKind: "uint8"}, stringKind: "uint8[2]"}},
|
||||
{"uint16[]", nil, Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([]uint16{}), Elem: &Type{Kind: reflect.Uint16, Type: uint16T, Size: 16, T: UintTy, stringKind: "uint16"}, stringKind: "uint16[]"}},
|
||||
{"uint16[2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]uint16{}), Elem: &Type{Kind: reflect.Uint16, Type: uint16T, Size: 16, T: UintTy, stringKind: "uint16"}, stringKind: "uint16[2]"}},
|
||||
{"uint32[]", nil, Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([]uint32{}), Elem: &Type{Kind: reflect.Uint32, Type: uint32T, Size: 32, T: UintTy, stringKind: "uint32"}, stringKind: "uint32[]"}},
|
||||
{"uint32[2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]uint32{}), Elem: &Type{Kind: reflect.Uint32, Type: uint32T, Size: 32, T: UintTy, stringKind: "uint32"}, stringKind: "uint32[2]"}},
|
||||
{"uint64[]", nil, Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([]uint64{}), Elem: &Type{Kind: reflect.Uint64, Type: uint64T, Size: 64, T: UintTy, stringKind: "uint64"}, stringKind: "uint64[]"}},
|
||||
{"uint64[2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]uint64{}), Elem: &Type{Kind: reflect.Uint64, Type: uint64T, Size: 64, T: UintTy, stringKind: "uint64"}, stringKind: "uint64[2]"}},
|
||||
{"uint256[]", nil, Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([]*big.Int{}), Elem: &Type{Kind: reflect.Ptr, Type: bigT, Size: 256, T: UintTy, stringKind: "uint256"}, stringKind: "uint256[]"}},
|
||||
{"uint256[2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Type: reflect.TypeOf([2]*big.Int{}), Size: 2, Elem: &Type{Kind: reflect.Ptr, Type: bigT, Size: 256, T: UintTy, stringKind: "uint256"}, stringKind: "uint256[2]"}},
|
||||
{"bytes32", nil, Type{Kind: reflect.Array, T: FixedBytesTy, Size: 32, Type: reflect.TypeOf([32]byte{}), stringKind: "bytes32"}},
|
||||
{"bytes[]", nil, Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([][]byte{}), Elem: &Type{Kind: reflect.Slice, Type: reflect.TypeOf([]byte{}), T: BytesTy, stringKind: "bytes"}, stringKind: "bytes[]"}},
|
||||
{"bytes[2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2][]byte{}), Elem: &Type{T: BytesTy, Type: reflect.TypeOf([]byte{}), Kind: reflect.Slice, stringKind: "bytes"}, stringKind: "bytes[2]"}},
|
||||
{"bytes32[]", nil, Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([][32]byte{}), Elem: &Type{Kind: reflect.Array, Type: reflect.TypeOf([32]byte{}), T: FixedBytesTy, Size: 32, stringKind: "bytes32"}, stringKind: "bytes32[]"}},
|
||||
{"bytes32[2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2][32]byte{}), Elem: &Type{Kind: reflect.Array, T: FixedBytesTy, Size: 32, Type: reflect.TypeOf([32]byte{}), stringKind: "bytes32"}, stringKind: "bytes32[2]"}},
|
||||
{"string", nil, Type{Kind: reflect.String, T: StringTy, Type: reflect.TypeOf(""), stringKind: "string"}},
|
||||
{"string[]", nil, Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([]string{}), Elem: &Type{Kind: reflect.String, Type: reflect.TypeOf(""), T: StringTy, stringKind: "string"}, stringKind: "string[]"}},
|
||||
{"string[2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]string{}), Elem: &Type{Kind: reflect.String, T: StringTy, Type: reflect.TypeOf(""), stringKind: "string"}, stringKind: "string[2]"}},
|
||||
{"address", nil, Type{Kind: reflect.Array, Type: addressT, Size: 20, T: AddressTy, stringKind: "address"}},
|
||||
{"address[]", nil, Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([]common.Address{}), Elem: &Type{Kind: reflect.Array, Type: addressT, Size: 20, T: AddressTy, stringKind: "address"}, stringKind: "address[]"}},
|
||||
{"address[2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]common.Address{}), Elem: &Type{Kind: reflect.Array, Type: addressT, Size: 20, T: AddressTy, stringKind: "address"}, stringKind: "address[2]"}},
|
||||
// TODO when fixed types are implemented properly
|
||||
// {"fixed", Type{}},
|
||||
// {"fixed128x128", Type{}},
|
||||
// {"fixed[]", Type{}},
|
||||
// {"fixed[2]", Type{}},
|
||||
// {"fixed128x128[]", Type{}},
|
||||
// {"fixed128x128[2]", Type{}},
|
||||
// {"fixed", nil, Type{}},
|
||||
// {"fixed128x128", nil, Type{}},
|
||||
// {"fixed[]", nil, Type{}},
|
||||
// {"fixed[2]", nil, Type{}},
|
||||
// {"fixed128x128[]", nil, Type{}},
|
||||
// {"fixed128x128[2]", nil, Type{}},
|
||||
{"tuple", []ArgumentMarshaling{{Name: "a", Type: "int64"}}, Type{Kind: reflect.Struct, T: TupleTy, Type: reflect.TypeOf(struct{ A int64 }{}), stringKind: "(int64)",
|
||||
TupleElems: []*Type{{Kind: reflect.Int64, T: IntTy, Type: reflect.TypeOf(int64(0)), Size: 64, stringKind: "int64"}}, TupleRawNames: []string{"a"}}},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
typ, err := NewType(tt.blob)
|
||||
typ, err := NewType(tt.blob, tt.components)
|
||||
if err != nil {
|
||||
t.Errorf("type %q: failed to parse type string: %v", tt.blob, err)
|
||||
}
|
||||
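A hypothetical use of the two-argument NewType exercised by this test (assuming the package version from this compare; later releases changed the signature again), constructing the same "(int64)" tuple type as the table entry above.

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/accounts/abi"
)

func main() {
	// Tuple types are described by their components rather than the type string alone.
	typ, err := abi.NewType("tuple", []abi.ArgumentMarshaling{{Name: "a", Type: "int64"}})
	if err != nil {
		panic(err)
	}
	fmt.Println(typ) // prints the canonical expression: (int64)
}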
@@ -109,154 +112,170 @@ func TestTypeRegexp(t *testing.T) {

func TestTypeCheck(t *testing.T) {
for i, test := range []struct {
typ string
input interface{}
err string
typ string
components []ArgumentMarshaling
input interface{}
err string
}{
{"uint", big.NewInt(1), "unsupported arg type: uint"},
|
||||
{"int", big.NewInt(1), "unsupported arg type: int"},
|
||||
{"uint256", big.NewInt(1), ""},
|
||||
{"uint256[][3][]", [][3][]*big.Int{{{}}}, ""},
|
||||
{"uint256[][][3]", [3][][]*big.Int{{{}}}, ""},
|
||||
{"uint256[3][][]", [][][3]*big.Int{{{}}}, ""},
|
||||
{"uint256[3][3][3]", [3][3][3]*big.Int{{{}}}, ""},
|
||||
{"uint8[][]", [][]uint8{}, ""},
|
||||
{"int256", big.NewInt(1), ""},
|
||||
{"uint8", uint8(1), ""},
|
||||
{"uint16", uint16(1), ""},
|
||||
{"uint32", uint32(1), ""},
|
||||
{"uint64", uint64(1), ""},
|
||||
{"int8", int8(1), ""},
|
||||
{"int16", int16(1), ""},
|
||||
{"int32", int32(1), ""},
|
||||
{"int64", int64(1), ""},
|
||||
{"uint24", big.NewInt(1), ""},
|
||||
{"uint40", big.NewInt(1), ""},
|
||||
{"uint48", big.NewInt(1), ""},
|
||||
{"uint56", big.NewInt(1), ""},
|
||||
{"uint72", big.NewInt(1), ""},
|
||||
{"uint80", big.NewInt(1), ""},
|
||||
{"uint88", big.NewInt(1), ""},
|
||||
{"uint96", big.NewInt(1), ""},
|
||||
{"uint104", big.NewInt(1), ""},
|
||||
{"uint112", big.NewInt(1), ""},
|
||||
{"uint120", big.NewInt(1), ""},
|
||||
{"uint128", big.NewInt(1), ""},
|
||||
{"uint136", big.NewInt(1), ""},
|
||||
{"uint144", big.NewInt(1), ""},
|
||||
{"uint152", big.NewInt(1), ""},
|
||||
{"uint160", big.NewInt(1), ""},
|
||||
{"uint168", big.NewInt(1), ""},
|
||||
{"uint176", big.NewInt(1), ""},
|
||||
{"uint184", big.NewInt(1), ""},
|
||||
{"uint192", big.NewInt(1), ""},
|
||||
{"uint200", big.NewInt(1), ""},
|
||||
{"uint208", big.NewInt(1), ""},
|
||||
{"uint216", big.NewInt(1), ""},
|
||||
{"uint224", big.NewInt(1), ""},
|
||||
{"uint232", big.NewInt(1), ""},
|
||||
{"uint240", big.NewInt(1), ""},
|
||||
{"uint248", big.NewInt(1), ""},
|
||||
{"int24", big.NewInt(1), ""},
|
||||
{"int40", big.NewInt(1), ""},
|
||||
{"int48", big.NewInt(1), ""},
|
||||
{"int56", big.NewInt(1), ""},
|
||||
{"int72", big.NewInt(1), ""},
|
||||
{"int80", big.NewInt(1), ""},
|
||||
{"int88", big.NewInt(1), ""},
|
||||
{"int96", big.NewInt(1), ""},
|
||||
{"int104", big.NewInt(1), ""},
|
||||
{"int112", big.NewInt(1), ""},
|
||||
{"int120", big.NewInt(1), ""},
|
||||
{"int128", big.NewInt(1), ""},
|
||||
{"int136", big.NewInt(1), ""},
|
||||
{"int144", big.NewInt(1), ""},
|
||||
{"int152", big.NewInt(1), ""},
|
||||
{"int160", big.NewInt(1), ""},
|
||||
{"int168", big.NewInt(1), ""},
|
||||
{"int176", big.NewInt(1), ""},
|
||||
{"int184", big.NewInt(1), ""},
|
||||
{"int192", big.NewInt(1), ""},
|
||||
{"int200", big.NewInt(1), ""},
|
||||
{"int208", big.NewInt(1), ""},
|
||||
{"int216", big.NewInt(1), ""},
|
||||
{"int224", big.NewInt(1), ""},
|
||||
{"int232", big.NewInt(1), ""},
|
||||
{"int240", big.NewInt(1), ""},
|
||||
{"int248", big.NewInt(1), ""},
|
||||
{"uint30", uint8(1), "abi: cannot use uint8 as type ptr as argument"},
|
||||
{"uint8", uint16(1), "abi: cannot use uint16 as type uint8 as argument"},
|
||||
{"uint8", uint32(1), "abi: cannot use uint32 as type uint8 as argument"},
|
||||
{"uint8", uint64(1), "abi: cannot use uint64 as type uint8 as argument"},
|
||||
{"uint8", int8(1), "abi: cannot use int8 as type uint8 as argument"},
|
||||
{"uint8", int16(1), "abi: cannot use int16 as type uint8 as argument"},
|
||||
{"uint8", int32(1), "abi: cannot use int32 as type uint8 as argument"},
|
||||
{"uint8", int64(1), "abi: cannot use int64 as type uint8 as argument"},
|
||||
{"uint16", uint16(1), ""},
|
||||
{"uint16", uint8(1), "abi: cannot use uint8 as type uint16 as argument"},
|
||||
{"uint16[]", []uint16{1, 2, 3}, ""},
|
||||
{"uint16[]", [3]uint16{1, 2, 3}, ""},
|
||||
{"uint16[]", []uint32{1, 2, 3}, "abi: cannot use []uint32 as type [0]uint16 as argument"},
|
||||
{"uint16[3]", [3]uint32{1, 2, 3}, "abi: cannot use [3]uint32 as type [3]uint16 as argument"},
|
||||
{"uint16[3]", [4]uint16{1, 2, 3}, "abi: cannot use [4]uint16 as type [3]uint16 as argument"},
|
||||
{"uint16[3]", []uint16{1, 2, 3}, ""},
|
||||
{"uint16[3]", []uint16{1, 2, 3, 4}, "abi: cannot use [4]uint16 as type [3]uint16 as argument"},
|
||||
{"address[]", []common.Address{{1}}, ""},
|
||||
{"address[1]", []common.Address{{1}}, ""},
|
||||
{"address[1]", [1]common.Address{{1}}, ""},
|
||||
{"address[2]", [1]common.Address{{1}}, "abi: cannot use [1]array as type [2]array as argument"},
|
||||
{"bytes32", [32]byte{}, ""},
|
||||
{"bytes31", [31]byte{}, ""},
|
||||
{"bytes30", [30]byte{}, ""},
|
||||
{"bytes29", [29]byte{}, ""},
|
||||
{"bytes28", [28]byte{}, ""},
|
||||
{"bytes27", [27]byte{}, ""},
|
||||
{"bytes26", [26]byte{}, ""},
|
||||
{"bytes25", [25]byte{}, ""},
|
||||
{"bytes24", [24]byte{}, ""},
|
||||
{"bytes23", [23]byte{}, ""},
|
||||
{"bytes22", [22]byte{}, ""},
|
||||
{"bytes21", [21]byte{}, ""},
|
||||
{"bytes20", [20]byte{}, ""},
|
||||
{"bytes19", [19]byte{}, ""},
|
||||
{"bytes18", [18]byte{}, ""},
|
||||
{"bytes17", [17]byte{}, ""},
|
||||
{"bytes16", [16]byte{}, ""},
|
||||
{"bytes15", [15]byte{}, ""},
|
||||
{"bytes14", [14]byte{}, ""},
|
||||
{"bytes13", [13]byte{}, ""},
|
||||
{"bytes12", [12]byte{}, ""},
|
||||
{"bytes11", [11]byte{}, ""},
|
||||
{"bytes10", [10]byte{}, ""},
|
||||
{"bytes9", [9]byte{}, ""},
|
||||
{"bytes8", [8]byte{}, ""},
|
||||
{"bytes7", [7]byte{}, ""},
|
||||
{"bytes6", [6]byte{}, ""},
|
||||
{"bytes5", [5]byte{}, ""},
|
||||
{"bytes4", [4]byte{}, ""},
|
||||
{"bytes3", [3]byte{}, ""},
|
||||
{"bytes2", [2]byte{}, ""},
|
||||
{"bytes1", [1]byte{}, ""},
|
||||
{"bytes32", [33]byte{}, "abi: cannot use [33]uint8 as type [32]uint8 as argument"},
|
||||
{"bytes32", common.Hash{1}, ""},
|
||||
{"bytes31", common.Hash{1}, "abi: cannot use common.Hash as type [31]uint8 as argument"},
|
||||
{"bytes31", [32]byte{}, "abi: cannot use [32]uint8 as type [31]uint8 as argument"},
|
||||
{"bytes", []byte{0, 1}, ""},
|
||||
{"bytes", [2]byte{0, 1}, "abi: cannot use array as type slice as argument"},
|
||||
{"bytes", common.Hash{1}, "abi: cannot use array as type slice as argument"},
|
||||
{"string", "hello world", ""},
|
||||
{"string", string(""), ""},
|
||||
{"string", []byte{}, "abi: cannot use slice as type string as argument"},
|
||||
{"bytes32[]", [][32]byte{{}}, ""},
|
||||
{"function", [24]byte{}, ""},
|
||||
{"bytes20", common.Address{}, ""},
|
||||
{"address", [20]byte{}, ""},
|
||||
{"address", common.Address{}, ""},
|
||||
{"bytes32[]]", "", "invalid arg type in abi"},
|
||||
{"invalidType", "", "unsupported arg type: invalidType"},
|
||||
{"invalidSlice[]", "", "unsupported arg type: invalidSlice"},
|
||||
{"uint", nil, big.NewInt(1), "unsupported arg type: uint"},
|
||||
{"int", nil, big.NewInt(1), "unsupported arg type: int"},
|
||||
{"uint256", nil, big.NewInt(1), ""},
|
||||
{"uint256[][3][]", nil, [][3][]*big.Int{{{}}}, ""},
|
||||
{"uint256[][][3]", nil, [3][][]*big.Int{{{}}}, ""},
|
||||
{"uint256[3][][]", nil, [][][3]*big.Int{{{}}}, ""},
|
||||
{"uint256[3][3][3]", nil, [3][3][3]*big.Int{{{}}}, ""},
|
||||
{"uint8[][]", nil, [][]uint8{}, ""},
|
||||
{"int256", nil, big.NewInt(1), ""},
|
||||
{"uint8", nil, uint8(1), ""},
|
||||
{"uint16", nil, uint16(1), ""},
|
||||
{"uint32", nil, uint32(1), ""},
|
||||
{"uint64", nil, uint64(1), ""},
|
||||
{"int8", nil, int8(1), ""},
|
||||
{"int16", nil, int16(1), ""},
|
||||
{"int32", nil, int32(1), ""},
|
||||
{"int64", nil, int64(1), ""},
|
||||
{"uint24", nil, big.NewInt(1), ""},
|
||||
{"uint40", nil, big.NewInt(1), ""},
|
||||
{"uint48", nil, big.NewInt(1), ""},
|
||||
{"uint56", nil, big.NewInt(1), ""},
|
||||
{"uint72", nil, big.NewInt(1), ""},
|
||||
{"uint80", nil, big.NewInt(1), ""},
|
||||
{"uint88", nil, big.NewInt(1), ""},
|
||||
{"uint96", nil, big.NewInt(1), ""},
|
||||
{"uint104", nil, big.NewInt(1), ""},
|
||||
{"uint112", nil, big.NewInt(1), ""},
|
||||
{"uint120", nil, big.NewInt(1), ""},
|
||||
{"uint128", nil, big.NewInt(1), ""},
|
||||
{"uint136", nil, big.NewInt(1), ""},
|
||||
{"uint144", nil, big.NewInt(1), ""},
|
||||
{"uint152", nil, big.NewInt(1), ""},
|
||||
{"uint160", nil, big.NewInt(1), ""},
|
||||
{"uint168", nil, big.NewInt(1), ""},
|
||||
{"uint176", nil, big.NewInt(1), ""},
|
||||
{"uint184", nil, big.NewInt(1), ""},
|
||||
{"uint192", nil, big.NewInt(1), ""},
|
||||
{"uint200", nil, big.NewInt(1), ""},
|
||||
{"uint208", nil, big.NewInt(1), ""},
|
||||
{"uint216", nil, big.NewInt(1), ""},
|
||||
{"uint224", nil, big.NewInt(1), ""},
|
||||
{"uint232", nil, big.NewInt(1), ""},
|
||||
{"uint240", nil, big.NewInt(1), ""},
|
||||
{"uint248", nil, big.NewInt(1), ""},
|
||||
{"int24", nil, big.NewInt(1), ""},
|
||||
{"int40", nil, big.NewInt(1), ""},
|
||||
{"int48", nil, big.NewInt(1), ""},
|
||||
{"int56", nil, big.NewInt(1), ""},
|
||||
{"int72", nil, big.NewInt(1), ""},
|
||||
{"int80", nil, big.NewInt(1), ""},
|
||||
{"int88", nil, big.NewInt(1), ""},
|
||||
{"int96", nil, big.NewInt(1), ""},
|
||||
{"int104", nil, big.NewInt(1), ""},
|
||||
{"int112", nil, big.NewInt(1), ""},
|
||||
{"int120", nil, big.NewInt(1), ""},
|
||||
{"int128", nil, big.NewInt(1), ""},
|
||||
{"int136", nil, big.NewInt(1), ""},
|
||||
{"int144", nil, big.NewInt(1), ""},
|
||||
{"int152", nil, big.NewInt(1), ""},
|
||||
{"int160", nil, big.NewInt(1), ""},
|
||||
{"int168", nil, big.NewInt(1), ""},
|
||||
{"int176", nil, big.NewInt(1), ""},
|
||||
{"int184", nil, big.NewInt(1), ""},
|
||||
{"int192", nil, big.NewInt(1), ""},
|
||||
{"int200", nil, big.NewInt(1), ""},
|
||||
{"int208", nil, big.NewInt(1), ""},
|
||||
{"int216", nil, big.NewInt(1), ""},
|
||||
{"int224", nil, big.NewInt(1), ""},
|
||||
{"int232", nil, big.NewInt(1), ""},
|
||||
{"int240", nil, big.NewInt(1), ""},
|
||||
{"int248", nil, big.NewInt(1), ""},
|
||||
{"uint30", nil, uint8(1), "abi: cannot use uint8 as type ptr as argument"},
|
||||
{"uint8", nil, uint16(1), "abi: cannot use uint16 as type uint8 as argument"},
|
||||
{"uint8", nil, uint32(1), "abi: cannot use uint32 as type uint8 as argument"},
|
||||
{"uint8", nil, uint64(1), "abi: cannot use uint64 as type uint8 as argument"},
|
||||
{"uint8", nil, int8(1), "abi: cannot use int8 as type uint8 as argument"},
|
||||
{"uint8", nil, int16(1), "abi: cannot use int16 as type uint8 as argument"},
|
||||
{"uint8", nil, int32(1), "abi: cannot use int32 as type uint8 as argument"},
|
||||
{"uint8", nil, int64(1), "abi: cannot use int64 as type uint8 as argument"},
|
||||
{"uint16", nil, uint16(1), ""},
|
||||
{"uint16", nil, uint8(1), "abi: cannot use uint8 as type uint16 as argument"},
|
||||
{"uint16[]", nil, []uint16{1, 2, 3}, ""},
|
||||
{"uint16[]", nil, [3]uint16{1, 2, 3}, ""},
|
||||
{"uint16[]", nil, []uint32{1, 2, 3}, "abi: cannot use []uint32 as type [0]uint16 as argument"},
|
||||
{"uint16[3]", nil, [3]uint32{1, 2, 3}, "abi: cannot use [3]uint32 as type [3]uint16 as argument"},
|
||||
{"uint16[3]", nil, [4]uint16{1, 2, 3}, "abi: cannot use [4]uint16 as type [3]uint16 as argument"},
|
||||
{"uint16[3]", nil, []uint16{1, 2, 3}, ""},
|
||||
{"uint16[3]", nil, []uint16{1, 2, 3, 4}, "abi: cannot use [4]uint16 as type [3]uint16 as argument"},
|
||||
{"address[]", nil, []common.Address{{1}}, ""},
|
||||
{"address[1]", nil, []common.Address{{1}}, ""},
|
||||
{"address[1]", nil, [1]common.Address{{1}}, ""},
|
||||
{"address[2]", nil, [1]common.Address{{1}}, "abi: cannot use [1]array as type [2]array as argument"},
|
||||
{"bytes32", nil, [32]byte{}, ""},
|
||||
{"bytes31", nil, [31]byte{}, ""},
|
||||
{"bytes30", nil, [30]byte{}, ""},
|
||||
{"bytes29", nil, [29]byte{}, ""},
|
||||
{"bytes28", nil, [28]byte{}, ""},
|
||||
{"bytes27", nil, [27]byte{}, ""},
|
||||
{"bytes26", nil, [26]byte{}, ""},
|
||||
{"bytes25", nil, [25]byte{}, ""},
|
||||
{"bytes24", nil, [24]byte{}, ""},
|
||||
{"bytes23", nil, [23]byte{}, ""},
|
||||
{"bytes22", nil, [22]byte{}, ""},
|
||||
{"bytes21", nil, [21]byte{}, ""},
|
||||
{"bytes20", nil, [20]byte{}, ""},
|
||||
{"bytes19", nil, [19]byte{}, ""},
|
||||
{"bytes18", nil, [18]byte{}, ""},
|
||||
{"bytes17", nil, [17]byte{}, ""},
|
||||
{"bytes16", nil, [16]byte{}, ""},
|
||||
{"bytes15", nil, [15]byte{}, ""},
|
||||
{"bytes14", nil, [14]byte{}, ""},
|
||||
{"bytes13", nil, [13]byte{}, ""},
|
||||
{"bytes12", nil, [12]byte{}, ""},
|
||||
{"bytes11", nil, [11]byte{}, ""},
|
||||
{"bytes10", nil, [10]byte{}, ""},
|
||||
{"bytes9", nil, [9]byte{}, ""},
|
||||
{"bytes8", nil, [8]byte{}, ""},
|
||||
{"bytes7", nil, [7]byte{}, ""},
|
||||
{"bytes6", nil, [6]byte{}, ""},
|
||||
{"bytes5", nil, [5]byte{}, ""},
|
||||
{"bytes4", nil, [4]byte{}, ""},
|
||||
{"bytes3", nil, [3]byte{}, ""},
|
||||
{"bytes2", nil, [2]byte{}, ""},
|
||||
{"bytes1", nil, [1]byte{}, ""},
|
||||
{"bytes32", nil, [33]byte{}, "abi: cannot use [33]uint8 as type [32]uint8 as argument"},
|
||||
{"bytes32", nil, common.Hash{1}, ""},
|
||||
{"bytes31", nil, common.Hash{1}, "abi: cannot use common.Hash as type [31]uint8 as argument"},
|
||||
{"bytes31", nil, [32]byte{}, "abi: cannot use [32]uint8 as type [31]uint8 as argument"},
|
||||
{"bytes", nil, []byte{0, 1}, ""},
|
||||
{"bytes", nil, [2]byte{0, 1}, "abi: cannot use array as type slice as argument"},
|
||||
{"bytes", nil, common.Hash{1}, "abi: cannot use array as type slice as argument"},
|
||||
{"string", nil, "hello world", ""},
|
||||
{"string", nil, string(""), ""},
|
||||
{"string", nil, []byte{}, "abi: cannot use slice as type string as argument"},
|
||||
{"bytes32[]", nil, [][32]byte{{}}, ""},
|
||||
{"function", nil, [24]byte{}, ""},
|
||||
{"bytes20", nil, common.Address{}, ""},
|
||||
{"address", nil, [20]byte{}, ""},
|
||||
{"address", nil, common.Address{}, ""},
|
||||
{"bytes32[]]", nil, "", "invalid arg type in abi"},
|
||||
{"invalidType", nil, "", "unsupported arg type: invalidType"},
|
||||
{"invalidSlice[]", nil, "", "unsupported arg type: invalidSlice"},
|
||||
// simple tuple
|
||||
{"tuple", []ArgumentMarshaling{{Name: "a", Type: "uint256"}, {Name: "b", Type: "uint256"}}, struct {
|
||||
A *big.Int
|
||||
B *big.Int
|
||||
}{}, ""},
|
||||
// tuple slice
|
||||
{"tuple[]", []ArgumentMarshaling{{Name: "a", Type: "uint256"}, {Name: "b", Type: "uint256"}}, []struct {
|
||||
A *big.Int
|
||||
B *big.Int
|
||||
}{}, ""},
|
||||
// tuple array
|
||||
{"tuple[2]", []ArgumentMarshaling{{Name: "a", Type: "uint256"}, {Name: "b", Type: "uint256"}}, []struct {
|
||||
A *big.Int
|
||||
B *big.Int
|
||||
}{{big.NewInt(0), big.NewInt(0)}, {big.NewInt(0), big.NewInt(0)}}, ""},
|
||||
} {
|
||||
typ, err := NewType(test.typ)
|
||||
typ, err := NewType(test.typ, test.components)
|
||||
if err != nil && len(test.err) == 0 {
|
||||
t.Fatal("unexpected parse error:", err)
|
||||
} else if err != nil && len(test.err) != 0 {
|
||||
|
@@ -25,8 +25,17 @@ import (
"github.com/ethereum/go-ethereum/common"
)

var (
maxUint256 = big.NewInt(0).Add(
big.NewInt(0).Exp(big.NewInt(2), big.NewInt(256), nil),
big.NewInt(-1))
maxInt256 = big.NewInt(0).Add(
big.NewInt(0).Exp(big.NewInt(2), big.NewInt(255), nil),
big.NewInt(-1))
)

// reads the integer based on its kind
func readInteger(kind reflect.Kind, b []byte) interface{} {
func readInteger(typ byte, kind reflect.Kind, b []byte) interface{} {
switch kind {
case reflect.Uint8:
return b[len(b)-1]
@@ -45,7 +54,20 @@ func readInteger(kind reflect.Kind, b []byte) interface{} {
case reflect.Int64:
return int64(binary.BigEndian.Uint64(b[len(b)-8:]))
default:
return new(big.Int).SetBytes(b)
// the only case left for integers is int256/uint256.
// big.Int.SetBytes can't tell on its own whether a number is negative or positive.
// On the EVM, if the returned number is greater than the max int256, it is negative.
ret := new(big.Int).SetBytes(b)
if typ == UintTy {
return ret
}

if ret.Cmp(maxInt256) > 0 {
ret.Add(maxUint256, big.NewInt(0).Neg(ret))
ret.Add(ret, big.NewInt(1))
ret.Neg(ret)
}
return ret
}
}

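The sign handling above can be verified with a standalone check (illustration only): the all-ones 32-byte word decodes to 2^256-1 as a uint256 but folds to -1 as an int256, using the same arithmetic as readInteger.

package main

import (
	"bytes"
	"fmt"
	"math/big"
)

func main() {
	word := bytes.Repeat([]byte{0xff}, 32)

	maxUint256 := new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 256), big.NewInt(1))
	maxInt256 := new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 255), big.NewInt(1))

	ret := new(big.Int).SetBytes(word)
	fmt.Println("as uint256:", ret) // 2^256 - 1

	// Same two's-complement fold as readInteger: ret = -(maxUint256 - ret + 1)
	if ret.Cmp(maxInt256) > 0 {
		ret.Add(maxUint256, new(big.Int).Neg(ret))
		ret.Add(ret, big.NewInt(1))
		ret.Neg(ret)
	}
	fmt.Println("as int256:", ret) // -1
}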
@@ -93,17 +115,6 @@ func readFixedBytes(t Type, word []byte) (interface{}, error) {

}

func getFullElemSize(elem *Type) int {
//all other should be counted as 32 (slices have pointers to respective elements)
size := 32
//arrays wrap it, each element being the same size
for elem.T == ArrayTy {
size *= elem.Size
elem = elem.Elem
}
return size
}

// iteratively unpack elements
func forEachUnpack(t Type, output []byte, start, size int) (interface{}, error) {
if size < 0 {
@@ -128,13 +139,9 @@ func forEachUnpack(t Type, output []byte, start, size int) (interface{}, error)

// Arrays have packed elements, resulting in longer unpack steps.
// Slices have just 32 bytes per element (pointing to the contents).
elemSize := 32
if t.T == ArrayTy {
elemSize = getFullElemSize(t.Elem)
}
elemSize := getTypeSize(*t.Elem)

for i, j := start, 0; j < size; i, j = i+elemSize, j+1 {

inter, err := toGoType(i, *t.Elem, output)
if err != nil {
return nil, err
@@ -148,6 +155,36 @@ func forEachUnpack(t Type, output []byte, start, size int) (interface{}, error)
return refSlice.Interface(), nil
}

func forTupleUnpack(t Type, output []byte) (interface{}, error) {
retval := reflect.New(t.Type).Elem()
virtualArgs := 0
for index, elem := range t.TupleElems {
marshalledValue, err := toGoType((index+virtualArgs)*32, *elem, output)
if elem.T == ArrayTy && !isDynamicType(*elem) {
// If we have a static array, like [3]uint256, it is encoded
// just like uint256,uint256,uint256.
// This means that we need to add two 'virtual' arguments when
// we count the index from now on.
//
// Array values nested multiple levels deep are also encoded inline:
// [2][3]uint256: uint256,uint256,uint256,uint256,uint256,uint256
//
// Calculate the full array size to get the correct offset for the next argument.
// Decrement it by 1, as the normal index increment is still applied.
virtualArgs += getTypeSize(*elem)/32 - 1
} else if elem.T == TupleTy && !isDynamicType(*elem) {
// If we have a static tuple, like (uint256, bool, uint256), it is encoded
// just like uint256,bool,uint256.
virtualArgs += getTypeSize(*elem)/32 - 1
}
if err != nil {
return nil, err
}
retval.Field(index).Set(reflect.ValueOf(marshalledValue))
}
return retval.Interface(), nil
}

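The offset arithmetic behind virtualArgs can be checked with plain numbers (standalone sketch; the tuple layout is made up for the example): in (uint256 a, uint256[3] b, uint256 c) the static array is inlined, so c starts at word index 4 rather than 2.

package main

import "fmt"

func main() {
	wordSize := 32
	virtualArgs := 0

	// field 0: uint256 a -> head word 0
	fmt.Println("a at byte offset", (0+virtualArgs)*wordSize) // 0

	// field 1: uint256[3] b -> occupies 3 words inline
	fmt.Println("b at byte offset", (1+virtualArgs)*wordSize) // 32
	virtualArgs += 3 - 1                                      // getTypeSize([3]uint256)/32 - 1

	// field 2: uint256 c -> pushed back by the inlined array
	fmt.Println("c at byte offset", (2+virtualArgs)*wordSize) // 128
}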
// toGoType parses the output bytes and recursively assigns the value of these bytes
// into a Go type in accordance with the ABI spec.
func toGoType(index int, t Type, output []byte) (interface{}, error) {
@@ -156,14 +193,14 @@ func toGoType(index int, t Type, output []byte) (interface{}, error) {
}

var (
returnOutput []byte
begin, end int
err error
returnOutput []byte
begin, length int
err error
)

// if we require a length prefix, find the beginning word and size returned.
if t.requiresLengthPrefix() {
begin, end, err = lengthPrefixPointsTo(index, output)
begin, length, err = lengthPrefixPointsTo(index, output)
if err != nil {
return nil, err
}
@@ -172,14 +209,28 @@ func toGoType(index int, t Type, output []byte) (interface{}, error) {
}

switch t.T {
case TupleTy:
if isDynamicType(t) {
begin, err := tuplePointsTo(index, output)
if err != nil {
return nil, err
}
return forTupleUnpack(t, output[begin:])
} else {
return forTupleUnpack(t, output[index:])
}
case SliceTy:
return forEachUnpack(t, output, begin, end)
return forEachUnpack(t, output[begin:], 0, length)
case ArrayTy:
return forEachUnpack(t, output, index, t.Size)
if isDynamicType(*t.Elem) {
offset := int64(binary.BigEndian.Uint64(returnOutput[len(returnOutput)-8:]))
return forEachUnpack(t, output[offset:], 0, t.Size)
}
return forEachUnpack(t, output[index:], 0, t.Size)
case StringTy: // variable arrays are written at the end of the return bytes
return string(output[begin : begin+end]), nil
return string(output[begin : begin+length]), nil
case IntTy, UintTy:
return readInteger(t.Kind, returnOutput), nil
return readInteger(t.T, t.Kind, returnOutput), nil
case BoolTy:
return readBool(returnOutput)
case AddressTy:
@@ -187,7 +238,7 @@ func toGoType(index int, t Type, output []byte) (interface{}, error) {
case HashTy:
return common.BytesToHash(returnOutput), nil
case BytesTy:
return output[begin : begin+end], nil
return output[begin : begin+length], nil
case FixedBytesTy:
return readFixedBytes(t, returnOutput)
case FunctionTy:
@@ -228,3 +279,17 @@ func lengthPrefixPointsTo(index int, output []byte) (start int, length int, err
length = int(lengthBig.Uint64())
return
}

// tuplePointsTo resolves the location reference for a dynamic tuple.
func tuplePointsTo(index int, output []byte) (start int, err error) {
offset := big.NewInt(0).SetBytes(output[index : index+32])
outputLen := big.NewInt(int64(len(output)))

if offset.Cmp(big.NewInt(int64(len(output)))) > 0 {
return 0, fmt.Errorf("abi: cannot marshal in to go slice: offset %v would go over slice boundary (len=%v)", offset, outputLen)
}
if offset.BitLen() > 63 {
return 0, fmt.Errorf("abi offset larger than int64: %v", offset)
}
return int(offset.Uint64()), nil
}
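A standalone illustration of the offset resolution above (values made up for the example): for a dynamic tuple the head word is a byte offset into the output, and unpacking continues from there.

package main

import (
	"fmt"
	"math/big"
)

func main() {
	// word 0: offset 0x20 -> the tuple body starts right after the head.
	output := make([]byte, 64)
	output[31] = 0x20
	// word 1: first field of the tuple, uint256(7).
	output[63] = 0x07

	start := int(new(big.Int).SetBytes(output[0:32]).Uint64()) // what tuplePointsTo computes
	fmt.Println("tuple body starts at byte", start)            // 32
	fmt.Println("first field:", new(big.Int).SetBytes(output[start:start+32])) // 7
}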
|
@@ -117,6 +117,11 @@ var unpackTests = []unpackTest{
|
||||
enc: "0000000000000000000000000000000000000000000000000000000000000001",
|
||||
want: big.NewInt(1),
|
||||
},
|
||||
{
|
||||
def: `[{"type": "int256"}]`,
|
||||
enc: "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
|
||||
want: big.NewInt(-1),
|
||||
},
|
||||
{
|
||||
def: `[{"type": "address"}]`,
|
||||
enc: "0000000000000000000000000100000000000000000000000000000000000000",
|
||||
@@ -168,9 +173,14 @@ var unpackTests = []unpackTest{
|
||||
// multi dimensional, if these pass, all types that don't require length prefix should pass
|
||||
{
|
||||
def: `[{"type": "uint8[][]"}]`,
|
||||
enc: "00000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000E0000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
|
||||
enc: "00000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
|
||||
want: [][]uint8{{1, 2}, {1, 2}},
|
||||
},
|
||||
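The corrected expected encoding above follows from how offsets are scoped (illustration only): inside uint8[][] each inner array's offset is measured from the start of the outer array's element area, just after its length word, which gives 0x40 and 0xa0 rather than 0x80 and 0xe0.

package main

import "fmt"

func main() {
	headSlots := 2              // one offset word per inner array
	inner0 := headSlots * 32    // first inner array right after the head: 0x40
	inner1 := inner0 + (1+2)*32 // skip its length word plus two element words: 0xa0
	fmt.Printf("inner offsets: %#x, %#x\n", inner0, inner1)
}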
{
|
||||
def: `[{"type": "uint8[][]"}]`,
|
||||
enc: "00000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000003000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000003",
|
||||
want: [][]uint8{{1, 2}, {1, 2, 3}},
|
||||
},
|
||||
{
|
||||
def: `[{"type": "uint8[2][2]"}]`,
|
||||
enc: "0000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
|
||||
@@ -178,7 +188,7 @@ var unpackTests = []unpackTest{
|
||||
},
|
||||
{
|
||||
def: `[{"type": "uint8[][2]"}]`,
|
||||
enc: "000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000001",
|
||||
enc: "0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000001",
|
||||
want: [2][]uint8{{1}, {1}},
|
||||
},
|
||||
{
|
||||
@@ -186,6 +196,11 @@ var unpackTests = []unpackTest{
|
||||
enc: "0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
|
||||
want: [][2]uint8{{1, 2}},
|
||||
},
|
||||
{
|
||||
def: `[{"type": "uint8[2][]"}]`,
|
||||
enc: "000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
|
||||
want: [][2]uint8{{1, 2}, {1, 2}},
|
||||
},
|
||||
{
|
||||
def: `[{"type": "uint16[]"}]`,
|
||||
enc: "0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
|
||||
@@ -231,6 +246,26 @@ var unpackTests = []unpackTest{
|
||||
enc: "000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000003",
|
||||
want: [3]*big.Int{big.NewInt(1), big.NewInt(2), big.NewInt(3)},
|
||||
},
|
||||
{
|
||||
def: `[{"type": "string[4]"}]`,
|
||||
enc: "0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000c000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000140000000000000000000000000000000000000000000000000000000000000000548656c6c6f0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000005576f726c64000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000b476f2d657468657265756d0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008457468657265756d000000000000000000000000000000000000000000000000",
|
||||
want: [4]string{"Hello", "World", "Go-ethereum", "Ethereum"},
|
||||
},
|
||||
{
|
||||
def: `[{"type": "string[]"}]`,
|
||||
enc: "00000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000008457468657265756d000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000b676f2d657468657265756d000000000000000000000000000000000000000000",
|
||||
want: []string{"Ethereum", "go-ethereum"},
|
||||
},
|
||||
{
|
||||
def: `[{"type": "bytes[]"}]`,
|
||||
enc: "00000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000003f0f0f000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003f0f0f00000000000000000000000000000000000000000000000000000000000",
|
||||
want: [][]byte{{0xf0, 0xf0, 0xf0}, {0xf0, 0xf0, 0xf0}},
|
||||
},
|
||||
{
|
||||
def: `[{"type": "uint256[2][][]"}]`,
|
||||
enc: "00000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000e00000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000c8000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000003e80000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000c8000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000003e8",
|
||||
want: [][][2]*big.Int{{{big.NewInt(1), big.NewInt(200)}, {big.NewInt(1), big.NewInt(1000)}}, {{big.NewInt(1), big.NewInt(200)}, {big.NewInt(1), big.NewInt(1000)}}},
|
||||
},
|
||||
{
|
||||
def: `[{"type": "int8[]"}]`,
|
||||
enc: "0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
|
||||
@@ -290,6 +325,53 @@ var unpackTests = []unpackTest{
|
||||
Int2 *big.Int
|
||||
}{big.NewInt(1), big.NewInt(2)},
|
||||
},
|
||||
{
|
||||
def: `[{"name":"int_one","type":"int256"}]`,
|
||||
enc: "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
|
||||
want: struct {
|
||||
IntOne *big.Int
|
||||
}{big.NewInt(1)},
|
||||
},
|
||||
{
|
||||
def: `[{"name":"int__one","type":"int256"}]`,
|
||||
enc: "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
|
||||
want: struct {
|
||||
IntOne *big.Int
|
||||
}{big.NewInt(1)},
|
||||
},
|
||||
{
|
||||
def: `[{"name":"int_one_","type":"int256"}]`,
|
||||
enc: "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
|
||||
want: struct {
|
||||
IntOne *big.Int
|
||||
}{big.NewInt(1)},
|
||||
},
|
||||
{
|
||||
def: `[{"name":"int_one","type":"int256"}, {"name":"intone","type":"int256"}]`,
|
||||
enc: "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
|
||||
want: struct {
|
||||
IntOne *big.Int
|
||||
Intone *big.Int
|
||||
}{big.NewInt(1), big.NewInt(2)},
|
||||
},
|
||||
{
|
||||
def: `[{"name":"___","type":"int256"}]`,
|
||||
enc: "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
|
||||
want: struct {
|
||||
IntOne *big.Int
|
||||
Intone *big.Int
|
||||
}{},
|
||||
err: "abi: purely underscored output cannot unpack to struct",
|
||||
},
|
||||
{
|
||||
def: `[{"name":"int_one","type":"int256"},{"name":"IntOne","type":"int256"}]`,
|
||||
enc: "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
|
||||
want: struct {
|
||||
Int1 *big.Int
|
||||
Int2 *big.Int
|
||||
}{},
|
||||
err: "abi: multiple outputs mapping to the same struct field 'IntOne'",
|
||||
},
|
||||
{
|
||||
def: `[{"name":"int","type":"int256"},{"name":"Int","type":"int256"}]`,
|
||||
enc: "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
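The cases above pin down how ABI output names are mapped onto Go struct fields: `int_one` becomes `IntOne`, purely underscored names are rejected, and two names mapping to the same field are an error. As a rough, self-contained sketch of the same mapping outside the test harness (the single method name `get` is hypothetical, chosen only for illustration; the `abi.JSON` / `abi.Unpack` calls are the ones these tests already use):

```golang
package main

import (
	"fmt"
	"math/big"
	"strings"

	"github.com/ethereum/go-ethereum/accounts/abi"
	"github.com/ethereum/go-ethereum/common"
)

func main() {
	// Hypothetical method "get"; the output name "int_one" is what the field
	// mapper turns into the exported struct field IntOne.
	def := `[{"name":"get","constant":true,"outputs":[{"name":"int_one","type":"int256"}]}]`
	parsed, err := abi.JSON(strings.NewReader(def))
	if err != nil {
		panic(err)
	}
	out := struct {
		IntOne *big.Int
	}{}
	enc := common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001")
	if err := parsed.Unpack(&out, "get", enc); err != nil {
		panic(err)
	}
	fmt.Println(out.IntOne) // 1
}
```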
@@ -354,6 +436,55 @@ func TestUnpack(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestUnpackSetDynamicArrayOutput(t *testing.T) {
|
||||
abi, err := JSON(strings.NewReader(`[{"constant":true,"inputs":[],"name":"testDynamicFixedBytes15","outputs":[{"name":"","type":"bytes15[]"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"testDynamicFixedBytes32","outputs":[{"name":"","type":"bytes32[]"}],"payable":false,"stateMutability":"view","type":"function"}]`))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
var (
|
||||
marshalledReturn32 = common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000230783132333435363738393000000000000000000000000000000000000000003078303938373635343332310000000000000000000000000000000000000000")
|
||||
marshalledReturn15 = common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000230783031323334350000000000000000000000000000000000000000000000003078393837363534000000000000000000000000000000000000000000000000")
|
||||
|
||||
out32 [][32]byte
|
||||
out15 [][15]byte
|
||||
)
|
||||
|
||||
// test 32
|
||||
err = abi.Unpack(&out32, "testDynamicFixedBytes32", marshalledReturn32)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if len(out32) != 2 {
|
||||
t.Fatalf("expected array with 2 values, got %d", len(out32))
|
||||
}
|
||||
expected := common.Hex2Bytes("3078313233343536373839300000000000000000000000000000000000000000")
|
||||
if !bytes.Equal(out32[0][:], expected) {
|
||||
t.Errorf("expected %x, got %x\n", expected, out32[0])
|
||||
}
|
||||
expected = common.Hex2Bytes("3078303938373635343332310000000000000000000000000000000000000000")
|
||||
if !bytes.Equal(out32[1][:], expected) {
|
||||
t.Errorf("expected %x, got %x\n", expected, out32[1])
|
||||
}
|
||||
|
||||
// test 15
|
||||
err = abi.Unpack(&out15, "testDynamicFixedBytes15", marshalledReturn15)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if len(out15) != 2 {
|
||||
t.Fatalf("expected array with 2 values, got %d", len(out15))
|
||||
}
|
||||
expected = common.Hex2Bytes("307830313233343500000000000000")
|
||||
if !bytes.Equal(out15[0][:], expected) {
|
||||
t.Errorf("expected %x, got %x\n", expected, out15[0])
|
||||
}
|
||||
expected = common.Hex2Bytes("307839383736353400000000000000")
|
||||
if !bytes.Equal(out15[1][:], expected) {
|
||||
t.Errorf("expected %x, got %x\n", expected, out15[1])
|
||||
}
|
||||
}
|
||||
|
||||
type methodMultiOutput struct {
|
||||
Int *big.Int
|
||||
String string
|
||||
@@ -457,6 +588,68 @@ func TestMultiReturnWithArray(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestMultiReturnWithStringArray(t *testing.T) {
|
||||
const definition = `[{"name" : "multi", "outputs": [{"name": "","type": "uint256[3]"},{"name": "","type": "address"},{"name": "","type": "string[2]"},{"name": "","type": "bool"}]}]`
|
||||
abi, err := JSON(strings.NewReader(definition))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
buff := new(bytes.Buffer)
|
||||
buff.Write(common.Hex2Bytes("000000000000000000000000000000000000000000000000000000005c1b78ea0000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000001a055690d9db80000000000000000000000000000ab1257528b3782fb40d7ed5f72e624b744dffb2f00000000000000000000000000000000000000000000000000000000000000c00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000008457468657265756d000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001048656c6c6f2c20457468657265756d2100000000000000000000000000000000"))
|
||||
temp, _ := big.NewInt(0).SetString("30000000000000000000", 10)
|
||||
ret1, ret1Exp := new([3]*big.Int), [3]*big.Int{big.NewInt(1545304298), big.NewInt(6), temp}
|
||||
ret2, ret2Exp := new(common.Address), common.HexToAddress("ab1257528b3782fb40d7ed5f72e624b744dffb2f")
|
||||
ret3, ret3Exp := new([2]string), [2]string{"Ethereum", "Hello, Ethereum!"}
|
||||
ret4, ret4Exp := new(bool), false
|
||||
if err := abi.Unpack(&[]interface{}{ret1, ret2, ret3, ret4}, "multi", buff.Bytes()); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !reflect.DeepEqual(*ret1, ret1Exp) {
|
||||
t.Error("big.Int array result", *ret1, "!= Expected", ret1Exp)
|
||||
}
|
||||
if !reflect.DeepEqual(*ret2, ret2Exp) {
|
||||
t.Error("address result", *ret2, "!= Expected", ret2Exp)
|
||||
}
|
||||
if !reflect.DeepEqual(*ret3, ret3Exp) {
|
||||
t.Error("string array result", *ret3, "!= Expected", ret3Exp)
|
||||
}
|
||||
if !reflect.DeepEqual(*ret4, ret4Exp) {
|
||||
t.Error("bool result", *ret4, "!= Expected", ret4Exp)
|
||||
}
|
||||
}
|
||||
|
||||
func TestMultiReturnWithStringSlice(t *testing.T) {
|
||||
const definition = `[{"name" : "multi", "outputs": [{"name": "","type": "string[]"},{"name": "","type": "uint256[]"}]}]`
|
||||
abi, err := JSON(strings.NewReader(definition))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
buff := new(bytes.Buffer)
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000040")) // output[0] offset
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000120")) // output[1] offset
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002")) // output[0] length
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000040")) // output[0][0] offset
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000080")) // output[0][1] offset
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000008")) // output[0][0] length
|
||||
buff.Write(common.Hex2Bytes("657468657265756d000000000000000000000000000000000000000000000000")) // output[0][0] value
|
||||
buff.Write(common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000b")) // output[0][1] length
|
||||
buff.Write(common.Hex2Bytes("676f2d657468657265756d000000000000000000000000000000000000000000")) // output[0][1] value
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002")) // output[1] length
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000064")) // output[1][0] value
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000065")) // output[1][1] value
|
||||
ret1, ret1Exp := new([]string), []string{"ethereum", "go-ethereum"}
|
||||
ret2, ret2Exp := new([]*big.Int), []*big.Int{big.NewInt(100), big.NewInt(101)}
|
||||
if err := abi.Unpack(&[]interface{}{ret1, ret2}, "multi", buff.Bytes()); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !reflect.DeepEqual(*ret1, ret1Exp) {
|
||||
t.Error("string slice result", *ret1, "!= Expected", ret1Exp)
|
||||
}
|
||||
if !reflect.DeepEqual(*ret2, ret2Exp) {
|
||||
t.Error("uint256 slice result", *ret2, "!= Expected", ret2Exp)
|
||||
}
|
||||
}
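The hand-written offsets in the buffer above follow the usual head/tail layout: the head holds one 32-byte offset word per output, so output[0]'s data begins at 0x40, and output[0]'s tail occupies seven words (a length word, two element offsets, then a length word plus one padded data word for each of the two strings), which is where the 0x120 offset for output[1] comes from. A tiny stand-alone check of that arithmetic (illustration only, no go-ethereum APIs involved):

```golang
package main

import "fmt"

func main() {
	const word = 32
	headWords := 2               // one offset word per output
	out0 := headWords * word     // 0x40
	out0Tail := 1 + 2 + 2*(1+1)  // length + 2 offsets + (length+data) per string
	out1 := out0 + out0Tail*word // 0x120
	fmt.Printf("%#x %#x\n", out0, out1)
}
```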
func TestMultiReturnWithDeeplyNestedArray(t *testing.T) {
|
||||
// Similar to TestMultiReturnWithArray, but with a special case in mind:
|
||||
// values of nested static arrays count towards the size as well, and any element following
|
||||
@@ -746,6 +939,108 @@ func TestUnmarshal(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestUnpackTuple(t *testing.T) {
|
||||
const simpleTuple = `[{"name":"tuple","constant":false,"outputs":[{"type":"tuple","name":"ret","components":[{"type":"int256","name":"a"},{"type":"int256","name":"b"}]}]}]`
|
||||
abi, err := JSON(strings.NewReader(simpleTuple))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
buff := new(bytes.Buffer)
|
||||
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001")) // ret[a] = 1
|
||||
buff.Write(common.Hex2Bytes("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")) // ret[b] = -1
|
||||
|
||||
v := struct {
|
||||
Ret struct {
|
||||
A *big.Int
|
||||
B *big.Int
|
||||
}
|
||||
}{Ret: struct {
|
||||
A *big.Int
|
||||
B *big.Int
|
||||
}{new(big.Int), new(big.Int)}}
|
||||
|
||||
err = abi.Unpack(&v, "tuple", buff.Bytes())
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
} else {
|
||||
if v.Ret.A.Cmp(big.NewInt(1)) != 0 {
|
||||
t.Errorf("unexpected value unpacked: want %x, got %x", 1, v.Ret.A)
|
||||
}
|
||||
if v.Ret.B.Cmp(big.NewInt(-1)) != 0 {
|
||||
t.Errorf("unexpected value unpacked: want %x, got %x", -1, v.Ret.B)
|
||||
}
|
||||
}
|
||||
|
||||
// Test nested tuple
|
||||
const nestedTuple = `[{"name":"tuple","constant":false,"outputs":[
|
||||
{"type":"tuple","name":"s","components":[{"type":"uint256","name":"a"},{"type":"uint256[]","name":"b"},{"type":"tuple[]","name":"c","components":[{"name":"x", "type":"uint256"},{"name":"y","type":"uint256"}]}]},
|
||||
{"type":"tuple","name":"t","components":[{"name":"x", "type":"uint256"},{"name":"y","type":"uint256"}]},
|
||||
{"type":"uint256","name":"a"}
|
||||
]}]`
|
||||
|
||||
abi, err = JSON(strings.NewReader(nestedTuple))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
buff.Reset()
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000080")) // s offset
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000000")) // t.X = 0
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001")) // t.Y = 1
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001")) // a = 1
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001")) // s.A = 1
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000060")) // s.B offset
|
||||
buff.Write(common.Hex2Bytes("00000000000000000000000000000000000000000000000000000000000000c0")) // s.C offset
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002")) // s.B length
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001")) // s.B[0] = 1
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002")) // s.B[1] = 2
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002")) // s.C length
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001")) // s.C[0].X
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002")) // s.C[0].Y
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002")) // s.C[1].X
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001")) // s.C[1].Y
|
||||
|
||||
type T struct {
|
||||
X *big.Int `abi:"x"`
|
||||
Z *big.Int `abi:"y"` // Test whether the abi tag works.
|
||||
}
|
||||
|
||||
type S struct {
|
||||
A *big.Int
|
||||
B []*big.Int
|
||||
C []T
|
||||
}
|
||||
|
||||
type Ret struct {
|
||||
FieldS S `abi:"s"`
|
||||
FieldT T `abi:"t"`
|
||||
A *big.Int
|
||||
}
|
||||
var ret Ret
|
||||
var expected = Ret{
|
||||
FieldS: S{
|
||||
A: big.NewInt(1),
|
||||
B: []*big.Int{big.NewInt(1), big.NewInt(2)},
|
||||
C: []T{
|
||||
{big.NewInt(1), big.NewInt(2)},
|
||||
{big.NewInt(2), big.NewInt(1)},
|
||||
},
|
||||
},
|
||||
FieldT: T{
|
||||
big.NewInt(0), big.NewInt(1),
|
||||
},
|
||||
A: big.NewInt(1),
|
||||
}
|
||||
|
||||
err = abi.Unpack(&ret, "tuple", buff.Bytes())
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
if reflect.DeepEqual(ret, expected) {
|
||||
t.Error("unexpected unpack value")
|
||||
}
|
||||
}
|
||||
|
||||
func TestOOMMaliciousInput(t *testing.T) {
|
||||
oomTests := []unpackTest{
|
||||
{
|
||||
|
@@ -106,7 +106,7 @@ type Wallet interface {
|
||||
// or optionally with the aid of any location metadata from the embedded URL field.
|
||||
//
|
||||
// If the wallet requires additional authentication to sign the request (e.g.
|
||||
// a password to decrypt the account, or a PIN code o verify the transaction),
|
||||
// a password to decrypt the account, or a PIN code to verify the transaction),
|
||||
// an AuthNeededError instance will be returned, containing infos for the user
|
||||
// about which fields or actions are needed. The user may retry by providing
|
||||
// the needed details via SignTxWithPassphrase, or by other means (e.g. unlock
|
||||
|
@@ -30,8 +30,8 @@ import (
|
||||
var DefaultRootDerivationPath = DerivationPath{0x80000000 + 44, 0x80000000 + 60, 0x80000000 + 0, 0}
|
||||
|
||||
// DefaultBaseDerivationPath is the base path from which custom derivation endpoints
|
||||
// are incremented. As such, the first account will be at m/44'/60'/0'/0, the second
|
||||
// at m/44'/60'/0'/1, etc.
|
||||
// are incremented. As such, the first account will be at m/44'/60'/0'/0/0, the second
|
||||
// at m/44'/60'/0'/0/1, etc.
|
||||
var DefaultBaseDerivationPath = DerivationPath{0x80000000 + 44, 0x80000000 + 60, 0x80000000 + 0, 0, 0}
|
||||
|
||||
// DefaultLedgerBaseDerivationPath is the base path from which custom derivation endpoints
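As a side note on the corrected comment, the sketch below (not part of the diff, and assuming the exported `accounts.DefaultBaseDerivationPath` and the `DerivationPath` stringer behave as documented) shows how successive accounts differ only in the final index, giving m/44'/60'/0'/0/0, m/44'/60'/0'/0/1, and so on:

```golang
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/accounts"
)

func main() {
	for i := uint32(0); i < 3; i++ {
		// Copy the shared base path before touching the last component.
		path := make(accounts.DerivationPath, len(accounts.DefaultBaseDerivationPath))
		copy(path, accounts.DefaultBaseDerivationPath)
		path[len(path)-1] = i
		fmt.Println(path) // m/44'/60'/0'/0/0, .../1, .../2
	}
}
```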
@@ -265,7 +265,10 @@ func (ac *accountCache) scanAccounts() error {
|
||||
case (addr == common.Address{}):
|
||||
log.Debug("Failed to decode keystore key", "path", path, "err", "missing or zero address")
|
||||
default:
|
||||
return &accounts.Account{Address: addr, URL: accounts.URL{Scheme: KeyStoreScheme, Path: path}}
|
||||
return &accounts.Account{
|
||||
Address: addr,
|
||||
URL: accounts.URL{Scheme: KeyStoreScheme, Path: path},
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
@@ -56,9 +56,9 @@ func (fc *fileCache) scan(keyDir string) (mapset.Set, mapset.Set, mapset.Set, er
|
||||
|
||||
var newLastMod time.Time
|
||||
for _, fi := range files {
|
||||
// Skip any non-key files from the folder
|
||||
path := filepath.Join(keyDir, fi.Name())
|
||||
if skipKeyFile(fi) {
|
||||
// Skip any non-key files from the folder
|
||||
if nonKeyFile(fi) {
|
||||
log.Trace("Ignoring file on account scan", "path", path)
|
||||
continue
|
||||
}
|
||||
@@ -88,8 +88,8 @@ func (fc *fileCache) scan(keyDir string) (mapset.Set, mapset.Set, mapset.Set, er
|
||||
return creates, deletes, updates, nil
|
||||
}
|
||||
|
||||
// skipKeyFile ignores editor backups, hidden files and folders/symlinks.
|
||||
func skipKeyFile(fi os.FileInfo) bool {
|
||||
// nonKeyFile ignores editor backups, hidden files and folders/symlinks.
|
||||
func nonKeyFile(fi os.FileInfo) bool {
|
||||
// Skip editor backups and UNIX-style hidden files.
|
||||
if strings.HasSuffix(fi.Name(), "~") || strings.HasPrefix(fi.Name(), ".") {
|
||||
return true
|
||||
|
@@ -66,19 +66,19 @@ type plainKeyJSON struct {
|
||||
|
||||
type encryptedKeyJSONV3 struct {
|
||||
Address string `json:"address"`
|
||||
Crypto cryptoJSON `json:"crypto"`
|
||||
Crypto CryptoJSON `json:"crypto"`
|
||||
Id string `json:"id"`
|
||||
Version int `json:"version"`
|
||||
}
|
||||
|
||||
type encryptedKeyJSONV1 struct {
|
||||
Address string `json:"address"`
|
||||
Crypto cryptoJSON `json:"crypto"`
|
||||
Crypto CryptoJSON `json:"crypto"`
|
||||
Id string `json:"id"`
|
||||
Version string `json:"version"`
|
||||
}
|
||||
|
||||
type cryptoJSON struct {
|
||||
type CryptoJSON struct {
|
||||
Cipher string `json:"cipher"`
|
||||
CipherText string `json:"ciphertext"`
|
||||
CipherParams cipherparamsJSON `json:"cipherparams"`
|
||||
@@ -171,7 +171,10 @@ func storeNewKey(ks keyStore, rand io.Reader, auth string) (*Key, accounts.Accou
|
||||
if err != nil {
|
||||
return nil, accounts.Account{}, err
|
||||
}
|
||||
a := accounts.Account{Address: key.Address, URL: accounts.URL{Scheme: KeyStoreScheme, Path: ks.JoinPath(keyFileName(key.Address))}}
|
||||
a := accounts.Account{
|
||||
Address: key.Address,
|
||||
URL: accounts.URL{Scheme: KeyStoreScheme, Path: ks.JoinPath(keyFileName(key.Address))},
|
||||
}
|
||||
if err := ks.StoreKey(a.URL.Path, key, auth); err != nil {
|
||||
zeroKey(key.PrivateKey)
|
||||
return nil, a, err
|
||||
@@ -179,26 +182,34 @@ func storeNewKey(ks keyStore, rand io.Reader, auth string) (*Key, accounts.Accou
|
||||
return key, a, err
|
||||
}
|
||||
|
||||
func writeKeyFile(file string, content []byte) error {
|
||||
func writeTemporaryKeyFile(file string, content []byte) (string, error) {
|
||||
// Create the keystore directory with appropriate permissions
|
||||
// in case it is not present yet.
|
||||
const dirPerm = 0700
|
||||
if err := os.MkdirAll(filepath.Dir(file), dirPerm); err != nil {
|
||||
return err
|
||||
return "", err
|
||||
}
|
||||
// Atomic write: create a temporary hidden file first
|
||||
// then move it into place. TempFile assigns mode 0600.
|
||||
f, err := ioutil.TempFile(filepath.Dir(file), "."+filepath.Base(file)+".tmp")
|
||||
if err != nil {
|
||||
return err
|
||||
return "", err
|
||||
}
|
||||
if _, err := f.Write(content); err != nil {
|
||||
f.Close()
|
||||
os.Remove(f.Name())
|
||||
return err
|
||||
return "", err
|
||||
}
|
||||
f.Close()
|
||||
return os.Rename(f.Name(), file)
|
||||
return f.Name(), nil
|
||||
}
|
||||
|
||||
func writeKeyFile(file string, content []byte) error {
|
||||
name, err := writeTemporaryKeyFile(file, content)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return os.Rename(name, file)
|
||||
}
|
||||
|
||||
// keyFileName implements the naming convention for keyfiles:
|
||||
@@ -216,5 +227,6 @@ func toISO8601(t time.Time) string {
|
||||
} else {
|
||||
tz = fmt.Sprintf("%03d00", offset/3600)
|
||||
}
|
||||
return fmt.Sprintf("%04d-%02d-%02dT%02d-%02d-%02d.%09d%s", t.Year(), t.Month(), t.Day(), t.Hour(), t.Minute(), t.Second(), t.Nanosecond(), tz)
|
||||
return fmt.Sprintf("%04d-%02d-%02dT%02d-%02d-%02d.%09d%s",
|
||||
t.Year(), t.Month(), t.Day(), t.Hour(), t.Minute(), t.Second(), t.Nanosecond(), tz)
|
||||
}
|
||||
|
@@ -78,7 +78,7 @@ type unlocked struct {
|
||||
// NewKeyStore creates a keystore for the given directory.
|
||||
func NewKeyStore(keydir string, scryptN, scryptP int) *KeyStore {
|
||||
keydir, _ = filepath.Abs(keydir)
|
||||
ks := &KeyStore{storage: &keyStorePassphrase{keydir, scryptN, scryptP}}
|
||||
ks := &KeyStore{storage: &keyStorePassphrase{keydir, scryptN, scryptP, false}}
|
||||
ks.init(keydir)
|
||||
return ks
|
||||
}
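For context on the added field: the public constructor is unchanged, so callers are unaffected and the extra verification happens internally when a key file is stored. A minimal usage sketch (the directory and passphrase below are throwaway values chosen purely for illustration):

```golang
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/accounts/keystore"
)

func main() {
	// Light scrypt parameters keep the example fast; production code would
	// normally use keystore.StandardScryptN / StandardScryptP.
	ks := keystore.NewKeyStore("/tmp/example-keystore", keystore.LightScryptN, keystore.LightScryptP)
	acct, err := ks.NewAccount("example-passphrase")
	if err != nil {
		panic(err)
	}
	fmt.Println(acct.Address.Hex(), acct.URL.Path)
}
```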
@@ -35,6 +35,7 @@ import (
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
@@ -72,6 +73,10 @@ type keyStorePassphrase struct {
|
||||
keysDirPath string
|
||||
scryptN int
|
||||
scryptP int
|
||||
// skipKeyFileVerification disables the security-feature which does
|
||||
// reads and decrypts any newly created keyfiles. This should be 'false' in all
|
||||
// cases except tests -- setting this to 'true' is not recommended.
|
||||
skipKeyFileVerification bool
|
||||
}
|
||||
|
||||
func (ks keyStorePassphrase) GetKey(addr common.Address, filename, auth string) (*Key, error) {
|
||||
@@ -93,7 +98,7 @@ func (ks keyStorePassphrase) GetKey(addr common.Address, filename, auth string)
|
||||
|
||||
// StoreKey generates a key, encrypts with 'auth' and stores in the given directory
|
||||
func StoreKey(dir, auth string, scryptN, scryptP int) (common.Address, error) {
|
||||
_, a, err := storeNewKey(&keyStorePassphrase{dir, scryptN, scryptP}, rand.Reader, auth)
|
||||
_, a, err := storeNewKey(&keyStorePassphrase{dir, scryptN, scryptP, false}, rand.Reader, auth)
|
||||
return a.Address, err
|
||||
}
|
||||
|
||||
@@ -102,7 +107,25 @@ func (ks keyStorePassphrase) StoreKey(filename string, key *Key, auth string) er
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return writeKeyFile(filename, keyjson)
|
||||
// Write into temporary file
|
||||
tmpName, err := writeTemporaryKeyFile(filename, keyjson)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !ks.skipKeyFileVerification {
|
||||
// Verify that we can decrypt the file with the given password.
|
||||
_, err = ks.GetKey(key.Address, tmpName, auth)
|
||||
if err != nil {
|
||||
msg := "An error was encountered when saving and verifying the keystore file. \n" +
|
||||
"This indicates that the keystore is corrupted. \n" +
|
||||
"The corrupted file is stored at \n%v\n" +
|
||||
"Please file a ticket at:\n\n" +
|
||||
"https://github.com/ethereum/go-ethereum/issues." +
|
||||
"The error was : %s"
|
||||
return fmt.Errorf(msg, tmpName, err)
|
||||
}
|
||||
}
|
||||
return os.Rename(tmpName, filename)
|
||||
}
|
||||
|
||||
func (ks keyStorePassphrase) JoinPath(filename string) string {
|
||||
@@ -112,29 +135,26 @@ func (ks keyStorePassphrase) JoinPath(filename string) string {
|
||||
return filepath.Join(ks.keysDirPath, filename)
|
||||
}
|
||||
|
||||
// EncryptKey encrypts a key using the specified scrypt parameters into a json
|
||||
// blob that can be decrypted later on.
|
||||
func EncryptKey(key *Key, auth string, scryptN, scryptP int) ([]byte, error) {
|
||||
authArray := []byte(auth)
|
||||
// Encryptdata encrypts the data given as 'data' with the password 'auth'.
|
||||
func EncryptDataV3(data, auth []byte, scryptN, scryptP int) (CryptoJSON, error) {
|
||||
|
||||
salt := make([]byte, 32)
|
||||
if _, err := io.ReadFull(rand.Reader, salt); err != nil {
|
||||
panic("reading from crypto/rand failed: " + err.Error())
|
||||
}
|
||||
derivedKey, err := scrypt.Key(authArray, salt, scryptN, scryptR, scryptP, scryptDKLen)
|
||||
derivedKey, err := scrypt.Key(auth, salt, scryptN, scryptR, scryptP, scryptDKLen)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return CryptoJSON{}, err
|
||||
}
|
||||
encryptKey := derivedKey[:16]
|
||||
keyBytes := math.PaddedBigBytes(key.PrivateKey.D, 32)
|
||||
|
||||
iv := make([]byte, aes.BlockSize) // 16
|
||||
if _, err := io.ReadFull(rand.Reader, iv); err != nil {
|
||||
panic("reading from crypto/rand failed: " + err.Error())
|
||||
}
|
||||
cipherText, err := aesCTRXOR(encryptKey, keyBytes, iv)
|
||||
cipherText, err := aesCTRXOR(encryptKey, data, iv)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return CryptoJSON{}, err
|
||||
}
|
||||
mac := crypto.Keccak256(derivedKey[16:32], cipherText)
|
||||
|
||||
@@ -144,12 +164,11 @@ func EncryptKey(key *Key, auth string, scryptN, scryptP int) ([]byte, error) {
|
||||
scryptParamsJSON["p"] = scryptP
|
||||
scryptParamsJSON["dklen"] = scryptDKLen
|
||||
scryptParamsJSON["salt"] = hex.EncodeToString(salt)
|
||||
|
||||
cipherParamsJSON := cipherparamsJSON{
|
||||
IV: hex.EncodeToString(iv),
|
||||
}
|
||||
|
||||
cryptoStruct := cryptoJSON{
|
||||
cryptoStruct := CryptoJSON{
|
||||
Cipher: "aes-128-ctr",
|
||||
CipherText: hex.EncodeToString(cipherText),
|
||||
CipherParams: cipherParamsJSON,
|
||||
@@ -157,6 +176,17 @@ func EncryptKey(key *Key, auth string, scryptN, scryptP int) ([]byte, error) {
|
||||
KDFParams: scryptParamsJSON,
|
||||
MAC: hex.EncodeToString(mac),
|
||||
}
|
||||
return cryptoStruct, nil
|
||||
}
|
||||
|
||||
// EncryptKey encrypts a key using the specified scrypt parameters into a json
|
||||
// blob that can be decrypted later on.
|
||||
func EncryptKey(key *Key, auth string, scryptN, scryptP int) ([]byte, error) {
|
||||
keyBytes := math.PaddedBigBytes(key.PrivateKey.D, 32)
|
||||
cryptoStruct, err := EncryptDataV3(keyBytes, []byte(auth), scryptN, scryptP)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
encryptedKeyJSONV3 := encryptedKeyJSONV3{
|
||||
hex.EncodeToString(key.Address[:]),
|
||||
cryptoStruct,
|
||||
@@ -204,42 +234,48 @@ func DecryptKey(keyjson []byte, auth string) (*Key, error) {
|
||||
}, nil
|
||||
}
|
||||
|
||||
func decryptKeyV3(keyProtected *encryptedKeyJSONV3, auth string) (keyBytes []byte, keyId []byte, err error) {
|
||||
if keyProtected.Version != version {
|
||||
return nil, nil, fmt.Errorf("Version not supported: %v", keyProtected.Version)
|
||||
func DecryptDataV3(cryptoJson CryptoJSON, auth string) ([]byte, error) {
|
||||
if cryptoJson.Cipher != "aes-128-ctr" {
|
||||
return nil, fmt.Errorf("Cipher not supported: %v", cryptoJson.Cipher)
|
||||
}
|
||||
|
||||
if keyProtected.Crypto.Cipher != "aes-128-ctr" {
|
||||
return nil, nil, fmt.Errorf("Cipher not supported: %v", keyProtected.Crypto.Cipher)
|
||||
}
|
||||
|
||||
keyId = uuid.Parse(keyProtected.Id)
|
||||
mac, err := hex.DecodeString(keyProtected.Crypto.MAC)
|
||||
mac, err := hex.DecodeString(cryptoJson.MAC)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
return nil, err
|
||||
}
|
||||
|
||||
iv, err := hex.DecodeString(keyProtected.Crypto.CipherParams.IV)
|
||||
iv, err := hex.DecodeString(cryptoJson.CipherParams.IV)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
return nil, err
|
||||
}
|
||||
|
||||
cipherText, err := hex.DecodeString(keyProtected.Crypto.CipherText)
|
||||
cipherText, err := hex.DecodeString(cryptoJson.CipherText)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
return nil, err
|
||||
}
|
||||
|
||||
derivedKey, err := getKDFKey(keyProtected.Crypto, auth)
|
||||
derivedKey, err := getKDFKey(cryptoJson, auth)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
return nil, err
|
||||
}
|
||||
|
||||
calculatedMAC := crypto.Keccak256(derivedKey[16:32], cipherText)
|
||||
if !bytes.Equal(calculatedMAC, mac) {
|
||||
return nil, nil, ErrDecrypt
|
||||
return nil, ErrDecrypt
|
||||
}
|
||||
|
||||
plainText, err := aesCTRXOR(derivedKey[:16], cipherText, iv)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return plainText, err
|
||||
}
|
||||
|
||||
func decryptKeyV3(keyProtected *encryptedKeyJSONV3, auth string) (keyBytes []byte, keyId []byte, err error) {
|
||||
if keyProtected.Version != version {
|
||||
return nil, nil, fmt.Errorf("Version not supported: %v", keyProtected.Version)
|
||||
}
|
||||
keyId = uuid.Parse(keyProtected.Id)
|
||||
plainText, err := DecryptDataV3(keyProtected.Crypto, auth)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
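Since `EncryptDataV3` and `DecryptDataV3` are now exported, arbitrary byte blobs can be protected with the same scheme as key files. A minimal round-trip sketch using only the signatures shown above (light scrypt parameters are an assumption made to keep the example fast):

```golang
package main

import (
	"bytes"
	"fmt"

	"github.com/ethereum/go-ethereum/accounts/keystore"
)

func main() {
	secret := []byte("arbitrary payload, not necessarily a private key")
	blob, err := keystore.EncryptDataV3(secret, []byte("pass"), keystore.LightScryptN, keystore.LightScryptP)
	if err != nil {
		panic(err)
	}
	plain, err := keystore.DecryptDataV3(blob, "pass")
	if err != nil {
		panic(err)
	}
	fmt.Println(bytes.Equal(secret, plain)) // true
}
```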
@@ -280,7 +316,7 @@ func decryptKeyV1(keyProtected *encryptedKeyJSONV1, auth string) (keyBytes []byt
|
||||
return plainText, keyId, err
|
||||
}
|
||||
|
||||
func getKDFKey(cryptoJSON cryptoJSON, auth string) ([]byte, error) {
|
||||
func getKDFKey(cryptoJSON CryptoJSON, auth string) ([]byte, error) {
|
||||
authArray := []byte(auth)
|
||||
salt, err := hex.DecodeString(cryptoJSON.KDFParams["salt"].(string))
|
||||
if err != nil {
|
@@ -37,7 +37,7 @@ func tmpKeyStoreIface(t *testing.T, encrypted bool) (dir string, ks keyStore) {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if encrypted {
|
||||
ks = &keyStorePassphrase{d, veryLightScryptN, veryLightScryptP}
|
||||
ks = &keyStorePassphrase{d, veryLightScryptN, veryLightScryptP, true}
|
||||
} else {
|
||||
ks = &keyStorePlain{d}
|
||||
}
|
||||
@@ -191,7 +191,7 @@ func TestV1_1(t *testing.T) {
|
||||
|
||||
func TestV1_2(t *testing.T) {
|
||||
t.Parallel()
|
||||
ks := &keyStorePassphrase{"testdata/v1", LightScryptN, LightScryptP}
|
||||
ks := &keyStorePassphrase{"testdata/v1", LightScryptN, LightScryptP, true}
|
||||
addr := common.HexToAddress("cb61d5a9c4896fb9658090b597ef0e7be6f7b67e")
|
||||
file := "testdata/v1/cb61d5a9c4896fb9658090b597ef0e7be6f7b67e/cb61d5a9c4896fb9658090b597ef0e7be6f7b67e"
|
||||
k, err := ks.GetKey(addr, file, "g")
|
@@ -38,7 +38,13 @@ func importPreSaleKey(keyStore keyStore, keyJSON []byte, password string) (accou
|
||||
return accounts.Account{}, nil, err
|
||||
}
|
||||
key.Id = uuid.NewRandom()
|
||||
a := accounts.Account{Address: key.Address, URL: accounts.URL{Scheme: KeyStoreScheme, Path: keyStore.JoinPath(keyFileName(key.Address))}}
|
||||
a := accounts.Account{
|
||||
Address: key.Address,
|
||||
URL: accounts.URL{
|
||||
Scheme: KeyStoreScheme,
|
||||
Path: keyStore.JoinPath(keyFileName(key.Address)),
|
||||
},
|
||||
}
|
||||
err = keyStore.StoreKey(a.URL.Path, key, password)
|
||||
return a, key, err
|
||||
}
|
||||
|
@@ -52,8 +52,8 @@ func (w *keystoreWallet) Status() (string, error) {
|
||||
// is no connection or decryption step necessary to access the list of accounts.
|
||||
func (w *keystoreWallet) Open(passphrase string) error { return nil }
|
||||
|
||||
// Close implements accounts.Wallet, but is a noop for plain wallets since is no
|
||||
// meaningful open operation.
|
||||
// Close implements accounts.Wallet, but is a noop for plain wallets since there
|
||||
// is no meaningful open operation.
|
||||
func (w *keystoreWallet) Close() error { return nil }
|
||||
|
||||
// Accounts implements accounts.Wallet, returning an account list consisting of
|
||||
@@ -84,10 +84,7 @@ func (w *keystoreWallet) SelfDerive(base accounts.DerivationPath, chain ethereum
|
||||
// able to sign via our shared keystore backend).
|
||||
func (w *keystoreWallet) SignHash(account accounts.Account, hash []byte) ([]byte, error) {
|
||||
// Make sure the requested account is contained within
|
||||
if account.Address != w.account.Address {
|
||||
return nil, accounts.ErrUnknownAccount
|
||||
}
|
||||
if account.URL != (accounts.URL{}) && account.URL != w.account.URL {
|
||||
if !w.Contains(account) {
|
||||
return nil, accounts.ErrUnknownAccount
|
||||
}
|
||||
// Account seems valid, request the keystore to sign
|
||||
@@ -100,10 +97,7 @@ func (w *keystoreWallet) SignHash(account accounts.Account, hash []byte) ([]byte
|
||||
// be able to sign via our shared keystore backend).
|
||||
func (w *keystoreWallet) SignTx(account accounts.Account, tx *types.Transaction, chainID *big.Int) (*types.Transaction, error) {
|
||||
// Make sure the requested account is contained within
|
||||
if account.Address != w.account.Address {
|
||||
return nil, accounts.ErrUnknownAccount
|
||||
}
|
||||
if account.URL != (accounts.URL{}) && account.URL != w.account.URL {
|
||||
if !w.Contains(account) {
|
||||
return nil, accounts.ErrUnknownAccount
|
||||
}
|
||||
// Account seems valid, request the keystore to sign
|
||||
@@ -114,10 +108,7 @@ func (w *keystoreWallet) SignTx(account accounts.Account, tx *types.Transaction,
|
||||
// given hash with the given account using passphrase as extra authentication.
|
||||
func (w *keystoreWallet) SignHashWithPassphrase(account accounts.Account, passphrase string, hash []byte) ([]byte, error) {
|
||||
// Make sure the requested account is contained within
|
||||
if account.Address != w.account.Address {
|
||||
return nil, accounts.ErrUnknownAccount
|
||||
}
|
||||
if account.URL != (accounts.URL{}) && account.URL != w.account.URL {
|
||||
if !w.Contains(account) {
|
||||
return nil, accounts.ErrUnknownAccount
|
||||
}
|
||||
// Account seems valid, request the keystore to sign
|
||||
@@ -128,10 +119,7 @@ func (w *keystoreWallet) SignHashWithPassphrase(account accounts.Account, passph
|
||||
// transaction with the given account using passphrase as extra authentication.
|
||||
func (w *keystoreWallet) SignTxWithPassphrase(account accounts.Account, passphrase string, tx *types.Transaction, chainID *big.Int) (*types.Transaction, error) {
|
||||
// Make sure the requested account is contained within
|
||||
if account.Address != w.account.Address {
|
||||
return nil, accounts.ErrUnknownAccount
|
||||
}
|
||||
if account.URL != (accounts.URL{}) && account.URL != w.account.URL {
|
||||
if !w.Contains(account) {
|
||||
return nil, accounts.ErrUnknownAccount
|
||||
}
|
||||
// Account seems valid, request the keystore to sign
|
@@ -257,7 +257,9 @@ func (w *ledgerDriver) ledgerDerive(derivationPath []uint32) (common.Address, er
|
||||
|
||||
// Decode the hex sting into an Ethereum address and return
|
||||
var address common.Address
|
||||
hex.Decode(address[:], hexstr)
|
||||
if _, err = hex.Decode(address[:], hexstr); err != nil {
|
||||
return common.Address{}, err
|
||||
}
|
||||
return address, nil
|
||||
}
|
||||
|
||||
@@ -350,7 +352,7 @@ func (w *ledgerDriver) ledgerSign(derivationPath []uint32, tx *types.Transaction
|
||||
signer = new(types.HomesteadSigner)
|
||||
} else {
|
||||
signer = types.NewEIP155Signer(chainID)
|
||||
signature[64] = signature[64] - byte(chainID.Uint64()*2+35)
|
||||
signature[64] -= byte(chainID.Uint64()*2 + 35)
|
||||
}
|
||||
signed, err := tx.WithSignature(signer, signature)
|
||||
if err != nil {
|
||||
|
@@ -221,7 +221,7 @@ func (w *trezorDriver) trezorSign(derivationPath []uint32, tx *types.Transaction
|
||||
signer = new(types.HomesteadSigner)
|
||||
} else {
|
||||
signer = types.NewEIP155Signer(chainID)
|
||||
signature[64] = signature[64] - byte(chainID.Uint64()*2+35)
|
||||
signature[64] -= byte(chainID.Uint64()*2 + 35)
|
||||
}
|
||||
// Inject the final signature into the transaction and sanity check the sender
|
||||
signed, err := tx.WithSignature(signer, signature)
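The subtraction in both drivers undoes the EIP-155 style v value reported by the hardware (this reading is an assumption based on the expression itself): v = chainID*2 + 35 + recID, so stripping chainID*2 + 35 leaves the 0/1 recovery id that the signer re-encodes when the signature is attached. A toy check of the arithmetic:

```golang
package main

import "fmt"

func main() {
	chainID := uint64(1)                // mainnet
	deviceV := byte(chainID*2 + 35 + 1) // recovery id 1 -> v = 38
	recID := deviceV - byte(chainID*2+35)
	fmt.Println(recID) // 1
}
```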
@@ -23,8 +23,8 @@ environment:
|
||||
install:
|
||||
- git submodule update --init
|
||||
- rmdir C:\go /s /q
|
||||
- appveyor DownloadFile https://storage.googleapis.com/golang/go1.10.3.windows-%GETH_ARCH%.zip
|
||||
- 7z x go1.10.3.windows-%GETH_ARCH%.zip -y -oC:\ > NUL
|
||||
- appveyor DownloadFile https://storage.googleapis.com/golang/go1.11.5.windows-%GETH_ARCH%.zip
|
||||
- 7z x go1.11.5.windows-%GETH_ARCH%.zip -y -oC:\ > NUL
|
||||
- go version
|
||||
- gcc --version
|
||||
|
||||
|
@@ -7,11 +7,18 @@ Canonical.
|
||||
Packages of develop branch commits have suffix -unstable and cannot be installed alongside
|
||||
the stable version. Switching between release streams requires user intervention.
|
||||
|
||||
## Launchpad
|
||||
|
||||
The packages are built and served by launchpad.net. We generate a Debian source package
|
||||
for each distribution and upload it. Their builder picks up the source package, builds it
|
||||
and installs the new version into the PPA repository. Launchpad requires a valid signature
|
||||
by a team member for source package uploads. The signing key is stored in an environment
|
||||
variable which Travis CI makes available to certain builds.
|
||||
by a team member for source package uploads.
|
||||
|
||||
The signing key is stored in an environment variable which Travis CI makes available to
|
||||
certain builds. Since Travis CI doesn't support FTP, SFTP is used to transfer the
|
||||
packages. To set this up yourself, you need to create a Launchpad user and add a GPG key
|
||||
and SSH key to it. Then encode both keys as base64 and configure 'secret' environment
|
||||
variables `PPA_SIGNING_KEY` and `PPA_SSH_KEY` on Travis.
|
||||
|
||||
We want to build go-ethereum with the most recent version of Go, irrespective of the Go
|
||||
version that is available in the main Ubuntu repository. In order to make this possible,
|
||||
@@ -27,7 +34,7 @@ Add the gophers PPA and install Go 1.10 and Debian packaging tools:
|
||||
|
||||
$ sudo apt-add-repository ppa:gophers/ubuntu/archive
|
||||
$ sudo apt-get update
|
||||
$ sudo apt-get install build-essential golang-1.10 devscripts debhelper
|
||||
$ sudo apt-get install build-essential golang-1.10 devscripts debhelper python-bzrlib python-paramiko
|
||||
|
||||
Create the source packages:
|
||||
|
||||
|
build/ci.go (96 changed lines)
@@ -147,6 +147,9 @@ var (
|
||||
debEthereum,
|
||||
}
|
||||
|
||||
// Packages to be cross-compiled by the xgo command
|
||||
allCrossCompiledArchiveFiles = append(allToolsArchiveFiles, swarmArchiveFiles...)
|
||||
|
||||
// Distros for which packages are created.
|
||||
// Note: vivid is unsupported because there is no golang-1.6 package for it.
|
||||
// Note: wily is unsupported because it was officially deprecated on lanchpad.
|
||||
@@ -317,9 +320,7 @@ func goToolArch(arch string, cc string, subcmd string, args ...string) *exec.Cmd
|
||||
// "tests" also includes static analysis tools such as vet.
|
||||
|
||||
func doTest(cmdline []string) {
|
||||
var (
|
||||
coverage = flag.Bool("coverage", false, "Whether to record code coverage")
|
||||
)
|
||||
coverage := flag.Bool("coverage", false, "Whether to record code coverage")
|
||||
flag.CommandLine.Parse(cmdline)
|
||||
env := build.Env()
|
||||
|
||||
@@ -329,14 +330,11 @@ func doTest(cmdline []string) {
|
||||
}
|
||||
packages = build.ExpandPackagesNoVendor(packages)
|
||||
|
||||
// Run analysis tools before the tests.
|
||||
build.MustRun(goTool("vet", packages...))
|
||||
|
||||
// Run the actual tests.
|
||||
gotest := goTool("test", buildFlags(env)...)
|
||||
// Test a single package at a time. CI builders are slow
|
||||
// and some tests run into timeouts under load.
|
||||
gotest.Args = append(gotest.Args, "-p", "1")
|
||||
gotest := goTool("test", buildFlags(env)...)
|
||||
gotest.Args = append(gotest.Args, "-p", "1", "-timeout", "5m")
|
||||
if *coverage {
|
||||
gotest.Args = append(gotest.Args, "-covermode=atomic", "-cover")
|
||||
}
|
||||
@@ -443,11 +441,8 @@ func archiveBasename(arch string, archiveVersion string) string {
|
||||
func archiveUpload(archive string, blobstore string, signer string) error {
|
||||
// If signing was requested, generate the signature files
|
||||
if signer != "" {
|
||||
pgpkey, err := base64.StdEncoding.DecodeString(os.Getenv(signer))
|
||||
if err != nil {
|
||||
return fmt.Errorf("invalid base64 %s", signer)
|
||||
}
|
||||
if err := build.PGPSignFile(archive, archive+".asc", string(pgpkey)); err != nil {
|
||||
key := getenvBase64(signer)
|
||||
if err := build.PGPSignFile(archive, archive+".asc", string(key)); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
@@ -490,7 +485,8 @@ func maybeSkipArchive(env build.Environment) {
|
||||
func doDebianSource(cmdline []string) {
|
||||
var (
|
||||
signer = flag.String("signer", "", `Signing key name, also used as package author`)
|
||||
upload = flag.String("upload", "", `Where to upload the source package (usually "ppa:ethereum/ethereum")`)
|
||||
upload = flag.String("upload", "", `Where to upload the source package (usually "ethereum/ethereum")`)
|
||||
sshUser = flag.String("sftp-user", "", `Username for SFTP upload (usually "geth-ci")`)
|
||||
workdir = flag.String("workdir", "", `Output directory for packages (uses temp dir if unset)`)
|
||||
now = time.Now()
|
||||
)
|
||||
@@ -500,11 +496,7 @@ func doDebianSource(cmdline []string) {
|
||||
maybeSkipArchive(env)
|
||||
|
||||
// Import the signing key.
|
||||
if b64key := os.Getenv("PPA_SIGNING_KEY"); b64key != "" {
|
||||
key, err := base64.StdEncoding.DecodeString(b64key)
|
||||
if err != nil {
|
||||
log.Fatal("invalid base64 PPA_SIGNING_KEY")
|
||||
}
|
||||
if key := getenvBase64("PPA_SIGNING_KEY"); len(key) > 0 {
|
||||
gpg := exec.Command("gpg", "--import")
|
||||
gpg.Stdin = bytes.NewReader(key)
|
||||
build.MustRun(gpg)
|
||||
@@ -515,22 +507,58 @@ func doDebianSource(cmdline []string) {
|
||||
for _, distro := range debDistros {
|
||||
meta := newDebMetadata(distro, *signer, env, now, pkg.Name, pkg.Version, pkg.Executables)
|
||||
pkgdir := stageDebianSource(*workdir, meta)
|
||||
debuild := exec.Command("debuild", "-S", "-sa", "-us", "-uc")
|
||||
debuild := exec.Command("debuild", "-S", "-sa", "-us", "-uc", "-d", "-Zxz")
|
||||
debuild.Dir = pkgdir
|
||||
build.MustRun(debuild)
|
||||
|
||||
changes := fmt.Sprintf("%s_%s_source.changes", meta.Name(), meta.VersionString())
|
||||
changes = filepath.Join(*workdir, changes)
|
||||
var (
|
||||
basename = fmt.Sprintf("%s_%s", meta.Name(), meta.VersionString())
|
||||
source = filepath.Join(*workdir, basename+".tar.xz")
|
||||
dsc = filepath.Join(*workdir, basename+".dsc")
|
||||
changes = filepath.Join(*workdir, basename+"_source.changes")
|
||||
)
|
||||
if *signer != "" {
|
||||
build.MustRunCommand("debsign", changes)
|
||||
}
|
||||
if *upload != "" {
|
||||
build.MustRunCommand("dput", *upload, changes)
|
||||
ppaUpload(*workdir, *upload, *sshUser, []string{source, dsc, changes})
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func ppaUpload(workdir, ppa, sshUser string, files []string) {
|
||||
p := strings.Split(ppa, "/")
|
||||
if len(p) != 2 {
|
||||
log.Fatal("-upload PPA name must contain single /")
|
||||
}
|
||||
if sshUser == "" {
|
||||
sshUser = p[0]
|
||||
}
|
||||
incomingDir := fmt.Sprintf("~%s/ubuntu/%s", p[0], p[1])
|
||||
// Create the SSH identity file if it doesn't exist.
|
||||
var idfile string
|
||||
if sshkey := getenvBase64("PPA_SSH_KEY"); len(sshkey) > 0 {
|
||||
idfile = filepath.Join(workdir, "sshkey")
|
||||
if _, err := os.Stat(idfile); os.IsNotExist(err) {
|
||||
ioutil.WriteFile(idfile, sshkey, 0600)
|
||||
}
|
||||
}
|
||||
// Upload
|
||||
dest := sshUser + "@ppa.launchpad.net"
|
||||
if err := build.UploadSFTP(idfile, dest, incomingDir, files); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func getenvBase64(variable string) []byte {
|
||||
dec, err := base64.StdEncoding.DecodeString(os.Getenv(variable))
|
||||
if err != nil {
|
||||
log.Fatal("invalid base64 " + variable)
|
||||
}
|
||||
return []byte(dec)
|
||||
}
|
||||
|
||||
func makeWorkdir(wdflag string) string {
|
||||
var err error
|
||||
if wdflag != "" {
|
||||
@@ -641,17 +669,6 @@ func (meta debMetadata) ExeName(exe debExecutable) string {
|
||||
return exe.Package()
|
||||
}
|
||||
|
||||
// EthereumSwarmPackageName returns the name of the swarm package based on
|
||||
// environment, e.g. "ethereum-swarm-unstable", or "ethereum-swarm".
|
||||
// This is needed so that we make sure that "ethereum" package,
|
||||
// depends on and installs "ethereum-swarm"
|
||||
func (meta debMetadata) EthereumSwarmPackageName() string {
|
||||
if isUnstableBuild(meta.Env) {
|
||||
return debSwarm.Name + "-unstable"
|
||||
}
|
||||
return debSwarm.Name
|
||||
}
|
||||
|
||||
// ExeConflicts returns the content of the Conflicts field
|
||||
// for executable packages.
|
||||
func (meta debMetadata) ExeConflicts(exe debExecutable) string {
|
||||
@@ -813,15 +830,10 @@ func doAndroidArchive(cmdline []string) {
|
||||
os.Rename(archive, meta.Package+".aar")
|
||||
if *signer != "" && *deploy != "" {
|
||||
// Import the signing key into the local GPG instance
|
||||
b64key := os.Getenv(*signer)
|
||||
key, err := base64.StdEncoding.DecodeString(b64key)
|
||||
if err != nil {
|
||||
log.Fatalf("invalid base64 %s", *signer)
|
||||
}
|
||||
key := getenvBase64(*signer)
|
||||
gpg := exec.Command("gpg", "--import")
|
||||
gpg.Stdin = bytes.NewReader(key)
|
||||
build.MustRun(gpg)
|
||||
|
||||
keyID, err := build.PGPKeyID(string(key))
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
@@ -1009,7 +1021,7 @@ func doXgo(cmdline []string) {
|
||||
|
||||
if *alltools {
|
||||
args = append(args, []string{"--dest", GOBIN}...)
|
||||
for _, res := range allToolsArchiveFiles {
|
||||
for _, res := range allCrossCompiledArchiveFiles {
|
||||
if strings.HasPrefix(res, GOBIN) {
|
||||
// Binary tool found, cross build it explicitly
|
||||
args = append(args, "./"+filepath.Join("cmd", filepath.Base(res)))
|
||||
@@ -1048,7 +1060,7 @@ func xgoTool(args []string) *exec.Cmd {
|
||||
func doPurge(cmdline []string) {
|
||||
var (
|
||||
store = flag.String("store", "", `Destination from where to purge archives (usually "gethstore/builds")`)
|
||||
limit = flag.Int("days", 30, `Age threshold above which to delete unstalbe archives`)
|
||||
limit = flag.Int("days", 30, `Age threshold above which to delete unstable archives`)
|
||||
)
|
||||
flag.CommandLine.Parse(cmdline)
|
||||
|
||||
|
@@ -10,7 +10,7 @@ Vcs-Browser: https://github.com/ethereum/go-ethereum
|
||||
|
||||
Package: {{.Name}}
|
||||
Architecture: any
|
||||
Depends: ${misc:Depends}, {{.EthereumSwarmPackageName}}, {{.ExeList}}
|
||||
Depends: ${misc:Depends}, {{.ExeList}}
|
||||
Description: Meta-package to install geth, swarm, and other tools
|
||||
Meta-package to install geth, swarm and other tools
|
||||
|
||||
|
@@ -1,3 +1,19 @@
|
||||
// Copyright 2018 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
// +build none
|
||||
|
||||
/*
|
||||
|
@@ -75,7 +75,7 @@ func main() {
|
||||
bins []string
|
||||
types []string
|
||||
)
|
||||
if *solFlag != "" || *abiFlag == "-" {
|
||||
if *solFlag != "" || (*abiFlag == "-" && *pkgFlag == "") {
|
||||
// Generate the list of types to exclude from binding
|
||||
exclude := make(map[string]bool)
|
||||
for _, kind := range strings.Split(*excFlag, ",") {
|
||||
@@ -111,7 +111,13 @@ func main() {
|
||||
}
|
||||
} else {
|
||||
// Otherwise load up the ABI, optional bytecode and type name from the parameters
|
||||
abi, err := ioutil.ReadFile(*abiFlag)
|
||||
var abi []byte
|
||||
var err error
|
||||
if *abiFlag == "-" {
|
||||
abi, err = ioutil.ReadAll(os.Stdin)
|
||||
} else {
|
||||
abi, err = ioutil.ReadFile(*abiFlag)
|
||||
}
|
||||
if err != nil {
|
||||
fmt.Printf("Failed to read input ABI: %v\n", err)
|
||||
os.Exit(-1)
|
||||
@@ -155,6 +161,5 @@ func contractsFromStdin() (map[string]*compiler.Contract, error) {
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return compiler.ParseCombinedJSON(bytes, "", "", "", "")
|
||||
}
|
||||
|
@@ -29,6 +29,7 @@ import (
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"github.com/ethereum/go-ethereum/p2p/discover"
|
||||
"github.com/ethereum/go-ethereum/p2p/discv5"
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
"github.com/ethereum/go-ethereum/p2p/nat"
|
||||
"github.com/ethereum/go-ethereum/p2p/netutil"
|
||||
)
|
||||
@@ -37,7 +38,7 @@ func main() {
|
||||
var (
|
||||
listenAddr = flag.String("addr", ":30301", "listen address")
|
||||
genKey = flag.String("genkey", "", "generate a node key")
|
||||
writeAddr = flag.Bool("writeaddress", false, "write out the node's pubkey hash and quit")
|
||||
writeAddr = flag.Bool("writeaddress", false, "write out the node's public key and quit")
|
||||
nodeKeyFile = flag.String("nodekey", "", "private key filename")
|
||||
nodeKeyHex = flag.String("nodekeyhex", "", "private key as hex (for testing)")
|
||||
natdesc = flag.String("nat", "none", "port mapping mechanism (any|none|upnp|pmp|extip:<IP>)")
|
||||
@@ -85,7 +86,7 @@ func main() {
|
||||
}
|
||||
|
||||
if *writeAddr {
|
||||
fmt.Printf("%v\n", discover.PubkeyID(&nodeKey.PublicKey))
|
||||
fmt.Printf("%x\n", crypto.FromECDSAPub(&nodeKey.PublicKey)[1:])
|
||||
os.Exit(0)
|
||||
}
|
||||
|
||||
@@ -118,16 +119,17 @@ func main() {
|
||||
}
|
||||
|
||||
if *runv5 {
|
||||
if _, err := discv5.ListenUDP(nodeKey, conn, realaddr, "", restrictList); err != nil {
|
||||
if _, err := discv5.ListenUDP(nodeKey, conn, "", restrictList); err != nil {
|
||||
utils.Fatalf("%v", err)
|
||||
}
|
||||
} else {
|
||||
db, _ := enode.OpenDB("")
|
||||
ln := enode.NewLocalNode(db, nodeKey)
|
||||
cfg := discover.Config{
|
||||
PrivateKey: nodeKey,
|
||||
AnnounceAddr: realaddr,
|
||||
NetRestrict: restrictList,
|
||||
PrivateKey: nodeKey,
|
||||
NetRestrict: restrictList,
|
||||
}
|
||||
if _, err := discover.ListenUDP(conn, cfg); err != nil {
|
||||
if _, err := discover.ListenUDP(conn, ln, cfg); err != nil {
|
||||
utils.Fatalf("%v", err)
|
||||
}
|
||||
}
|
||||
|
@@ -91,7 +91,7 @@ invoking methods with the following info:
|
||||
* [x] Version info about the signer
|
||||
* [x] Address of API (http/ipc)
|
||||
* [ ] List of known accounts
|
||||
* [ ] Have a default timeout on signing operations, so that if the user has not answered withing e.g. 60 seconds, the request is rejected.
|
||||
* [ ] Have a default timeout on signing operations, so that if the user has not answered within e.g. 60 seconds, the request is rejected.
|
||||
* [ ] `account_signRawTransaction`
|
||||
* [ ] `account_bulkSignTransactions([] transactions)` should
|
||||
* only exist if enabled via config/flag
|
||||
@@ -129,7 +129,7 @@ The signer listens to HTTP requests on `rpcaddr`:`rpcport`, with the same JSONRP
|
||||
expected to be JSON [jsonrpc 2.0 standard](http://www.jsonrpc.org/specification).
|
||||
|
||||
Some of these call can require user interaction. Clients must be aware that responses
|
||||
may be delayed significanlty or may never be received if a users decides to ignore the confirmation request.
|
||||
may be delayed significantly or may never be received if a users decides to ignore the confirmation request.
|
||||
|
||||
The External API is **untrusted** : it does not accept credentials over this api, nor does it expect
|
||||
that requests have any authority.
|
||||
@@ -862,7 +862,7 @@ A UI should conform to the following rules.
|
||||
* A UI SHOULD inform the user about the `SHA256` or `MD5` hash of the binary being executed
|
||||
* A UI SHOULD NOT maintain a secondary storage of data, e.g. list of accounts
|
||||
* The signer provides accounts
|
||||
* A UI SHOULD, to the best extent possible, use static linking / bundling, so that requried libraries are bundled
|
||||
* A UI SHOULD, to the best extent possible, use static linking / bundling, so that required libraries are bundled
|
||||
along with the UI.
|
||||
|
||||
|
||||
@@ -875,3 +875,4 @@ There are a couple of implementation for a UI. We'll try to keep this list up to
|
||||
| QtSigner| https://github.com/holiman/qtsigner/| Python3/QT-based| :+1:| :+1:| :+1:| :+1:| :+1:| :x: | :+1: (partially)|
|
||||
| GtkSigner| https://github.com/holiman/gtksigner| Python3/GTK-based| :+1:| :x:| :x:| :+1:| :+1:| :x: | :x: |
|
||||
| Frame | https://github.com/floating/frame/commits/go-signer| Electron-based| :x:| :x:| :x:| :x:| ?| :x: | :x: |
|
||||
| Clef UI| https://github.com/kyokan/clef-ui| Golang/QT-based| :+1:| :+1:| :x:| :+1:| :+1:| :x: | :+1: (approve tx only)|
|
||||
|
(Five binary image files changed; sizes reduced from 14, 20, 19, 25 and 42 KiB to 12, 17, 16, 22 and 36 KiB respectively.)
@@ -1,6 +1,13 @@
|
||||
### Changelog for external API
|
||||
|
||||
#### 4.0.0
|
||||
|
||||
* The external `account_Ecrecover`-method was removed.
|
||||
* The external `account_Import`-method was removed.
|
||||
|
||||
#### 3.0.0
|
||||
|
||||
* The external `account_List`-method was changed to not expose `url`, which contained info about the local filesystem. It now returns only a list of addresses.
|
||||
|
||||
#### 2.0.0
|
||||
|
||||
|
@@ -1,5 +1,24 @@
|
||||
### Changelog for internal API (ui-api)
|
||||
|
||||
### 3.0.0
|
||||
|
||||
* Make use of `OnInputRequired(info UserInputRequest)` for obtaining master password during startup
|
||||
|
||||
### 2.1.0
|
||||
|
||||
* Add `OnInputRequired(info UserInputRequest)` to internal API. This method is used when Clef needs user input, e.g. passwords.
|
||||
|
||||
The following structures are used:
|
||||
```golang
|
||||
UserInputRequest struct {
|
||||
Prompt string `json:"prompt"`
|
||||
Title string `json:"title"`
|
||||
IsPassword bool `json:"isPassword"`
|
||||
}
|
||||
UserInputResponse struct {
|
||||
Text string `json:"text"`
|
||||
}
```
### 2.0.0
|
||||
|
||||
* Modify how `call_info` on a transaction is conveyed. New format:
|
||||
|
cmd/clef/main.go (237 changed lines)
@@ -35,8 +35,10 @@ import (
|
||||
"runtime"
|
||||
"strings"
|
||||
|
||||
"github.com/ethereum/go-ethereum/accounts/keystore"
|
||||
"github.com/ethereum/go-ethereum/cmd/utils"
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/console"
|
||||
"github.com/ethereum/go-ethereum/crypto"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"github.com/ethereum/go-ethereum/node"
|
||||
@@ -48,19 +50,19 @@ import (
|
||||
)
|
||||
|
||||
// ExternalAPIVersion -- see extapi_changelog.md
|
||||
const ExternalAPIVersion = "2.0.0"
|
||||
const ExternalAPIVersion = "4.0.0"
|
||||
|
||||
// InternalAPIVersion -- see intapi_changelog.md
|
||||
const InternalAPIVersion = "2.0.0"
|
||||
const InternalAPIVersion = "3.0.0"
|
||||
|
||||
const legalWarning = `
|
||||
WARNING!
|
||||
WARNING!
|
||||
|
||||
Clef is alpha software, and not yet publicly released. This software has _not_ been audited, and there
|
||||
are no guarantees about the workings of this software. It may contain severe flaws. You should not use this software
|
||||
unless you agree to take full responsibility for doing so, and know what you are doing.
|
||||
unless you agree to take full responsibility for doing so, and know what you are doing.
|
||||
|
||||
TLDR; THIS IS NOT PRODUCTION-READY SOFTWARE!
|
||||
TLDR; THIS IS NOT PRODUCTION-READY SOFTWARE!
|
||||
|
||||
`
|
||||
|
||||
@@ -70,6 +72,10 @@ var (
|
||||
Value: 4,
|
||||
Usage: "log level to emit to the screen",
|
||||
}
|
||||
advancedMode = cli.BoolFlag{
|
||||
Name: "advanced",
|
||||
Usage: "If enabled, issues warnings instead of rejections for suspicious requests. Default off",
|
||||
}
|
||||
keystoreFlag = cli.StringFlag{
|
||||
Name: "keystore",
|
||||
Value: filepath.Join(node.DefaultDataDir(), "keystore"),
|
||||
@@ -87,7 +93,7 @@ var (
|
||||
}
|
||||
signerSecretFlag = cli.StringFlag{
|
||||
Name: "signersecret",
|
||||
Usage: "A file containing the password used to encrypt Clef credentials, e.g. keystore credentials and ruleset hash",
|
||||
Usage: "A file containing the (encrypted) master seed to encrypt Clef data, e.g. keystore credentials and ruleset hash",
|
||||
}
|
||||
dBFlag = cli.StringFlag{
|
||||
Name: "4bytedb",
|
||||
@@ -130,7 +136,7 @@ var (
|
||||
configdirFlag,
|
||||
},
|
||||
Description: `
|
||||
The init command generates a master seed which Clef can use to store credentials and data needed for
|
||||
The init command generates a master seed which Clef can use to store credentials and data needed for
|
||||
the rule-engine to work.`,
|
||||
}
|
||||
attestCommand = cli.Command{
|
||||
@@ -144,25 +150,25 @@ the rule-engine to work.`,
|
||||
signerSecretFlag,
|
||||
},
|
||||
Description: `
|
||||
The attest command stores the sha256 of the rule.js-file that you want to use for automatic processing of
|
||||
incoming requests.
|
||||
The attest command stores the sha256 of the rule.js-file that you want to use for automatic processing of
|
||||
incoming requests.
|
||||
|
||||
Whenever you make an edit to the rule file, you need to use attestation to tell
|
||||
Whenever you make an edit to the rule file, you need to use attestation to tell
|
||||
Clef that the file is 'safe' to execute.`,
|
||||
}
|
||||
|
||||
addCredentialCommand = cli.Command{
|
||||
Action: utils.MigrateFlags(addCredential),
|
||||
Name: "addpw",
|
||||
setCredentialCommand = cli.Command{
|
||||
Action: utils.MigrateFlags(setCredential),
|
||||
Name: "setpw",
|
||||
Usage: "Store a credential for a keystore file",
|
||||
ArgsUsage: "<address> <password>",
|
||||
ArgsUsage: "<address>",
|
||||
Flags: []cli.Flag{
|
||||
logLevelFlag,
|
||||
configdirFlag,
|
||||
signerSecretFlag,
|
||||
},
|
||||
Description: `
|
||||
The addpw command stores a password for a given address (keyfile). If you invoke it with only one parameter, it will
|
||||
The setpw command stores a password for a given address (keyfile). If you enter a blank passphrase, it will
|
||||
remove any stored credential for that address (keyfile)
|
||||
`,
|
||||
}
|
||||
@@ -191,9 +197,10 @@ func init() {
|
||||
ruleFlag,
|
||||
stdiouiFlag,
|
||||
testFlag,
|
||||
advancedMode,
|
||||
}
|
||||
app.Action = signer
|
||||
app.Commands = []cli.Command{initCommand, attestCommand, addCredentialCommand}
|
||||
app.Commands = []cli.Command{initCommand, attestCommand, setCredentialCommand}
|
||||
|
||||
}
|
||||
func main() {
|
||||
@@ -207,36 +214,56 @@ func initializeSecrets(c *cli.Context) error {
|
||||
if err := initialize(c); err != nil {
|
||||
return err
|
||||
}
|
||||
configDir := c.String(configdirFlag.Name)
|
||||
configDir := c.GlobalString(configdirFlag.Name)
|
||||
|
||||
masterSeed := make([]byte, 256)
|
||||
n, err := io.ReadFull(rand.Reader, masterSeed)
|
||||
num, err := io.ReadFull(rand.Reader, masterSeed)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if n != len(masterSeed) {
|
||||
if num != len(masterSeed) {
|
||||
return fmt.Errorf("failed to read enough random")
|
||||
}
|
||||
|
||||
n, p := keystore.StandardScryptN, keystore.StandardScryptP
|
||||
if c.GlobalBool(utils.LightKDFFlag.Name) {
|
||||
n, p = keystore.LightScryptN, keystore.LightScryptP
|
||||
}
|
||||
text := "The master seed of clef is locked with a password. Please give a password. Do not forget this password."
|
||||
var password string
|
||||
for {
|
||||
password = getPassPhrase(text, true)
|
||||
if err := core.ValidatePasswordFormat(password); err != nil {
|
||||
fmt.Printf("invalid password: %v\n", err)
|
||||
} else {
|
||||
break
|
||||
}
|
||||
}
|
||||
cipherSeed, err := encryptSeed(masterSeed, []byte(password), n, p)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to encrypt master seed: %v", err)
|
||||
}
|
||||
|
||||
err = os.Mkdir(configDir, 0700)
|
||||
if err != nil && !os.IsExist(err) {
|
||||
return err
|
||||
}
|
||||
location := filepath.Join(configDir, "secrets.dat")
|
||||
location := filepath.Join(configDir, "masterseed.json")
|
||||
if _, err := os.Stat(location); err == nil {
|
||||
return fmt.Errorf("file %v already exists, will not overwrite", location)
|
||||
}
|
||||
err = ioutil.WriteFile(location, masterSeed, 0700)
|
||||
err = ioutil.WriteFile(location, cipherSeed, 0400)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
fmt.Printf("A master seed has been generated into %s\n", location)
|
||||
fmt.Printf(`
|
||||
This is required to be able to store credentials, such as :
|
||||
This is required to be able to store credentials, such as :
|
||||
* Passwords for keystores (used by rule engine)
|
||||
* Storage for javascript rules
|
||||
* Hash of rule-file
|
||||
|
||||
You should treat that file with utmost secrecy, and make a backup of it.
|
||||
You should treat that file with utmost secrecy, and make a backup of it.
|
||||
NOTE: This file does not contain your accounts. Those need to be backed up separately!
|
||||
|
||||
`)
|
||||
@@ -250,11 +277,11 @@ func attestFile(ctx *cli.Context) error {
|
||||
return err
|
||||
}
|
||||
|
||||
stretchedKey, err := readMasterKey(ctx)
|
||||
stretchedKey, err := readMasterKey(ctx, nil)
|
||||
if err != nil {
|
||||
utils.Fatalf(err.Error())
|
||||
}
|
||||
configDir := ctx.String(configdirFlag.Name)
|
||||
configDir := ctx.GlobalString(configdirFlag.Name)
|
||||
vaultLocation := filepath.Join(configDir, common.Bytes2Hex(crypto.Keccak256([]byte("vault"), stretchedKey)[:10]))
|
||||
confKey := crypto.Keccak256([]byte("config"), stretchedKey)
|
||||
|
||||
@@ -266,38 +293,36 @@ func attestFile(ctx *cli.Context) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func addCredential(ctx *cli.Context) error {
|
||||
func setCredential(ctx *cli.Context) error {
|
||||
if len(ctx.Args()) < 1 {
|
||||
utils.Fatalf("This command requires at leaste one argument.")
|
||||
utils.Fatalf("This command requires an address to be passed as an argument.")
|
||||
}
|
||||
if err := initialize(ctx); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
stretchedKey, err := readMasterKey(ctx)
|
||||
address := ctx.Args().First()
|
||||
password := getPassPhrase("Enter a passphrase to store with this address.", true)
|
||||
|
||||
stretchedKey, err := readMasterKey(ctx, nil)
|
||||
if err != nil {
|
||||
utils.Fatalf(err.Error())
|
||||
}
|
||||
configDir := ctx.String(configdirFlag.Name)
|
||||
configDir := ctx.GlobalString(configdirFlag.Name)
|
||||
vaultLocation := filepath.Join(configDir, common.Bytes2Hex(crypto.Keccak256([]byte("vault"), stretchedKey)[:10]))
|
||||
pwkey := crypto.Keccak256([]byte("credentials"), stretchedKey)
|
||||
|
||||
// Initialize the encrypted storages
|
||||
pwStorage := storage.NewAESEncryptedStorage(filepath.Join(vaultLocation, "credentials.json"), pwkey)
|
||||
key := ctx.Args().First()
|
||||
value := ""
|
||||
if len(ctx.Args()) > 1 {
|
||||
value = ctx.Args().Get(1)
|
||||
}
|
||||
pwStorage.Put(key, value)
|
||||
log.Info("Credential store updated", "key", key)
|
||||
pwStorage.Put(address, password)
|
||||
log.Info("Credential store updated", "key", address)
|
||||
return nil
|
||||
}
|
||||
|
||||
func initialize(c *cli.Context) error {
|
||||
// Set up the logger to print everything
|
||||
logOutput := os.Stdout
|
||||
if c.Bool(stdiouiFlag.Name) {
|
||||
if c.GlobalBool(stdiouiFlag.Name) {
|
||||
logOutput = os.Stderr
|
||||
// If using the stdioui, we can't do the 'confirm'-flow
|
||||
fmt.Fprintf(logOutput, legalWarning)
|
||||
@@ -318,26 +343,28 @@ func signer(c *cli.Context) error {
|
||||
var (
|
||||
ui core.SignerUI
|
||||
)
|
||||
if c.Bool(stdiouiFlag.Name) {
|
||||
if c.GlobalBool(stdiouiFlag.Name) {
|
||||
log.Info("Using stdin/stdout as UI-channel")
|
||||
ui = core.NewStdIOUI()
|
||||
} else {
|
||||
log.Info("Using CLI as UI-channel")
|
||||
ui = core.NewCommandlineUI()
|
||||
}
|
||||
db, err := core.NewAbiDBFromFiles(c.String(dBFlag.Name), c.String(customDBFlag.Name))
|
||||
fourByteDb := c.GlobalString(dBFlag.Name)
|
||||
fourByteLocal := c.GlobalString(customDBFlag.Name)
|
||||
db, err := core.NewAbiDBFromFiles(fourByteDb, fourByteLocal)
|
||||
if err != nil {
|
||||
utils.Fatalf(err.Error())
|
||||
}
|
||||
log.Info("Loaded 4byte db", "signatures", db.Size(), "file", c.String("4bytedb"))
|
||||
log.Info("Loaded 4byte db", "signatures", db.Size(), "file", fourByteDb, "local", fourByteLocal)
|
||||
|
||||
var (
|
||||
api core.ExternalAPI
|
||||
)
|
||||
|
||||
configDir := c.String(configdirFlag.Name)
|
||||
if stretchedKey, err := readMasterKey(c); err != nil {
|
||||
log.Info("No master seed provided, rules disabled")
|
||||
configDir := c.GlobalString(configdirFlag.Name)
|
||||
if stretchedKey, err := readMasterKey(c, ui); err != nil {
|
||||
log.Info("No master seed provided, rules disabled", "error", err)
|
||||
} else {
|
||||
|
||||
if err != nil {
|
||||
@@ -356,7 +383,7 @@ func signer(c *cli.Context) error {
|
||||
configStorage := storage.NewAESEncryptedStorage(filepath.Join(vaultLocation, "config.json"), confkey)
|
||||
|
||||
//Do we have a rule-file?
|
||||
ruleJS, err := ioutil.ReadFile(c.String(ruleFlag.Name))
|
||||
ruleJS, err := ioutil.ReadFile(c.GlobalString(ruleFlag.Name))
|
||||
if err != nil {
|
||||
log.Info("Could not load rulefile, rules not enabled", "file", "rulefile")
|
||||
} else {
|
||||
@@ -380,16 +407,15 @@ func signer(c *cli.Context) error {
|
||||
}
|
||||
|
||||
apiImpl := core.NewSignerAPI(
|
||||
c.Int64(utils.NetworkIdFlag.Name),
|
||||
c.String(keystoreFlag.Name),
|
||||
c.Bool(utils.NoUSBFlag.Name),
|
||||
c.GlobalInt64(utils.NetworkIdFlag.Name),
|
||||
c.GlobalString(keystoreFlag.Name),
|
||||
c.GlobalBool(utils.NoUSBFlag.Name),
|
||||
ui, db,
|
||||
c.Bool(utils.LightKDFFlag.Name))
|
||||
|
||||
c.GlobalBool(utils.LightKDFFlag.Name),
|
||||
c.GlobalBool(advancedMode.Name))
|
||||
api = apiImpl
|
||||
|
||||
// Audit logging
|
||||
if logfile := c.String(auditLogFlag.Name); logfile != "" {
|
||||
if logfile := c.GlobalString(auditLogFlag.Name); logfile != "" {
|
||||
api, err = core.NewAuditLogger(logfile, api)
|
||||
if err != nil {
|
||||
utils.Fatalf(err.Error())
|
||||
@@ -408,13 +434,13 @@ func signer(c *cli.Context) error {
|
||||
Service: api,
|
||||
Version: "1.0"},
|
||||
}
|
||||
if c.Bool(utils.RPCEnabledFlag.Name) {
|
||||
if c.GlobalBool(utils.RPCEnabledFlag.Name) {
|
||||
|
||||
vhosts := splitAndTrim(c.GlobalString(utils.RPCVirtualHostsFlag.Name))
|
||||
cors := splitAndTrim(c.GlobalString(utils.RPCCORSDomainFlag.Name))
|
||||
|
||||
// start http server
|
||||
httpEndpoint := fmt.Sprintf("%s:%d", c.String(utils.RPCListenAddrFlag.Name), c.Int(rpcPortFlag.Name))
|
||||
httpEndpoint := fmt.Sprintf("%s:%d", c.GlobalString(utils.RPCListenAddrFlag.Name), c.Int(rpcPortFlag.Name))
|
||||
listener, _, err := rpc.StartHTTPEndpoint(httpEndpoint, rpcAPI, []string{"account"}, cors, vhosts, rpc.DefaultHTTPTimeouts)
|
||||
if err != nil {
|
||||
utils.Fatalf("Could not start RPC api: %v", err)
|
||||
@@ -428,9 +454,9 @@ func signer(c *cli.Context) error {
|
||||
}()
|
||||
|
||||
}
|
||||
if !c.Bool(utils.IPCDisabledFlag.Name) {
|
||||
if !c.GlobalBool(utils.IPCDisabledFlag.Name) {
|
||||
if c.IsSet(utils.IPCPathFlag.Name) {
|
||||
ipcapiURL = c.String(utils.IPCPathFlag.Name)
|
||||
ipcapiURL = c.GlobalString(utils.IPCPathFlag.Name)
|
||||
} else {
|
||||
ipcapiURL = filepath.Join(configDir, "clef.ipc")
|
||||
}
|
||||
@@ -447,7 +473,7 @@ func signer(c *cli.Context) error {
|
||||
|
||||
}
|
||||
|
||||
if c.Bool(testFlag.Name) {
|
||||
if c.GlobalBool(testFlag.Name) {
|
||||
log.Info("Performing UI test")
|
||||
go testExternalUI(apiImpl)
|
||||
}
|
||||
@@ -506,48 +532,64 @@ func homeDir() string {
|
||||
}
|
||||
return ""
|
||||
}
|
||||
func readMasterKey(ctx *cli.Context) ([]byte, error) {
|
||||
func readMasterKey(ctx *cli.Context, ui core.SignerUI) ([]byte, error) {
|
||||
var (
|
||||
file string
|
||||
configDir = ctx.String(configdirFlag.Name)
|
||||
configDir = ctx.GlobalString(configdirFlag.Name)
|
||||
)
|
||||
if ctx.IsSet(signerSecretFlag.Name) {
|
||||
file = ctx.String(signerSecretFlag.Name)
|
||||
if ctx.GlobalIsSet(signerSecretFlag.Name) {
|
||||
file = ctx.GlobalString(signerSecretFlag.Name)
|
||||
} else {
|
||||
file = filepath.Join(configDir, "secrets.dat")
|
||||
file = filepath.Join(configDir, "masterseed.json")
|
||||
}
|
||||
if err := checkFile(file); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
masterKey, err := ioutil.ReadFile(file)
|
||||
cipherKey, err := ioutil.ReadFile(file)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(masterKey) < 256 {
|
||||
return nil, fmt.Errorf("master key of insufficient length, expected >255 bytes, got %d", len(masterKey))
|
||||
var password string
|
||||
// If ui is not nil, get the password from ui.
|
||||
if ui != nil {
|
||||
resp, err := ui.OnInputRequired(core.UserInputRequest{
|
||||
Title: "Master Password",
|
||||
Prompt: "Please enter the password to decrypt the master seed",
|
||||
IsPassword: true})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
password = resp.Text
|
||||
} else {
|
||||
password = getPassPhrase("Decrypt master seed of clef", false)
|
||||
}
|
||||
masterSeed, err := decryptSeed(cipherKey, password)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to decrypt the master seed of clef")
|
||||
}
|
||||
if len(masterSeed) < 256 {
|
||||
return nil, fmt.Errorf("master seed of insufficient length, expected >255 bytes, got %d", len(masterSeed))
|
||||
}
|
||||
|
||||
// Create vault location
|
||||
vaultLocation := filepath.Join(configDir, common.Bytes2Hex(crypto.Keccak256([]byte("vault"), masterKey)[:10]))
|
||||
vaultLocation := filepath.Join(configDir, common.Bytes2Hex(crypto.Keccak256([]byte("vault"), masterSeed)[:10]))
|
||||
err = os.Mkdir(vaultLocation, 0700)
|
||||
if err != nil && !os.IsExist(err) {
|
||||
return nil, err
|
||||
}
|
||||
//!TODO, use KDF to stretch the master key
|
||||
// stretched_key := stretch_key(master_key)
|
||||
|
||||
return masterKey, nil
|
||||
return masterSeed, nil
|
||||
}
|
||||
|
||||
// checkFile is a convenience function to check if a file
|
||||
// * exists
|
||||
// * is mode 0600
|
||||
// * is mode 0400
|
||||
func checkFile(filename string) error {
|
||||
info, err := os.Stat(filename)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed stat on %s: %v", filename, err)
|
||||
}
|
||||
// Check the unix permission bits
|
||||
if info.Mode().Perm()&077 != 0 {
|
||||
if info.Mode().Perm()&0377 != 0 {
|
||||
return fmt.Errorf("file (%v) has insecure file permissions (%v)", filename, info.Mode().String())
|
||||
}
|
||||
return nil
|
||||
@@ -613,6 +655,59 @@ func testExternalUI(api *core.SignerAPI) {
|
||||
|
||||
}
|
||||
|
||||
// getPassPhrase retrieves the password associated with clef, either fetched
|
||||
// from a list of preloaded passphrases, or requested interactively from the user.
|
||||
// TODO: there are many `getPassPhrase` functions, it will be better to abstract them into one.
|
||||
func getPassPhrase(prompt string, confirmation bool) string {
|
||||
fmt.Println(prompt)
|
||||
password, err := console.Stdin.PromptPassword("Passphrase: ")
|
||||
if err != nil {
|
||||
utils.Fatalf("Failed to read passphrase: %v", err)
|
||||
}
|
||||
if confirmation {
|
||||
confirm, err := console.Stdin.PromptPassword("Repeat passphrase: ")
|
||||
if err != nil {
|
||||
utils.Fatalf("Failed to read passphrase confirmation: %v", err)
|
||||
}
|
||||
if password != confirm {
|
||||
utils.Fatalf("Passphrases do not match")
|
||||
}
|
||||
}
|
||||
return password
|
||||
}
|
||||
|
||||
type encryptedSeedStorage struct {
|
||||
Description string `json:"description"`
|
||||
Version int `json:"version"`
|
||||
Params keystore.CryptoJSON `json:"params"`
|
||||
}
|
||||
|
||||
// encryptSeed uses a similar scheme as the keystore uses, but with a different wrapping,
|
||||
// to encrypt the master seed
|
||||
func encryptSeed(seed []byte, auth []byte, scryptN, scryptP int) ([]byte, error) {
|
||||
cryptoStruct, err := keystore.EncryptDataV3(seed, auth, scryptN, scryptP)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return json.Marshal(&encryptedSeedStorage{"Clef seed", 1, cryptoStruct})
|
||||
}
|
||||
|
||||
// decryptSeed decrypts the master seed
|
||||
func decryptSeed(keyjson []byte, auth string) ([]byte, error) {
|
||||
var encSeed encryptedSeedStorage
|
||||
if err := json.Unmarshal(keyjson, &encSeed); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if encSeed.Version != 1 {
|
||||
log.Warn(fmt.Sprintf("unsupported encryption format of seed: %d, operation will likely fail", encSeed.Version))
|
||||
}
|
||||
seed, err := keystore.DecryptDataV3(encSeed.Params, auth)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return seed, err
|
||||
}
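// Editor's note (illustrative, not part of this change): encryptSeed and decryptSeed
// are intended to round-trip for a given password, e.g.
//
//	seed := make([]byte, 256)
//	io.ReadFull(rand.Reader, seed)
//	blob, _ := encryptSeed(seed, []byte("example-password"), keystore.LightScryptN, keystore.LightScryptP)
//	got, _ := decryptSeed(blob, "example-password")
//	// bytes.Equal(seed, got) == true when the password matches
//
// The password literal is purely an example; errors are elided for brevity.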
|
||||
|
||||
/**
|
||||
//Create Account
|
||||
|
||||
|
@@ -31,43 +31,51 @@ NOTE: This file does not contain your accounts. Those need to be backed up separ
|
||||
|
||||
## Creating rules
|
||||
|
||||
Now, you can create a rule-file.
|
||||
Now, you can create a rule-file. Note that it is not mandatory to use predefined rules, but it's really handy.
|
||||
|
||||
```javascript
|
||||
function ApproveListing(){
|
||||
return "Approve"
|
||||
}
|
||||
```
|
||||
Get the `sha256` hash....
|
||||
|
||||
Get the `sha256` hash. If you have openssl, you can do `openssl sha256 rules.js`...
|
||||
```text
|
||||
#sha256sum rules.js
|
||||
6c21d1737429d6d4f2e55146da0797782f3c0a0355227f19d702df377c165d72 rules.js
|
||||
```
|
||||
...And then `attest` the file:
|
||||
...now `attest` the file...
|
||||
```text
|
||||
#./signer attest 6c21d1737429d6d4f2e55146da0797782f3c0a0355227f19d702df377c165d72
|
||||
|
||||
INFO [02-21|12:14:38] Ruleset attestation updated sha256=6c21d1737429d6d4f2e55146da0797782f3c0a0355227f19d702df377c165d72
|
||||
```
|
||||
At this point, we then start the signer with the rule-file:
|
||||
|
||||
...and (this is required only for non-production versions) load a mock-up `4byte.json` by copying the file from the source to your current working directory:
|
||||
```text
|
||||
#./signer --rules rules.json
|
||||
#cp $GOPATH/src/github.com/ethereum/go-ethereum/cmd/clef/4byte.json $PWD
|
||||
```
|
||||
|
||||
INFO [02-21|12:15:18] Using CLI as UI-channel
|
||||
INFO [02-21|12:15:18] Loaded 4byte db signatures=5509 file=./4byte.json
|
||||
INFO [02-21|12:15:18] Could not load rulefile, rules not enabled file=rulefile
|
||||
DEBUG[02-21|12:15:18] FS scan times list=35.335µs set=5.536µs diff=5.073µs
|
||||
DEBUG[02-21|12:15:18] Ledger support enabled
|
||||
DEBUG[02-21|12:15:18] Trezor support enabled
|
||||
INFO [02-21|12:15:18] Audit logs configured file=audit.log
|
||||
INFO [02-21|12:15:18] HTTP endpoint opened url=http://localhost:8550
|
||||
At this point, we can start the signer with the rule-file:
|
||||
```text
|
||||
#./signer --rules rules.js --rpc
|
||||
|
||||
INFO [09-25|20:28:11.866] Using CLI as UI-channel
|
||||
INFO [09-25|20:28:11.876] Loaded 4byte db signatures=5509 file=./4byte.json
|
||||
INFO [09-25|20:28:11.877] Rule engine configured file=./rules.js
|
||||
DEBUG[09-25|20:28:11.877] FS scan times list=100.781µs set=13.253µs diff=5.761µs
|
||||
DEBUG[09-25|20:28:11.884] Ledger support enabled
|
||||
DEBUG[09-25|20:28:11.888] Trezor support enabled
|
||||
INFO [09-25|20:28:11.888] Audit logs configured file=audit.log
|
||||
DEBUG[09-25|20:28:11.888] HTTP registered namespace=account
|
||||
INFO [09-25|20:28:11.890] HTTP endpoint opened url=http://localhost:8550
|
||||
DEBUG[09-25|20:28:11.890] IPC registered namespace=account
|
||||
INFO [09-25|20:28:11.890] IPC endpoint opened url=<nil>
|
||||
------- Signer info -------
|
||||
* extapi_version : 2.0.0
|
||||
* intapi_version : 2.0.0
|
||||
* extapi_http : http://localhost:8550
|
||||
* extapi_ipc : <nil>
|
||||
* extapi_version : 2.0.0
|
||||
* intapi_version : 1.2.0
|
||||
|
||||
```
|
||||
|
||||
Any list-requests will now be auto-approved by our rule-file.
|
||||
@@ -107,16 +115,16 @@ The `master_seed` was then used to derive a few other things:
|
||||
|
||||
## Adding credentials
|
||||
|
||||
In order to make more useful rules; sign transactions, the signer needs access to the passwords needed to unlock keystores.
|
||||
In order to make more useful rules like signing transactions, the signer needs access to the passwords needed to unlock keystores.
|
||||
|
||||
```text
|
||||
#./signer addpw 0x694267f14675d7e1b9494fd8d72fefe1755710fa test
|
||||
#./signer addpw "0x694267f14675d7e1b9494fd8d72fefe1755710fa" "test_password"
|
||||
|
||||
INFO [02-21|13:43:21] Credential store updated key=0x694267f14675d7e1b9494fd8d72fefe1755710fa
|
||||
```
|
||||
## More advanced rules
|
||||
|
||||
Now let's update the rules to make use of credentials
|
||||
Now let's update the rules to make use of credentials:
|
||||
|
||||
```javascript
|
||||
function ApproveListing(){
|
||||
@@ -134,13 +142,15 @@ function ApproveSignData(r){
|
||||
}
|
||||
|
||||
```
|
||||
In this example,
|
||||
* any requests to sign data with the account `0x694...` will be
|
||||
* auto-approved if the message contains with `bazonk`,
|
||||
* and auto-rejected if it does not.
|
||||
* Any other signing-requests will be passed along for manual approve/reject.
|
||||
In this example:
|
||||
* Any requests to sign data with the account `0x694...` will be
|
||||
* auto-approved if the message contains `bazonk`
|
||||
* auto-rejected if it does not.
|
||||
* Any other signing-requests will be passed along for manual approve/reject.
|
||||
|
||||
..attest the new file
|
||||
_Note: make sure that `0x694...` is an account you have access to. You can create it either via clef or the traditional account CLI tool. If the latter was chosen, make sure both clef and geth use the same keystore by specifying `--keystore path/to/your/keystore` when running clef._
|
||||
|
||||
Attest the new file...
|
||||
```text
|
||||
#sha256sum rules.js
|
||||
2a0cb661dacfc804b6e95d935d813fd17c0997a7170e4092ffbc34ca976acd9f rules.js
|
||||
@@ -153,23 +163,26 @@ INFO [02-21|14:36:30] Ruleset attestation updated sha256=2a0cb661da
|
||||
And start the signer:
|
||||
|
||||
```
|
||||
#./signer --rules rules.js
|
||||
#./signer --rules rules.js --rpc
|
||||
|
||||
INFO [02-21|14:41:56] Using CLI as UI-channel
|
||||
INFO [02-21|14:41:56] Loaded 4byte db signatures=5509 file=./4byte.json
|
||||
INFO [02-21|14:41:56] Rule engine configured file=rules.js
|
||||
DEBUG[02-21|14:41:56] FS scan times list=34.607µs set=4.509µs diff=4.87µs
|
||||
DEBUG[02-21|14:41:56] Ledger support enabled
|
||||
DEBUG[02-21|14:41:56] Trezor support enabled
|
||||
INFO [02-21|14:41:56] Audit logs configured file=audit.log
|
||||
INFO [02-21|14:41:56] HTTP endpoint opened url=http://localhost:8550
|
||||
INFO [09-25|21:02:16.450] Using CLI as UI-channel
|
||||
INFO [09-25|21:02:16.466] Loaded 4byte db signatures=5509 file=./4byte.json
|
||||
INFO [09-25|21:02:16.467] Rule engine configured file=./rules.js
|
||||
DEBUG[09-25|21:02:16.468] FS scan times list=1.45262ms set=21.926µs diff=6.944µs
|
||||
DEBUG[09-25|21:02:16.473] Ledger support enabled
|
||||
DEBUG[09-25|21:02:16.475] Trezor support enabled
|
||||
INFO [09-25|21:02:16.476] Audit logs configured file=audit.log
|
||||
DEBUG[09-25|21:02:16.476] HTTP registered namespace=account
|
||||
INFO [09-25|21:02:16.478] HTTP endpoint opened url=http://localhost:8550
|
||||
DEBUG[09-25|21:02:16.478] IPC registered namespace=account
|
||||
INFO [09-25|21:02:16.478] IPC endpoint opened url=<nil>
|
||||
------- Signer info -------
|
||||
* extapi_version : 2.0.0
|
||||
* intapi_version : 1.2.0
|
||||
* intapi_version : 2.0.0
|
||||
* extapi_http : http://localhost:8550
|
||||
* extapi_ipc : <nil>
|
||||
INFO [02-21|14:41:56] error occurred during execution error="ReferenceError: 'OnSignerStartup' is not defined"
|
||||
```
|
||||
|
||||
And then test signing, once with `bazonk` and once without:
|
||||
|
||||
```
|
||||
@@ -190,9 +203,9 @@ INFO [02-21|14:42:56] Op rejected
|
||||
The signer also stores all traffic over the external API in a log file. The last 4 lines show the two requests and their responses:
|
||||
|
||||
```text
|
||||
#tail audit.log -n 4
|
||||
#tail -n 4 audit.log
|
||||
t=2018-02-21T14:42:41+0100 lvl=info msg=Sign api=signer type=request metadata="{\"remote\":\"127.0.0.1:49706\",\"local\":\"localhost:8550\",\"scheme\":\"HTTP/1.1\"}" addr="0x694267f14675d7e1b9494fd8d72fefe1755710fa [chksum INVALID]" data=202062617a6f6e6b2062617a2067617a0a
|
||||
t=2018-02-21T14:42:42+0100 lvl=info msg=Sign api=signer type=response data=93e6161840c3ae1efc26dc68dedab6e8fc233bb3fefa1b4645dbf6609b93dace160572ea4ab33240256bb6d3dadb60dcd9c515d6374d3cf614ee897408d41d541c error=nil
|
||||
t=2018-02-21T14:42:56+0100 lvl=info msg=Sign api=signer type=request metadata="{\"remote\":\"127.0.0.1:49708\",\"local\":\"localhost:8550\",\"scheme\":\"HTTP/1.1\"}" addr="0x694267f14675d7e1b9494fd8d72fefe1755710fa [chksum INVALID]" data=2020626f6e6b2062617a2067617a0a
|
||||
t=2018-02-21T14:42:56+0100 lvl=info msg=Sign api=signer type=response data= error="Request denied"
|
||||
```
|
||||
```
|
@@ -21,21 +21,33 @@ Private key information can be printed by using the `--private` flag;
|
||||
make sure to use this feature with great caution!
|
||||
|
||||
|
||||
### `ethkey sign <keyfile> <message/file>`
|
||||
### `ethkey signmessage <keyfile> <message/file>`
|
||||
|
||||
Sign the message with a keyfile.
|
||||
It is possible to refer to a file containing the message.
|
||||
To sign a message contained in a file, use the `--msgfile` flag.
|
||||
|
||||
|
||||
### `ethkey verify <address> <signature> <message/file>`
|
||||
### `ethkey verifymessage <address> <signature> <message/file>`
|
||||
|
||||
Verify the signature of the message.
|
||||
It is possible to refer to a file containing the message.
|
||||
To verify a message contained in a file, use the `--msgfile` flag.
|
||||
|
||||
|
||||
### `ethkey changepassphrase <keyfile>`
|
||||
|
||||
Change the passphrase of a keyfile.
|
||||
Use the `--newpasswordfile` flag to point to the new password file.
|
||||
|
||||
|
||||
## Passphrases
|
||||
|
||||
For every command that uses a keyfile, you will be prompted to provide the
|
||||
passphrase for decrypting the keyfile. To avoid this message, it is possible
|
||||
to pass the passphrase by using the `--passphrase` flag pointing to a file that
|
||||
to pass the passphrase by using the `--passwordfile` flag pointing to a file that
|
||||
contains the passphrase.
|
||||
|
||||
## JSON
|
||||
|
||||
In case you need to output the result in JSON format, you can do so by using the `--json` flag.
|
||||
|
@@ -50,6 +50,6 @@ func compileCmd(ctx *cli.Context) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
fmt.Fprintln(ctx.App.Writer, bin)
|
||||
fmt.Println(bin)
|
||||
return nil
|
||||
}
|
||||
|
@@ -44,7 +44,7 @@ func disasmCmd(ctx *cli.Context) error {
|
||||
return err
|
||||
}
|
||||
|
||||
code := strings.TrimSpace(string(in[:]))
|
||||
fmt.Fprintf(ctx.App.Writer, "%v\n", code)
|
||||
code := strings.TrimSpace(string(in))
|
||||
fmt.Printf("%v\n", code)
|
||||
return asm.PrintDisassembled(code)
|
||||
}
|
||||
|
@@ -30,11 +30,10 @@ func Compile(fn string, src []byte, debug bool) (string, error) {
|
||||
bin, compileErrors := compiler.Compile()
|
||||
if len(compileErrors) > 0 {
|
||||
// report errors
|
||||
errs := ""
|
||||
for _, err := range compileErrors {
|
||||
errs += fmt.Sprintf("%s:%v\n", fn, err)
|
||||
fmt.Printf("%s:%v\n", fn, err)
|
||||
}
|
||||
return "", errors.New(errs + "compiling failed\n")
|
||||
return "", errors.New("compiling failed")
|
||||
}
|
||||
return bin, nil
|
||||
}
|
||||
|
@@ -80,16 +80,16 @@ func runCmd(ctx *cli.Context) error {
|
||||
}
|
||||
|
||||
var (
|
||||
tracer vm.Tracer
|
||||
debugLogger *vm.StructLogger
|
||||
statedb *state.StateDB
|
||||
chainConfig *params.ChainConfig
|
||||
sender = common.BytesToAddress([]byte("sender"))
|
||||
receiver = common.BytesToAddress([]byte("receiver"))
|
||||
blockNumber uint64
|
||||
tracer vm.Tracer
|
||||
debugLogger *vm.StructLogger
|
||||
statedb *state.StateDB
|
||||
chainConfig *params.ChainConfig
|
||||
sender = common.BytesToAddress([]byte("sender"))
|
||||
receiver = common.BytesToAddress([]byte("receiver"))
|
||||
genesisConfig *core.Genesis
|
||||
)
|
||||
if ctx.GlobalBool(MachineFlag.Name) {
|
||||
tracer = NewJSONLogger(logconfig, os.Stdout)
|
||||
tracer = vm.NewJSONLogger(logconfig, os.Stdout)
|
||||
} else if ctx.GlobalBool(DebugFlag.Name) {
|
||||
debugLogger = vm.NewStructLogger(logconfig)
|
||||
tracer = debugLogger
|
||||
@@ -98,13 +98,14 @@ func runCmd(ctx *cli.Context) error {
|
||||
}
|
||||
if ctx.GlobalString(GenesisFlag.Name) != "" {
|
||||
gen := readGenesis(ctx.GlobalString(GenesisFlag.Name))
|
||||
genesisConfig = gen
|
||||
db := ethdb.NewMemDatabase()
|
||||
genesis := gen.ToBlock(db)
|
||||
statedb, _ = state.New(genesis.Root(), state.NewDatabase(db))
|
||||
chainConfig = gen.Config
|
||||
blockNumber = gen.Number
|
||||
} else {
|
||||
statedb, _ = state.New(common.Hash{}, state.NewDatabase(ethdb.NewMemDatabase()))
|
||||
genesisConfig = new(core.Genesis)
|
||||
}
|
||||
if ctx.GlobalString(SenderFlag.Name) != "" {
|
||||
sender = common.HexToAddress(ctx.GlobalString(SenderFlag.Name))
|
||||
@@ -128,13 +129,13 @@ func runCmd(ctx *cli.Context) error {
|
||||
if ctx.GlobalString(CodeFileFlag.Name) == "-" {
|
||||
//Try reading from stdin
|
||||
if hexcode, err = ioutil.ReadAll(os.Stdin); err != nil {
|
||||
fmt.Fprintf(ctx.App.ErrWriter, "Could not load code from stdin: %v\n", err)
|
||||
fmt.Printf("Could not load code from stdin: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
} else {
|
||||
// Codefile with hex assembly
|
||||
if hexcode, err = ioutil.ReadFile(ctx.GlobalString(CodeFileFlag.Name)); err != nil {
|
||||
fmt.Fprintf(ctx.App.ErrWriter, "Could not load code from file: %v\n", err)
|
||||
fmt.Printf("Could not load code from file: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
@@ -156,13 +157,19 @@ func runCmd(ctx *cli.Context) error {
|
||||
}
|
||||
|
||||
initialGas := ctx.GlobalUint64(GasFlag.Name)
|
||||
if genesisConfig.GasLimit != 0 {
|
||||
initialGas = genesisConfig.GasLimit
|
||||
}
|
||||
runtimeConfig := runtime.Config{
|
||||
Origin: sender,
|
||||
State: statedb,
|
||||
GasLimit: initialGas,
|
||||
GasPrice: utils.GlobalBig(ctx, PriceFlag.Name),
|
||||
Value: utils.GlobalBig(ctx, ValueFlag.Name),
|
||||
BlockNumber: new(big.Int).SetUint64(blockNumber),
|
||||
Difficulty: genesisConfig.Difficulty,
|
||||
Time: new(big.Int).SetUint64(genesisConfig.Timestamp),
|
||||
Coinbase: genesisConfig.Coinbase,
|
||||
BlockNumber: new(big.Int).SetUint64(genesisConfig.Number),
|
||||
EVMConfig: vm.Config{
|
||||
Tracer: tracer,
|
||||
Debug: ctx.GlobalBool(DebugFlag.Name) || ctx.GlobalBool(MachineFlag.Name),
|
||||
@@ -172,11 +179,11 @@ func runCmd(ctx *cli.Context) error {
|
||||
if cpuProfilePath := ctx.GlobalString(CPUProfileFlag.Name); cpuProfilePath != "" {
|
||||
f, err := os.Create(cpuProfilePath)
|
||||
if err != nil {
|
||||
fmt.Fprintf(ctx.App.ErrWriter, "could not create CPU profile: %v\n", err)
|
||||
fmt.Println("could not create CPU profile: ", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
if err := pprof.StartCPUProfile(f); err != nil {
|
||||
fmt.Fprintf(ctx.App.ErrWriter, "could not start CPU profile: %v\n", err)
|
||||
fmt.Println("could not start CPU profile: ", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
defer pprof.StopCPUProfile()
|
||||
@@ -199,18 +206,19 @@ func runCmd(ctx *cli.Context) error {
|
||||
execTime := time.Since(tstart)
|
||||
|
||||
if ctx.GlobalBool(DumpFlag.Name) {
|
||||
statedb.Commit(true)
|
||||
statedb.IntermediateRoot(true)
|
||||
fmt.Fprintln(ctx.App.Writer, string(statedb.Dump()))
|
||||
fmt.Println(string(statedb.Dump()))
|
||||
}
|
||||
|
||||
if memProfilePath := ctx.GlobalString(MemProfileFlag.Name); memProfilePath != "" {
|
||||
f, err := os.Create(memProfilePath)
|
||||
if err != nil {
|
||||
fmt.Fprintf(ctx.App.ErrWriter, "could not create memory profile: %v\n", err)
|
||||
fmt.Println("could not create memory profile: ", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
if err := pprof.WriteHeapProfile(f); err != nil {
|
||||
fmt.Fprintf(ctx.App.ErrWriter, "could not create memory profile: %v\n", err)
|
||||
fmt.Println("could not write memory profile: ", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
f.Close()
|
||||
@@ -218,17 +226,17 @@ func runCmd(ctx *cli.Context) error {
|
||||
|
||||
if ctx.GlobalBool(DebugFlag.Name) {
|
||||
if debugLogger != nil {
|
||||
fmt.Fprintln(ctx.App.ErrWriter, "#### TRACE ####")
|
||||
vm.WriteTrace(ctx.App.ErrWriter, debugLogger.StructLogs())
|
||||
fmt.Fprintln(os.Stderr, "#### TRACE ####")
|
||||
vm.WriteTrace(os.Stderr, debugLogger.StructLogs())
|
||||
}
|
||||
fmt.Fprintln(ctx.App.ErrWriter, "#### LOGS ####")
|
||||
vm.WriteLogs(ctx.App.ErrWriter, statedb.Logs())
|
||||
fmt.Fprintln(os.Stderr, "#### LOGS ####")
|
||||
vm.WriteLogs(os.Stderr, statedb.Logs())
|
||||
}
|
||||
|
||||
if ctx.GlobalBool(StatDumpFlag.Name) {
|
||||
var mem goruntime.MemStats
|
||||
goruntime.ReadMemStats(&mem)
|
||||
fmt.Fprintf(ctx.App.ErrWriter, `evm execution time: %v
|
||||
fmt.Fprintf(os.Stderr, `evm execution time: %v
|
||||
heap objects: %d
|
||||
allocations: %d
|
||||
total allocations: %d
|
||||
@@ -238,9 +246,9 @@ Gas used: %d
|
||||
`, execTime, mem.HeapObjects, mem.Alloc, mem.TotalAlloc, mem.NumGC, initialGas-leftOverGas)
|
||||
}
|
||||
if tracer == nil {
|
||||
fmt.Fprintf(ctx.App.Writer, "0x%x\n", ret)
|
||||
fmt.Printf("0x%x\n", ret)
|
||||
if err != nil {
|
||||
fmt.Fprintf(ctx.App.ErrWriter, " error: %v\n", err)
|
||||
fmt.Printf(" error: %v\n", err)
|
||||
}
|
||||
}
|
||||
|
||||
|
@@ -68,7 +68,7 @@ func stateTestCmd(ctx *cli.Context) error {
|
||||
)
|
||||
switch {
|
||||
case ctx.GlobalBool(MachineFlag.Name):
|
||||
tracer = NewJSONLogger(config, os.Stderr)
|
||||
tracer = vm.NewJSONLogger(config, os.Stderr)
|
||||
|
||||
case ctx.GlobalBool(DebugFlag.Name):
|
||||
debugger = vm.NewStructLogger(config)
|
||||
@@ -97,6 +97,10 @@ func stateTestCmd(ctx *cli.Context) error {
|
||||
// Run the test and aggregate the result
|
||||
result := &StatetestResult{Name: key, Fork: st.Fork, Pass: true}
|
||||
state, err := test.Run(st, cfg)
|
||||
// print state root for evmlab tracing
|
||||
if ctx.GlobalBool(MachineFlag.Name) && state != nil {
|
||||
fmt.Fprintf(os.Stderr, "{\"stateRoot\": \"%x\"}\n", state.IntermediateRoot(false))
|
||||
}
|
||||
if err != nil {
|
||||
// Test failed, mark as so and dump any state to aid debugging
|
||||
result.Pass, result.Error = false, err.Error()
|
||||
@@ -105,23 +109,19 @@ func stateTestCmd(ctx *cli.Context) error {
|
||||
result.State = &dump
|
||||
}
|
||||
}
|
||||
// print state root for evmlab tracing (already committed above, so no need to delete objects again
|
||||
if ctx.GlobalBool(MachineFlag.Name) && state != nil {
|
||||
fmt.Fprintf(ctx.App.ErrWriter, "{\"stateRoot\": \"%x\"}\n", state.IntermediateRoot(false))
|
||||
}
|
||||
|
||||
results = append(results, *result)
|
||||
|
||||
// Print any structured logs collected
|
||||
if ctx.GlobalBool(DebugFlag.Name) {
|
||||
if debugger != nil {
|
||||
fmt.Fprintln(ctx.App.ErrWriter, "#### TRACE ####")
|
||||
vm.WriteTrace(ctx.App.ErrWriter, debugger.StructLogs())
|
||||
fmt.Fprintln(os.Stderr, "#### TRACE ####")
|
||||
vm.WriteTrace(os.Stderr, debugger.StructLogs())
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
out, _ := json.MarshalIndent(results, "", " ")
|
||||
fmt.Fprintln(ctx.App.Writer, string(out))
|
||||
fmt.Println(string(out))
|
||||
return nil
|
||||
}
|
||||
|
@@ -54,8 +54,8 @@ import (
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"github.com/ethereum/go-ethereum/node"
|
||||
"github.com/ethereum/go-ethereum/p2p"
|
||||
"github.com/ethereum/go-ethereum/p2p/discover"
|
||||
"github.com/ethereum/go-ethereum/p2p/discv5"
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
"github.com/ethereum/go-ethereum/p2p/nat"
|
||||
"github.com/ethereum/go-ethereum/params"
|
||||
"golang.org/x/net/websocket"
|
||||
@@ -157,7 +157,8 @@ func main() {
|
||||
if blob, err = ioutil.ReadFile(*accPassFlag); err != nil {
|
||||
log.Crit("Failed to read account password contents", "file", *accPassFlag, "err", err)
|
||||
}
|
||||
pass := string(blob)
|
||||
// Delete trailing newline in password
|
||||
pass := strings.TrimSuffix(string(blob), "\n")
|
||||
|
||||
ks := keystore.NewKeyStore(filepath.Join(os.Getenv("HOME"), ".faucet", "keys"), keystore.StandardScryptN, keystore.StandardScryptP)
|
||||
if blob, err = ioutil.ReadFile(*accJSONFlag); err != nil {
|
||||
@@ -198,6 +199,8 @@ type faucet struct {
|
||||
|
||||
keystore *keystore.KeyStore // Keystore containing the single signer
|
||||
account accounts.Account // Account funding user faucet requests
|
||||
head *types.Header // Current head header of the faucet
|
||||
balance *big.Int // Current balance of the faucet
|
||||
nonce uint64 // Current pending nonce of the faucet
|
||||
price *big.Int // Current gas price to issue funds with
|
||||
|
||||
@@ -252,8 +255,10 @@ func newFaucet(genesis *core.Genesis, port int, enodes []*discv5.Node, network u
|
||||
return nil, err
|
||||
}
|
||||
for _, boot := range enodes {
|
||||
old, _ := discover.ParseNode(boot.String())
|
||||
stack.Server().AddPeer(old)
|
||||
old, err := enode.ParseV4(boot.String())
|
||||
if err == nil {
|
||||
stack.Server().AddPeer(old)
|
||||
}
|
||||
}
|
||||
// Attach to the client and retrieve and interesting metadatas
|
||||
api, err := stack.Attach()
|
||||
@@ -323,33 +328,30 @@ func (f *faucet) apiHandler(conn *websocket.Conn) {
|
||||
nonce uint64
|
||||
err error
|
||||
)
|
||||
for {
|
||||
// Attempt to retrieve the stats, may error on no faucet connectivity
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
|
||||
head, err = f.client.HeaderByNumber(ctx, nil)
|
||||
if err == nil {
|
||||
balance, err = f.client.BalanceAt(ctx, f.account.Address, head.Number)
|
||||
if err == nil {
|
||||
nonce, err = f.client.NonceAt(ctx, f.account.Address, nil)
|
||||
}
|
||||
for head == nil || balance == nil {
|
||||
// Retrieve the current stats cached by the faucet
|
||||
f.lock.RLock()
|
||||
if f.head != nil {
|
||||
head = types.CopyHeader(f.head)
|
||||
}
|
||||
cancel()
|
||||
if f.balance != nil {
|
||||
balance = new(big.Int).Set(f.balance)
|
||||
}
|
||||
nonce = f.nonce
|
||||
f.lock.RUnlock()
|
||||
|
||||
// If stats retrieval failed, wait a bit and retry
|
||||
if err != nil {
|
||||
if err = sendError(conn, errors.New("Faucet offline: "+err.Error())); err != nil {
|
||||
if head == nil || balance == nil {
|
||||
// Report the faucet offline until initial stats are ready
|
||||
if err = sendError(conn, errors.New("Faucet offline")); err != nil {
|
||||
log.Warn("Failed to send faucet error to client", "err", err)
|
||||
return
|
||||
}
|
||||
time.Sleep(3 * time.Second)
|
||||
continue
|
||||
}
|
||||
// Initial stats reported successfully, proceed with user interaction
|
||||
break
|
||||
}
|
||||
// Send over the initial stats and the latest header
|
||||
if err = send(conn, map[string]interface{}{
|
||||
"funds": balance.Div(balance, ether),
|
||||
"funds": new(big.Int).Div(balance, ether),
|
||||
"funded": nonce,
|
||||
"peers": f.stack.Server().PeerCount(),
|
||||
"requests": f.reqs,
|
||||
@@ -519,6 +521,47 @@ func (f *faucet) apiHandler(conn *websocket.Conn) {
|
||||
}
|
||||
}
|
||||
|
||||
// refresh attempts to retrieve the latest header from the chain and extract the
|
||||
// associated faucet balance and nonce for connectivity caching.
|
||||
func (f *faucet) refresh(head *types.Header) error {
|
||||
// Ensure a state update does not run for too long
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
|
||||
defer cancel()
|
||||
|
||||
// If no header was specified, use the current chain head
|
||||
var err error
|
||||
if head == nil {
|
||||
if head, err = f.client.HeaderByNumber(ctx, nil); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
// Retrieve the balance, nonce and gas price from the current head
|
||||
var (
|
||||
balance *big.Int
|
||||
nonce uint64
|
||||
price *big.Int
|
||||
)
|
||||
if balance, err = f.client.BalanceAt(ctx, f.account.Address, head.Number); err != nil {
|
||||
return err
|
||||
}
|
||||
if nonce, err = f.client.NonceAt(ctx, f.account.Address, head.Number); err != nil {
|
||||
return err
|
||||
}
|
||||
if price, err = f.client.SuggestGasPrice(ctx); err != nil {
|
||||
return err
|
||||
}
|
||||
// Everything succeeded, update the cached stats and eject old requests
|
||||
f.lock.Lock()
|
||||
f.head, f.balance = head, balance
|
||||
f.price, f.nonce = price, nonce
|
||||
for len(f.reqs) > 0 && f.reqs[0].Tx.Nonce() < f.nonce {
|
||||
f.reqs = f.reqs[1:]
|
||||
}
|
||||
f.lock.Unlock()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// loop keeps waiting for interesting events and pushes them out to connected
|
||||
// websockets.
|
||||
func (f *faucet) loop() {
|
||||
@@ -536,45 +579,27 @@ func (f *faucet) loop() {
|
||||
go func() {
|
||||
for head := range update {
|
||||
// New chain head arrived, query the current stats and stream to clients
|
||||
var (
|
||||
balance *big.Int
|
||||
nonce uint64
|
||||
price *big.Int
|
||||
err error
|
||||
)
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
|
||||
balance, err = f.client.BalanceAt(ctx, f.account.Address, head.Number)
|
||||
if err == nil {
|
||||
nonce, err = f.client.NonceAt(ctx, f.account.Address, nil)
|
||||
if err == nil {
|
||||
price, err = f.client.SuggestGasPrice(ctx)
|
||||
}
|
||||
timestamp := time.Unix(int64(head.Time), 0)
|
||||
if time.Since(timestamp) > time.Hour {
|
||||
log.Warn("Skipping faucet refresh, head too old", "number", head.Number, "hash", head.Hash(), "age", common.PrettyAge(timestamp))
|
||||
continue
|
||||
}
|
||||
cancel()
|
||||
|
||||
// If querying the data failed, try for the next block
|
||||
if err != nil {
|
||||
if err := f.refresh(head); err != nil {
|
||||
log.Warn("Failed to update faucet state", "block", head.Number, "hash", head.Hash(), "err", err)
|
||||
continue
|
||||
} else {
|
||||
log.Info("Updated faucet state", "block", head.Number, "hash", head.Hash(), "balance", balance, "nonce", nonce, "price", price)
|
||||
}
|
||||
// Faucet state retrieved, update locally and send to clients
|
||||
balance = new(big.Int).Div(balance, ether)
|
||||
|
||||
f.lock.Lock()
|
||||
f.price, f.nonce = price, nonce
|
||||
for len(f.reqs) > 0 && f.reqs[0].Tx.Nonce() < f.nonce {
|
||||
f.reqs = f.reqs[1:]
|
||||
}
|
||||
f.lock.Unlock()
|
||||
|
||||
f.lock.RLock()
|
||||
log.Info("Updated faucet state", "number", head.Number, "hash", head.Hash(), "age", common.PrettyAge(timestamp), "balance", f.balance, "nonce", f.nonce, "price", f.price)
|
||||
|
||||
balance := new(big.Int).Div(f.balance, ether)
|
||||
peers := f.stack.Server().PeerCount()
|
||||
|
||||
for _, conn := range f.conns {
|
||||
if err := send(conn, map[string]interface{}{
|
||||
"funds": balance,
|
||||
"funded": f.nonce,
|
||||
"peers": f.stack.Server().PeerCount(),
|
||||
"peers": peers,
|
||||
"requests": f.reqs,
|
||||
}, time.Second); err != nil {
|
||||
log.Warn("Failed to send stats to client", "err", err)
|
||||
|
@@ -48,7 +48,6 @@ var (
|
||||
ArgsUsage: "<genesisPath>",
|
||||
Flags: []cli.Flag{
|
||||
utils.DataDirFlag,
|
||||
utils.LightModeFlag,
|
||||
},
|
||||
Category: "BLOCKCHAIN COMMANDS",
|
||||
Description: `
|
||||
@@ -66,7 +65,7 @@ It expects the genesis file as argument.`,
|
||||
Flags: []cli.Flag{
|
||||
utils.DataDirFlag,
|
||||
utils.CacheFlag,
|
||||
utils.LightModeFlag,
|
||||
utils.SyncModeFlag,
|
||||
utils.GCModeFlag,
|
||||
utils.CacheDatabaseFlag,
|
||||
utils.CacheGCFlag,
|
||||
@@ -87,7 +86,7 @@ processing will proceed even if an individual RLP-file import failure occurs.`,
|
||||
Flags: []cli.Flag{
|
||||
utils.DataDirFlag,
|
||||
utils.CacheFlag,
|
||||
utils.LightModeFlag,
|
||||
utils.SyncModeFlag,
|
||||
},
|
||||
Category: "BLOCKCHAIN COMMANDS",
|
||||
Description: `
|
||||
@@ -105,7 +104,7 @@ be gzipped.`,
|
||||
Flags: []cli.Flag{
|
||||
utils.DataDirFlag,
|
||||
utils.CacheFlag,
|
||||
utils.LightModeFlag,
|
||||
utils.SyncModeFlag,
|
||||
},
|
||||
Category: "BLOCKCHAIN COMMANDS",
|
||||
Description: `
|
||||
@@ -119,7 +118,7 @@ be gzipped.`,
|
||||
Flags: []cli.Flag{
|
||||
utils.DataDirFlag,
|
||||
utils.CacheFlag,
|
||||
utils.LightModeFlag,
|
||||
utils.SyncModeFlag,
|
||||
},
|
||||
Category: "BLOCKCHAIN COMMANDS",
|
||||
Description: `
|
||||
@@ -149,7 +148,6 @@ The first argument must be the directory containing the blockchain to download f
|
||||
ArgsUsage: " ",
|
||||
Flags: []cli.Flag{
|
||||
utils.DataDirFlag,
|
||||
utils.LightModeFlag,
|
||||
},
|
||||
Category: "BLOCKCHAIN COMMANDS",
|
||||
Description: `
|
||||
@@ -163,7 +161,7 @@ Remove blockchain and state databases`,
|
||||
Flags: []cli.Flag{
|
||||
utils.DataDirFlag,
|
||||
utils.CacheFlag,
|
||||
utils.LightModeFlag,
|
||||
utils.SyncModeFlag,
|
||||
},
|
||||
Category: "BLOCKCHAIN COMMANDS",
|
||||
Description: `
|
||||
@@ -342,9 +340,9 @@ func importPreimages(ctx *cli.Context) error {
|
||||
|
||||
start := time.Now()
|
||||
if err := utils.ImportPreimages(diskdb, ctx.Args().First()); err != nil {
|
||||
utils.Fatalf("Export error: %v\n", err)
|
||||
utils.Fatalf("Import error: %v\n", err)
|
||||
}
|
||||
fmt.Printf("Export done in %v\n", time.Since(start))
|
||||
fmt.Printf("Import done in %v\n", time.Since(start))
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@@ -20,7 +20,7 @@ import (
|
||||
"bufio"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"math/big"
|
||||
"os"
|
||||
"reflect"
|
||||
"unicode"
|
||||
@@ -152,7 +152,9 @@ func enableWhisper(ctx *cli.Context) bool {
|
||||
|
||||
func makeFullNode(ctx *cli.Context) *node.Node {
|
||||
stack, cfg := makeConfigNode(ctx)
|
||||
|
||||
if ctx.GlobalIsSet(utils.ConstantinopleOverrideFlag.Name) {
|
||||
cfg.Eth.ConstantinopleOverride = new(big.Int).SetUint64(ctx.GlobalUint64(utils.ConstantinopleOverrideFlag.Name))
|
||||
}
|
||||
utils.RegisterEthService(stack, &cfg.Eth)
|
||||
|
||||
if ctx.GlobalBool(utils.DashboardEnabledFlag.Name) {
|
||||
@@ -168,6 +170,9 @@ func makeFullNode(ctx *cli.Context) *node.Node {
|
||||
if ctx.GlobalIsSet(utils.WhisperMinPOWFlag.Name) {
|
||||
cfg.Shh.MinimumAcceptedPOW = ctx.Float64(utils.WhisperMinPOWFlag.Name)
|
||||
}
|
||||
if ctx.GlobalIsSet(utils.WhisperRestrictConnectionBetweenLightClientsFlag.Name) {
|
||||
cfg.Shh.RestrictConnectionBetweenLightClients = true
|
||||
}
|
||||
utils.RegisterShhService(stack, &cfg.Shh)
|
||||
}
|
||||
|
||||
@@ -192,7 +197,17 @@ func dumpConfig(ctx *cli.Context) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
io.WriteString(os.Stdout, comment)
|
||||
os.Stdout.Write(out)
|
||||
|
||||
dump := os.Stdout
|
||||
if ctx.NArg() > 0 {
|
||||
dump, err = os.OpenFile(ctx.Args().Get(0), os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer dump.Close()
|
||||
}
|
||||
dump.WriteString(comment)
|
||||
dump.Write(out)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
@@ -31,7 +31,7 @@ import (
|
||||
)
|
||||
|
||||
const (
|
||||
ipcAPIs = "admin:1.0 debug:1.0 eth:1.0 miner:1.0 net:1.0 personal:1.0 rpc:1.0 shh:1.0 txpool:1.0 web3:1.0"
|
||||
ipcAPIs = "admin:1.0 debug:1.0 eth:1.0 ethash:1.0 miner:1.0 net:1.0 personal:1.0 rpc:1.0 shh:1.0 txpool:1.0 web3:1.0"
|
||||
httpAPIs = "eth:1.0 net:1.0 rpc:1.0 web3:1.0"
|
||||
)
|
||||
|
||||
|
@@ -21,7 +21,6 @@ import (
|
||||
"fmt"
|
||||
"math"
|
||||
"os"
|
||||
"runtime"
|
||||
godebug "runtime/debug"
|
||||
"sort"
|
||||
"strconv"
|
||||
@@ -39,7 +38,7 @@ import (
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"github.com/ethereum/go-ethereum/metrics"
|
||||
"github.com/ethereum/go-ethereum/node"
|
||||
"gopkg.in/urfave/cli.v1"
|
||||
cli "gopkg.in/urfave/cli.v1"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -72,6 +71,7 @@ var (
|
||||
utils.EthashDatasetDirFlag,
|
||||
utils.EthashDatasetsInMemoryFlag,
|
||||
utils.EthashDatasetsOnDiskFlag,
|
||||
utils.TxPoolLocalsFlag,
|
||||
utils.TxPoolNoLocalsFlag,
|
||||
utils.TxPoolJournalFlag,
|
||||
utils.TxPoolRejournalFlag,
|
||||
@@ -82,25 +82,35 @@ var (
|
||||
utils.TxPoolAccountQueueFlag,
|
||||
utils.TxPoolGlobalQueueFlag,
|
||||
utils.TxPoolLifetimeFlag,
|
||||
utils.FastSyncFlag,
|
||||
utils.LightModeFlag,
|
||||
utils.SyncModeFlag,
|
||||
utils.GCModeFlag,
|
||||
utils.LightServFlag,
|
||||
utils.LightPeersFlag,
|
||||
utils.LightKDFFlag,
|
||||
utils.WhitelistFlag,
|
||||
utils.CacheFlag,
|
||||
utils.CacheDatabaseFlag,
|
||||
utils.CacheTrieFlag,
|
||||
utils.CacheGCFlag,
|
||||
utils.TrieCacheGenFlag,
|
||||
utils.ListenPortFlag,
|
||||
utils.MaxPeersFlag,
|
||||
utils.MaxPendingPeersFlag,
|
||||
utils.EtherbaseFlag,
|
||||
utils.GasPriceFlag,
|
||||
utils.MinerThreadsFlag,
|
||||
utils.MiningEnabledFlag,
|
||||
utils.TargetGasLimitFlag,
|
||||
utils.MinerThreadsFlag,
|
||||
utils.MinerLegacyThreadsFlag,
|
||||
utils.MinerNotifyFlag,
|
||||
utils.MinerGasTargetFlag,
|
||||
utils.MinerLegacyGasTargetFlag,
|
||||
utils.MinerGasLimitFlag,
|
||||
utils.MinerGasPriceFlag,
|
||||
utils.MinerLegacyGasPriceFlag,
|
||||
utils.MinerEtherbaseFlag,
|
||||
utils.MinerLegacyEtherbaseFlag,
|
||||
utils.MinerExtraDataFlag,
|
||||
utils.MinerLegacyExtraDataFlag,
|
||||
utils.MinerRecommitIntervalFlag,
|
||||
utils.MinerNoVerfiyFlag,
|
||||
utils.NATFlag,
|
||||
utils.NoDiscoverFlag,
|
||||
utils.DiscoveryV5Flag,
|
||||
@@ -111,17 +121,18 @@ var (
|
||||
utils.DeveloperPeriodFlag,
|
||||
utils.TestnetFlag,
|
||||
utils.RinkebyFlag,
|
||||
utils.GoerliFlag,
|
||||
utils.VMEnableDebugFlag,
|
||||
utils.NetworkIdFlag,
|
||||
utils.RPCCORSDomainFlag,
|
||||
utils.RPCVirtualHostsFlag,
|
||||
utils.ConstantinopleOverrideFlag,
|
||||
utils.EthStatsURLFlag,
|
||||
utils.MetricsEnabledFlag,
|
||||
utils.FakePoWFlag,
|
||||
utils.NoCompactionFlag,
|
||||
utils.GpoBlocksFlag,
|
||||
utils.GpoPercentileFlag,
|
||||
utils.ExtraDataFlag,
|
||||
utils.EWASMInterpreterFlag,
|
||||
utils.EVMInterpreterFlag,
|
||||
configFileFlag,
|
||||
}
|
||||
|
||||
@@ -137,12 +148,14 @@ var (
|
||||
utils.WSAllowedOriginsFlag,
|
||||
utils.IPCDisabledFlag,
|
||||
utils.IPCPathFlag,
|
||||
utils.RPCGlobalGasCap,
|
||||
}
|
||||
|
||||
whisperFlags = []cli.Flag{
|
||||
utils.WhisperEnabledFlag,
|
||||
utils.WhisperMaxMessageSizeFlag,
|
||||
utils.WhisperMinPOWFlag,
|
||||
utils.WhisperRestrictConnectionBetweenLightClientsFlag,
|
||||
}
|
||||
|
||||
metricsFlags = []cli.Flag{
|
||||
@@ -151,7 +164,7 @@ var (
|
||||
utils.MetricsInfluxDBDatabaseFlag,
|
||||
utils.MetricsInfluxDBUsernameFlag,
|
||||
utils.MetricsInfluxDBPasswordFlag,
|
||||
utils.MetricsInfluxDBHostTagFlag,
|
||||
utils.MetricsInfluxDBTagsFlag,
|
||||
}
|
||||
)
|
||||
|
||||
@@ -198,8 +211,6 @@ func init() {
|
||||
app.Flags = append(app.Flags, metricsFlags...)
|
||||
|
||||
app.Before = func(ctx *cli.Context) error {
|
||||
runtime.GOMAXPROCS(runtime.NumCPU())
|
||||
|
||||
logdir := ""
|
||||
if ctx.GlobalBool(utils.DashboardEnabledFlag.Name) {
|
||||
logdir = (&node.Config{DataDir: utils.MakeDataDir(ctx)}).ResolvePath("logs")
|
||||
@@ -229,7 +240,6 @@ func init() {
|
||||
// Start system runtime metrics collection
|
||||
go metrics.CollectProcessMetrics(3 * time.Second)
|
||||
|
||||
utils.SetupNetwork(ctx)
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -323,25 +333,25 @@ func startNode(ctx *cli.Context, stack *node.Node) {
|
||||
// Start auxiliary services if enabled
|
||||
if ctx.GlobalBool(utils.MiningEnabledFlag.Name) || ctx.GlobalBool(utils.DeveloperFlag.Name) {
|
||||
// Mining only makes sense if a full Ethereum node is running
|
||||
if ctx.GlobalBool(utils.LightModeFlag.Name) || ctx.GlobalString(utils.SyncModeFlag.Name) == "light" {
|
||||
if ctx.GlobalString(utils.SyncModeFlag.Name) == "light" {
|
||||
utils.Fatalf("Light clients do not support mining")
|
||||
}
|
||||
var ethereum *eth.Ethereum
|
||||
if err := stack.Service(ðereum); err != nil {
|
||||
utils.Fatalf("Ethereum service not running: %v", err)
|
||||
}
|
||||
// Use a reduced number of threads if requested
|
||||
if threads := ctx.GlobalInt(utils.MinerThreadsFlag.Name); threads > 0 {
|
||||
type threaded interface {
|
||||
SetThreads(threads int)
|
||||
}
|
||||
if th, ok := ethereum.Engine().(threaded); ok {
|
||||
th.SetThreads(threads)
|
||||
}
|
||||
}
|
||||
// Set the gas price to the limits from the CLI and start mining
|
||||
ethereum.TxPool().SetGasPrice(utils.GlobalBig(ctx, utils.GasPriceFlag.Name))
|
||||
if err := ethereum.StartMining(true); err != nil {
|
||||
gasprice := utils.GlobalBig(ctx, utils.MinerLegacyGasPriceFlag.Name)
|
||||
if ctx.IsSet(utils.MinerGasPriceFlag.Name) {
|
||||
gasprice = utils.GlobalBig(ctx, utils.MinerGasPriceFlag.Name)
|
||||
}
|
||||
ethereum.TxPool().SetGasPrice(gasprice)
|
||||
|
||||
threads := ctx.GlobalInt(utils.MinerLegacyThreadsFlag.Name)
|
||||
if ctx.GlobalIsSet(utils.MinerThreadsFlag.Name) {
|
||||
threads = ctx.GlobalInt(utils.MinerThreadsFlag.Name)
|
||||
}
|
||||
if err := ethereum.StartMining(threads); err != nil {
|
||||
utils.Fatalf("Failed to start mining: %v", err)
|
||||
}
|
||||
}
|
||||
|
@@ -26,7 +26,7 @@ import (
|
||||
|
||||
"github.com/ethereum/go-ethereum/cmd/utils"
|
||||
"github.com/ethereum/go-ethereum/internal/debug"
|
||||
"gopkg.in/urfave/cli.v1"
|
||||
cli "gopkg.in/urfave/cli.v1"
|
||||
)
|
||||
|
||||
// AppHelpTemplate is the test template for the default, global app help topic.
|
||||
@@ -74,6 +74,7 @@ var AppHelpFlagGroups = []flagGroup{
|
||||
utils.NetworkIdFlag,
|
||||
utils.TestnetFlag,
|
||||
utils.RinkebyFlag,
|
||||
utils.GoerliFlag,
|
||||
utils.SyncModeFlag,
|
||||
utils.GCModeFlag,
|
||||
utils.EthStatsURLFlag,
|
||||
@@ -81,6 +82,7 @@ var AppHelpFlagGroups = []flagGroup{
|
||||
utils.LightServFlag,
|
||||
utils.LightPeersFlag,
|
||||
utils.LightKDFFlag,
|
||||
utils.WhitelistFlag,
|
||||
},
|
||||
},
|
||||
{
|
||||
@@ -114,6 +116,7 @@ var AppHelpFlagGroups = []flagGroup{
|
||||
{
|
||||
Name: "TRANSACTION POOL",
|
||||
Flags: []cli.Flag{
|
||||
utils.TxPoolLocalsFlag,
|
||||
utils.TxPoolNoLocalsFlag,
|
||||
utils.TxPoolJournalFlag,
|
||||
utils.TxPoolRejournalFlag,
|
||||
@@ -131,6 +134,7 @@ var AppHelpFlagGroups = []flagGroup{
|
||||
Flags: []cli.Flag{
|
||||
utils.CacheFlag,
|
||||
utils.CacheDatabaseFlag,
|
||||
utils.CacheTrieFlag,
|
||||
utils.CacheGCFlag,
|
||||
utils.TrieCacheGenFlag,
|
||||
},
|
||||
@@ -149,6 +153,7 @@ var AppHelpFlagGroups = []flagGroup{
|
||||
utils.RPCListenAddrFlag,
|
||||
utils.RPCPortFlag,
|
||||
utils.RPCApiFlag,
|
||||
utils.RPCGlobalGasCap,
|
||||
utils.WSEnabledFlag,
|
||||
utils.WSListenAddrFlag,
|
||||
utils.WSPortFlag,
|
||||
@@ -185,10 +190,14 @@ var AppHelpFlagGroups = []flagGroup{
|
||||
Flags: []cli.Flag{
|
||||
utils.MiningEnabledFlag,
|
||||
utils.MinerThreadsFlag,
|
||||
utils.EtherbaseFlag,
|
||||
utils.TargetGasLimitFlag,
|
||||
utils.GasPriceFlag,
|
||||
utils.ExtraDataFlag,
|
||||
utils.MinerNotifyFlag,
|
||||
utils.MinerGasPriceFlag,
|
||||
utils.MinerGasTargetFlag,
|
||||
utils.MinerGasLimitFlag,
|
||||
utils.MinerEtherbaseFlag,
|
||||
utils.MinerExtraDataFlag,
|
||||
utils.MinerRecommitIntervalFlag,
|
||||
utils.MinerNoVerfiyFlag,
|
||||
},
|
||||
},
|
||||
{
|
||||
@@ -202,6 +211,8 @@ var AppHelpFlagGroups = []flagGroup{
|
||||
Name: "VIRTUAL MACHINE",
|
||||
Flags: []cli.Flag{
|
||||
utils.VMEnableDebugFlag,
|
||||
utils.EVMInterpreterFlag,
|
||||
utils.EWASMInterpreterFlag,
|
||||
},
|
||||
},
|
||||
{
|
||||
@@ -220,7 +231,7 @@ var AppHelpFlagGroups = []flagGroup{
|
||||
utils.MetricsInfluxDBDatabaseFlag,
|
||||
utils.MetricsInfluxDBUsernameFlag,
|
||||
utils.MetricsInfluxDBPasswordFlag,
|
||||
utils.MetricsInfluxDBHostTagFlag,
|
||||
utils.MetricsInfluxDBTagsFlag,
|
||||
},
|
||||
},
|
||||
{
|
||||
@@ -230,8 +241,11 @@ var AppHelpFlagGroups = []flagGroup{
|
||||
{
|
||||
Name: "DEPRECATED",
|
||||
Flags: []cli.Flag{
|
||||
utils.FastSyncFlag,
|
||||
utils.LightModeFlag,
|
||||
utils.MinerLegacyThreadsFlag,
|
||||
utils.MinerLegacyGasTargetFlag,
|
||||
utils.MinerLegacyGasPriceFlag,
|
||||
utils.MinerLegacyEtherbaseFlag,
|
||||
utils.MinerLegacyExtraDataFlag,
|
||||
},
|
||||
},
|
||||
{
|
||||
|
@@ -47,7 +47,7 @@ import (
|
||||
|
||||
"github.com/ethereum/go-ethereum/crypto"
|
||||
"github.com/ethereum/go-ethereum/p2p"
|
||||
"github.com/ethereum/go-ethereum/p2p/discover"
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
"github.com/ethereum/go-ethereum/p2p/simulations"
|
||||
"github.com/ethereum/go-ethereum/p2p/simulations/adapters"
|
||||
"github.com/ethereum/go-ethereum/rpc"
|
||||
@@ -285,7 +285,7 @@ func createNode(ctx *cli.Context) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
config.ID = discover.PubkeyID(&privKey.PublicKey)
|
||||
config.ID = enode.PubkeyToIDV4(&privKey.PublicKey)
|
||||
config.PrivateKey = privKey
|
||||
}
|
||||
if services := ctx.String("services"); services != "" {
|
||||
|
@@ -20,35 +20,41 @@ import (
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"math"
|
||||
"math/big"
|
||||
"strings"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
math2 "github.com/ethereum/go-ethereum/common/math"
|
||||
"github.com/ethereum/go-ethereum/consensus/ethash"
|
||||
"github.com/ethereum/go-ethereum/core"
|
||||
"github.com/ethereum/go-ethereum/params"
|
||||
)
|
||||
|
||||
// cppEthereumGenesisSpec represents the genesis specification format used by the
|
||||
// alethGenesisSpec represents the genesis specification format used by the
|
||||
// C++ Ethereum implementation.
|
||||
type cppEthereumGenesisSpec struct {
|
||||
type alethGenesisSpec struct {
|
||||
SealEngine string `json:"sealEngine"`
|
||||
Params struct {
|
||||
AccountStartNonce hexutil.Uint64 `json:"accountStartNonce"`
|
||||
HomesteadForkBlock hexutil.Uint64 `json:"homesteadForkBlock"`
|
||||
EIP150ForkBlock hexutil.Uint64 `json:"EIP150ForkBlock"`
|
||||
EIP158ForkBlock hexutil.Uint64 `json:"EIP158ForkBlock"`
|
||||
ByzantiumForkBlock hexutil.Uint64 `json:"byzantiumForkBlock"`
|
||||
ConstantinopleForkBlock hexutil.Uint64 `json:"constantinopleForkBlock"`
|
||||
NetworkID hexutil.Uint64 `json:"networkID"`
|
||||
ChainID hexutil.Uint64 `json:"chainID"`
|
||||
MaximumExtraDataSize hexutil.Uint64 `json:"maximumExtraDataSize"`
|
||||
MinGasLimit hexutil.Uint64 `json:"minGasLimit"`
|
||||
MaxGasLimit hexutil.Uint64 `json:"maxGasLimit"`
|
||||
GasLimitBoundDivisor hexutil.Uint64 `json:"gasLimitBoundDivisor"`
|
||||
MinimumDifficulty *hexutil.Big `json:"minimumDifficulty"`
|
||||
DifficultyBoundDivisor *hexutil.Big `json:"difficultyBoundDivisor"`
|
||||
DurationLimit *hexutil.Big `json:"durationLimit"`
|
||||
BlockReward *hexutil.Big `json:"blockReward"`
|
||||
AccountStartNonce math2.HexOrDecimal64 `json:"accountStartNonce"`
|
||||
MaximumExtraDataSize hexutil.Uint64 `json:"maximumExtraDataSize"`
|
||||
HomesteadForkBlock hexutil.Uint64 `json:"homesteadForkBlock"`
|
||||
DaoHardforkBlock math2.HexOrDecimal64 `json:"daoHardforkBlock"`
|
||||
EIP150ForkBlock hexutil.Uint64 `json:"EIP150ForkBlock"`
|
||||
EIP158ForkBlock hexutil.Uint64 `json:"EIP158ForkBlock"`
|
||||
ByzantiumForkBlock hexutil.Uint64 `json:"byzantiumForkBlock"`
|
||||
ConstantinopleForkBlock hexutil.Uint64 `json:"constantinopleForkBlock"`
|
||||
MinGasLimit hexutil.Uint64 `json:"minGasLimit"`
|
||||
MaxGasLimit hexutil.Uint64 `json:"maxGasLimit"`
|
||||
TieBreakingGas bool `json:"tieBreakingGas"`
|
||||
GasLimitBoundDivisor math2.HexOrDecimal64 `json:"gasLimitBoundDivisor"`
|
||||
MinimumDifficulty *hexutil.Big `json:"minimumDifficulty"`
|
||||
DifficultyBoundDivisor *math2.HexOrDecimal256 `json:"difficultyBoundDivisor"`
|
||||
DurationLimit *math2.HexOrDecimal256 `json:"durationLimit"`
|
||||
BlockReward *hexutil.Big `json:"blockReward"`
|
||||
NetworkID hexutil.Uint64 `json:"networkID"`
|
||||
ChainID hexutil.Uint64 `json:"chainID"`
|
||||
AllowFutureBlocks bool `json:"allowFutureBlocks"`
|
||||
} `json:"params"`
|
||||
|
||||
Genesis struct {
|
||||
@@ -62,57 +68,68 @@ type cppEthereumGenesisSpec struct {
|
||||
GasLimit hexutil.Uint64 `json:"gasLimit"`
|
||||
} `json:"genesis"`
|
||||
|
||||
Accounts map[common.Address]*cppEthereumGenesisSpecAccount `json:"accounts"`
|
||||
Accounts map[common.UnprefixedAddress]*alethGenesisSpecAccount `json:"accounts"`
|
||||
}
|
||||
|
||||
// cppEthereumGenesisSpecAccount is the prefunded genesis account and/or precompiled
|
||||
// alethGenesisSpecAccount is the prefunded genesis account and/or precompiled
|
||||
// contract definition.
|
||||
type cppEthereumGenesisSpecAccount struct {
|
||||
Balance *hexutil.Big `json:"balance"`
|
||||
Nonce uint64 `json:"nonce,omitempty"`
|
||||
Precompiled *cppEthereumGenesisSpecBuiltin `json:"precompiled,omitempty"`
|
||||
type alethGenesisSpecAccount struct {
|
||||
Balance *math2.HexOrDecimal256 `json:"balance"`
|
||||
Nonce uint64 `json:"nonce,omitempty"`
|
||||
Precompiled *alethGenesisSpecBuiltin `json:"precompiled,omitempty"`
|
||||
}
|
||||
|
||||
// cppEthereumGenesisSpecBuiltin is the precompiled contract definition.
|
||||
type cppEthereumGenesisSpecBuiltin struct {
|
||||
Name string `json:"name,omitempty"`
|
||||
StartingBlock hexutil.Uint64 `json:"startingBlock,omitempty"`
|
||||
Linear *cppEthereumGenesisSpecLinearPricing `json:"linear,omitempty"`
|
||||
// alethGenesisSpecBuiltin is the precompiled contract definition.
|
||||
type alethGenesisSpecBuiltin struct {
|
||||
Name string `json:"name,omitempty"`
|
||||
StartingBlock hexutil.Uint64 `json:"startingBlock,omitempty"`
|
||||
Linear *alethGenesisSpecLinearPricing `json:"linear,omitempty"`
|
||||
}
|
||||
|
||||
type cppEthereumGenesisSpecLinearPricing struct {
|
||||
type alethGenesisSpecLinearPricing struct {
|
||||
Base uint64 `json:"base"`
|
||||
Word uint64 `json:"word"`
|
||||
}
|
||||
|
||||
// newCppEthereumGenesisSpec converts a go-ethereum genesis block into a Parity specific
|
||||
// newAlethGenesisSpec converts a go-ethereum genesis block into an Aleth-specific
|
||||
// chain specification format.
|
||||
func newCppEthereumGenesisSpec(network string, genesis *core.Genesis) (*cppEthereumGenesisSpec, error) {
|
||||
// Only ethash is currently supported between go-ethereum and cpp-ethereum
|
||||
func newAlethGenesisSpec(network string, genesis *core.Genesis) (*alethGenesisSpec, error) {
|
||||
// Only ethash is currently supported between go-ethereum and aleth
|
||||
if genesis.Config.Ethash == nil {
|
||||
return nil, errors.New("unsupported consensus engine")
|
||||
}
|
||||
// Reconstruct the chain spec in Parity's format
|
||||
spec := &cppEthereumGenesisSpec{
|
||||
// Reconstruct the chain spec in Aleth format
|
||||
spec := &alethGenesisSpec{
|
||||
SealEngine: "Ethash",
|
||||
}
|
||||
// Some defaults
|
||||
spec.Params.AccountStartNonce = 0
|
||||
spec.Params.TieBreakingGas = false
|
||||
spec.Params.AllowFutureBlocks = false
|
||||
spec.Params.DaoHardforkBlock = 0
|
||||
|
||||
spec.Params.HomesteadForkBlock = (hexutil.Uint64)(genesis.Config.HomesteadBlock.Uint64())
|
||||
spec.Params.EIP150ForkBlock = (hexutil.Uint64)(genesis.Config.EIP150Block.Uint64())
|
||||
spec.Params.EIP158ForkBlock = (hexutil.Uint64)(genesis.Config.EIP158Block.Uint64())
|
||||
spec.Params.ByzantiumForkBlock = (hexutil.Uint64)(genesis.Config.ByzantiumBlock.Uint64())
|
||||
spec.Params.ConstantinopleForkBlock = (hexutil.Uint64)(math.MaxUint64)
|
||||
|
||||
// Byzantium
|
||||
if num := genesis.Config.ByzantiumBlock; num != nil {
|
||||
spec.setByzantium(num)
|
||||
}
|
||||
// Constantinople
|
||||
if num := genesis.Config.ConstantinopleBlock; num != nil {
|
||||
spec.setConstantinople(num)
|
||||
}
|
||||
|
||||
spec.Params.NetworkID = (hexutil.Uint64)(genesis.Config.ChainID.Uint64())
|
||||
spec.Params.ChainID = (hexutil.Uint64)(genesis.Config.ChainID.Uint64())
|
||||
|
||||
spec.Params.MaximumExtraDataSize = (hexutil.Uint64)(params.MaximumExtraDataSize)
|
||||
spec.Params.MinGasLimit = (hexutil.Uint64)(params.MinGasLimit)
|
||||
spec.Params.MaxGasLimit = (hexutil.Uint64)(math.MaxUint64)
|
||||
spec.Params.MaxGasLimit = (hexutil.Uint64)(math.MaxInt64)
|
||||
spec.Params.MinimumDifficulty = (*hexutil.Big)(params.MinimumDifficulty)
|
||||
spec.Params.DifficultyBoundDivisor = (*hexutil.Big)(params.DifficultyBoundDivisor)
|
||||
spec.Params.GasLimitBoundDivisor = (hexutil.Uint64)(params.GasLimitBoundDivisor)
|
||||
spec.Params.DurationLimit = (*hexutil.Big)(params.DurationLimit)
|
||||
spec.Params.DifficultyBoundDivisor = (*math2.HexOrDecimal256)(params.DifficultyBoundDivisor)
|
||||
spec.Params.GasLimitBoundDivisor = (math2.HexOrDecimal64)(params.GasLimitBoundDivisor)
|
||||
spec.Params.DurationLimit = (*math2.HexOrDecimal256)(params.DurationLimit)
|
||||
spec.Params.BlockReward = (*hexutil.Big)(ethash.FrontierBlockReward)
|
||||
|
||||
spec.Genesis.Nonce = (hexutil.Bytes)(make([]byte, 8))
|
||||
@@ -126,77 +143,109 @@ func newCppEthereumGenesisSpec(network string, genesis *core.Genesis) (*cppEther
|
||||
spec.Genesis.ExtraData = (hexutil.Bytes)(genesis.ExtraData)
|
||||
spec.Genesis.GasLimit = (hexutil.Uint64)(genesis.GasLimit)
|
||||
|
||||
spec.Accounts = make(map[common.Address]*cppEthereumGenesisSpecAccount)
|
||||
for address, account := range genesis.Alloc {
|
||||
spec.Accounts[address] = &cppEthereumGenesisSpecAccount{
|
||||
Balance: (*hexutil.Big)(account.Balance),
|
||||
Nonce: account.Nonce,
|
||||
}
|
||||
}
|
||||
spec.Accounts[common.BytesToAddress([]byte{1})].Precompiled = &cppEthereumGenesisSpecBuiltin{
|
||||
Name: "ecrecover", Linear: &cppEthereumGenesisSpecLinearPricing{Base: 3000},
|
||||
}
|
||||
spec.Accounts[common.BytesToAddress([]byte{2})].Precompiled = &cppEthereumGenesisSpecBuiltin{
|
||||
Name: "sha256", Linear: &cppEthereumGenesisSpecLinearPricing{Base: 60, Word: 12},
|
||||
}
|
||||
spec.Accounts[common.BytesToAddress([]byte{3})].Precompiled = &cppEthereumGenesisSpecBuiltin{
|
||||
Name: "ripemd160", Linear: &cppEthereumGenesisSpecLinearPricing{Base: 600, Word: 120},
|
||||
}
|
||||
spec.Accounts[common.BytesToAddress([]byte{4})].Precompiled = &cppEthereumGenesisSpecBuiltin{
|
||||
Name: "identity", Linear: &cppEthereumGenesisSpecLinearPricing{Base: 15, Word: 3},
|
||||
spec.setAccount(address, account)
|
||||
}
|
||||
|
||||
spec.setPrecompile(1, &alethGenesisSpecBuiltin{Name: "ecrecover",
|
||||
Linear: &alethGenesisSpecLinearPricing{Base: 3000}})
|
||||
spec.setPrecompile(2, &alethGenesisSpecBuiltin{Name: "sha256",
|
||||
Linear: &alethGenesisSpecLinearPricing{Base: 60, Word: 12}})
|
||||
spec.setPrecompile(3, &alethGenesisSpecBuiltin{Name: "ripemd160",
|
||||
Linear: &alethGenesisSpecLinearPricing{Base: 600, Word: 120}})
|
||||
spec.setPrecompile(4, &alethGenesisSpecBuiltin{Name: "identity",
|
||||
Linear: &alethGenesisSpecLinearPricing{Base: 15, Word: 3}})
|
||||
if genesis.Config.ByzantiumBlock != nil {
|
||||
spec.Accounts[common.BytesToAddress([]byte{5})].Precompiled = &cppEthereumGenesisSpecBuiltin{
|
||||
Name: "modexp", StartingBlock: (hexutil.Uint64)(genesis.Config.ByzantiumBlock.Uint64()),
|
||||
}
|
||||
spec.Accounts[common.BytesToAddress([]byte{6})].Precompiled = &cppEthereumGenesisSpecBuiltin{
|
||||
Name: "alt_bn128_G1_add", StartingBlock: (hexutil.Uint64)(genesis.Config.ByzantiumBlock.Uint64()), Linear: &cppEthereumGenesisSpecLinearPricing{Base: 500},
|
||||
}
|
||||
spec.Accounts[common.BytesToAddress([]byte{7})].Precompiled = &cppEthereumGenesisSpecBuiltin{
|
||||
Name: "alt_bn128_G1_mul", StartingBlock: (hexutil.Uint64)(genesis.Config.ByzantiumBlock.Uint64()), Linear: &cppEthereumGenesisSpecLinearPricing{Base: 40000},
|
||||
}
|
||||
spec.Accounts[common.BytesToAddress([]byte{8})].Precompiled = &cppEthereumGenesisSpecBuiltin{
|
||||
Name: "alt_bn128_pairing_product", StartingBlock: (hexutil.Uint64)(genesis.Config.ByzantiumBlock.Uint64()),
|
||||
}
|
||||
spec.setPrecompile(5, &alethGenesisSpecBuiltin{Name: "modexp",
|
||||
StartingBlock: (hexutil.Uint64)(genesis.Config.ByzantiumBlock.Uint64())})
|
||||
spec.setPrecompile(6, &alethGenesisSpecBuiltin{Name: "alt_bn128_G1_add",
|
||||
StartingBlock: (hexutil.Uint64)(genesis.Config.ByzantiumBlock.Uint64()),
|
||||
Linear: &alethGenesisSpecLinearPricing{Base: 500}})
|
||||
spec.setPrecompile(7, &alethGenesisSpecBuiltin{Name: "alt_bn128_G1_mul",
|
||||
StartingBlock: (hexutil.Uint64)(genesis.Config.ByzantiumBlock.Uint64()),
|
||||
Linear: &alethGenesisSpecLinearPricing{Base: 40000}})
|
||||
spec.setPrecompile(8, &alethGenesisSpecBuiltin{Name: "alt_bn128_pairing_product",
|
||||
StartingBlock: (hexutil.Uint64)(genesis.Config.ByzantiumBlock.Uint64())})
|
||||
}
|
||||
return spec, nil
|
||||
}
|
||||
|
||||
func (spec *alethGenesisSpec) setPrecompile(address byte, data *alethGenesisSpecBuiltin) {
|
||||
if spec.Accounts == nil {
|
||||
spec.Accounts = make(map[common.UnprefixedAddress]*alethGenesisSpecAccount)
|
||||
}
|
||||
addr := common.UnprefixedAddress(common.BytesToAddress([]byte{address}))
|
||||
if _, exist := spec.Accounts[addr]; !exist {
|
||||
spec.Accounts[addr] = &alethGenesisSpecAccount{}
|
||||
}
|
||||
spec.Accounts[addr].Precompiled = data
|
||||
}
|
||||
|
||||
func (spec *alethGenesisSpec) setAccount(address common.Address, account core.GenesisAccount) {
|
||||
if spec.Accounts == nil {
|
||||
spec.Accounts = make(map[common.UnprefixedAddress]*alethGenesisSpecAccount)
|
||||
}
|
||||
|
||||
a, exist := spec.Accounts[common.UnprefixedAddress(address)]
|
||||
if !exist {
|
||||
a = &alethGenesisSpecAccount{}
|
||||
spec.Accounts[common.UnprefixedAddress(address)] = a
|
||||
}
|
||||
a.Balance = (*math2.HexOrDecimal256)(account.Balance)
|
||||
a.Nonce = account.Nonce
|
||||
|
||||
}
|
||||
|
||||
func (spec *alethGenesisSpec) setByzantium(num *big.Int) {
|
||||
spec.Params.ByzantiumForkBlock = hexutil.Uint64(num.Uint64())
|
||||
}
|
||||
|
||||
func (spec *alethGenesisSpec) setConstantinople(num *big.Int) {
|
||||
spec.Params.ConstantinopleForkBlock = hexutil.Uint64(num.Uint64())
|
||||
}
|
||||
|
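Both setPrecompile and setAccount above reduce to a get-or-create lookup on a lazily allocated map keyed by unprefixed address. A stripped-down sketch of that pattern, with the key and value types simplified to plain strings (all names here are illustrative):

package main

import "fmt"

type account struct {
	Balance string
	Builtin string
}

type spec struct {
	Accounts map[string]*account
}

// setBuiltin condenses what setPrecompile/setAccount do: lazily allocate the
// Accounts map, reuse an existing entry if present, otherwise create one,
// then update only the field being set.
func (s *spec) setBuiltin(addr, name string) {
	if s.Accounts == nil {
		s.Accounts = make(map[string]*account)
	}
	a, ok := s.Accounts[addr]
	if !ok {
		a = &account{}
		s.Accounts[addr] = a
	}
	a.Builtin = name
}

func main() {
	var s spec
	s.setBuiltin("0000000000000000000000000000000000000001", "ecrecover")
	s.setBuiltin("0000000000000000000000000000000000000002", "sha256")
	fmt.Println(len(s.Accounts), s.Accounts["0000000000000000000000000000000000000001"].Builtin)
}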
||||
// parityChainSpec is the chain specification format used by Parity.
|
||||
type parityChainSpec struct {
|
||||
Name string `json:"name"`
|
||||
Engine struct {
|
||||
Name string `json:"name"`
|
||||
Datadir string `json:"dataDir"`
|
||||
Engine struct {
|
||||
Ethash struct {
|
||||
Params struct {
|
||||
MinimumDifficulty *hexutil.Big `json:"minimumDifficulty"`
|
||||
DifficultyBoundDivisor *hexutil.Big `json:"difficultyBoundDivisor"`
|
||||
DurationLimit *hexutil.Big `json:"durationLimit"`
|
||||
BlockReward *hexutil.Big `json:"blockReward"`
|
||||
HomesteadTransition uint64 `json:"homesteadTransition"`
|
||||
EIP150Transition uint64 `json:"eip150Transition"`
|
||||
EIP160Transition uint64 `json:"eip160Transition"`
|
||||
EIP161abcTransition uint64 `json:"eip161abcTransition"`
|
||||
EIP161dTransition uint64 `json:"eip161dTransition"`
|
||||
EIP649Reward *hexutil.Big `json:"eip649Reward"`
|
||||
EIP100bTransition uint64 `json:"eip100bTransition"`
|
||||
EIP649Transition uint64 `json:"eip649Transition"`
|
||||
MinimumDifficulty *hexutil.Big `json:"minimumDifficulty"`
|
||||
DifficultyBoundDivisor *hexutil.Big `json:"difficultyBoundDivisor"`
|
||||
DurationLimit *hexutil.Big `json:"durationLimit"`
|
||||
BlockReward map[string]string `json:"blockReward"`
|
||||
DifficultyBombDelays map[string]string `json:"difficultyBombDelays"`
|
||||
HomesteadTransition hexutil.Uint64 `json:"homesteadTransition"`
|
||||
EIP100bTransition hexutil.Uint64 `json:"eip100bTransition"`
|
||||
} `json:"params"`
|
||||
} `json:"Ethash"`
|
||||
} `json:"engine"`
|
||||
|
||||
Params struct {
|
||||
MaximumExtraDataSize hexutil.Uint64 `json:"maximumExtraDataSize"`
|
||||
MinGasLimit hexutil.Uint64 `json:"minGasLimit"`
|
||||
GasLimitBoundDivisor hexutil.Uint64 `json:"gasLimitBoundDivisor"`
|
||||
NetworkID hexutil.Uint64 `json:"networkID"`
|
||||
MaxCodeSize uint64 `json:"maxCodeSize"`
|
||||
EIP155Transition uint64 `json:"eip155Transition"`
|
||||
EIP98Transition uint64 `json:"eip98Transition"`
|
||||
EIP86Transition uint64 `json:"eip86Transition"`
|
||||
EIP140Transition uint64 `json:"eip140Transition"`
|
||||
EIP211Transition uint64 `json:"eip211Transition"`
|
||||
EIP214Transition uint64 `json:"eip214Transition"`
|
||||
EIP658Transition uint64 `json:"eip658Transition"`
|
||||
AccountStartNonce hexutil.Uint64 `json:"accountStartNonce"`
|
||||
MaximumExtraDataSize hexutil.Uint64 `json:"maximumExtraDataSize"`
|
||||
MinGasLimit hexutil.Uint64 `json:"minGasLimit"`
|
||||
GasLimitBoundDivisor math2.HexOrDecimal64 `json:"gasLimitBoundDivisor"`
|
||||
NetworkID hexutil.Uint64 `json:"networkID"`
|
||||
ChainID hexutil.Uint64 `json:"chainID"`
|
||||
MaxCodeSize hexutil.Uint64 `json:"maxCodeSize"`
|
||||
MaxCodeSizeTransition hexutil.Uint64 `json:"maxCodeSizeTransition"`
|
||||
EIP98Transition hexutil.Uint64 `json:"eip98Transition"`
|
||||
EIP150Transition hexutil.Uint64 `json:"eip150Transition"`
|
||||
EIP160Transition hexutil.Uint64 `json:"eip160Transition"`
|
||||
EIP161abcTransition hexutil.Uint64 `json:"eip161abcTransition"`
|
||||
EIP161dTransition hexutil.Uint64 `json:"eip161dTransition"`
|
||||
EIP155Transition hexutil.Uint64 `json:"eip155Transition"`
|
||||
EIP140Transition hexutil.Uint64 `json:"eip140Transition"`
|
||||
EIP211Transition hexutil.Uint64 `json:"eip211Transition"`
|
||||
EIP214Transition hexutil.Uint64 `json:"eip214Transition"`
|
||||
EIP658Transition hexutil.Uint64 `json:"eip658Transition"`
|
||||
EIP145Transition hexutil.Uint64 `json:"eip145Transition"`
|
||||
EIP1014Transition hexutil.Uint64 `json:"eip1014Transition"`
|
||||
EIP1052Transition hexutil.Uint64 `json:"eip1052Transition"`
|
||||
EIP1283Transition hexutil.Uint64 `json:"eip1283Transition"`
|
||||
EIP1283DisableTransition hexutil.Uint64 `json:"eip1283DisableTransition"`
|
||||
} `json:"params"`
|
||||
|
||||
Genesis struct {
|
||||
@@ -215,22 +264,22 @@ type parityChainSpec struct {
|
||||
GasLimit hexutil.Uint64 `json:"gasLimit"`
|
||||
} `json:"genesis"`
|
||||
|
||||
Nodes []string `json:"nodes"`
|
||||
Accounts map[common.Address]*parityChainSpecAccount `json:"accounts"`
|
||||
Nodes []string `json:"nodes"`
|
||||
Accounts map[common.UnprefixedAddress]*parityChainSpecAccount `json:"accounts"`
|
||||
}
|
||||
|
||||
// parityChainSpecAccount is the prefunded genesis account and/or precompiled
|
||||
// contract definition.
|
||||
type parityChainSpecAccount struct {
|
||||
Balance *hexutil.Big `json:"balance"`
|
||||
Nonce uint64 `json:"nonce,omitempty"`
|
||||
Balance math2.HexOrDecimal256 `json:"balance"`
|
||||
Nonce math2.HexOrDecimal64 `json:"nonce,omitempty"`
|
||||
Builtin *parityChainSpecBuiltin `json:"builtin,omitempty"`
|
||||
}
|
||||
|
||||
// parityChainSpecBuiltin is the precompiled contract definition.
|
||||
type parityChainSpecBuiltin struct {
|
||||
Name string `json:"name,omitempty"`
|
||||
ActivateAt uint64 `json:"activate_at,omitempty"`
|
||||
ActivateAt math2.HexOrDecimal64 `json:"activate_at,omitempty"`
|
||||
Pricing *parityChainSpecPricing `json:"pricing,omitempty"`
|
||||
}
|
||||
|
||||
@@ -265,34 +314,56 @@ func newParityChainSpec(network string, genesis *core.Genesis, bootnodes []strin
|
||||
}
|
||||
// Reconstruct the chain spec in Parity's format
|
||||
spec := &parityChainSpec{
|
||||
Name: network,
|
||||
Nodes: bootnodes,
|
||||
Name: network,
|
||||
Nodes: bootnodes,
|
||||
Datadir: strings.ToLower(network),
|
||||
}
|
||||
spec.Engine.Ethash.Params.BlockReward = make(map[string]string)
|
||||
spec.Engine.Ethash.Params.DifficultyBombDelays = make(map[string]string)
|
||||
// Frontier
|
||||
spec.Engine.Ethash.Params.MinimumDifficulty = (*hexutil.Big)(params.MinimumDifficulty)
|
||||
spec.Engine.Ethash.Params.DifficultyBoundDivisor = (*hexutil.Big)(params.DifficultyBoundDivisor)
|
||||
spec.Engine.Ethash.Params.DurationLimit = (*hexutil.Big)(params.DurationLimit)
|
||||
spec.Engine.Ethash.Params.BlockReward = (*hexutil.Big)(ethash.FrontierBlockReward)
|
||||
spec.Engine.Ethash.Params.HomesteadTransition = genesis.Config.HomesteadBlock.Uint64()
|
||||
spec.Engine.Ethash.Params.EIP150Transition = genesis.Config.EIP150Block.Uint64()
|
||||
spec.Engine.Ethash.Params.EIP160Transition = genesis.Config.EIP155Block.Uint64()
|
||||
spec.Engine.Ethash.Params.EIP161abcTransition = genesis.Config.EIP158Block.Uint64()
|
||||
spec.Engine.Ethash.Params.EIP161dTransition = genesis.Config.EIP158Block.Uint64()
|
||||
spec.Engine.Ethash.Params.EIP649Reward = (*hexutil.Big)(ethash.ByzantiumBlockReward)
|
||||
spec.Engine.Ethash.Params.EIP100bTransition = genesis.Config.ByzantiumBlock.Uint64()
|
||||
spec.Engine.Ethash.Params.EIP649Transition = genesis.Config.ByzantiumBlock.Uint64()
|
||||
spec.Engine.Ethash.Params.BlockReward["0x0"] = hexutil.EncodeBig(ethash.FrontierBlockReward)
|
||||
|
||||
// Homestead
|
||||
spec.Engine.Ethash.Params.HomesteadTransition = hexutil.Uint64(genesis.Config.HomesteadBlock.Uint64())
|
||||
|
||||
// Tangerine Whistle : 150
|
||||
// https://github.com/ethereum/EIPs/blob/master/EIPS/eip-608.md
|
||||
spec.Params.EIP150Transition = hexutil.Uint64(genesis.Config.EIP150Block.Uint64())
|
||||
|
||||
// Spurious Dragon: 155, 160, 161, 170
|
||||
// https://github.com/ethereum/EIPs/blob/master/EIPS/eip-607.md
|
||||
spec.Params.EIP155Transition = hexutil.Uint64(genesis.Config.EIP155Block.Uint64())
|
||||
spec.Params.EIP160Transition = hexutil.Uint64(genesis.Config.EIP155Block.Uint64())
|
||||
spec.Params.EIP161abcTransition = hexutil.Uint64(genesis.Config.EIP158Block.Uint64())
|
||||
spec.Params.EIP161dTransition = hexutil.Uint64(genesis.Config.EIP158Block.Uint64())
|
||||
|
||||
// Byzantium
|
||||
if num := genesis.Config.ByzantiumBlock; num != nil {
|
||||
spec.setByzantium(num)
|
||||
}
|
||||
// Constantinople
|
||||
if num := genesis.Config.ConstantinopleBlock; num != nil {
|
||||
spec.setConstantinople(num)
|
||||
}
|
||||
// ConstantinopleFix (remove eip-1283)
|
||||
if num := genesis.Config.PetersburgBlock; num != nil {
|
||||
spec.setConstantinopleFix(num)
|
||||
}
|
||||
|
||||
spec.Params.MaximumExtraDataSize = (hexutil.Uint64)(params.MaximumExtraDataSize)
|
||||
spec.Params.MinGasLimit = (hexutil.Uint64)(params.MinGasLimit)
|
||||
spec.Params.GasLimitBoundDivisor = (hexutil.Uint64)(params.GasLimitBoundDivisor)
|
||||
spec.Params.GasLimitBoundDivisor = (math2.HexOrDecimal64)(params.GasLimitBoundDivisor)
|
||||
spec.Params.NetworkID = (hexutil.Uint64)(genesis.Config.ChainID.Uint64())
|
||||
spec.Params.ChainID = (hexutil.Uint64)(genesis.Config.ChainID.Uint64())
|
||||
spec.Params.MaxCodeSize = params.MaxCodeSize
|
||||
spec.Params.EIP155Transition = genesis.Config.EIP155Block.Uint64()
|
||||
spec.Params.EIP98Transition = math.MaxUint64
|
||||
spec.Params.EIP86Transition = math.MaxUint64
|
||||
spec.Params.EIP140Transition = genesis.Config.ByzantiumBlock.Uint64()
|
||||
spec.Params.EIP211Transition = genesis.Config.ByzantiumBlock.Uint64()
|
||||
spec.Params.EIP214Transition = genesis.Config.ByzantiumBlock.Uint64()
|
||||
spec.Params.EIP658Transition = genesis.Config.ByzantiumBlock.Uint64()
|
||||
// geth has it set from zero
|
||||
spec.Params.MaxCodeSizeTransition = 0
|
||||
|
||||
// Disable this one
|
||||
spec.Params.EIP98Transition = math.MaxInt64
|
||||
|
||||
spec.Genesis.Seal.Ethereum.Nonce = (hexutil.Bytes)(make([]byte, 8))
|
||||
binary.LittleEndian.PutUint64(spec.Genesis.Seal.Ethereum.Nonce[:], genesis.Nonce)
|
||||
@@ -305,42 +376,81 @@ func newParityChainSpec(network string, genesis *core.Genesis, bootnodes []strin
|
||||
spec.Genesis.ExtraData = (hexutil.Bytes)(genesis.ExtraData)
|
||||
spec.Genesis.GasLimit = (hexutil.Uint64)(genesis.GasLimit)
|
||||
|
||||
spec.Accounts = make(map[common.Address]*parityChainSpecAccount)
|
||||
spec.Accounts = make(map[common.UnprefixedAddress]*parityChainSpecAccount)
|
||||
for address, account := range genesis.Alloc {
|
||||
spec.Accounts[address] = &parityChainSpecAccount{
|
||||
Balance: (*hexutil.Big)(account.Balance),
|
||||
Nonce: account.Nonce,
|
||||
bal := math2.HexOrDecimal256(*account.Balance)
|
||||
|
||||
spec.Accounts[common.UnprefixedAddress(address)] = &parityChainSpecAccount{
|
||||
Balance: bal,
|
||||
Nonce: math2.HexOrDecimal64(account.Nonce),
|
||||
}
|
||||
}
|
||||
spec.Accounts[common.BytesToAddress([]byte{1})].Builtin = &parityChainSpecBuiltin{
|
||||
Name: "ecrecover", Pricing: &parityChainSpecPricing{Linear: &parityChainSpecLinearPricing{Base: 3000}},
|
||||
}
|
||||
spec.Accounts[common.BytesToAddress([]byte{2})].Builtin = &parityChainSpecBuiltin{
|
||||
spec.setPrecompile(1, &parityChainSpecBuiltin{Name: "ecrecover",
|
||||
Pricing: &parityChainSpecPricing{Linear: &parityChainSpecLinearPricing{Base: 3000}}})
|
||||
|
||||
spec.setPrecompile(2, &parityChainSpecBuiltin{
|
||||
Name: "sha256", Pricing: &parityChainSpecPricing{Linear: &parityChainSpecLinearPricing{Base: 60, Word: 12}},
|
||||
}
|
||||
spec.Accounts[common.BytesToAddress([]byte{3})].Builtin = &parityChainSpecBuiltin{
|
||||
})
|
||||
spec.setPrecompile(3, &parityChainSpecBuiltin{
|
||||
Name: "ripemd160", Pricing: &parityChainSpecPricing{Linear: &parityChainSpecLinearPricing{Base: 600, Word: 120}},
|
||||
}
|
||||
spec.Accounts[common.BytesToAddress([]byte{4})].Builtin = &parityChainSpecBuiltin{
|
||||
})
|
||||
spec.setPrecompile(4, &parityChainSpecBuiltin{
|
||||
Name: "identity", Pricing: &parityChainSpecPricing{Linear: &parityChainSpecLinearPricing{Base: 15, Word: 3}},
|
||||
}
|
||||
})
|
||||
if genesis.Config.ByzantiumBlock != nil {
|
||||
spec.Accounts[common.BytesToAddress([]byte{5})].Builtin = &parityChainSpecBuiltin{
|
||||
Name: "modexp", ActivateAt: genesis.Config.ByzantiumBlock.Uint64(), Pricing: &parityChainSpecPricing{ModExp: &parityChainSpecModExpPricing{Divisor: 20}},
|
||||
}
|
||||
spec.Accounts[common.BytesToAddress([]byte{6})].Builtin = &parityChainSpecBuiltin{
|
||||
Name: "alt_bn128_add", ActivateAt: genesis.Config.ByzantiumBlock.Uint64(), Pricing: &parityChainSpecPricing{Linear: &parityChainSpecLinearPricing{Base: 500}},
|
||||
}
|
||||
spec.Accounts[common.BytesToAddress([]byte{7})].Builtin = &parityChainSpecBuiltin{
|
||||
Name: "alt_bn128_mul", ActivateAt: genesis.Config.ByzantiumBlock.Uint64(), Pricing: &parityChainSpecPricing{Linear: &parityChainSpecLinearPricing{Base: 40000}},
|
||||
}
|
||||
spec.Accounts[common.BytesToAddress([]byte{8})].Builtin = &parityChainSpecBuiltin{
|
||||
Name: "alt_bn128_pairing", ActivateAt: genesis.Config.ByzantiumBlock.Uint64(), Pricing: &parityChainSpecPricing{AltBnPairing: &parityChainSpecAltBnPairingPricing{Base: 100000, Pair: 80000}},
|
||||
}
|
||||
blnum := math2.HexOrDecimal64(genesis.Config.ByzantiumBlock.Uint64())
|
||||
spec.setPrecompile(5, &parityChainSpecBuiltin{
|
||||
Name: "modexp", ActivateAt: blnum, Pricing: &parityChainSpecPricing{ModExp: &parityChainSpecModExpPricing{Divisor: 20}},
|
||||
})
|
||||
spec.setPrecompile(6, &parityChainSpecBuiltin{
|
||||
Name: "alt_bn128_add", ActivateAt: blnum, Pricing: &parityChainSpecPricing{Linear: &parityChainSpecLinearPricing{Base: 500}},
|
||||
})
|
||||
spec.setPrecompile(7, &parityChainSpecBuiltin{
|
||||
Name: "alt_bn128_mul", ActivateAt: blnum, Pricing: &parityChainSpecPricing{Linear: &parityChainSpecLinearPricing{Base: 40000}},
|
||||
})
|
||||
spec.setPrecompile(8, &parityChainSpecBuiltin{
|
||||
Name: "alt_bn128_pairing", ActivateAt: blnum, Pricing: &parityChainSpecPricing{AltBnPairing: &parityChainSpecAltBnPairingPricing{Base: 100000, Pair: 80000}},
|
||||
})
|
||||
}
|
||||
return spec, nil
|
||||
}
|
||||
|
||||
func (spec *parityChainSpec) setPrecompile(address byte, data *parityChainSpecBuiltin) {
|
||||
if spec.Accounts == nil {
|
||||
spec.Accounts = make(map[common.UnprefixedAddress]*parityChainSpecAccount)
|
||||
}
|
||||
a := common.UnprefixedAddress(common.BytesToAddress([]byte{address}))
|
||||
if _, exist := spec.Accounts[a]; !exist {
|
||||
spec.Accounts[a] = &parityChainSpecAccount{}
|
||||
}
|
||||
spec.Accounts[a].Builtin = data
|
||||
}
|
||||
|
||||
func (spec *parityChainSpec) setByzantium(num *big.Int) {
|
||||
spec.Engine.Ethash.Params.BlockReward[hexutil.EncodeBig(num)] = hexutil.EncodeBig(ethash.ByzantiumBlockReward)
|
||||
spec.Engine.Ethash.Params.DifficultyBombDelays[hexutil.EncodeBig(num)] = hexutil.EncodeUint64(3000000)
|
||||
n := hexutil.Uint64(num.Uint64())
|
||||
spec.Engine.Ethash.Params.EIP100bTransition = n
|
||||
spec.Params.EIP140Transition = n
|
||||
spec.Params.EIP211Transition = n
|
||||
spec.Params.EIP214Transition = n
|
||||
spec.Params.EIP658Transition = n
|
||||
}
|
||||
|
||||
func (spec *parityChainSpec) setConstantinople(num *big.Int) {
|
||||
spec.Engine.Ethash.Params.BlockReward[hexutil.EncodeBig(num)] = hexutil.EncodeBig(ethash.ConstantinopleBlockReward)
|
||||
spec.Engine.Ethash.Params.DifficultyBombDelays[hexutil.EncodeBig(num)] = hexutil.EncodeUint64(2000000)
|
||||
n := hexutil.Uint64(num.Uint64())
|
||||
spec.Params.EIP145Transition = n
|
||||
spec.Params.EIP1014Transition = n
|
||||
spec.Params.EIP1052Transition = n
|
||||
spec.Params.EIP1283Transition = n
|
||||
}
|
||||
|
||||
func (spec *parityChainSpec) setConstantinopleFix(num *big.Int) {
|
||||
spec.Params.EIP1283DisableTransition = hexutil.Uint64(num.Uint64())
|
||||
}
|
||||
|
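The newer Parity format keys the block reward and the difficulty-bomb delay by fork block instead of storing scalars, which is why setByzantium and setConstantinople append map entries. A self-contained sketch of the shape those maps take (fork numbers are made up; encodeBig stands in for hexutil.EncodeBig):

package main

import (
	"fmt"
	"math/big"
)

// encodeBig stands in for hexutil.EncodeBig in this sketch.
func encodeBig(n *big.Int) string { return fmt.Sprintf("%#x", n) }

func main() {
	ether := new(big.Int).Exp(big.NewInt(10), big.NewInt(18), nil)
	reward := func(n int64) string { return encodeBig(new(big.Int).Mul(big.NewInt(n), ether)) }

	// One reward entry per fork, keyed by the hex block number, plus the bomb
	// delay introduced at the same fork.
	blockReward := map[string]string{encodeBig(big.NewInt(0)): reward(5)} // Frontier: 5 ether
	bombDelays := map[string]string{}

	byzantium, constantinople := big.NewInt(30000), big.NewInt(40000) // illustrative fork blocks
	blockReward[encodeBig(byzantium)] = reward(3)                     // Byzantium: 3 ether
	bombDelays[encodeBig(byzantium)] = encodeBig(big.NewInt(3000000))
	blockReward[encodeBig(constantinople)] = reward(2) // Constantinople: 2 ether
	bombDelays[encodeBig(constantinople)] = encodeBig(big.NewInt(2000000))

	fmt.Println(blockReward)
	fmt.Println(bombDelays)
}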
||||
// pyEthereumGenesisSpec represents the genesis specification format used by the
|
||||
// Python Ethereum implementation.
|
||||
type pyEthereumGenesisSpec struct {
|
||||
|
109 cmd/puppeth/genesis_test.go (new file)
@@ -0,0 +1,109 @@
|
||||
// Copyright 2018 The go-ethereum Authors
|
||||
// This file is part of go-ethereum.
|
||||
//
|
||||
// go-ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// go-ethereum is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"reflect"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/davecgh/go-spew/spew"
|
||||
"github.com/ethereum/go-ethereum/core"
|
||||
)
|
||||
|
||||
// Tests the go-ethereum to Aleth chainspec conversion for the Stureby testnet.
|
||||
func TestAlethSturebyConverter(t *testing.T) {
|
||||
blob, err := ioutil.ReadFile("testdata/stureby_geth.json")
|
||||
if err != nil {
|
||||
t.Fatalf("could not read file: %v", err)
|
||||
}
|
||||
var genesis core.Genesis
|
||||
if err := json.Unmarshal(blob, &genesis); err != nil {
|
||||
t.Fatalf("failed parsing genesis: %v", err)
|
||||
}
|
||||
spec, err := newAlethGenesisSpec("stureby", &genesis)
|
||||
if err != nil {
|
||||
t.Fatalf("failed creating chainspec: %v", err)
|
||||
}
|
||||
|
||||
expBlob, err := ioutil.ReadFile("testdata/stureby_aleth.json")
|
||||
if err != nil {
|
||||
t.Fatalf("could not read file: %v", err)
|
||||
}
|
||||
expspec := &alethGenesisSpec{}
|
||||
if err := json.Unmarshal(expBlob, expspec); err != nil {
|
||||
t.Fatalf("failed parsing genesis: %v", err)
|
||||
}
|
||||
if !reflect.DeepEqual(expspec, spec) {
|
||||
t.Errorf("chainspec mismatch")
|
||||
c := spew.ConfigState{
|
||||
DisablePointerAddresses: true,
|
||||
SortKeys: true,
|
||||
}
|
||||
exp := strings.Split(c.Sdump(expspec), "\n")
|
||||
got := strings.Split(c.Sdump(spec), "\n")
|
||||
for i := 0; i < len(exp) && i < len(got); i++ {
|
||||
if exp[i] != got[i] {
|
||||
fmt.Printf("got: %v\nexp: %v\n", exp[i], got[i])
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Tests the go-ethereum to Parity chainspec conversion for the Stureby testnet.
|
||||
func TestParitySturebyConverter(t *testing.T) {
|
||||
blob, err := ioutil.ReadFile("testdata/stureby_geth.json")
|
||||
if err != nil {
|
||||
t.Fatalf("could not read file: %v", err)
|
||||
}
|
||||
var genesis core.Genesis
|
||||
if err := json.Unmarshal(blob, &genesis); err != nil {
|
||||
t.Fatalf("failed parsing genesis: %v", err)
|
||||
}
|
||||
spec, err := newParityChainSpec("Stureby", &genesis, []string{})
|
||||
if err != nil {
|
||||
t.Fatalf("failed creating chainspec: %v", err)
|
||||
}
|
||||
|
||||
expBlob, err := ioutil.ReadFile("testdata/stureby_parity.json")
|
||||
if err != nil {
|
||||
t.Fatalf("could not read file: %v", err)
|
||||
}
|
||||
expspec := &parityChainSpec{}
|
||||
if err := json.Unmarshal(expBlob, expspec); err != nil {
|
||||
t.Fatalf("failed parsing genesis: %v", err)
|
||||
}
|
||||
expspec.Nodes = []string{}
|
||||
|
||||
if !reflect.DeepEqual(expspec, spec) {
|
||||
t.Errorf("chainspec mismatch")
|
||||
c := spew.ConfigState{
|
||||
DisablePointerAddresses: true,
|
||||
SortKeys: true,
|
||||
}
|
||||
exp := strings.Split(c.Sdump(expspec), "\n")
|
||||
got := strings.Split(c.Sdump(spec), "\n")
|
||||
for i := 0; i < len(exp) && i < len(got); i++ {
|
||||
if exp[i] != got[i] {
|
||||
fmt.Printf("got: %v\nexp: %v\n", exp[i], got[i])
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
@@ -608,30 +608,31 @@ func deployDashboard(client *sshClient, network string, conf *config, config *da
|
||||
bootPython[i] = "'" + boot + "'"
|
||||
}
|
||||
template.Must(template.New("").Parse(dashboardContent)).Execute(indexfile, map[string]interface{}{
|
||||
"Network": network,
|
||||
"NetworkID": conf.Genesis.Config.ChainID,
|
||||
"NetworkTitle": strings.Title(network),
|
||||
"EthstatsPage": config.ethstats,
|
||||
"ExplorerPage": config.explorer,
|
||||
"WalletPage": config.wallet,
|
||||
"FaucetPage": config.faucet,
|
||||
"GethGenesis": network + ".json",
|
||||
"Bootnodes": conf.bootnodes,
|
||||
"BootnodesFlat": strings.Join(conf.bootnodes, ","),
|
||||
"Ethstats": statsLogin,
|
||||
"Ethash": conf.Genesis.Config.Ethash != nil,
|
||||
"CppGenesis": network + "-cpp.json",
|
||||
"CppBootnodes": strings.Join(bootCpp, " "),
|
||||
"HarmonyGenesis": network + "-harmony.json",
|
||||
"HarmonyBootnodes": strings.Join(bootHarmony, " "),
|
||||
"ParityGenesis": network + "-parity.json",
|
||||
"PythonGenesis": network + "-python.json",
|
||||
"PythonBootnodes": strings.Join(bootPython, ","),
|
||||
"Homestead": conf.Genesis.Config.HomesteadBlock,
|
||||
"Tangerine": conf.Genesis.Config.EIP150Block,
|
||||
"Spurious": conf.Genesis.Config.EIP155Block,
|
||||
"Byzantium": conf.Genesis.Config.ByzantiumBlock,
|
||||
"Constantinople": conf.Genesis.Config.ConstantinopleBlock,
|
||||
"Network": network,
|
||||
"NetworkID": conf.Genesis.Config.ChainID,
|
||||
"NetworkTitle": strings.Title(network),
|
||||
"EthstatsPage": config.ethstats,
|
||||
"ExplorerPage": config.explorer,
|
||||
"WalletPage": config.wallet,
|
||||
"FaucetPage": config.faucet,
|
||||
"GethGenesis": network + ".json",
|
||||
"Bootnodes": conf.bootnodes,
|
||||
"BootnodesFlat": strings.Join(conf.bootnodes, ","),
|
||||
"Ethstats": statsLogin,
|
||||
"Ethash": conf.Genesis.Config.Ethash != nil,
|
||||
"CppGenesis": network + "-cpp.json",
|
||||
"CppBootnodes": strings.Join(bootCpp, " "),
|
||||
"HarmonyGenesis": network + "-harmony.json",
|
||||
"HarmonyBootnodes": strings.Join(bootHarmony, " "),
|
||||
"ParityGenesis": network + "-parity.json",
|
||||
"PythonGenesis": network + "-python.json",
|
||||
"PythonBootnodes": strings.Join(bootPython, ","),
|
||||
"Homestead": conf.Genesis.Config.HomesteadBlock,
|
||||
"Tangerine": conf.Genesis.Config.EIP150Block,
|
||||
"Spurious": conf.Genesis.Config.EIP155Block,
|
||||
"Byzantium": conf.Genesis.Config.ByzantiumBlock,
|
||||
"Constantinople": conf.Genesis.Config.ConstantinopleBlock,
|
||||
"ConstantinopleFix": conf.Genesis.Config.PetersburgBlock,
|
||||
})
|
||||
files[filepath.Join(workdir, "index.html")] = indexfile.Bytes()
|
||||
|
||||
@@ -640,7 +641,7 @@ func deployDashboard(client *sshClient, network string, conf *config, config *da
|
||||
files[filepath.Join(workdir, network+".json")] = genesis
|
||||
|
||||
if conf.Genesis.Config.Ethash != nil {
|
||||
cppSpec, err := newCppEthereumGenesisSpec(network, conf.Genesis)
|
||||
cppSpec, err := newAlethGenesisSpec(network, conf.Genesis)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -678,9 +679,9 @@ func deployDashboard(client *sshClient, network string, conf *config, config *da
|
||||
|
||||
// Build and deploy the dashboard service
|
||||
if nocache {
|
||||
return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s build --pull --no-cache && docker-compose -p %s up -d --force-recreate", workdir, network, network))
|
||||
return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s build --pull --no-cache && docker-compose -p %s up -d --force-recreate --timeout 60", workdir, network, network))
|
||||
}
|
||||
return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s up -d --build --force-recreate", workdir, network))
|
||||
return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s up -d --build --force-recreate --timeout 60", workdir, network))
|
||||
}
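Every deploy function in this patch gains the same trailing --timeout 60, giving the old containers a minute to stop cleanly before recreation. A sketch of the command each of them streams over SSH (composeUp is an illustrative helper; the real code inlines the fmt.Sprintf at each call site):

package main

import "fmt"

// composeUp rebuilds the shell command the deploy functions stream over SSH.
// The only change in these hunks is the trailing "--timeout 60".
func composeUp(workdir, network string, nocache bool) string {
	if nocache {
		return fmt.Sprintf("cd %s && docker-compose -p %s build --pull --no-cache && docker-compose -p %s up -d --force-recreate --timeout 60", workdir, network, network)
	}
	return fmt.Sprintf("cd %s && docker-compose -p %s up -d --build --force-recreate --timeout 60", workdir, network)
}

func main() {
	fmt.Println(composeUp("/tmp/stureby/dashboard", "stureby", true))
}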
|
||||
|
||||
// dashboardInfos is returned from a dashboard status check to allow reporting
|
||||
|
@@ -43,7 +43,8 @@ version: '2'
|
||||
services:
|
||||
ethstats:
|
||||
build: .
|
||||
image: {{.Network}}/ethstats{{if not .VHost}}
|
||||
image: {{.Network}}/ethstats
|
||||
container_name: {{.Network}}_ethstats_1{{if not .VHost}}
|
||||
ports:
|
||||
- "{{.Port}}:3000"{{end}}
|
||||
environment:
|
||||
@@ -100,9 +101,9 @@ func deployEthstats(client *sshClient, network string, port int, secret string,
|
||||
|
||||
// Build and deploy the ethstats service
|
||||
if nocache {
|
||||
return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s build --pull --no-cache && docker-compose -p %s up -d --force-recreate", workdir, network, network))
|
||||
return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s build --pull --no-cache && docker-compose -p %s up -d --force-recreate --timeout 60", workdir, network, network))
|
||||
}
|
||||
return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s up -d --build --force-recreate", workdir, network))
|
||||
return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s up -d --build --force-recreate --timeout 60", workdir, network))
|
||||
}
|
||||
|
||||
// ethstatsInfos is returned from an ethstats status check to allow reporting
|
||||
|
@@ -38,7 +38,7 @@ ADD chain.json /chain.json
|
||||
RUN \
|
||||
echo '(cd ../eth-net-intelligence-api && pm2 start /ethstats.json)' > explorer.sh && \
|
||||
echo '(cd ../etherchain-light && npm start &)' >> explorer.sh && \
|
||||
echo '/parity/parity --chain=/chain.json --port={{.NodePort}} --tracing=on --fat-db=on --pruning=archive' >> explorer.sh
|
||||
echo 'exec /parity/parity --chain=/chain.json --port={{.NodePort}} --tracing=on --fat-db=on --pruning=archive' >> explorer.sh
|
||||
|
||||
ENTRYPOINT ["/bin/sh", "explorer.sh"]
|
||||
`
|
||||
@@ -77,6 +77,7 @@ services:
|
||||
explorer:
|
||||
build: .
|
||||
image: {{.Network}}/explorer
|
||||
container_name: {{.Network}}_explorer_1
|
||||
ports:
|
||||
- "{{.NodePort}}:{{.NodePort}}"
|
||||
- "{{.NodePort}}:{{.NodePort}}/udp"{{if not .VHost}}
|
||||
@@ -140,9 +141,9 @@ func deployExplorer(client *sshClient, network string, chainspec []byte, config
|
||||
|
||||
// Build and deploy the boot or seal node service
|
||||
if nocache {
|
||||
return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s build --pull --no-cache && docker-compose -p %s up -d --force-recreate", workdir, network, network))
|
||||
return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s build --pull --no-cache && docker-compose -p %s up -d --force-recreate --timeout 60", workdir, network, network))
|
||||
}
|
||||
return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s up -d --build --force-recreate", workdir, network))
|
||||
return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s up -d --build --force-recreate --timeout 60", workdir, network))
|
||||
}
|
||||
|
||||
// explorerInfos is returned from a block explorer status check to allow reporting
|
||||
|
@@ -56,8 +56,10 @@ services:
|
||||
faucet:
|
||||
build: .
|
||||
image: {{.Network}}/faucet
|
||||
container_name: {{.Network}}_faucet_1
|
||||
ports:
|
||||
- "{{.EthPort}}:{{.EthPort}}"{{if not .VHost}}
|
||||
- "{{.EthPort}}:{{.EthPort}}"
|
||||
- "{{.EthPort}}:{{.EthPort}}/udp"{{if not .VHost}}
|
||||
- "{{.ApiPort}}:8080"{{end}}
|
||||
volumes:
|
||||
- {{.Datadir}}:/root/.faucet
|
||||
@@ -133,9 +135,9 @@ func deployFaucet(client *sshClient, network string, bootnodes []string, config
|
||||
|
||||
// Build and deploy the faucet service
|
||||
if nocache {
|
||||
return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s build --pull --no-cache && docker-compose -p %s up -d --force-recreate", workdir, network, network))
|
||||
return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s build --pull --no-cache && docker-compose -p %s up -d --force-recreate --timeout 60", workdir, network, network))
|
||||
}
|
||||
return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s up -d --build --force-recreate", workdir, network))
|
||||
return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s up -d --build --force-recreate --timeout 60", workdir, network))
|
||||
}
|
||||
|
||||
// faucetInfos is returned from a faucet status check to allow reporting various
|
||||
|
@@ -40,6 +40,7 @@ services:
|
||||
nginx:
|
||||
build: .
|
||||
image: {{.Network}}/nginx
|
||||
container_name: {{.Network}}_nginx_1
|
||||
ports:
|
||||
- "{{.Port}}:80"
|
||||
volumes:
|
||||
@@ -81,9 +82,9 @@ func deployNginx(client *sshClient, network string, port int, nocache bool) ([]b
|
||||
|
||||
// Build and deploy the reverse-proxy service
|
||||
if nocache {
|
||||
return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s build --pull --no-cache && docker-compose -p %s up -d --force-recreate", workdir, network, network))
|
||||
return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s build --pull --no-cache && docker-compose -p %s up -d --force-recreate --timeout 60", workdir, network, network))
|
||||
}
|
||||
return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s up -d --build --force-recreate", workdir, network))
|
||||
return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s up -d --build --force-recreate --timeout 60", workdir, network))
|
||||
}
|
||||
|
||||
// nginxInfos is returned from an nginx reverse-proxy status check to allow
|
||||
|
@@ -42,7 +42,7 @@ ADD genesis.json /genesis.json
|
||||
RUN \
|
||||
echo 'geth --cache 512 init /genesis.json' > geth.sh && \{{if .Unlock}}
|
||||
echo 'mkdir -p /root/.ethereum/keystore/ && cp /signer.json /root/.ethereum/keystore/' >> geth.sh && \{{end}}
|
||||
echo $'geth --networkid {{.NetworkID}} --cache 512 --port {{.Port}} --maxpeers {{.Peers}} {{.LightFlag}} --ethstats \'{{.Ethstats}}\' {{if .Bootnodes}}--bootnodes {{.Bootnodes}}{{end}} {{if .Etherbase}}--etherbase {{.Etherbase}} --mine --minerthreads 1{{end}} {{if .Unlock}}--unlock 0 --password /signer.pass --mine{{end}} --targetgaslimit {{.GasTarget}} --gasprice {{.GasPrice}}' >> geth.sh
|
||||
echo $'exec geth --networkid {{.NetworkID}} --cache 512 --port {{.Port}} --nat extip:{{.IP}} --maxpeers {{.Peers}} {{.LightFlag}} --ethstats \'{{.Ethstats}}\' {{if .Bootnodes}}--bootnodes {{.Bootnodes}}{{end}} {{if .Etherbase}}--miner.etherbase {{.Etherbase}} --mine --miner.threads 1{{end}} {{if .Unlock}}--unlock 0 --password /signer.pass --mine{{end}} --miner.gastarget {{.GasTarget}} --miner.gaslimit {{.GasLimit}} --miner.gasprice {{.GasPrice}}' >> geth.sh
|
||||
|
||||
ENTRYPOINT ["/bin/sh", "geth.sh"]
|
||||
`
|
||||
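The Dockerfile template above is rendered with text/template, and the {{if .Etherbase}} guard is what switches a sealing node onto the new --miner.* flags. A cut-down, runnable rendering example (field values are invented):

package main

import (
	"os"
	"text/template"
)

// geth is a shortened version of the command line baked into geth.sh above.
const geth = `exec geth --networkid {{.NetworkID}} --port {{.Port}} --nat extip:{{.IP}}{{if .Etherbase}} --miner.etherbase {{.Etherbase}} --mine --miner.threads 1{{end}} --miner.gastarget {{.GasTarget}} --miner.gaslimit {{.GasLimit}} --miner.gasprice {{.GasPrice}}`

func main() {
	tmpl := template.Must(template.New("geth").Parse(geth))
	tmpl.Execute(os.Stdout, map[string]interface{}{
		"NetworkID": 314159,
		"Port":      30303,
		"IP":        "203.0.113.5",
		"Etherbase": "0x0000000000000000000000000000000000000042",
		"GasTarget": uint64(15000000),
		"GasLimit":  uint64(30000000),
		"GasPrice":  uint64(1000000000),
	})
}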
@@ -55,6 +55,7 @@ services:
|
||||
{{.Type}}:
|
||||
build: .
|
||||
image: {{.Network}}/{{.Type}}
|
||||
container_name: {{.Network}}_{{.Type}}_1
|
||||
ports:
|
||||
- "{{.Port}}:{{.Port}}"
|
||||
- "{{.Port}}:{{.Port}}/udp"
|
||||
@@ -68,6 +69,7 @@ services:
|
||||
- STATS_NAME={{.Ethstats}}
|
||||
- MINER_NAME={{.Etherbase}}
|
||||
- GAS_TARGET={{.GasTarget}}
|
||||
- GAS_LIMIT={{.GasLimit}}
|
||||
- GAS_PRICE={{.GasPrice}}
|
||||
logging:
|
||||
driver: "json-file"
|
||||
@@ -98,12 +100,14 @@ func deployNode(client *sshClient, network string, bootnodes []string, config *n
|
||||
template.Must(template.New("").Parse(nodeDockerfile)).Execute(dockerfile, map[string]interface{}{
|
||||
"NetworkID": config.network,
|
||||
"Port": config.port,
|
||||
"IP": client.address,
|
||||
"Peers": config.peersTotal,
|
||||
"LightFlag": lightFlag,
|
||||
"Bootnodes": strings.Join(bootnodes, ","),
|
||||
"Ethstats": config.ethstats,
|
||||
"Etherbase": config.etherbase,
|
||||
"GasTarget": uint64(1000000 * config.gasTarget),
|
||||
"GasLimit": uint64(1000000 * config.gasLimit),
|
||||
"GasPrice": uint64(1000000000 * config.gasPrice),
|
||||
"Unlock": config.keyJSON != "",
|
||||
})
|
||||
@@ -122,6 +126,7 @@ func deployNode(client *sshClient, network string, bootnodes []string, config *n
|
||||
"Ethstats": config.ethstats[:strings.Index(config.ethstats, ":")],
|
||||
"Etherbase": config.etherbase,
|
||||
"GasTarget": config.gasTarget,
|
||||
"GasLimit": config.gasLimit,
|
||||
"GasPrice": config.gasPrice,
|
||||
})
|
||||
files[filepath.Join(workdir, "docker-compose.yaml")] = composefile.Bytes()
|
||||
@@ -139,9 +144,9 @@ func deployNode(client *sshClient, network string, bootnodes []string, config *n
|
||||
|
||||
// Build and deploy the boot or seal node service
|
||||
if nocache {
|
||||
return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s build --pull --no-cache && docker-compose -p %s up -d --force-recreate", workdir, network, network))
|
||||
return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s build --pull --no-cache && docker-compose -p %s up -d --force-recreate --timeout 60", workdir, network, network))
|
||||
}
|
||||
return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s up -d --build --force-recreate", workdir, network))
|
||||
return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s up -d --build --force-recreate --timeout 60", workdir, network))
|
||||
}
|
||||
|
||||
// nodeInfos is returned from a boot or seal node status check to allow reporting
|
||||
@@ -160,6 +165,7 @@ type nodeInfos struct {
|
||||
keyJSON string
|
||||
keyPass string
|
||||
gasTarget float64
|
||||
gasLimit float64
|
||||
gasPrice float64
|
||||
}
|
||||
|
||||
@@ -175,8 +181,9 @@ func (info *nodeInfos) Report() map[string]string {
|
||||
}
|
||||
if info.gasTarget > 0 {
|
||||
// Miner or signer node
|
||||
report["Gas limit (baseline target)"] = fmt.Sprintf("%0.3f MGas", info.gasTarget)
|
||||
report["Gas price (minimum accepted)"] = fmt.Sprintf("%0.3f GWei", info.gasPrice)
|
||||
report["Gas floor (baseline target)"] = fmt.Sprintf("%0.3f MGas", info.gasTarget)
|
||||
report["Gas ceil (target maximum)"] = fmt.Sprintf("%0.3f MGas", info.gasLimit)
|
||||
|
||||
if info.etherbase != "" {
|
||||
// Ethash proof-of-work miner
|
||||
@@ -217,14 +224,15 @@ func checkNode(client *sshClient, network string, boot bool) (*nodeInfos, error)
|
||||
totalPeers, _ := strconv.Atoi(infos.envvars["TOTAL_PEERS"])
|
||||
lightPeers, _ := strconv.Atoi(infos.envvars["LIGHT_PEERS"])
|
||||
gasTarget, _ := strconv.ParseFloat(infos.envvars["GAS_TARGET"], 64)
|
||||
gasLimit, _ := strconv.ParseFloat(infos.envvars["GAS_LIMIT"], 64)
|
||||
gasPrice, _ := strconv.ParseFloat(infos.envvars["GAS_PRICE"], 64)
|
||||
|
||||
// Container available, retrieve its node ID and its genesis json
|
||||
var out []byte
|
||||
if out, err = client.Run(fmt.Sprintf("docker exec %s_%s_1 geth --exec admin.nodeInfo.id --cache=16 attach", network, kind)); err != nil {
|
||||
if out, err = client.Run(fmt.Sprintf("docker exec %s_%s_1 geth --exec admin.nodeInfo.enode --cache=16 attach", network, kind)); err != nil {
|
||||
return nil, ErrServiceUnreachable
|
||||
}
|
||||
id := bytes.Trim(bytes.TrimSpace(out), "\"")
|
||||
enode := bytes.Trim(bytes.TrimSpace(out), "\"")
|
||||
|
||||
if out, err = client.Run(fmt.Sprintf("docker exec %s_%s_1 cat /genesis.json", network, kind)); err != nil {
|
||||
return nil, ErrServiceUnreachable
|
||||
@@ -256,9 +264,10 @@ func checkNode(client *sshClient, network string, boot bool) (*nodeInfos, error)
|
||||
keyJSON: keyJSON,
|
||||
keyPass: keyPass,
|
||||
gasTarget: gasTarget,
|
||||
gasLimit: gasLimit,
|
||||
gasPrice: gasPrice,
|
||||
}
|
||||
stats.enode = fmt.Sprintf("enode://%s@%s:%d", id, client.address, stats.port)
|
||||
stats.enode = string(enode)
|
||||
|
||||
return stats, nil
|
||||
}
|
||||
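The status check now asks the container for admin.nodeInfo.enode and reports that string verbatim instead of reassembling an enode URL from the node ID, address and port. A tiny sketch of the quote and whitespace trimming involved (the sample output is invented):

package main

import (
	"bytes"
	"fmt"
)

func main() {
	// `geth --exec admin.nodeInfo.enode attach` prints the JS string result
	// wrapped in quotes and followed by a newline; the check strips both.
	out := []byte("\"enode://1a2b3c@203.0.113.5:30303\"\n")
	enode := bytes.Trim(bytes.TrimSpace(out), "\"")
	fmt.Println(string(enode))
}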
|
@@ -37,7 +37,7 @@ ADD genesis.json /genesis.json
|
||||
RUN \
|
||||
echo 'node server.js &' > wallet.sh && \
|
||||
echo 'geth --cache 512 init /genesis.json' >> wallet.sh && \
|
||||
echo $'geth --networkid {{.NetworkID}} --port {{.NodePort}} --bootnodes {{.Bootnodes}} --ethstats \'{{.Ethstats}}\' --cache=512 --rpc --rpcaddr=0.0.0.0 --rpccorsdomain "*" --rpcvhosts "*"' >> wallet.sh
|
||||
echo $'exec geth --networkid {{.NetworkID}} --port {{.NodePort}} --bootnodes {{.Bootnodes}} --ethstats \'{{.Ethstats}}\' --cache=512 --rpc --rpcaddr=0.0.0.0 --rpccorsdomain "*" --rpcvhosts "*"' >> wallet.sh
|
||||
|
||||
RUN \
|
||||
sed -i 's/PuppethNetworkID/{{.NetworkID}}/g' dist/js/etherwallet-master.js && \
|
||||
@@ -57,6 +57,7 @@ services:
|
||||
wallet:
|
||||
build: .
|
||||
image: {{.Network}}/wallet
|
||||
container_name: {{.Network}}_wallet_1
|
||||
ports:
|
||||
- "{{.NodePort}}:{{.NodePort}}"
|
||||
- "{{.NodePort}}:{{.NodePort}}/udp"
|
||||
@@ -120,9 +121,9 @@ func deployWallet(client *sshClient, network string, bootnodes []string, config
|
||||
|
||||
// Build and deploy the boot or seal node service
|
||||
if nocache {
|
||||
return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s build --pull --no-cache && docker-compose -p %s up -d --force-recreate", workdir, network, network))
|
||||
return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s build --pull --no-cache && docker-compose -p %s up -d --force-recreate --timeout 60", workdir, network, network))
|
||||
}
|
||||
return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s up -d --build --force-recreate", workdir, network))
|
||||
return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s up -d --build --force-recreate --timeout 60", workdir, network))
|
||||
}
|
||||
|
||||
// walletInfos is returned from a web wallet status check to allow reporting
|
||||
|
@@ -43,18 +43,23 @@ func main() {
|
||||
Usage: "log level to emit to the screen",
|
||||
},
|
||||
}
|
||||
app.Action = func(c *cli.Context) error {
|
||||
app.Before = func(c *cli.Context) error {
|
||||
// Set up the logger to print everything and the random generator
|
||||
log.Root().SetHandler(log.LvlFilterHandler(log.Lvl(c.Int("loglevel")), log.StreamHandler(os.Stdout, log.TerminalFormat(true))))
|
||||
rand.Seed(time.Now().UnixNano())
|
||||
|
||||
network := c.String("network")
|
||||
if strings.Contains(network, " ") || strings.Contains(network, "-") {
|
||||
log.Crit("No spaces or hyphens allowed in network name")
|
||||
}
|
||||
// Start the wizard and relinquish control
|
||||
makeWizard(c.String("network")).run()
|
||||
return nil
|
||||
}
|
||||
app.Action = runWizard
|
||||
app.Run(os.Args)
|
||||
}
|
||||
|
||||
// runWizard starts the wizard and relinquishes control to it.
|
||||
func runWizard(c *cli.Context) error {
|
||||
network := c.String("network")
|
||||
if strings.Contains(network, " ") || strings.Contains(network, "-") || strings.ToLower(network) != network {
|
||||
log.Crit("No spaces, hyphens or capital letters allowed in network name")
|
||||
}
|
||||
makeWizard(c.String("network")).run()
|
||||
return nil
|
||||
}
|
||||
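runWizard additionally rejects capital letters in the network name, presumably because the name ends up in Docker image and container names, which must be lowercase. A minimal sketch of the predicate (validNetworkName is an illustrative name; the real code calls log.Crit directly):

package main

import (
	"fmt"
	"strings"
)

// validNetworkName reproduces the tightened check: no spaces, no hyphens,
// and now no capital letters either.
func validNetworkName(network string) bool {
	return !strings.Contains(network, " ") &&
		!strings.Contains(network, "-") &&
		strings.ToLower(network) == network
}

func main() {
	fmt.Println(validNetworkName("stureby")) // true
	fmt.Println(validNetworkName("Stureby")) // false: capital letter
	fmt.Println(validNetworkName("my-net"))  // false: hyphen
}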
|
@@ -45,33 +45,44 @@ type sshClient struct {
|
||||
|
||||
// dial establishes an SSH connection to a remote node using the current user and
|
||||
// the user's configured private RSA key. If that fails, password authentication
|
||||
// is fallen back to. The caller may override the login user via user@server:port.
// is fallen back to. server can be a string like user:identity@server:port.
func dial(server string, pubkey []byte) (*sshClient, error) {
// Figure out a label for the server and a logger
label := server
if strings.Contains(label, ":") {
label = label[:strings.Index(label, ":")]
}
login := ""
// Figure out username, identity, hostname and port
hostname := ""
hostport := server
username := ""
identity := "id_rsa" // default

if strings.Contains(server, "@") {
login = label[:strings.Index(label, "@")]
label = label[strings.Index(label, "@")+1:]
server = server[strings.Index(server, "@")+1:]
prefix := server[:strings.Index(server, "@")]
if strings.Contains(prefix, ":") {
username = prefix[:strings.Index(prefix, ":")]
identity = prefix[strings.Index(prefix, ":")+1:]
} else {
username = prefix
}
hostport = server[strings.Index(server, "@")+1:]
}
logger := log.New("server", label)
if strings.Contains(hostport, ":") {
hostname = hostport[:strings.Index(hostport, ":")]
} else {
hostname = hostport
hostport += ":22"
}
logger := log.New("server", server)
logger.Debug("Attempting to establish SSH connection")

user, err := user.Current()
if err != nil {
return nil, err
}
if login == "" {
login = user.Username
if username == "" {
username = user.Username
}
// Configure the supported authentication methods (private key and password)
var auths []ssh.AuthMethod

path := filepath.Join(user.HomeDir, ".ssh", "id_rsa")
path := filepath.Join(user.HomeDir, ".ssh", identity)
if buf, err := ioutil.ReadFile(path); err != nil {
log.Warn("No SSH key, falling back to passwords", "path", path, "err", err)
} else {
@@ -94,14 +105,14 @@ func dial(server string, pubkey []byte) (*sshClient, error) {
}
}
auths = append(auths, ssh.PasswordCallback(func() (string, error) {
fmt.Printf("What's the login password for %s at %s? (won't be echoed)\n> ", login, server)
fmt.Printf("What's the login password for %s at %s? (won't be echoed)\n> ", username, server)
blob, err := terminal.ReadPassword(int(os.Stdin.Fd()))

fmt.Println()
return string(blob), err
}))
// Resolve the IP address of the remote server
addr, err := net.LookupHost(label)
addr, err := net.LookupHost(hostname)
if err != nil {
return nil, err
}
@@ -109,10 +120,7 @@ func dial(server string, pubkey []byte) (*sshClient, error) {
return nil, errors.New("no IPs associated with domain")
}
// Try to dial in to the remote server
logger.Trace("Dialing remote SSH server", "user", login)
if !strings.Contains(server, ":") {
server += ":22"
}
logger.Trace("Dialing remote SSH server", "user", username)
keycheck := func(hostname string, remote net.Addr, key ssh.PublicKey) error {
// If no public key is known for SSH, ask the user to confirm
if pubkey == nil {
@@ -139,13 +147,13 @@ func dial(server string, pubkey []byte) (*sshClient, error) {
// We have a mismatch, forbid connecting
return errors.New("ssh key mismatch, readd the machine to update")
}
client, err := ssh.Dial("tcp", server, &ssh.ClientConfig{User: login, Auth: auths, HostKeyCallback: keycheck})
client, err := ssh.Dial("tcp", hostport, &ssh.ClientConfig{User: username, Auth: auths, HostKeyCallback: keycheck})
if err != nil {
return nil, err
}
// Connection established, return our utility wrapper
c := &sshClient{
server: label,
server: hostname,
address: addr[0],
pubkey: pubkey,
client: client,
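The address format accepted by the new dial is [username[:identity]@]hostname[:port], with the identity file defaulting to id_rsa and the port to 22. Below is a minimal, standalone sketch of just that parsing rule, not the puppeth code itself; the helper name and the sample addresses are illustrative only.

package main

import (
    "fmt"
    "strings"
)

// splitServer mirrors the parsing logic in the dial diff above:
// it splits [username[:identity]@]hostname[:port] into its parts.
func splitServer(server string) (username, identity, hostname, hostport string) {
    identity = "id_rsa" // default key file name, as in the diff above
    hostport = server

    if strings.Contains(server, "@") {
        prefix := server[:strings.Index(server, "@")]
        if strings.Contains(prefix, ":") {
            username = prefix[:strings.Index(prefix, ":")]
            identity = prefix[strings.Index(prefix, ":")+1:]
        } else {
            username = prefix
        }
        hostport = server[strings.Index(server, "@")+1:]
    }
    if strings.Contains(hostport, ":") {
        hostname = hostport[:strings.Index(hostport, ":")]
    } else {
        hostname = hostport
        hostport += ":22"
    }
    return username, identity, hostname, hostport
}

func main() {
    // Hypothetical inputs, purely for illustration.
    for _, s := range []string{"example.org", "alice@example.org", "alice:id_ed25519@example.org:2222"} {
        u, i, h, hp := splitServer(s)
        fmt.Printf("%-35s user=%q identity=%q host=%q dial=%q\n", s, u, i, h, hp)
    }
}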
cmd/puppeth/testdata/stureby_aleth.json (new vendored file, 112 lines)
@@ -0,0 +1,112 @@
{
"sealEngine":"Ethash",
"params":{
"accountStartNonce":"0x00",
"maximumExtraDataSize":"0x20",
"homesteadForkBlock":"0x2710",
"daoHardforkBlock":"0x00",
"EIP150ForkBlock":"0x3a98",
"EIP158ForkBlock":"0x59d8",
"byzantiumForkBlock":"0x7530",
"constantinopleForkBlock":"0x9c40",
"minGasLimit":"0x1388",
"maxGasLimit":"0x7fffffffffffffff",
"tieBreakingGas":false,
"gasLimitBoundDivisor":"0x0400",
"minimumDifficulty":"0x20000",
"difficultyBoundDivisor":"0x0800",
"durationLimit":"0x0d",
"blockReward":"0x4563918244F40000",
"networkID":"0x4cb2e",
"chainID":"0x4cb2e",
"allowFutureBlocks":false
},
"genesis":{
"nonce":"0x0000000000000000",
"difficulty":"0x20000",
"mixHash":"0x0000000000000000000000000000000000000000000000000000000000000000",
"author":"0x0000000000000000000000000000000000000000",
"timestamp":"0x59a4e76d",
"parentHash":"0x0000000000000000000000000000000000000000000000000000000000000000",
"extraData":"0x0000000000000000000000000000000000000000000000000000000b4dc0ffee",
"gasLimit":"0x47b760"
},
"accounts":{
"0000000000000000000000000000000000000001":{
"balance":"1",
"precompiled":{
"name":"ecrecover",
"linear":{
"base":3000,
"word":0
}
}
},
"0000000000000000000000000000000000000002":{
"balance":"1",
"precompiled":{
"name":"sha256",
"linear":{
"base":60,
"word":12
}
}
},
"0000000000000000000000000000000000000003":{
"balance":"1",
"precompiled":{
"name":"ripemd160",
"linear":{
"base":600,
"word":120
}
}
},
"0000000000000000000000000000000000000004":{
"balance":"1",
"precompiled":{
"name":"identity",
"linear":{
"base":15,
"word":3
}
}
},
"0000000000000000000000000000000000000005":{
"balance":"1",
"precompiled":{
"name":"modexp",
"startingBlock":"0x7530"
}
},
"0000000000000000000000000000000000000006":{
"balance":"1",
"precompiled":{
"name":"alt_bn128_G1_add",
"startingBlock":"0x7530",
"linear":{
"base":500,
"word":0
}
}
},
"0000000000000000000000000000000000000007":{
"balance":"1",
"precompiled":{
"name":"alt_bn128_G1_mul",
"startingBlock":"0x7530",
"linear":{
"base":40000,
"word":0
}
}
},
"0000000000000000000000000000000000000008":{
"balance":"1",
"precompiled":{
"name":"alt_bn128_pairing_product",
"startingBlock":"0x7530"
}
}
}
}
cmd/puppeth/testdata/stureby_geth.json (new vendored file, 47 lines)
@@ -0,0 +1,47 @@
{
"config": {
"ethash":{},
"chainId": 314158,
"homesteadBlock": 10000,
"eip150Block": 15000,
"eip150Hash": "0x0000000000000000000000000000000000000000000000000000000000000000",
"eip155Block": 23000,
"eip158Block": 23000,
"byzantiumBlock": 30000,
"constantinopleBlock": 40000
},
"nonce": "0x0",
"timestamp": "0x59a4e76d",
"parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
"extraData": "0x0000000000000000000000000000000000000000000000000000000b4dc0ffee",
"gasLimit": "0x47b760",
"difficulty": "0x20000",
"mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
"coinbase": "0x0000000000000000000000000000000000000000",
"alloc": {
"0000000000000000000000000000000000000001": {
"balance": "0x01"
},
"0000000000000000000000000000000000000002": {
"balance": "0x01"
},
"0000000000000000000000000000000000000003": {
"balance": "0x01"
},
"0000000000000000000000000000000000000004": {
"balance": "0x01"
},
"0000000000000000000000000000000000000005": {
"balance": "0x01"
},
"0000000000000000000000000000000000000006": {
"balance": "0x01"
},
"0000000000000000000000000000000000000007": {
"balance": "0x01"
},
"0000000000000000000000000000000000000008": {
"balance": "0x01"
}
}
}
cmd/puppeth/testdata/stureby_parity.json (new vendored file, 181 lines)
@@ -0,0 +1,181 @@
{
"name":"Stureby",
"dataDir":"stureby",
"engine":{
"Ethash":{
"params":{
"minimumDifficulty":"0x20000",
"difficultyBoundDivisor":"0x800",
"durationLimit":"0xd",
"blockReward":{
"0x0":"0x4563918244f40000",
"0x7530":"0x29a2241af62c0000",
"0x9c40":"0x1bc16d674ec80000"
},
"homesteadTransition":"0x2710",
"eip100bTransition":"0x7530",
"difficultyBombDelays":{
"0x7530":"0x2dc6c0",
"0x9c40":"0x1e8480"
}
}
}
},
"params":{
"accountStartNonce":"0x0",
"maximumExtraDataSize":"0x20",
"gasLimitBoundDivisor":"0x400",
"minGasLimit":"0x1388",
"networkID":"0x4cb2e",
"chainID":"0x4cb2e",
"maxCodeSize":"0x6000",
"maxCodeSizeTransition":"0x0",
"eip98Transition": "0x7fffffffffffffff",
"eip150Transition":"0x3a98",
"eip160Transition":"0x59d8",
"eip161abcTransition":"0x59d8",
"eip161dTransition":"0x59d8",
"eip155Transition":"0x59d8",
"eip140Transition":"0x7530",
"eip211Transition":"0x7530",
"eip214Transition":"0x7530",
"eip658Transition":"0x7530",
"eip145Transition":"0x9c40",
"eip1014Transition":"0x9c40",
"eip1052Transition":"0x9c40",
"eip1283Transition":"0x9c40"
},
"genesis":{
"seal":{
"ethereum":{
"nonce":"0x0000000000000000",
"mixHash":"0x0000000000000000000000000000000000000000000000000000000000000000"
}
},
"difficulty":"0x20000",
"author":"0x0000000000000000000000000000000000000000",
"timestamp":"0x59a4e76d",
"parentHash":"0x0000000000000000000000000000000000000000000000000000000000000000",
"extraData":"0x0000000000000000000000000000000000000000000000000000000b4dc0ffee",
"gasLimit":"0x47b760"
},
"nodes":[
"enode://dfa7aca3f5b635fbfe7d0b20575f25e40d9e27b4bfbb3cf74364a42023ad9f25c1a4383bcc8cced86ee511a7d03415345a4df05be37f1dff040e4c780699f1c0@168.61.153.255:31303",
"enode://ef441b20dd70aeabf0eac35c3b8a2854e5ce04db0e30be9152ea9fd129359dcbb3f803993303ff5781c755dfd7223f3fe43505f583cccb740949407677412ba9@40.74.91.252:31303",
"enode://953b5ea1c8987cf46008232a0160324fd00d41320ecf00e23af86ec8f5396b19eb57ddab37c78141be56f62e9077de4f4dfa0747fa768ed8c8531bbfb1046237@40.70.214.166:31303",
"enode://276e613dd4b277a66591e565711e6c8bb107f0905248a9f8f8228c1a87992e156e5114bb9937c02824a9d9d25f76340442cf86e2028bf5293cae19904fb2b98e@35.178.251.52:30303",
"enode://064c820d41e52ed7d426ac64b60506c2998235bedc7e67cb497c6faf7bb4fc54fe56fc82d0add3180b747c0c4f40a1108a6f84d7d0629ed606d504528e61cc57@3.8.5.3:30303",
"enode://90069fdabcc5e684fa5d59430bebbb12755d9362dfe5006a1485b13d71a78a3812d36e74dd7d88e50b51add01e097ea80f16263aeaa4f0230db6c79e2a97e7ca@217.29.191.142:30303",
"enode://0aac74b7fd28726275e466acb5e03bc88a95927e9951eb66b5efb239b2f798ada0690853b2f2823fe4efa408f0f3d4dd258430bc952a5ff70677b8625b3e3b14@40.115.33.57:40404",
"enode://0b96415a10f835106d83e090a0528eed5e7887e5c802a6d084e9f1993a9d0fc713781e6e4101f6365e9b91259712f291acc0a9e6e667e22023050d602c36fbe2@40.115.33.57:40414"
],
"accounts":{
"0000000000000000000000000000000000000001":{
"balance":"1",
"nonce":"0",
"builtin":{
"name":"ecrecover",
"pricing":{
"linear":{
"base":3000,
"word":0
}
}
}
},
"0000000000000000000000000000000000000002":{
"balance":"1",
"nonce":"0",
"builtin":{
"name":"sha256",
"pricing":{
"linear":{
"base":60,
"word":12
}
}
}
},
"0000000000000000000000000000000000000003":{
"balance":"1",
"nonce":"0",
"builtin":{
"name":"ripemd160",
"pricing":{
"linear":{
"base":600,
"word":120
}
}
}
},
"0000000000000000000000000000000000000004":{
"balance":"1",
"nonce":"0",
"builtin":{
"name":"identity",
"pricing":{
"linear":{
"base":15,
"word":3
}
}
}
},
"0000000000000000000000000000000000000005":{
"balance":"1",
"nonce":"0",
"builtin":{
"name":"modexp",
"activate_at":"0x7530",
"pricing":{
"modexp":{
"divisor":20
}
}
}
},
"0000000000000000000000000000000000000006":{
"balance":"1",
"nonce":"0",
"builtin":{
"name":"alt_bn128_add",
"activate_at":"0x7530",
"pricing":{
"linear":{
"base":500,
"word":0
}
}
}
},
"0000000000000000000000000000000000000007":{
"balance":"1",
"nonce":"0",
"builtin":{
"name":"alt_bn128_mul",
"activate_at":"0x7530",
"pricing":{
"linear":{
"base":40000,
"word":0
}
}
}
},
"0000000000000000000000000000000000000008":{
"balance":"1",
"nonce":"0",
"builtin":{
"name":"alt_bn128_pairing",
"activate_at":"0x7530",
"pricing":{
"alt_bn128_pairing":{
"base":100000,
"pair":80000
}
}
}
}
}
}
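The three specs above describe the same Stureby test chain for different clients. As a minimal sketch of how the Geth-flavoured spec round-trips through go-ethereum's core.Genesis type (the same decoding step the importGenesis wizard path below performs), assuming the file sits in the working directory and the field names of the go-ethereum release this change targets:

package main

import (
    "encoding/json"
    "fmt"
    "os"

    "github.com/ethereum/go-ethereum/core"
)

func main() {
    f, err := os.Open("stureby_geth.json") // assumed path, purely illustrative
    if err != nil {
        fmt.Fprintln(os.Stderr, "open genesis:", err)
        os.Exit(1)
    }
    defer f.Close()

    // Decode the JSON spec into the same struct puppeth itself uses.
    var genesis core.Genesis
    if err := json.NewDecoder(f).Decode(&genesis); err != nil {
        fmt.Fprintln(os.Stderr, "decode genesis:", err)
        os.Exit(1)
    }
    if genesis.Config != nil {
        fmt.Println("chain id:", genesis.Config.ChainID)
    }
    fmt.Println("pre-funded accounts:", len(genesis.Alloc))
}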
@@ -23,6 +23,7 @@ import (
"io/ioutil"
"math/big"
"net"
"net/url"
"os"
"path/filepath"
"sort"
@@ -118,6 +119,47 @@ func (w *wizard) readDefaultString(def string) string {
return def
}

// readDefaultYesNo reads a single line from stdin, trimming if from spaces and
// interpreting it as a 'yes' or a 'no'. If an empty line is entered, the default
// value is returned.
func (w *wizard) readDefaultYesNo(def bool) bool {
for {
fmt.Printf("> ")
text, err := w.in.ReadString('\n')
if err != nil {
log.Crit("Failed to read user input", "err", err)
}
if text = strings.ToLower(strings.TrimSpace(text)); text == "" {
return def
}
if text == "y" || text == "yes" {
return true
}
if text == "n" || text == "no" {
return false
}
log.Error("Invalid input, expected 'y', 'yes', 'n', 'no' or empty")
}
}

// readURL reads a single line from stdin, trimming if from spaces and trying to
// interpret it as a URL (http, https or file).
func (w *wizard) readURL() *url.URL {
for {
fmt.Printf("> ")
text, err := w.in.ReadString('\n')
if err != nil {
log.Crit("Failed to read user input", "err", err)
}
uri, err := url.Parse(strings.TrimSpace(text))
if err != nil {
log.Error("Invalid input, expected URL", "err", err)
continue
}
return uri
}
}

// readInt reads a single line from stdin, trimming if from spaces, enforcing it
// to parse into an integer.
func (w *wizard) readInt() int {
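The same yes/no prompt pattern can be exercised outside the wizard. A self-contained sketch using only the standard library follows (the wizard's version reads from w.in and aborts via log.Crit instead of falling back to the default on a read error):

package main

import (
    "bufio"
    "fmt"
    "os"
    "strings"
)

// readDefaultYesNo keeps prompting until it gets yes, no or an empty line.
func readDefaultYesNo(in *bufio.Reader, def bool) bool {
    for {
        fmt.Print("> ")
        text, err := in.ReadString('\n')
        if err != nil {
            return def // on read failure, fall back to the default
        }
        switch strings.ToLower(strings.TrimSpace(text)) {
        case "":
            return def
        case "y", "yes":
            return true
        case "n", "no":
            return false
        }
        fmt.Println("Invalid input, expected 'y', 'yes', 'n', 'no' or empty")
    }
}

func main() {
    fmt.Println("Proceed (y/n)? (default = yes)")
    fmt.Println("answer:", readDefaultYesNo(bufio.NewReader(os.Stdin), true))
}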
@@ -92,7 +92,7 @@ func (w *wizard) deployDashboard() {
pages = append(pages, page)
}
}
// Promt the user to chose one, enter manually or simply not list this service
// Prompt the user to chose one, enter manually or simply not list this service
defLabel, defChoice := "don't list", len(pages)+2
if len(pages) > 0 {
defLabel, defChoice = pages[0], 1
@@ -137,14 +137,14 @@ func (w *wizard) deployDashboard() {
if w.conf.ethstats != "" {
fmt.Println()
fmt.Println("Include ethstats secret on dashboard (y/n)? (default = yes)")
infos.trusted = w.readDefaultString("y") == "y"
infos.trusted = w.readDefaultYesNo(true)
}
// Try to deploy the dashboard container on the host
nocache := false
if existed {
fmt.Println()
fmt.Printf("Should the dashboard be built from scratch (y/n)? (default = no)\n")
nocache = w.readDefaultString("n") != "n"
nocache = w.readDefaultYesNo(false)
}
if out, err := deployDashboard(client, w.network, &w.conf, infos, nocache); err != nil {
log.Error("Failed to deploy dashboard container", "err", err)
@@ -67,11 +67,11 @@ func (w *wizard) deployEthstats() {
if existed {
fmt.Println()
fmt.Printf("Keep existing IP %v blacklist (y/n)? (default = yes)\n", infos.banned)
if w.readDefaultString("y") != "y" {
if !w.readDefaultYesNo(true) {
// The user might want to clear the entire list, although generally probably not
fmt.Println()
fmt.Printf("Clear out blacklist and start over (y/n)? (default = no)\n")
if w.readDefaultString("n") != "n" {
if w.readDefaultYesNo(false) {
infos.banned = nil
}
// Offer the user to explicitly add/remove certain IP addresses
@@ -106,7 +106,7 @@ func (w *wizard) deployEthstats() {
if existed {
fmt.Println()
fmt.Printf("Should the ethstats be built from scratch (y/n)? (default = no)\n")
nocache = w.readDefaultString("n") != "n"
nocache = w.readDefaultYesNo(false)
}
trusted := make([]string, 0, len(w.servers))
for _, client := range w.servers {
@@ -100,7 +100,7 @@ func (w *wizard) deployExplorer() {
if existed {
fmt.Println()
fmt.Printf("Should the explorer be built from scratch (y/n)? (default = no)\n")
nocache = w.readDefaultString("n") != "n"
nocache = w.readDefaultYesNo(false)
}
if out, err := deployExplorer(client, w.network, chain, infos, nocache); err != nil {
log.Error("Failed to deploy explorer container", "err", err)
@@ -81,7 +81,7 @@ func (w *wizard) deployFaucet() {
if infos.captchaToken != "" {
fmt.Println()
fmt.Println("Reuse previous reCaptcha API authorization (y/n)? (default = yes)")
if w.readDefaultString("y") != "y" {
if !w.readDefaultYesNo(true) {
infos.captchaToken, infos.captchaSecret = "", ""
}
}
@@ -89,7 +89,7 @@ func (w *wizard) deployFaucet() {
// No previous authorization (or old one discarded)
fmt.Println()
fmt.Println("Enable reCaptcha protection against robots (y/n)? (default = no)")
if w.readDefaultString("n") == "n" {
if !w.readDefaultYesNo(false) {
log.Warn("Users will be able to requests funds via automated scripts")
} else {
// Captcha protection explicitly requested, read the site and secret keys
@@ -132,7 +132,7 @@ func (w *wizard) deployFaucet() {
} else {
fmt.Println()
fmt.Printf("Reuse previous (%s) funding account (y/n)? (default = yes)\n", key.Address.Hex())
if w.readDefaultString("y") != "y" {
if !w.readDefaultYesNo(true) {
infos.node.keyJSON, infos.node.keyPass = "", ""
}
}
@@ -166,7 +166,7 @@ func (w *wizard) deployFaucet() {
if existed {
fmt.Println()
fmt.Printf("Should the faucet be built from scratch (y/n)? (default = no)\n")
nocache = w.readDefaultString("n") != "n"
nocache = w.readDefaultYesNo(false)
}
if out, err := deployFaucet(client, w.network, w.conf.bootnodes, infos, nocache); err != nil {
log.Error("Failed to deploy faucet container", "err", err)
@@ -20,9 +20,13 @@ import (
"bytes"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"math/big"
"math/rand"
"net/http"
"os"
"path/filepath"
"time"

"github.com/ethereum/go-ethereum/common"
@@ -40,11 +44,12 @@ func (w *wizard) makeGenesis() {
Difficulty: big.NewInt(524288),
Alloc: make(core.GenesisAlloc),
Config: &params.ChainConfig{
HomesteadBlock: big.NewInt(1),
EIP150Block: big.NewInt(2),
EIP155Block: big.NewInt(3),
EIP158Block: big.NewInt(3),
ByzantiumBlock: big.NewInt(4),
HomesteadBlock: big.NewInt(1),
EIP150Block: big.NewInt(2),
EIP155Block: big.NewInt(3),
EIP158Block: big.NewInt(3),
ByzantiumBlock: big.NewInt(4),
ConstantinopleBlock: big.NewInt(5),
},
}
// Figure out which consensus engine to choose
@@ -114,9 +119,13 @@ func (w *wizard) makeGenesis() {
}
break
}
// Add a batch of precompile balances to avoid them getting deleted
for i := int64(0); i < 256; i++ {
genesis.Alloc[common.BigToAddress(big.NewInt(i))] = core.GenesisAccount{Balance: big.NewInt(1)}
fmt.Println()
fmt.Println("Should the precompile-addresses (0x1 .. 0xff) be pre-funded with 1 wei? (advisable yes)")
if w.readDefaultYesNo(true) {
// Add a batch of precompile balances to avoid them getting deleted
for i := int64(0); i < 256; i++ {
genesis.Alloc[common.BigToAddress(big.NewInt(i))] = core.GenesisAccount{Balance: big.NewInt(1)}
}
}
// Query the user for some custom extras
fmt.Println()
@@ -130,53 +139,138 @@ func (w *wizard) makeGenesis() {
w.conf.flush()
}

// importGenesis imports a Geth genesis spec into puppeth.
func (w *wizard) importGenesis() {
// Request the genesis JSON spec URL from the user
fmt.Println()
fmt.Println("Where's the genesis file? (local file or http/https url)")
url := w.readURL()

// Convert the various allowed URLs to a reader stream
var reader io.Reader

switch url.Scheme {
case "http", "https":
// Remote web URL, retrieve it via an HTTP client
res, err := http.Get(url.String())
if err != nil {
log.Error("Failed to retrieve remote genesis", "err", err)
return
}
defer res.Body.Close()
reader = res.Body

case "":
// Schemaless URL, interpret as a local file
file, err := os.Open(url.String())
if err != nil {
log.Error("Failed to open local genesis", "err", err)
return
}
defer file.Close()
reader = file

default:
log.Error("Unsupported genesis URL scheme", "scheme", url.Scheme)
return
}
// Parse the genesis file and inject it successful
var genesis core.Genesis
if err := json.NewDecoder(reader).Decode(&genesis); err != nil {
log.Error("Invalid genesis spec: %v", err)
return
}
log.Info("Imported genesis block")

w.conf.Genesis = &genesis
w.conf.flush()
}

// manageGenesis permits the modification of chain configuration parameters in
// a genesis config and the export of the entire genesis spec.
func (w *wizard) manageGenesis() {
// Figure out whether to modify or export the genesis
fmt.Println()
fmt.Println(" 1. Modify existing fork rules")
fmt.Println(" 2. Export genesis configuration")
fmt.Println(" 2. Export genesis configurations")
fmt.Println(" 3. Remove genesis configuration")

choice := w.read()
switch {
case choice == "1":
switch choice {
case "1":
// Fork rule updating requested, iterate over each fork
fmt.Println()
fmt.Printf("Which block should Homestead come into effect? (default = %v)\n", w.conf.Genesis.Config.HomesteadBlock)
w.conf.Genesis.Config.HomesteadBlock = w.readDefaultBigInt(w.conf.Genesis.Config.HomesteadBlock)

fmt.Println()
fmt.Printf("Which block should EIP150 come into effect? (default = %v)\n", w.conf.Genesis.Config.EIP150Block)
fmt.Printf("Which block should EIP150 (Tangerine Whistle) come into effect? (default = %v)\n", w.conf.Genesis.Config.EIP150Block)
w.conf.Genesis.Config.EIP150Block = w.readDefaultBigInt(w.conf.Genesis.Config.EIP150Block)

fmt.Println()
fmt.Printf("Which block should EIP155 come into effect? (default = %v)\n", w.conf.Genesis.Config.EIP155Block)
fmt.Printf("Which block should EIP155 (Spurious Dragon) come into effect? (default = %v)\n", w.conf.Genesis.Config.EIP155Block)
w.conf.Genesis.Config.EIP155Block = w.readDefaultBigInt(w.conf.Genesis.Config.EIP155Block)

fmt.Println()
fmt.Printf("Which block should EIP158 come into effect? (default = %v)\n", w.conf.Genesis.Config.EIP158Block)
fmt.Printf("Which block should EIP158/161 (also Spurious Dragon) come into effect? (default = %v)\n", w.conf.Genesis.Config.EIP158Block)
w.conf.Genesis.Config.EIP158Block = w.readDefaultBigInt(w.conf.Genesis.Config.EIP158Block)

fmt.Println()
fmt.Printf("Which block should Byzantium come into effect? (default = %v)\n", w.conf.Genesis.Config.ByzantiumBlock)
w.conf.Genesis.Config.ByzantiumBlock = w.readDefaultBigInt(w.conf.Genesis.Config.ByzantiumBlock)

fmt.Println()
fmt.Printf("Which block should Constantinople come into effect? (default = %v)\n", w.conf.Genesis.Config.ConstantinopleBlock)
w.conf.Genesis.Config.ConstantinopleBlock = w.readDefaultBigInt(w.conf.Genesis.Config.ConstantinopleBlock)
if w.conf.Genesis.Config.PetersburgBlock == nil {
w.conf.Genesis.Config.PetersburgBlock = w.conf.Genesis.Config.ConstantinopleBlock
}
fmt.Println()
fmt.Printf("Which block should Constantinople-Fix (remove EIP-1283) come into effect? (default = %v)\n", w.conf.Genesis.Config.PetersburgBlock)
w.conf.Genesis.Config.PetersburgBlock = w.readDefaultBigInt(w.conf.Genesis.Config.PetersburgBlock)

out, _ := json.MarshalIndent(w.conf.Genesis.Config, "", " ")
fmt.Printf("Chain configuration updated:\n\n%s\n", out)

case choice == "2":
w.conf.flush()

case "2":
// Save whatever genesis configuration we currently have
fmt.Println()
fmt.Printf("Which file to save the genesis into? (default = %s.json)\n", w.network)
out, _ := json.MarshalIndent(w.conf.Genesis, "", " ")
if err := ioutil.WriteFile(w.readDefaultString(fmt.Sprintf("%s.json", w.network)), out, 0644); err != nil {
log.Error("Failed to save genesis file", "err", err)
}
log.Info("Exported existing genesis block")
fmt.Printf("Which folder to save the genesis specs into? (default = current)\n")
fmt.Printf(" Will create %s.json, %s-aleth.json, %s-harmony.json, %s-parity.json\n", w.network, w.network, w.network, w.network)

case choice == "3":
folder := w.readDefaultString(".")
if err := os.MkdirAll(folder, 0755); err != nil {
log.Error("Failed to create spec folder", "folder", folder, "err", err)
return
}
out, _ := json.MarshalIndent(w.conf.Genesis, "", " ")

// Export the native genesis spec used by puppeth and Geth
gethJson := filepath.Join(folder, fmt.Sprintf("%s.json", w.network))
if err := ioutil.WriteFile((gethJson), out, 0644); err != nil {
log.Error("Failed to save genesis file", "err", err)
return
}
log.Info("Saved native genesis chain spec", "path", gethJson)

// Export the genesis spec used by Aleth (formerly C++ Ethereum)
if spec, err := newAlethGenesisSpec(w.network, w.conf.Genesis); err != nil {
log.Error("Failed to create Aleth chain spec", "err", err)
} else {
saveGenesis(folder, w.network, "aleth", spec)
}
// Export the genesis spec used by Parity
if spec, err := newParityChainSpec(w.network, w.conf.Genesis, []string{}); err != nil {
log.Error("Failed to create Parity chain spec", "err", err)
} else {
saveGenesis(folder, w.network, "parity", spec)
}
// Export the genesis spec used by Harmony (formerly EthereumJ
saveGenesis(folder, w.network, "harmony", w.conf.Genesis)

case "3":
// Make sure we don't have any services running
if len(w.conf.servers()) > 0 {
log.Error("Genesis reset requires all services and servers torn down")
@@ -186,8 +280,20 @@ func (w *wizard) manageGenesis() {

w.conf.Genesis = nil
w.conf.flush()

default:
log.Error("That's not something I can do")
return
}
}

// saveGenesis JSON encodes an arbitrary genesis spec into a pre-defined file.
func saveGenesis(folder, network, client string, spec interface{}) {
path := filepath.Join(folder, fmt.Sprintf("%s-%s.json", network, client))

out, _ := json.Marshal(spec)
if err := ioutil.WriteFile(path, out, 0644); err != nil {
log.Error("Failed to save genesis file", "client", client, "err", err)
return
}
log.Info("Saved genesis chain spec", "client", client, "path", path)
}
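The export branch above writes one native spec plus one converted spec per client, all named after the network. A tiny illustrative sketch of the resulting file names, derived from the Printf and the saveGenesis helper above; the folder and the network name "stureby" are assumptions for the example:

package main

import (
    "fmt"
    "path/filepath"
)

func main() {
    folder, network := ".", "stureby"
    // Native Geth/puppeth spec.
    fmt.Println(filepath.Join(folder, fmt.Sprintf("%s.json", network)))
    // Per-client specs, following the saveGenesis naming scheme.
    for _, client := range []string{"aleth", "harmony", "parity"} {
        fmt.Println(filepath.Join(folder, fmt.Sprintf("%s-%s.json", network, client)))
    }
}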
@@ -61,14 +61,14 @@ func (w *wizard) run() {
// Make sure we have a good network name to work with
fmt.Println()
// Docker accepts hyphens in image names, but doesn't like it for container names
if w.network == "" {
fmt.Println("Please specify a network name to administer (no spaces or hyphens, please)")
fmt.Println("Please specify a network name to administer (no spaces, hyphens or capital letters please)")
for {
w.network = w.readString()
if !strings.Contains(w.network, " ") && !strings.Contains(w.network, "-") {
if !strings.Contains(w.network, " ") && !strings.Contains(w.network, "-") && strings.ToLower(w.network) == w.network {
fmt.Printf("\nSweet, you can set this via --network=%s next time!\n\n", w.network)
break
}
log.Error("I also like to live dangerously, still no spaces or hyphens")
log.Error("I also like to live dangerously, still no spaces, hyphens or capital letters")
}
}
log.Info("Administering Ethereum network", "name", w.network)
@@ -131,7 +131,20 @@ func (w *wizard) run() {

case choice == "2":
if w.conf.Genesis == nil {
w.makeGenesis()
fmt.Println()
fmt.Println("What would you like to do? (default = create)")
fmt.Println(" 1. Create new genesis from scratch")
fmt.Println(" 2. Import already existing genesis")

choice := w.read()
switch {
case choice == "" || choice == "1":
w.makeGenesis()
case choice == "2":
w.importGenesis()
default:
log.Error("That's not something I can do")
}
} else {
w.manageGenesis()
}
@@ -149,7 +162,6 @@ func (w *wizard) run() {
} else {
w.manageComponents()
}

default:
log.Error("That's not something I can do")
}
@@ -82,7 +82,6 @@ func (w *wizard) gatherStats(server string, pubkey []byte, client *sshClient) *s
logger.Info("Starting remote server health-check")

stat := &serverStat{
address: client.address,
services: make(map[string]map[string]string),
}
if client == nil {
@@ -94,6 +93,8 @@ func (w *wizard) gatherStats(server string, pubkey []byte, client *sshClient) *s
}
client = conn
}
stat.address = client.address

// Client connected one way or another, run health-checks
logger.Debug("Checking for nginx availability")
if infos, err := checkNginx(client, w.network); err != nil {
@@ -214,6 +215,9 @@ func (stats serverStats) render() {
if len(stat.address) > len(separator[1]) {
separator[1] = strings.Repeat("-", len(stat.address))
}
if len(stat.failure) > len(separator[1]) {
separator[1] = strings.Repeat("-", len(stat.failure))
}
for service, configs := range stat.services {
if len(service) > len(separator[2]) {
separator[2] = strings.Repeat("-", len(service))
@@ -250,7 +254,11 @@ func (stats serverStats) render() {
sort.Strings(services)

if len(services) == 0 {
table.Append([]string{server, stats[server].address, "", "", ""})
if stats[server].failure != "" {
table.Append([]string{server, stats[server].failure, "", "", ""})
} else {
table.Append([]string{server, stats[server].address, "", "", ""})
}
}
for j, service := range services {
// Add an empty line between all services
@@ -62,14 +62,14 @@ func (w *wizard) manageServers() {
}
}

// makeServer reads a single line from stdin and interprets it as a hostname to
// connect to. It tries to establish a new SSH session and also executing some
// baseline validations.
// makeServer reads a single line from stdin and interprets it as
// username:identity@hostname to connect to. It tries to establish a
// new SSH session and also executing some baseline validations.
//
// If connection succeeds, the server is added to the wizards configs!
func (w *wizard) makeServer() string {
fmt.Println()
fmt.Println("Please enter remote server's address:")
fmt.Println("What is the remote server's address ([username[:identity]@]hostname[:port])?")

// Read and dial the server to ensure docker is present
input := w.readString()
@@ -87,7 +87,7 @@ func (w *wizard) makeServer() string {
return input
}

// selectServer lists the user all the currnetly known servers to choose from,
// selectServer lists the user all the currently known servers to choose from,
// also granting the option to add a new one.
func (w *wizard) selectServer() string {
// List the available server to the user and wait for a choice
@@ -115,7 +115,7 @@ func (w *wizard) selectServer() string {
// manageComponents displays a list of network components the user can tear down
// and an option
func (w *wizard) manageComponents() {
// List all the componens we can tear down, along with an entry to deploy a new one
// List all the components we can tear down, along with an entry to deploy a new one
fmt.Println()

var serviceHosts, serviceNames []string
@@ -41,12 +41,12 @@ func (w *wizard) ensureVirtualHost(client *sshClient, port int, def string) (str
// Reverse proxy is not running, offer to deploy a new one
fmt.Println()
fmt.Println("Allow sharing the port with other services (y/n)? (default = yes)")
if w.readDefaultString("y") == "y" {
if w.readDefaultYesNo(true) {
nocache := false
if proxy != nil {
fmt.Println()
fmt.Printf("Should the reverse-proxy be rebuilt from scratch (y/n)? (default = no)\n")
nocache = w.readDefaultString("n") != "n"
nocache = w.readDefaultYesNo(false)
}
if out, err := deployNginx(client, w.network, port, nocache); err != nil {
log.Error("Failed to deploy reverse-proxy", "err", err)
@@ -50,7 +50,7 @@ func (w *wizard) deployNode(boot bool) {
if boot {
infos = &nodeInfos{port: 30303, peersTotal: 512, peersLight: 256}
} else {
infos = &nodeInfos{port: 30303, peersTotal: 50, peersLight: 0, gasTarget: 4.7, gasPrice: 18}
infos = &nodeInfos{port: 30303, peersTotal: 50, peersLight: 0, gasTarget: 7.5, gasLimit: 10, gasPrice: 1}
}
}
existed := err == nil
@@ -126,7 +126,7 @@ func (w *wizard) deployNode(boot bool) {
} else {
fmt.Println()
fmt.Printf("Reuse previous (%s) signing account (y/n)? (default = yes)\n", key.Address.Hex())
if w.readDefaultString("y") != "y" {
if !w.readDefaultYesNo(true) {
infos.keyJSON, infos.keyPass = "", ""
}
}
@@ -152,6 +152,10 @@ func (w *wizard) deployNode(boot bool) {
fmt.Printf("What gas limit should empty blocks target (MGas)? (default = %0.3f)\n", infos.gasTarget)
infos.gasTarget = w.readDefaultFloat(infos.gasTarget)

fmt.Println()
fmt.Printf("What gas limit should full blocks target (MGas)? (default = %0.3f)\n", infos.gasLimit)
infos.gasLimit = w.readDefaultFloat(infos.gasLimit)

fmt.Println()
fmt.Printf("What gas price should the signer require (GWei)? (default = %0.3f)\n", infos.gasPrice)
infos.gasPrice = w.readDefaultFloat(infos.gasPrice)
@@ -161,7 +165,7 @@ func (w *wizard) deployNode(boot bool) {
if existed {
fmt.Println()
fmt.Printf("Should the node be built from scratch (y/n)? (default = no)\n")
nocache = w.readDefaultString("n") != "n"
nocache = w.readDefaultYesNo(false)
}
if out, err := deployNode(client, w.network, w.conf.bootnodes, infos, nocache); err != nil {
log.Error("Failed to deploy Ethereum node container", "err", err)
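The new node defaults are expressed in the units shown in the prompts: gasTarget and gasLimit in millions of gas (MGas), gasPrice in GWei. A tiny illustrative conversion of those defaults, assuming the usual 1 MGas = 10^6 gas and 1 GWei = 10^9 wei scaling (the exact plumbing of these values into the node configuration lives outside this hunk):

package main

import "fmt"

func main() {
    gasTargetMGas, gasLimitMGas, gasPriceGWei := 7.5, 10.0, 1.0
    fmt.Printf("empty-block gas target: %.0f gas\n", gasTargetMGas*1e6) // 7,500,000 gas
    fmt.Printf("full-block gas ceiling: %.0f gas\n", gasLimitMGas*1e6)  // 10,000,000 gas
    fmt.Printf("minimum gas price:      %.0f wei\n", gasPriceGWei*1e9)  // 1 GWei = 10^9 wei
}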