Compare commits
641 Commits (88eeb817e4 … d7fa40087c)
@@ -3,3 +3,16 @@ root: ./docs/src
structure:
  readme: introduction.md
  summary: SUMMARY.md

redirects:
  wallet: ./wallet-guide/README.md
  wallet/app-wallets: ./wallet-guide/apps.md
  wallet/app-wallets/trust-wallet: ./wallet-guide/trust-wallet.md
  wallet/app-wallets/ledger-live: ./wallet-guide/ledger-live.md
  wallet/cli-wallets: ./wallet-guide/cli.md
  wallet/cli-wallets/paper-wallet: ./paper-wallet/README.md
  wallet/cli-wallets/paper-wallet/paper-wallet-usage: ./paper-wallet/paper-wallet-usage.md
  wallet/cli-wallets/remote-wallet: ./hardware-wallets/README.md
  wallet/cli-wallets/remote-wallet/ledger: ./hardware-wallets/ledger.md
  wallet/cli-wallets/file-system-wallet: ./file-system-wallet/README.md
  wallet/support: ./wallet-guide/support.md
36 .mergify.yml
@@ -1,9 +1,40 @@
# Validate your changes with:
#
#   $ curl -F 'data=@.mergify.yml' https://gh.mergify.io/validate
#   $ curl -F 'data=@.mergify.yml' https://gh.mergify.io/validate/
#
# https://doc.mergify.io/
pull_request_rules:
  - name: automatic merge (squash) on CI success
    conditions:
      - status-success=buildkite/solana
      #- status-success=Travis CI - Pull Request
      - status-success=ci-gate
      - label=automerge
      - author≠@dont-squash-my-commits
    actions:
      merge:
        method: squash
  # Join the dont-squash-my-commits group if you won't like your commits squashed
  - name: automatic merge (rebase) on CI success
    conditions:
      - status-success=buildkite/solana
      #- status-success=Travis CI - Pull Request
      - status-success=ci-gate
      - label=automerge
      - author=@dont-squash-my-commits
    actions:
      merge:
        method: rebase
  - name: remove automerge label on CI failure
    conditions:
      - label=automerge
      - "#status-failure!=0"
    actions:
      label:
        remove:
          - automerge
      comment:
        message: automerge label removed due to a CI failure
  - name: remove outdated reviews
    conditions:
      - base=master
@@ -21,7 +52,6 @@ pull_request_rules:
      - automerge
  - name: v1.0 backport
    conditions:
      - base=master
      - label=v1.0
    actions:
      backport:
@@ -30,7 +60,6 @@ pull_request_rules:
        - v1.0
  - name: v1.1 backport
    conditions:
      - base=master
      - label=v1.1
    actions:
      backport:
@@ -39,7 +68,6 @@ pull_request_rules:
        - v1.1
  - name: v1.2 backport
    conditions:
      - base=master
      - label=v1.2
    actions:
      backport:
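
The header comment in this file doubles as a usage example: a rule change can be validated locally before pushing (command taken verbatim from that comment; only curl is required):

```bash
# Validate a locally edited .mergify.yml against Mergify's hosted validator.
curl -F 'data=@.mergify.yml' https://gh.mergify.io/validate/
```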
@@ -13,11 +13,14 @@ script:
  - source ci/env.sh
  - ci/publish-tarball.sh

branches:
  only:
    - master
    - /^v\d+\.\d+/

if: type IN (api, cron) OR tag IS present

notifications:
  slack:
    on_success: change
6946 Cargo.lock (generated)
File diff suppressed because it is too large.

14 Cargo.toml
@@ -5,9 +5,6 @@ members = [
    "bench-tps",
    "accounts-bench",
    "banking-bench",
    "chacha",
    "chacha-cuda",
    "chacha-sys",
    "cli-config",
    "client",
    "core",
@@ -27,10 +24,12 @@ members = [
    "logger",
    "log-analyzer",
    "merkle-tree",
    "stake-o-matic",
    "streamer",
    "measure",
    "metrics",
    "net-shaper",
    "notifier",
    "programs/bpf_loader",
    "programs/budget",
    "programs/btc_spv",
@@ -41,22 +40,21 @@ members = [
    "programs/noop",
    "programs/ownable",
    "programs/stake",
    "programs/storage",
    "programs/vest",
    "programs/vote",
    "archiver",
    "archiver-lib",
    "archiver-utils",
    "remote-wallet",
    "ramp-tps",
    "runtime",
    "sdk",
    "sdk-c",
    "scripts",
    "stake-accounts",
    "stake-monitor",
    "sys-tuner",
    "tokens",
    "transaction-status",
    "upload-perf",
    "net-utils",
    "version",
    "vote-signer",
    "cli",
    "rayon-threadlimit",
125 README.md
@@ -1,23 +1,17 @@
<p align="center">
  <a href="https://solana.com">
    <img alt="Solana" src="https://i.imgur.com/OMnvVEz.png" width="250" />
  </a>
</p>

[](https://crates.io/crates/solana-core)
[](https://docs.rs/solana-core)
[](https://buildkite.com/solana-labs/solana/builds?branch=master)
[](https://codecov.io/gh/solana-labs/solana)

Blockchain Rebuilt for Scale
===
# Building

Solana™ is a new blockchain architecture built from the ground up for scale. The architecture supports
up to 710 thousand transactions per second on a gigabit network.

Read all about it at [Solana: Blockchain Rebuilt for Scale](https://docs.solana.com/v/master).

Developing
===

Building
---

Install rustc, cargo and rustfmt:
## **1. Install rustc, cargo and rustfmt.**

```bash
$ curl https://sh.rustup.rs -sSf | sh
@@ -38,112 +32,39 @@ $ sudo apt-get update
$ sudo apt-get install libssl-dev libudev-dev pkg-config zlib1g-dev llvm clang
```

Download the source code:
## **2. Download the source code.**

```bash
$ git clone https://github.com/solana-labs/solana.git
$ cd solana
```

Build
## **3. Build.**

```bash
$ cargo build
```

Then to run a minimal local cluster
## **4. Run a minimal local cluster.**
```bash
$ ./run.sh
```

Testing
---
# Testing

Run the test suite:
**Run the test suite:**

```bash
$ cargo test
```

Local Testnet
---

Start your own testnet locally; instructions are in the online docs [Solana: Blockchain Rebuilt for Scale: Getting Started](https://docs.solana.com/building-from-source).

Remote Testnets
---
### Starting a local testnet
Start your own testnet locally; instructions are in the [online docs](https://docs.solana.com/bench-tps).

### Accessing the remote testnet
* `testnet` - public stable testnet accessible via devnet.solana.com. Runs 24/7


## Deploy process

They are deployed with the `ci/testnet-manager.sh` script through a list of [scheduled
buildkite jobs](https://buildkite.com/solana-labs/testnet-management/settings/schedules).
Each testnet can be manually manipulated from buildkite as well.

## How do I reset the testnet?
Manually trigger the [testnet-management](https://buildkite.com/solana-labs/testnet-management) pipeline
and, when prompted, select the desired testnet.

## How can I scale the tx generation rate?

Increase the TX rate by increasing the number of cores on the client machine running
`bench-tps`, or by running multiple clients. Decrease it by lowering the core count or by setting the rayon env
variable `RAYON_NUM_THREADS=<xx>`.
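
A minimal sketch of that env-var knob, assuming a release build of the workspace's `solana-bench-tps` binary (the thread count here is illustrative, and the cluster-specific flags the client normally takes are omitted):

```bash
# Cap rayon's worker pool at 8 threads to lower the generated TX rate;
# raise the count, or add client machines, to push it higher.
RAYON_NUM_THREADS=8 cargo run --release --bin solana-bench-tps
```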

## How can I test a change on the testnet?

Currently, a merged PR is the only way to test a change on the testnet. But you
can run your own testnet using the scripts in the `net/` directory.
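
As a hedged sketch of that workflow (the `net.sh` entry point and its subcommands are assumptions here, not confirmed by this diff; consult the scripts in `net/` for the actual interface):

```bash
# Hypothetical invocation; check net/ for the real script names and flags.
cd net
./net.sh start   # assumed subcommand: bring up a private testnet with your change
./net.sh stop    # assumed subcommand: tear it down when finished
```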

## Adjusting the number of clients or validators on the testnet
Edit `ci/testnet-manager.sh`


## Metrics Server Maintenance
Sometimes the dashboard becomes unresponsive. This happens due to a glitch in the metrics server.
The current solution is to reset the metrics server. Use the following steps.

1. The server is hosted in a GCP VM instance. Check if the VM instance is down by trying to SSH
   into it from the GCP console. The name of the VM is ```metrics-solana-com```.
2. If the VM is inaccessible, reset it from the GCP console.
3. Once the VM is up (or was already up), the metrics services can be restarted from build automation.
    1. Navigate to https://buildkite.com/solana-labs/metrics-dot-solana-dot-com in your web browser
    2. Click on ```New Build```
    3. This will show a pop-up dialog. Click on the ```options``` drop down.
    4. Type ```FORCE_START=true``` in the ```Environment Variables``` text box.
    5. Click ```Create Build```
    6. This will restart the metrics services, and the dashboards should be accessible afterwards.

## Debugging Testnet
Testnet may exhibit different symptoms of failure. Primary statistics to check are
1. Rise in Confirmation Time
2. Nodes are not voting
3. Panics, and OOM notifications

Check the following if there are any signs of failure.
1. Did testnet deployment fail?
    1. View buildkite logs for the last deployment: https://buildkite.com/solana-labs/testnet-management
    2. Use the relevant branch
    3. If the deployment failed, look at the build logs. The build artifacts for each remote node are uploaded.
       It's a good first step to triage from these logs.
2. You may have to log into a remote node if the deployment succeeded, but something failed during runtime.
    1. Get the private key for the testnet deployment from the ```metrics-solana-com``` GCP instance.
    2. SSH into ```metrics-solana-com``` using the GCP console and do the following.
       ```bash
       sudo bash
       cd ~buildkite-agent/.ssh
       ls
       ```
    3. Copy the relevant private key to your local machine
    4. Find the public IP address of the AWS instance for the remote node using the AWS console
    5. ```ssh -i <private key file> ubuntu@<ip address of remote node>```
    6. The logs are in the ```~solana/solana``` folder

Benchmarking
---
# Benchmarking

First install the nightly build of rustc. `cargo bench` requires use of the
unstable features only available in the nightly build.
@@ -158,13 +79,11 @@ Run the benchmarks:
$ cargo +nightly bench
```

Release Process
---
# Release Process

The release process for this project is described [here](RELEASE.md).


Code coverage
---
# Code coverage

To generate code coverage statistics:

@@ -173,7 +92,6 @@ $ scripts/coverage.sh
$ open target/cov/lcov-local/index.html
```

Why coverage? While most see coverage as a code quality metric, we see it primarily as a developer
productivity metric. When a developer makes a change to the codebase, presumably it's a *solution* to
some problem. Our unit-test suite is how we encode the set of *problems* the codebase solves. Running
@@ -186,7 +104,6 @@ better way to solve the same problem, a Pull Request with your solution would mo
welcome! Likewise, if rewriting a test can better communicate what code it's protecting, please
send us that patch!

Disclaimer
===
# Disclaimer

All claims, content, designs, algorithms, estimates, roadmaps, specifications, and performance measurements described in this project are done with the author's best effort. It is up to the reader to check and validate their accuracy and truthfulness. Furthermore, nothing in this project constitutes a solicitation for investment.
@@ -116,7 +116,8 @@ There are three release channels that map to branches as follows:

1. After the new release has been tagged, update the Cargo.toml files on **release branch** to the next semantic version (e.g. 0.9.0 -> 0.9.1) with:
   ```
   scripts/increment-cargo-version.sh patch
   $ scripts/increment-cargo-version.sh patch
   $ ./scripts/cargo-for-all-lock-files.sh tree
   ```
1. Rebuild to get an updated version of `Cargo.lock`:
   ```
@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-accounts-bench"
version = "1.1.0"
version = "1.2.8"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -10,10 +10,13 @@ homepage = "https://solana.com/"
[dependencies]
log = "0.4.6"
rayon = "1.3.0"
solana-logger = { path = "../logger", version = "1.1.0" }
solana-runtime = { path = "../runtime", version = "1.1.0" }
solana-measure = { path = "../measure", version = "1.1.0" }
solana-sdk = { path = "../sdk", version = "1.1.0" }
rand = "0.6.5"
clap = "2.33.0"
solana-logger = { path = "../logger", version = "1.2.8" }
solana-runtime = { path = "../runtime", version = "1.2.8" }
solana-measure = { path = "../measure", version = "1.2.8" }
solana-sdk = { path = "../sdk", version = "1.2.8" }
rand = "0.7.0"
clap = "2.33.1"
crossbeam-channel = "0.4"

[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]
@@ -1,9 +1,11 @@
use clap::{value_t, App, Arg};
use rayon::prelude::*;
use solana_measure::measure::Measure;
use solana_runtime::accounts::{create_test_accounts, update_accounts, Accounts};
use solana_runtime::{
    accounts::{create_test_accounts, update_accounts, Accounts},
    accounts_index::Ancestors,
};
use solana_sdk::pubkey::Pubkey;
use std::collections::HashMap;
use std::fs;
use std::path::PathBuf;

@@ -76,7 +78,7 @@ fn main() {
        num_slots,
        create_time
    );
    let mut ancestors: HashMap<u64, usize> = vec![(0, 0)].into_iter().collect();
    let mut ancestors: Ancestors = vec![(0, 0)].into_iter().collect();
    for i in 1..num_slots {
        ancestors.insert(i as u64, i - 1);
        accounts.add_root(i as u64);
@@ -1,40 +0,0 @@
[package]
name = "solana-archiver-lib"
version = "1.1.0"
description = "Solana Archiver Library"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
edition = "2018"

[dependencies]
bincode = "1.2.1"
crossbeam-channel = "0.4"
ed25519-dalek = "=1.0.0-pre.1"
log = "0.4.8"
rand = "0.6.5"
rand_chacha = "0.1.1"
solana-client = { path = "../client", version = "1.1.0" }
solana-storage-program = { path = "../programs/storage", version = "1.1.0" }
thiserror = "1.0"
serde = "1.0.105"
serde_json = "1.0.48"
serde_derive = "1.0.103"
solana-net-utils = { path = "../net-utils", version = "1.1.0" }
solana-chacha = { path = "../chacha", version = "1.1.0" }
solana-chacha-sys = { path = "../chacha-sys", version = "1.1.0" }
solana-ledger = { path = "../ledger", version = "1.1.0" }
solana-logger = { path = "../logger", version = "1.1.0" }
solana-perf = { path = "../perf", version = "1.1.0" }
solana-sdk = { path = "../sdk", version = "1.1.0" }
solana-core = { path = "../core", version = "1.1.0" }
solana-streamer = { path = "../streamer", version = "1.1.0" }
solana-archiver-utils = { path = "../archiver-utils", version = "1.1.0" }
solana-metrics = { path = "../metrics", version = "1.1.0" }

[dev-dependencies]
hex = "0.4.2"

[lib]
name = "solana_archiver_lib"
@ -1,942 +0,0 @@
|
||||
use crate::result::ArchiverError;
|
||||
use crossbeam_channel::unbounded;
|
||||
use rand::{thread_rng, Rng, SeedableRng};
|
||||
use rand_chacha::ChaChaRng;
|
||||
use solana_archiver_utils::sample_file;
|
||||
use solana_chacha::chacha::{chacha_cbc_encrypt_ledger, CHACHA_BLOCK_SIZE};
|
||||
use solana_client::{
|
||||
rpc_client::RpcClient, rpc_request::RpcRequest, rpc_response::RpcStorageTurn,
|
||||
thin_client::ThinClient,
|
||||
};
|
||||
use solana_core::{
|
||||
cluster_info::{ClusterInfo, Node, VALIDATOR_PORT_RANGE},
|
||||
cluster_slots::ClusterSlots,
|
||||
contact_info::ContactInfo,
|
||||
gossip_service::GossipService,
|
||||
repair_service,
|
||||
repair_service::{RepairService, RepairSlotRange, RepairStrategy},
|
||||
serve_repair::ServeRepair,
|
||||
shred_fetch_stage::ShredFetchStage,
|
||||
sigverify_stage::{DisabledSigVerifier, SigVerifyStage},
|
||||
storage_stage::NUM_STORAGE_SAMPLES,
|
||||
window_service::WindowService,
|
||||
};
|
||||
use solana_ledger::{
|
||||
blockstore::Blockstore, leader_schedule_cache::LeaderScheduleCache, shred::Shred,
|
||||
};
|
||||
use solana_net_utils::bind_in_range;
|
||||
use solana_perf::packet::Packets;
|
||||
use solana_perf::packet::{limited_deserialize, PACKET_DATA_SIZE};
|
||||
use solana_perf::recycler::Recycler;
|
||||
use solana_sdk::packet::Packet;
|
||||
use solana_sdk::{
|
||||
account_utils::StateMut,
|
||||
client::{AsyncClient, SyncClient},
|
||||
clock::{get_complete_segment_from_slot, get_segment_from_slot, Slot},
|
||||
commitment_config::CommitmentConfig,
|
||||
hash::Hash,
|
||||
message::Message,
|
||||
signature::{Keypair, Signature, Signer},
|
||||
timing::timestamp,
|
||||
transaction::Transaction,
|
||||
transport::TransportError,
|
||||
};
|
||||
use solana_storage_program::{
|
||||
storage_contract::StorageContract,
|
||||
storage_instruction::{self, StorageAccountType},
|
||||
};
|
||||
use solana_streamer::streamer::{receiver, responder, PacketReceiver};
|
||||
use std::{
|
||||
io::{self, ErrorKind},
|
||||
net::{IpAddr, Ipv4Addr, SocketAddr, UdpSocket},
|
||||
path::{Path, PathBuf},
|
||||
result,
|
||||
sync::atomic::{AtomicBool, Ordering},
|
||||
sync::mpsc::{channel, Receiver, Sender},
|
||||
sync::{Arc, RwLock},
|
||||
thread::{sleep, spawn, JoinHandle},
|
||||
time::Duration,
|
||||
};
|
||||
|
||||
type Result<T> = std::result::Result<T, ArchiverError>;
|
||||
|
||||
static ENCRYPTED_FILENAME: &str = "ledger.enc";
|
||||
|
||||
#[derive(Serialize, Deserialize)]
|
||||
pub enum ArchiverRequest {
|
||||
GetSlotHeight(SocketAddr),
|
||||
}
|
||||
|
||||
pub struct Archiver {
|
||||
thread_handles: Vec<JoinHandle<()>>,
|
||||
exit: Arc<AtomicBool>,
|
||||
}
|
||||
|
||||
// Shared Archiver Meta struct used internally
|
||||
#[derive(Default)]
|
||||
struct ArchiverMeta {
|
||||
slot: Slot,
|
||||
slots_per_segment: u64,
|
||||
ledger_path: PathBuf,
|
||||
signature: Signature,
|
||||
ledger_data_file_encrypted: PathBuf,
|
||||
sampling_offsets: Vec<u64>,
|
||||
blockhash: Hash,
|
||||
sha_state: Hash,
|
||||
num_chacha_blocks: usize,
|
||||
client_commitment: CommitmentConfig,
|
||||
}
|
||||
|
||||
fn get_slot_from_signature(
|
||||
signature: &Signature,
|
||||
storage_turn: u64,
|
||||
slots_per_segment: u64,
|
||||
) -> u64 {
|
||||
let signature_vec = signature.as_ref();
|
||||
let mut segment_index = u64::from(signature_vec[0])
|
||||
| (u64::from(signature_vec[1]) << 8)
|
||||
| (u64::from(signature_vec[1]) << 16)
|
||||
| (u64::from(signature_vec[2]) << 24);
|
||||
let max_segment_index =
|
||||
get_complete_segment_from_slot(storage_turn, slots_per_segment).unwrap();
|
||||
segment_index %= max_segment_index as u64;
|
||||
segment_index * slots_per_segment
|
||||
}
|
||||
|
||||
fn create_request_processor(
|
||||
socket: UdpSocket,
|
||||
exit: &Arc<AtomicBool>,
|
||||
slot_receiver: Receiver<u64>,
|
||||
) -> Vec<JoinHandle<()>> {
|
||||
let mut thread_handles = vec![];
|
||||
let (s_reader, r_reader) = channel();
|
||||
let (s_responder, r_responder) = channel();
|
||||
let storage_socket = Arc::new(socket);
|
||||
let recycler = Recycler::default();
|
||||
let t_receiver = receiver(storage_socket.clone(), exit, s_reader, recycler, "archiver");
|
||||
thread_handles.push(t_receiver);
|
||||
|
||||
let t_responder = responder("archiver-responder", storage_socket, r_responder);
|
||||
thread_handles.push(t_responder);
|
||||
|
||||
let exit = exit.clone();
|
||||
let t_processor = spawn(move || {
|
||||
let slot = poll_for_slot(slot_receiver, &exit);
|
||||
|
||||
loop {
|
||||
if exit.load(Ordering::Relaxed) {
|
||||
break;
|
||||
}
|
||||
|
||||
let packets = r_reader.recv_timeout(Duration::from_secs(1));
|
||||
|
||||
if let Ok(packets) = packets {
|
||||
for packet in &packets.packets {
|
||||
let req: result::Result<ArchiverRequest, Box<bincode::ErrorKind>> =
|
||||
limited_deserialize(&packet.data[..packet.meta.size]);
|
||||
match req {
|
||||
Ok(ArchiverRequest::GetSlotHeight(from)) => {
|
||||
let packet = Packet::from_data(&from, slot);
|
||||
let _ = s_responder.send(Packets::new(vec![packet]));
|
||||
}
|
||||
Err(e) => {
|
||||
info!("invalid request: {:?}", e);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
});
|
||||
thread_handles.push(t_processor);
|
||||
thread_handles
|
||||
}
|
||||
|
||||
fn poll_for_slot(receiver: Receiver<u64>, exit: &Arc<AtomicBool>) -> u64 {
|
||||
loop {
|
||||
let slot = receiver.recv_timeout(Duration::from_secs(1));
|
||||
if let Ok(slot) = slot {
|
||||
return slot;
|
||||
}
|
||||
if exit.load(Ordering::Relaxed) {
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Archiver {
|
||||
/// Returns a Result that contains an archiver on success
|
||||
///
|
||||
/// # Arguments
|
||||
/// * `ledger_path` - path to where the ledger will be stored.
|
||||
/// Causes panic if none
|
||||
/// * `node` - The archiver node
|
||||
/// * `cluster_entrypoint` - ContactInfo representing an entry into the network
|
||||
/// * `keypair` - Keypair for this archiver
|
||||
#[allow(clippy::new_ret_no_self)]
|
||||
pub fn new(
|
||||
ledger_path: &Path,
|
||||
node: Node,
|
||||
cluster_entrypoint: ContactInfo,
|
||||
keypair: Arc<Keypair>,
|
||||
storage_keypair: Arc<Keypair>,
|
||||
client_commitment: CommitmentConfig,
|
||||
) -> Result<Self> {
|
||||
let exit = Arc::new(AtomicBool::new(false));
|
||||
|
||||
info!("Archiver: id: {}", keypair.pubkey());
|
||||
info!("Creating cluster info....");
|
||||
let mut cluster_info = ClusterInfo::new(node.info.clone(), keypair.clone());
|
||||
cluster_info.set_entrypoint(cluster_entrypoint.clone());
|
||||
let cluster_info = Arc::new(RwLock::new(cluster_info));
|
||||
let cluster_slots = Arc::new(ClusterSlots::default());
|
||||
// Note for now, this ledger will not contain any of the existing entries
|
||||
// in the ledger located at ledger_path, and will only append on newly received
|
||||
// entries after being passed to window_service
|
||||
let blockstore = Arc::new(
|
||||
Blockstore::open(ledger_path).expect("Expected to be able to open database ledger"),
|
||||
);
|
||||
|
||||
let gossip_service = GossipService::new(&cluster_info, None, node.sockets.gossip, &exit);
|
||||
|
||||
info!("Connecting to the cluster via {:?}", cluster_entrypoint);
|
||||
let (nodes, _) =
|
||||
match solana_core::gossip_service::discover_cluster(&cluster_entrypoint.gossip, 1) {
|
||||
Ok(nodes_and_archivers) => nodes_and_archivers,
|
||||
Err(e) => {
|
||||
//shutdown services before exiting
|
||||
exit.store(true, Ordering::Relaxed);
|
||||
gossip_service.join()?;
|
||||
return Err(e.into());
|
||||
}
|
||||
};
|
||||
let client = solana_core::gossip_service::get_client(&nodes);
|
||||
|
||||
info!("Setting up mining account...");
|
||||
if let Err(e) =
|
||||
Self::setup_mining_account(&client, &keypair, &storage_keypair, client_commitment)
|
||||
{
|
||||
//shutdown services before exiting
|
||||
exit.store(true, Ordering::Relaxed);
|
||||
gossip_service.join()?;
|
||||
return Err(e);
|
||||
};
|
||||
|
||||
let repair_socket = Arc::new(node.sockets.repair);
|
||||
let shred_sockets: Vec<Arc<UdpSocket>> =
|
||||
node.sockets.tvu.into_iter().map(Arc::new).collect();
|
||||
let shred_forward_sockets: Vec<Arc<UdpSocket>> = node
|
||||
.sockets
|
||||
.tvu_forwards
|
||||
.into_iter()
|
||||
.map(Arc::new)
|
||||
.collect();
|
||||
let (shred_fetch_sender, shred_fetch_receiver) = channel();
|
||||
let fetch_stage = ShredFetchStage::new(
|
||||
shred_sockets,
|
||||
shred_forward_sockets,
|
||||
repair_socket.clone(),
|
||||
&shred_fetch_sender,
|
||||
None,
|
||||
&exit,
|
||||
);
|
||||
let (slot_sender, slot_receiver) = channel();
|
||||
let request_processor =
|
||||
create_request_processor(node.sockets.storage.unwrap(), &exit, slot_receiver);
|
||||
|
||||
let t_archiver = {
|
||||
let exit = exit.clone();
|
||||
let node_info = node.info.clone();
|
||||
let mut meta = ArchiverMeta {
|
||||
ledger_path: ledger_path.to_path_buf(),
|
||||
client_commitment,
|
||||
..ArchiverMeta::default()
|
||||
};
|
||||
spawn(move || {
|
||||
// setup archiver
|
||||
let window_service = match Self::setup(
|
||||
&mut meta,
|
||||
cluster_info.clone(),
|
||||
&blockstore,
|
||||
&exit,
|
||||
&node_info,
|
||||
&storage_keypair,
|
||||
repair_socket,
|
||||
shred_fetch_receiver,
|
||||
slot_sender,
|
||||
cluster_slots,
|
||||
) {
|
||||
Ok(window_service) => window_service,
|
||||
Err(e) => {
|
||||
//shutdown services before exiting
|
||||
error!("setup failed {:?}; archiver thread exiting...", e);
|
||||
exit.store(true, Ordering::Relaxed);
|
||||
request_processor
|
||||
.into_iter()
|
||||
.for_each(|t| t.join().unwrap());
|
||||
fetch_stage.join().unwrap();
|
||||
gossip_service.join().unwrap();
|
||||
return;
|
||||
}
|
||||
};
|
||||
|
||||
info!("setup complete");
|
||||
// run archiver
|
||||
Self::run(
|
||||
&mut meta,
|
||||
&blockstore,
|
||||
cluster_info,
|
||||
&keypair,
|
||||
&storage_keypair,
|
||||
&exit,
|
||||
);
|
||||
// wait until exit
|
||||
request_processor
|
||||
.into_iter()
|
||||
.for_each(|t| t.join().unwrap());
|
||||
fetch_stage.join().unwrap();
|
||||
gossip_service.join().unwrap();
|
||||
window_service.join().unwrap()
|
||||
})
|
||||
};
|
||||
|
||||
Ok(Self {
|
||||
thread_handles: vec![t_archiver],
|
||||
exit,
|
||||
})
|
||||
}
|
||||
|
||||
fn run(
|
||||
meta: &mut ArchiverMeta,
|
||||
blockstore: &Arc<Blockstore>,
|
||||
cluster_info: Arc<RwLock<ClusterInfo>>,
|
||||
archiver_keypair: &Arc<Keypair>,
|
||||
storage_keypair: &Arc<Keypair>,
|
||||
exit: &Arc<AtomicBool>,
|
||||
) {
|
||||
// encrypt segment
|
||||
Self::encrypt_ledger(meta, blockstore).expect("ledger encrypt not successful");
|
||||
let enc_file_path = meta.ledger_data_file_encrypted.clone();
|
||||
// do replicate
|
||||
loop {
|
||||
if exit.load(Ordering::Relaxed) {
|
||||
break;
|
||||
}
|
||||
|
||||
// TODO check if more segments are available - based on space constraints
|
||||
Self::create_sampling_offsets(meta);
|
||||
let sampling_offsets = &meta.sampling_offsets;
|
||||
meta.sha_state =
|
||||
match Self::sample_file_to_create_mining_hash(&enc_file_path, sampling_offsets) {
|
||||
Ok(hash) => hash,
|
||||
Err(err) => {
|
||||
warn!("Error sampling file, exiting: {:?}", err);
|
||||
break;
|
||||
}
|
||||
};
|
||||
|
||||
Self::submit_mining_proof(meta, &cluster_info, archiver_keypair, storage_keypair);
|
||||
|
||||
// TODO make this a lot more frequent by picking a "new" blockhash instead of picking a storage blockhash
|
||||
// prep the next proof
|
||||
let (storage_blockhash, _) = match Self::poll_for_blockhash_and_slot(
|
||||
&cluster_info,
|
||||
meta.slots_per_segment,
|
||||
&meta.blockhash,
|
||||
exit,
|
||||
) {
|
||||
Ok(blockhash_and_slot) => blockhash_and_slot,
|
||||
Err(e) => {
|
||||
warn!(
|
||||
"Error couldn't get a newer blockhash than {:?}. {:?}",
|
||||
meta.blockhash, e
|
||||
);
|
||||
break;
|
||||
}
|
||||
};
|
||||
meta.blockhash = storage_blockhash;
|
||||
Self::redeem_rewards(
|
||||
&cluster_info,
|
||||
archiver_keypair,
|
||||
storage_keypair,
|
||||
meta.client_commitment,
|
||||
);
|
||||
}
|
||||
exit.store(true, Ordering::Relaxed);
|
||||
}
|
||||
|
||||
fn redeem_rewards(
|
||||
cluster_info: &Arc<RwLock<ClusterInfo>>,
|
||||
archiver_keypair: &Arc<Keypair>,
|
||||
storage_keypair: &Arc<Keypair>,
|
||||
client_commitment: CommitmentConfig,
|
||||
) {
|
||||
let nodes = cluster_info.read().unwrap().tvu_peers();
|
||||
let client = solana_core::gossip_service::get_client(&nodes);
|
||||
|
||||
if let Ok(Some(account)) =
|
||||
client.get_account_with_commitment(&storage_keypair.pubkey(), client_commitment)
|
||||
{
|
||||
if let Ok(StorageContract::ArchiverStorage { validations, .. }) = account.state() {
|
||||
if !validations.is_empty() {
|
||||
let ix = storage_instruction::claim_reward(
|
||||
&archiver_keypair.pubkey(),
|
||||
&storage_keypair.pubkey(),
|
||||
);
|
||||
let message = Message::new_with_payer(&[ix], Some(&archiver_keypair.pubkey()));
|
||||
if let Err(e) = client.send_message(&[archiver_keypair.as_ref()], message) {
|
||||
error!("unable to redeem reward, tx failed: {:?}", e);
|
||||
} else {
|
||||
info!(
|
||||
"collected mining rewards: Account balance {:?}",
|
||||
client.get_balance_with_commitment(
|
||||
&archiver_keypair.pubkey(),
|
||||
client_commitment
|
||||
)
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
info!("Redeem mining reward: No account data found");
|
||||
}
|
||||
}
|
||||
|
||||
// Find a segment to replicate and download it.
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
fn setup(
|
||||
meta: &mut ArchiverMeta,
|
||||
cluster_info: Arc<RwLock<ClusterInfo>>,
|
||||
blockstore: &Arc<Blockstore>,
|
||||
exit: &Arc<AtomicBool>,
|
||||
node_info: &ContactInfo,
|
||||
storage_keypair: &Arc<Keypair>,
|
||||
repair_socket: Arc<UdpSocket>,
|
||||
shred_fetch_receiver: PacketReceiver,
|
||||
slot_sender: Sender<u64>,
|
||||
cluster_slots: Arc<ClusterSlots>,
|
||||
) -> Result<WindowService> {
|
||||
let slots_per_segment =
|
||||
match Self::get_segment_config(&cluster_info, meta.client_commitment) {
|
||||
Ok(slots_per_segment) => slots_per_segment,
|
||||
Err(e) => {
|
||||
error!("unable to get segment size configuration, exiting...");
|
||||
//shutdown services before exiting
|
||||
exit.store(true, Ordering::Relaxed);
|
||||
return Err(e);
|
||||
}
|
||||
};
|
||||
let (segment_blockhash, segment_slot) = match Self::poll_for_segment(
|
||||
&cluster_info,
|
||||
slots_per_segment,
|
||||
&Hash::default(),
|
||||
exit,
|
||||
) {
|
||||
Ok(blockhash_and_slot) => blockhash_and_slot,
|
||||
Err(e) => {
|
||||
//shutdown services before exiting
|
||||
exit.store(true, Ordering::Relaxed);
|
||||
return Err(e);
|
||||
}
|
||||
};
|
||||
let signature = storage_keypair.sign_message(segment_blockhash.as_ref());
|
||||
let slot = get_slot_from_signature(&signature, segment_slot, slots_per_segment);
|
||||
info!("replicating slot: {}", slot);
|
||||
slot_sender.send(slot)?;
|
||||
meta.slot = slot;
|
||||
meta.slots_per_segment = slots_per_segment;
|
||||
meta.signature = signature;
|
||||
meta.blockhash = segment_blockhash;
|
||||
|
||||
let mut repair_slot_range = RepairSlotRange::default();
|
||||
repair_slot_range.end = slot + slots_per_segment;
|
||||
repair_slot_range.start = slot;
|
||||
|
||||
let (retransmit_sender, _) = channel();
|
||||
|
||||
let (verified_sender, verified_receiver) = unbounded();
|
||||
|
||||
let _sigverify_stage = SigVerifyStage::new(
|
||||
shred_fetch_receiver,
|
||||
verified_sender,
|
||||
DisabledSigVerifier::default(),
|
||||
);
|
||||
|
||||
let window_service = WindowService::new(
|
||||
blockstore.clone(),
|
||||
cluster_info.clone(),
|
||||
verified_receiver,
|
||||
retransmit_sender,
|
||||
repair_socket,
|
||||
&exit,
|
||||
RepairStrategy::RepairRange(repair_slot_range),
|
||||
&Arc::new(LeaderScheduleCache::default()),
|
||||
|_, _, _, _| true,
|
||||
cluster_slots,
|
||||
);
|
||||
info!("waiting for ledger download");
|
||||
Self::wait_for_segment_download(
|
||||
slot,
|
||||
slots_per_segment,
|
||||
&blockstore,
|
||||
&exit,
|
||||
&node_info,
|
||||
cluster_info,
|
||||
);
|
||||
Ok(window_service)
|
||||
}
|
||||
|
||||
fn wait_for_segment_download(
|
||||
start_slot: Slot,
|
||||
slots_per_segment: u64,
|
||||
blockstore: &Arc<Blockstore>,
|
||||
exit: &Arc<AtomicBool>,
|
||||
node_info: &ContactInfo,
|
||||
cluster_info: Arc<RwLock<ClusterInfo>>,
|
||||
) {
|
||||
info!(
|
||||
"window created, waiting for ledger download starting at slot {:?}",
|
||||
start_slot
|
||||
);
|
||||
let mut current_slot = start_slot;
|
||||
'outer: loop {
|
||||
while blockstore.is_full(current_slot) {
|
||||
current_slot += 1;
|
||||
info!("current slot: {}", current_slot);
|
||||
if current_slot >= start_slot + slots_per_segment {
|
||||
break 'outer;
|
||||
}
|
||||
}
|
||||
if exit.load(Ordering::Relaxed) {
|
||||
break;
|
||||
}
|
||||
sleep(Duration::from_secs(1));
|
||||
}
|
||||
|
||||
info!("Done receiving entries from window_service");
|
||||
|
||||
// Remove archiver from the data plane
|
||||
let mut contact_info = node_info.clone();
|
||||
contact_info.tvu = "0.0.0.0:0".parse().unwrap();
|
||||
contact_info.wallclock = timestamp();
|
||||
// copy over the adopted shred_version from the entrypoint
|
||||
contact_info.shred_version = cluster_info.read().unwrap().my_data().shred_version;
|
||||
{
|
||||
let mut cluster_info_w = cluster_info.write().unwrap();
|
||||
cluster_info_w.insert_self(contact_info);
|
||||
}
|
||||
}
|
||||
|
||||
fn encrypt_ledger(meta: &mut ArchiverMeta, blockstore: &Arc<Blockstore>) -> Result<()> {
|
||||
meta.ledger_data_file_encrypted = meta.ledger_path.join(ENCRYPTED_FILENAME);
|
||||
|
||||
{
|
||||
let mut ivec = [0u8; 64];
|
||||
ivec.copy_from_slice(&meta.signature.as_ref());
|
||||
|
||||
let num_encrypted_bytes = chacha_cbc_encrypt_ledger(
|
||||
blockstore,
|
||||
meta.slot,
|
||||
meta.slots_per_segment,
|
||||
&meta.ledger_data_file_encrypted,
|
||||
&mut ivec,
|
||||
)?;
|
||||
|
||||
meta.num_chacha_blocks = num_encrypted_bytes / CHACHA_BLOCK_SIZE;
|
||||
}
|
||||
|
||||
info!(
|
||||
"Done encrypting the ledger: {:?}",
|
||||
meta.ledger_data_file_encrypted
|
||||
);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn create_sampling_offsets(meta: &mut ArchiverMeta) {
|
||||
meta.sampling_offsets.clear();
|
||||
let mut rng_seed = [0u8; 32];
|
||||
rng_seed.copy_from_slice(&meta.blockhash.as_ref());
|
||||
let mut rng = ChaChaRng::from_seed(rng_seed);
|
||||
for _ in 0..NUM_STORAGE_SAMPLES {
|
||||
meta.sampling_offsets
|
||||
.push(rng.gen_range(0, meta.num_chacha_blocks) as u64);
|
||||
}
|
||||
}
|
||||
|
||||
fn sample_file_to_create_mining_hash(
|
||||
enc_file_path: &Path,
|
||||
sampling_offsets: &[u64],
|
||||
) -> Result<Hash> {
|
||||
let sha_state = sample_file(enc_file_path, sampling_offsets)?;
|
||||
info!("sampled sha_state: {}", sha_state);
|
||||
Ok(sha_state)
|
||||
}
|
||||
|
||||
fn setup_mining_account(
|
||||
client: &ThinClient,
|
||||
keypair: &Keypair,
|
||||
storage_keypair: &Keypair,
|
||||
client_commitment: CommitmentConfig,
|
||||
) -> Result<()> {
|
||||
// make sure archiver has some balance
|
||||
info!("checking archiver keypair...");
|
||||
if client.poll_balance_with_timeout_and_commitment(
|
||||
&keypair.pubkey(),
|
||||
&Duration::from_millis(100),
|
||||
&Duration::from_secs(5),
|
||||
client_commitment,
|
||||
)? == 0
|
||||
{
|
||||
return Err(ArchiverError::EmptyStorageAccountBalance);
|
||||
}
|
||||
|
||||
info!("checking storage account keypair...");
|
||||
// check if the storage account exists
|
||||
let balance =
|
||||
client.poll_get_balance_with_commitment(&storage_keypair.pubkey(), client_commitment);
|
||||
if balance.is_err() || balance.unwrap() == 0 {
|
||||
let blockhash = match client.get_recent_blockhash_with_commitment(client_commitment) {
|
||||
Ok((blockhash, _)) => blockhash,
|
||||
Err(e) => {
|
||||
return Err(ArchiverError::TransportError(e));
|
||||
}
|
||||
};
|
||||
|
||||
let ix = storage_instruction::create_storage_account(
|
||||
&keypair.pubkey(),
|
||||
&keypair.pubkey(),
|
||||
&storage_keypair.pubkey(),
|
||||
1,
|
||||
StorageAccountType::Archiver,
|
||||
);
|
||||
let tx = Transaction::new_signed_instructions(&[keypair], ix, blockhash);
|
||||
let signature = client.async_send_transaction(tx)?;
|
||||
client
|
||||
.poll_for_signature_with_commitment(&signature, client_commitment)
|
||||
.map_err(|err| match err {
|
||||
TransportError::IoError(e) => e,
|
||||
TransportError::TransactionError(_) => io::Error::new(
|
||||
ErrorKind::Other,
|
||||
"setup_mining_account: signature not found",
|
||||
),
|
||||
TransportError::Custom(e) => io::Error::new(ErrorKind::Other, e),
|
||||
})?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn submit_mining_proof(
|
||||
meta: &ArchiverMeta,
|
||||
cluster_info: &Arc<RwLock<ClusterInfo>>,
|
||||
archiver_keypair: &Arc<Keypair>,
|
||||
storage_keypair: &Arc<Keypair>,
|
||||
) {
|
||||
// No point if we've got no storage account...
|
||||
let nodes = cluster_info.read().unwrap().tvu_peers();
|
||||
let client = solana_core::gossip_service::get_client(&nodes);
|
||||
let storage_balance = client
|
||||
.poll_get_balance_with_commitment(&storage_keypair.pubkey(), meta.client_commitment);
|
||||
if storage_balance.is_err() || storage_balance.unwrap() == 0 {
|
||||
error!("Unable to submit mining proof, no storage account");
|
||||
return;
|
||||
}
|
||||
// ...or no lamports for fees
|
||||
let balance = client
|
||||
.poll_get_balance_with_commitment(&archiver_keypair.pubkey(), meta.client_commitment);
|
||||
if balance.is_err() || balance.unwrap() == 0 {
|
||||
error!("Unable to submit mining proof, insufficient Archiver Account balance");
|
||||
return;
|
||||
}
|
||||
|
||||
let blockhash = match client.get_recent_blockhash_with_commitment(meta.client_commitment) {
|
||||
Ok((blockhash, _)) => blockhash,
|
||||
Err(_) => {
|
||||
error!("unable to get recent blockhash, can't submit proof");
|
||||
return;
|
||||
}
|
||||
};
|
||||
let instruction = storage_instruction::mining_proof(
|
||||
&storage_keypair.pubkey(),
|
||||
meta.sha_state,
|
||||
get_segment_from_slot(meta.slot, meta.slots_per_segment),
|
||||
Signature::new(&meta.signature.as_ref()),
|
||||
meta.blockhash,
|
||||
);
|
||||
let message = Message::new_with_payer(&[instruction], Some(&archiver_keypair.pubkey()));
|
||||
let mut transaction = Transaction::new(
|
||||
&[archiver_keypair.as_ref(), storage_keypair.as_ref()],
|
||||
message,
|
||||
blockhash,
|
||||
);
|
||||
if let Err(err) = client.send_and_confirm_transaction(
|
||||
&[archiver_keypair.as_ref(), storage_keypair.as_ref()],
|
||||
&mut transaction,
|
||||
10,
|
||||
0,
|
||||
) {
|
||||
error!("Error: {:?}; while sending mining proof", err);
|
||||
}
|
||||
}
|
||||
|
||||
pub fn close(self) {
|
||||
self.exit.store(true, Ordering::Relaxed);
|
||||
self.join()
|
||||
}
|
||||
|
||||
pub fn join(self) {
|
||||
for handle in self.thread_handles {
|
||||
handle.join().unwrap();
|
||||
}
|
||||
}
|
||||
|
||||
fn get_segment_config(
|
||||
cluster_info: &Arc<RwLock<ClusterInfo>>,
|
||||
client_commitment: CommitmentConfig,
|
||||
) -> Result<u64> {
|
||||
let rpc_peers = {
|
||||
let cluster_info = cluster_info.read().unwrap();
|
||||
cluster_info.all_rpc_peers()
|
||||
};
|
||||
debug!("rpc peers: {:?}", rpc_peers);
|
||||
if !rpc_peers.is_empty() {
|
||||
let rpc_client = {
|
||||
let node_index = thread_rng().gen_range(0, rpc_peers.len());
|
||||
RpcClient::new_socket(rpc_peers[node_index].rpc)
|
||||
};
|
||||
Ok(rpc_client
|
||||
.send(
|
||||
&RpcRequest::GetSlotsPerSegment,
|
||||
serde_json::json!([client_commitment]),
|
||||
0,
|
||||
)
|
||||
.map_err(|err| {
|
||||
warn!("Error while making rpc request {:?}", err);
|
||||
ArchiverError::ClientError(err)
|
||||
})?
|
||||
.as_u64()
|
||||
.unwrap())
|
||||
} else {
|
||||
Err(ArchiverError::NoRpcPeers)
|
||||
}
|
||||
}
|
||||
|
||||
/// Waits until the first segment is ready, and returns the current segment
|
||||
fn poll_for_segment(
|
||||
cluster_info: &Arc<RwLock<ClusterInfo>>,
|
||||
slots_per_segment: u64,
|
||||
previous_blockhash: &Hash,
|
||||
exit: &Arc<AtomicBool>,
|
||||
) -> Result<(Hash, u64)> {
|
||||
loop {
|
||||
let (blockhash, turn_slot) = Self::poll_for_blockhash_and_slot(
|
||||
cluster_info,
|
||||
slots_per_segment,
|
||||
previous_blockhash,
|
||||
exit,
|
||||
)?;
|
||||
if get_complete_segment_from_slot(turn_slot, slots_per_segment).is_some() {
|
||||
return Ok((blockhash, turn_slot));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Poll for a different blockhash and associated max_slot than `previous_blockhash`
|
||||
fn poll_for_blockhash_and_slot(
|
||||
cluster_info: &Arc<RwLock<ClusterInfo>>,
|
||||
slots_per_segment: u64,
|
||||
previous_blockhash: &Hash,
|
||||
exit: &Arc<AtomicBool>,
|
||||
) -> Result<(Hash, u64)> {
|
||||
info!("waiting for the next turn...");
|
||||
loop {
|
||||
let rpc_peers = {
|
||||
let cluster_info = cluster_info.read().unwrap();
|
||||
cluster_info.all_rpc_peers()
|
||||
};
|
||||
debug!("rpc peers: {:?}", rpc_peers);
|
||||
if !rpc_peers.is_empty() {
|
||||
let rpc_client = {
|
||||
let node_index = thread_rng().gen_range(0, rpc_peers.len());
|
||||
RpcClient::new_socket(rpc_peers[node_index].rpc)
|
||||
};
|
||||
let response = rpc_client
|
||||
.send(
|
||||
&RpcRequest::GetStorageTurn,
|
||||
serde_json::value::Value::Null,
|
||||
0,
|
||||
)
|
||||
.map_err(|err| {
|
||||
warn!("Error while making rpc request {:?}", err);
|
||||
ArchiverError::ClientError(err)
|
||||
})?;
|
||||
let RpcStorageTurn {
|
||||
blockhash: storage_blockhash,
|
||||
slot: turn_slot,
|
||||
} = serde_json::from_value::<RpcStorageTurn>(response)
|
||||
.map_err(ArchiverError::JsonError)?;
|
||||
let turn_blockhash = storage_blockhash.parse().map_err(|err| {
|
||||
io::Error::new(
|
||||
io::ErrorKind::Other,
|
||||
format!(
|
||||
"Blockhash parse failure: {:?} on {:?}",
|
||||
err, storage_blockhash
|
||||
),
|
||||
)
|
||||
})?;
|
||||
if turn_blockhash != *previous_blockhash {
|
||||
info!("turn slot: {}", turn_slot);
|
||||
if get_segment_from_slot(turn_slot, slots_per_segment) != 0 {
|
||||
                        return Ok((turn_blockhash, turn_slot));
                    }
                }
            }
            if exit.load(Ordering::Relaxed) {
                return Err(ArchiverError::IO(io::Error::new(
                    ErrorKind::Other,
                    "exit signalled...",
                )));
            }
            sleep(Duration::from_secs(5));
        }
    }

    /// Ask an archiver to populate a given blockstore with its segment.
    /// Return the slot at the start of the archiver's segment.
    ///
    /// It is recommended to use a temporary blockstore for this, since the download does not
    /// verify the shreds received and might impact the chaining of shreds across slots.
    pub fn download_from_archiver(
        serve_repair: &ServeRepair,
        archiver_info: &ContactInfo,
        blockstore: &Arc<Blockstore>,
        slots_per_segment: u64,
    ) -> Result<u64> {
        let ip_addr = IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0));
        // Create a client that downloads from the archiver and check that it
        // can respond with shreds.
        let start_slot = Self::get_archiver_segment_slot(ip_addr, archiver_info.storage_addr);
        info!("Archiver download: start at {}", start_slot);

        let exit = Arc::new(AtomicBool::new(false));
        let (s_reader, r_reader) = channel();
        let repair_socket = Arc::new(bind_in_range(ip_addr, VALIDATOR_PORT_RANGE).unwrap().1);
        let t_receiver = receiver(
            repair_socket.clone(),
            &exit,
            s_reader,
            Recycler::default(),
            "archiver_receiver",
        );
        let id = serve_repair.keypair().pubkey();
        info!(
            "Sending repair requests from: {} to: {}",
            serve_repair.my_info().id,
            archiver_info.gossip
        );
        let repair_slot_range = RepairSlotRange {
            start: start_slot,
            end: start_slot + slots_per_segment,
        };
        // try for up to 180 seconds // TODO: needs tuning if segments are huge
        for _ in 0..120 {
            // Strategy used by archivers
            let repairs = RepairService::generate_repairs_in_range(
                blockstore,
                repair_service::MAX_REPAIR_LENGTH,
                &repair_slot_range,
            );
            // iterate over the repairs and send them
            if let Ok(repairs) = repairs {
                let reqs: Vec<_> = repairs
                    .into_iter()
                    .filter_map(|repair_request| {
                        serve_repair
                            .map_repair_request(&repair_request)
                            .map(|result| ((archiver_info.gossip, result), repair_request))
                            .ok()
                    })
                    .collect();

                for ((to, req), repair_request) in reqs {
                    if let Ok(local_addr) = repair_socket.local_addr() {
                        datapoint_info!(
                            "archiver_download",
                            ("repair_request", format!("{:?}", repair_request), String),
                            ("to", to.to_string(), String),
                            ("from", local_addr.to_string(), String),
                            ("id", id.to_string(), String)
                        );
                    }
                    repair_socket
                        .send_to(&req, archiver_info.gossip)
                        .unwrap_or_else(|e| {
                            error!("{} repair req send_to({}) error {:?}", id, to, e);
                            0
                        });
                }
            }
            let res = r_reader.recv_timeout(Duration::new(1, 0));
            if let Ok(mut packets) = res {
                while let Ok(mut more) = r_reader.try_recv() {
                    packets.packets.append_pinned(&mut more.packets);
                }
                let shreds: Vec<Shred> = packets
                    .packets
                    .into_iter()
                    .filter_map(|p| Shred::new_from_serialized_shred(p.data.to_vec()).ok())
                    .collect();
                blockstore.insert_shreds(shreds, None, false)?;
            }
            // check if all the slots in the segment are complete
            if Self::segment_complete(start_slot, slots_per_segment, blockstore) {
                break;
            }
            sleep(Duration::from_millis(500));
        }
        exit.store(true, Ordering::Relaxed);
        t_receiver.join().unwrap();

        // check if all the slots in the segment are complete
        if !Self::segment_complete(start_slot, slots_per_segment, blockstore) {
            return Err(ArchiverError::SegmentDownloadError);
        }
        Ok(start_slot)
    }

    fn segment_complete(
        start_slot: Slot,
        slots_per_segment: u64,
        blockstore: &Arc<Blockstore>,
    ) -> bool {
        for slot in start_slot..(start_slot + slots_per_segment) {
            if !blockstore.is_full(slot) {
                return false;
            }
        }
        true
    }
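    // Note: an equivalent iterator form of the scan above would be
    // `(start_slot..start_slot + slots_per_segment).all(|slot| blockstore.is_full(slot))`;
    // the explicit loop is kept here to mirror the original source.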

    fn get_archiver_segment_slot(bind_ip_addr: IpAddr, to: SocketAddr) -> u64 {
        let (_port, socket) = bind_in_range(bind_ip_addr, VALIDATOR_PORT_RANGE).unwrap();
        socket
            .set_read_timeout(Some(Duration::from_secs(5)))
            .unwrap();

        let req = ArchiverRequest::GetSlotHeight(socket.local_addr().unwrap());
        let serialized_req = bincode::serialize(&req).unwrap();
        for _ in 0..10 {
            socket.send_to(&serialized_req, to).unwrap();
            let mut buf = [0; 1024];
            if let Ok((size, _addr)) = socket.recv_from(&mut buf) {
                // Ignore a bad packet and try again
                if let Ok(slot) = bincode::config()
                    .limit(PACKET_DATA_SIZE as u64)
                    .deserialize(&buf[..size])
                {
                    return slot;
                }
            }
            sleep(Duration::from_millis(500));
        }
        panic!("Couldn't get segment slot from archiver!");
    }
}
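A hedged usage sketch of download_from_archiver, following the doc comment's advice to use a throwaway blockstore. The setup here is hypothetical: tempfile is an assumed helper crate, and serve_repair, archiver_info, and slots_per_segment are assumed to have been constructed elsewhere (inside a function returning this crate's Result).

    // Hypothetical caller: download an archiver's segment into a temporary blockstore.
    let tmp_dir = tempfile::TempDir::new()?;                       // assumed helper crate
    let blockstore = Arc::new(Blockstore::open(tmp_dir.path())?);  // throwaway blockstore
    let start_slot = Archiver::download_from_archiver(
        &serve_repair,      // previously constructed ServeRepair
        &archiver_info,     // archiver ContactInfo discovered via gossip
        &blockstore,
        slots_per_segment,
    )?;
    info!("segment starts at slot {}", start_slot);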
@@ -1,11 +0,0 @@
#[macro_use]
extern crate log;

#[macro_use]
extern crate serde_derive;

#[macro_use]
extern crate solana_metrics;

pub mod archiver;
mod result;
@@ -1,48 +0,0 @@
use serde_json;
use solana_client::client_error;
use solana_ledger::blockstore;
use solana_sdk::transport;
use std::any::Any;
use thiserror::Error;

#[derive(Error, Debug)]
pub enum ArchiverError {
    #[error("IO error")]
    IO(#[from] std::io::Error),

    #[error("blockstore error")]
    BlockstoreError(#[from] blockstore::BlockstoreError),

    #[error("crossbeam error")]
    CrossbeamSendError(#[from] crossbeam_channel::SendError<u64>),

    #[error("send error")]
    SendError(#[from] std::sync::mpsc::SendError<u64>),

    #[error("join error")]
    JoinError(Box<dyn Any + Send + 'static>),

    #[error("transport error")]
    TransportError(#[from] transport::TransportError),

    #[error("client error")]
    ClientError(#[from] client_error::ClientError),

    #[error("Json parsing error")]
    JsonError(#[from] serde_json::error::Error),

    #[error("Storage account has no balance")]
    EmptyStorageAccountBalance,

    #[error("No RPC peers..")]
    NoRpcPeers,

    #[error("Couldn't download full segment")]
    SegmentDownloadError,
}

impl std::convert::From<Box<dyn Any + Send + 'static>> for ArchiverError {
    fn from(e: Box<dyn Any + Send + 'static>) -> ArchiverError {
        ArchiverError::JoinError(e)
    }
}
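Because each wrapped variant derives a From conversion via thiserror's #[from], the ? operator converts library errors into ArchiverError automatically. A minimal illustrative sketch (the function and file name are hypothetical, not from this crate):

    fn read_slot_config(path: &str) -> Result<serde_json::Value, ArchiverError> {
        // std::io::Error -> ArchiverError::IO via #[from]
        let text = std::fs::read_to_string(path)?;
        // serde_json::error::Error -> ArchiverError::JsonError via #[from]
        let value: serde_json::Value = serde_json::from_str(&text)?;
        Ok(value)
    }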
@@ -1,25 +0,0 @@
[package]
name = "solana-archiver-utils"
version = "1.1.0"
description = "Solana Archiver Utils"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
edition = "2018"

[dependencies]
log = "0.4.8"
rand = "0.6.5"
solana-chacha = { path = "../chacha", version = "1.1.0" }
solana-chacha-sys = { path = "../chacha-sys", version = "1.1.0" }
solana-ledger = { path = "../ledger", version = "1.1.0" }
solana-logger = { path = "../logger", version = "1.1.0" }
solana-perf = { path = "../perf", version = "1.1.0" }
solana-sdk = { path = "../sdk", version = "1.1.0" }

[dev-dependencies]
hex = "0.4.2"

[lib]
name = "solana_archiver_utils"
@@ -1,120 +0,0 @@
#[macro_use]
extern crate log;

use solana_sdk::hash::{Hash, Hasher};
use std::fs::File;
use std::io::{self, BufReader, ErrorKind, Read, Seek, SeekFrom};
use std::mem::size_of;
use std::path::Path;

pub fn sample_file(in_path: &Path, sample_offsets: &[u64]) -> io::Result<Hash> {
    let in_file = File::open(in_path)?;
    let metadata = in_file.metadata()?;
    let mut buffer_file = BufReader::new(in_file);

    let mut hasher = Hasher::default();
    let sample_size = size_of::<Hash>();
    let sample_size64 = sample_size as u64;
    let mut buf = vec![0; sample_size];

    let file_len = metadata.len();
    if file_len < sample_size64 {
        return Err(io::Error::new(ErrorKind::Other, "file too short!"));
    }
    for offset in sample_offsets {
        if *offset > (file_len - sample_size64) / sample_size64 {
            return Err(io::Error::new(ErrorKind::Other, "offset too large"));
        }
        buffer_file.seek(SeekFrom::Start(*offset * sample_size64))?;
        trace!("sampling @ {} ", *offset);
        match buffer_file.read(&mut buf) {
            Ok(size) => {
                assert_eq!(size, buf.len());
                hasher.hash(&buf);
            }
            Err(e) => {
                warn!("Error sampling file");
                return Err(e);
            }
        }
    }

    Ok(hasher.result())
}
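sample_file hashes one Hash-sized (32-byte) chunk per offset, where each offset is a chunk index rather than a byte position. A hypothetical call hashing the first three chunks of a ledger file (path and surrounding error handling assumed):

    // Offsets are chunk indices, not byte positions: offset 1 reads bytes 32..64.
    let hash = sample_file(Path::new("/tmp/ledger.bin"), &[0, 1, 2])?;
    println!("sampled hash: {}", hash);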

#[cfg(test)]
mod tests {
    use super::*;
    use rand::{thread_rng, Rng};
    use std::fs::{create_dir_all, remove_file};
    use std::io::Write;
    use std::path::PathBuf;

    extern crate hex;

    fn tmp_file_path(name: &str) -> PathBuf {
        use std::env;
        let out_dir = env::var("FARF_DIR").unwrap_or_else(|_| "farf".to_string());
        let mut rand_bits = [0u8; 32];
        thread_rng().fill(&mut rand_bits[..]);

        let mut path = PathBuf::new();
        path.push(out_dir);
        path.push("tmp");
        create_dir_all(&path).unwrap();

        path.push(format!("{}-{:?}", name, hex::encode(rand_bits)));
        println!("path: {:?}", path);
        path
    }

    #[test]
    fn test_sample_file() {
        solana_logger::setup();
        let in_path = tmp_file_path("test_sample_file_input.txt");
        let num_strings = 4096;
        let string = "12foobar";
        {
            let mut in_file = File::create(&in_path).unwrap();
            for _ in 0..num_strings {
                in_file.write(string.as_bytes()).unwrap();
            }
        }
        let num_samples = (string.len() * num_strings / size_of::<Hash>()) as u64;
        let samples: Vec<_> = (0..num_samples).collect();
        let res = sample_file(&in_path, samples.as_slice());
        let ref_hash: Hash = Hash::new(&[
            173, 251, 182, 165, 10, 54, 33, 150, 133, 226, 106, 150, 99, 192, 179, 1, 230, 144,
            151, 126, 18, 191, 54, 67, 249, 140, 230, 160, 56, 30, 170, 52,
        ]);
        let res = res.unwrap();
        assert_eq!(res, ref_hash);

        // Sample just past the end
        assert!(sample_file(&in_path, &[num_samples]).is_err());
        remove_file(&in_path).unwrap();
    }

    #[test]
    fn test_sample_file_invalid_offset() {
        let in_path = tmp_file_path("test_sample_file_invalid_offset_input.txt");
        {
            let mut in_file = File::create(&in_path).unwrap();
            for _ in 0..4096 {
                in_file.write("123456foobar".as_bytes()).unwrap();
            }
        }
        let samples = [0, 200000];
        let res = sample_file(&in_path, &samples);
        assert!(res.is_err());
        remove_file(in_path).unwrap();
    }

    #[test]
    fn test_sample_file_missing_file() {
        let in_path = tmp_file_path("test_sample_file_that_doesnt_exist.txt");
        let samples = [0, 5];
        let res = sample_file(&in_path, &samples);
        assert!(res.is_err());
    }
}
@@ -1,20 +0,0 @@
[package]
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-archiver"
version = "1.1.0"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"

[dependencies]
clap = "2.33.0"
console = "0.10.0"
solana-clap-utils = { path = "../clap-utils", version = "1.1.0" }
solana-core = { path = "../core", version = "1.1.0" }
solana-logger = { path = "../logger", version = "1.1.0" }
solana-metrics = { path = "../metrics", version = "1.1.0" }
solana-archiver-lib = { path = "../archiver-lib", version = "1.1.0" }
solana-net-utils = { path = "../net-utils", version = "1.1.0" }
solana-sdk = { path = "../sdk", version = "1.1.0" }
@@ -1,131 +0,0 @@
use clap::{crate_description, crate_name, App, Arg};
use console::style;
use solana_archiver_lib::archiver::Archiver;
use solana_clap_utils::{
    input_parsers::keypair_of, input_validators::is_keypair_or_ask_keyword,
    keypair::SKIP_SEED_PHRASE_VALIDATION_ARG,
};
use solana_core::{
    cluster_info::{Node, VALIDATOR_PORT_RANGE},
    contact_info::ContactInfo,
};
use solana_sdk::{
    commitment_config::CommitmentConfig,
    signature::{Keypair, Signer},
};
use std::{
    net::{IpAddr, Ipv4Addr, SocketAddr},
    path::PathBuf,
    sync::Arc,
};

fn main() {
    solana_logger::setup();

    let matches = App::new(crate_name!())
        .about(crate_description!())
        .version(solana_clap_utils::version!())
        .arg(
            Arg::with_name("identity_keypair")
                .short("i")
                .long("identity")
                .value_name("PATH")
                .takes_value(true)
                .validator(is_keypair_or_ask_keyword)
                .help("File containing an identity (keypair)"),
        )
        .arg(
            Arg::with_name("entrypoint")
                .short("n")
                .long("entrypoint")
                .value_name("HOST:PORT")
                .takes_value(true)
                .required(true)
                .validator(solana_net_utils::is_host_port)
                .help("Rendezvous with the cluster at this entry point"),
        )
        .arg(
            Arg::with_name("ledger")
                .short("l")
                .long("ledger")
                .value_name("DIR")
                .takes_value(true)
                .required(true)
                .help("use DIR as persistent ledger location"),
        )
        .arg(
            Arg::with_name("storage_keypair")
                .short("s")
                .long("storage-keypair")
                .value_name("PATH")
                .takes_value(true)
                .validator(is_keypair_or_ask_keyword)
                .help("File containing the storage account keypair"),
        )
        .arg(
            Arg::with_name(SKIP_SEED_PHRASE_VALIDATION_ARG.name)
                .long(SKIP_SEED_PHRASE_VALIDATION_ARG.long)
                .help(SKIP_SEED_PHRASE_VALIDATION_ARG.help),
        )
        .get_matches();

    let ledger_path = PathBuf::from(matches.value_of("ledger").unwrap());

    let identity_keypair = keypair_of(&matches, "identity_keypair").unwrap_or_else(Keypair::new);

    let storage_keypair = keypair_of(&matches, "storage_keypair").unwrap_or_else(|| {
        clap::Error::with_description(
            "The `storage-keypair` argument was not found",
            clap::ErrorKind::ArgumentNotFound,
        )
        .exit();
    });

    let entrypoint_addr = matches
        .value_of("entrypoint")
        .map(|entrypoint| {
            solana_net_utils::parse_host_port(entrypoint)
                .expect("failed to parse entrypoint address")
        })
        .unwrap();

    let gossip_addr = {
        let ip = solana_net_utils::get_public_ip_addr(&entrypoint_addr).unwrap();
        let mut addr = SocketAddr::new(ip, 0);
        addr.set_ip(solana_net_utils::get_public_ip_addr(&entrypoint_addr).unwrap());
        addr
    };
    let node = Node::new_archiver_with_external_ip(
        &identity_keypair.pubkey(),
        &gossip_addr,
        VALIDATOR_PORT_RANGE,
        IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)),
    );

    println!(
        "{} version {} (branch={}, commit={})",
        style(crate_name!()).bold(),
        solana_clap_utils::version!(),
        option_env!("CI_BRANCH").unwrap_or("unknown"),
        option_env!("CI_COMMIT").unwrap_or("unknown")
    );
    solana_metrics::set_host_id(identity_keypair.pubkey().to_string());
    println!(
        "replicating the data with identity_keypair={:?} gossip_addr={:?}",
        identity_keypair.pubkey(),
        gossip_addr
    );

    let entrypoint_info = ContactInfo::new_gossip_entry_point(&entrypoint_addr);
    let archiver = Archiver::new(
        &ledger_path,
        node,
        entrypoint_info,
        Arc::new(identity_keypair),
        Arc::new(storage_keypair),
        CommitmentConfig::recent(),
    )
    .unwrap();

    archiver.join();
}
@@ -2,21 +2,27 @@
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-banking-bench"
version = "1.1.0"
version = "1.2.8"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"

[dependencies]
log = "0.4.6"
rayon = "1.3.0"
solana-core = { path = "../core", version = "1.1.0" }
solana-streamer = { path = "../streamer", version = "1.1.0" }
solana-perf = { path = "../perf", version = "1.1.0" }
solana-ledger = { path = "../ledger", version = "1.1.0" }
solana-logger = { path = "../logger", version = "1.1.0" }
solana-runtime = { path = "../runtime", version = "1.1.0" }
solana-measure = { path = "../measure", version = "1.1.0" }
solana-sdk = { path = "../sdk", version = "1.1.0" }
rand = "0.6.5"
clap = "2.33.1"
crossbeam-channel = "0.4"
log = "0.4.6"
rand = "0.7.0"
rayon = "1.3.0"
solana-core = { path = "../core", version = "1.2.8" }
solana-clap-utils = { path = "../clap-utils", version = "1.2.8" }
solana-streamer = { path = "../streamer", version = "1.2.8" }
solana-perf = { path = "../perf", version = "1.2.8" }
solana-ledger = { path = "../ledger", version = "1.2.8" }
solana-logger = { path = "../logger", version = "1.2.8" }
solana-runtime = { path = "../runtime", version = "1.2.8" }
solana-measure = { path = "../measure", version = "1.2.8" }
solana-sdk = { path = "../sdk", version = "1.2.8" }
solana-version = { path = "../version", version = "1.2.8" }

[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]
@@ -1,3 +1,4 @@
use clap::{crate_description, crate_name, value_t, App, Arg};
use crossbeam_channel::unbounded;
use log::*;
use rand::{thread_rng, Rng};
@@ -28,7 +29,7 @@ use solana_sdk::{
    transaction::Transaction,
};
use std::{
    sync::{atomic::Ordering, mpsc::Receiver, Arc, Mutex, RwLock},
    sync::{atomic::Ordering, mpsc::Receiver, Arc, Mutex},
    thread::sleep,
    time::{Duration, Instant},
};
@@ -64,15 +65,22 @@ fn check_txs(
    no_bank
}

fn make_accounts_txs(txes: usize, mint_keypair: &Keypair, hash: Hash) -> Vec<Transaction> {
fn make_accounts_txs(
    total_num_transactions: usize,
    hash: Hash,
    same_payer: bool,
) -> Vec<Transaction> {
    let to_pubkey = Pubkey::new_rand();
    let dummy = system_transaction::transfer(mint_keypair, &to_pubkey, 1, hash);
    (0..txes)
    let payer_key = Keypair::new();
    let dummy = system_transaction::transfer(&payer_key, &to_pubkey, 1, hash);
    (0..total_num_transactions)
        .into_par_iter()
        .map(|_| {
            let mut new = dummy.clone();
            let sig: Vec<u8> = (0..64).map(|_| thread_rng().gen()).collect();
            new.message.account_keys[0] = Pubkey::new_rand();
            if !same_payer {
                new.message.account_keys[0] = Pubkey::new_rand();
            }
            new.message.account_keys[1] = Pubkey::new_rand();
            new.signatures = vec![Signature::new(&sig[0..64])];
            new
@@ -96,13 +104,61 @@ fn bytes_as_usize(bytes: &[u8]) -> usize {
    bytes[0] as usize | (bytes[1] as usize) << 8
}

#[allow(clippy::cognitive_complexity)]
fn main() {
    solana_logger::setup();
    let num_threads = BankingStage::num_threads() as usize;

    let matches = App::new(crate_name!())
        .about(crate_description!())
        .version(solana_version::version!())
        .arg(
            Arg::with_name("num_chunks")
                .long("num-chunks")
                .takes_value(true)
                .value_name("SIZE")
                .help("Number of transaction chunks."),
        )
        .arg(
            Arg::with_name("packets_per_chunk")
                .long("packets-per-chunk")
                .takes_value(true)
                .value_name("SIZE")
                .help("Packets per chunk"),
        )
        .arg(
            Arg::with_name("skip_sanity")
                .long("skip-sanity")
                .takes_value(false)
                .help("Skip transaction sanity execution"),
        )
        .arg(
            Arg::with_name("same_payer")
                .long("same-payer")
                .takes_value(false)
                .help("Use the same payer for transfers"),
        )
        .arg(
            Arg::with_name("iterations")
                .long("iterations")
                .takes_value(true)
                .help("Number of iterations"),
        )
        .arg(
            Arg::with_name("num_threads")
                .long("num-threads")
                .takes_value(true)
                .help("Number of threads"),
        )
        .get_matches();

    let num_threads =
        value_t!(matches, "num_threads", usize).unwrap_or(BankingStage::num_threads() as usize);
    // a multiple of packet chunk duplicates to avoid races
    const CHUNKS: usize = 8 * 2;
    const PACKETS_PER_BATCH: usize = 192;
    let txes = PACKETS_PER_BATCH * num_threads * CHUNKS;
    let num_chunks = value_t!(matches, "num_chunks", usize).unwrap_or(16);
    let packets_per_chunk = value_t!(matches, "packets_per_chunk", usize).unwrap_or(192);
    let iterations = value_t!(matches, "iterations", usize).unwrap_or(1000);

    let total_num_transactions = num_chunks * num_threads * packets_per_chunk;
    let mint_total = 1_000_000_000_000;
    let GenesisConfigInfo {
        genesis_config,
@@ -113,37 +169,47 @@ fn main() {
    let (verified_sender, verified_receiver) = unbounded();
    let (vote_sender, vote_receiver) = unbounded();
    let bank0 = Bank::new(&genesis_config);
    let mut bank_forks = BankForks::new(0, bank0);
    let mut bank_forks = BankForks::new(bank0);
    let mut bank = bank_forks.working_bank();

    info!("threads: {} txs: {}", num_threads, txes);
    info!("threads: {} txs: {}", num_threads, total_num_transactions);

    let mut transactions = make_accounts_txs(txes, &mint_keypair, genesis_config.hash());
    let same_payer = matches.is_present("same_payer");
    let mut transactions =
        make_accounts_txs(total_num_transactions, genesis_config.hash(), same_payer);

    // fund all the accounts
    transactions.iter().for_each(|tx| {
        let fund = system_transaction::transfer(
        let mut fund = system_transaction::transfer(
            &mint_keypair,
            &tx.message.account_keys[0],
            mint_total / txes as u64,
            mint_total / total_num_transactions as u64,
            genesis_config.hash(),
        );
        // Ignore any pesky duplicate signature errors in case we are using a single payer
        let sig: Vec<u8> = (0..64).map(|_| thread_rng().gen()).collect();
        fund.signatures = vec![Signature::new(&sig[0..64])];
        let x = bank.process_transaction(&fund);
        x.unwrap();
    });
    // sanity check, make sure all the transactions can execute sequentially
    transactions.iter().for_each(|tx| {
        let res = bank.process_transaction(&tx);
        assert!(res.is_ok(), "sanity test transactions");
    });
    bank.clear_signatures();
    // sanity check, make sure all the transactions can execute in parallel
    let res = bank.process_transactions(&transactions);
    for r in res {
        assert!(r.is_ok(), "sanity parallel execution");

    let skip_sanity = matches.is_present("skip_sanity");
    if !skip_sanity {
        // sanity check, make sure all the transactions can execute sequentially
        transactions.iter().for_each(|tx| {
            let res = bank.process_transaction(&tx);
            assert!(res.is_ok(), "sanity test transactions error: {:?}", res);
        });
        bank.clear_signatures();
        // sanity check, make sure all the transactions can execute in parallel
        let res = bank.process_transactions(&transactions);
        for r in res {
            assert!(r.is_ok(), "sanity parallel execution error: {:?}", r);
        }
        bank.clear_signatures();
    }
    bank.clear_signatures();
    let mut verified: Vec<_> = to_packets_chunked(&transactions.clone(), PACKETS_PER_BATCH);

    let mut verified: Vec<_> = to_packets_chunked(&transactions.clone(), packets_per_chunk);
    let ledger_path = get_tmp_ledger_path!();
    {
        let blockstore = Arc::new(
@@ -152,7 +218,7 @@ fn main() {
        let (exit, poh_recorder, poh_service, signal_receiver) =
            create_test_recorder(&bank, &blockstore, None);
        let cluster_info = ClusterInfo::new_with_invalid_keypair(Node::new_localhost().info);
        let cluster_info = Arc::new(RwLock::new(cluster_info));
        let cluster_info = Arc::new(cluster_info);
        let banking_stage = BankingStage::new(
            &cluster_info,
            &poh_recorder,
@@ -162,7 +228,7 @@ fn main() {
        );
        poh_recorder.lock().unwrap().set_bank(&bank);

        let chunk_len = verified.len() / CHUNKS;
        let chunk_len = verified.len() / num_chunks;
        let mut start = 0;

        // This is so that the signal_receiver does not go out of scope after the closure.
@@ -171,17 +237,17 @@ fn main() {
        let signal_receiver = Arc::new(signal_receiver);
        let mut total_us = 0;
        let mut tx_total_us = 0;
        let base_tx_count = bank.transaction_count();
        let mut txs_processed = 0;
        let mut root = 1;
        let collector = Pubkey::new_rand();
        const ITERS: usize = 1_000;
        let config = Config {
            packets_per_batch: PACKETS_PER_BATCH,
            packets_per_batch: packets_per_chunk,
            chunk_len,
            num_threads,
        };
        let mut total_sent = 0;
        for _ in 0..ITERS {
        for _ in 0..iterations {
            let now = Instant::now();
            let mut sent = 0;

@@ -222,7 +288,11 @@ fn main() {
                    sleep(Duration::from_millis(5));
                }
            }
            if check_txs(&signal_receiver, txes / CHUNKS, &poh_recorder) {
            if check_txs(
                &signal_receiver,
                total_num_transactions / num_chunks,
                &poh_recorder,
            ) {
                debug!(
                    "resetting bank {} tx count: {} txs_proc: {}",
                    bank.slot(),
@@ -253,7 +323,7 @@ fn main() {
                poh_recorder.lock().unwrap().set_bank(&bank);
                assert!(poh_recorder.lock().unwrap().bank().is_some());
                if bank.slot() > 32 {
                    bank_forks.set_root(root, &None);
                    bank_forks.set_root(root, &None, None);
                    root += 1;
                }
                debug!(
@@ -274,7 +344,7 @@ fn main() {
            debug!(
                "time: {} us checked: {} sent: {}",
                duration_as_us(&now.elapsed()),
                txes / CHUNKS,
                total_num_transactions / num_chunks,
                sent,
            );
            total_sent += sent;
@@ -285,20 +355,26 @@ fn main() {
                    let sig: Vec<u8> = (0..64).map(|_| thread_rng().gen()).collect();
                    tx.signatures[0] = Signature::new(&sig[0..64]);
                }
                verified = to_packets_chunked(&transactions.clone(), PACKETS_PER_BATCH);
                verified = to_packets_chunked(&transactions.clone(), packets_per_chunk);
            }

            start += chunk_len;
            start %= verified.len();
        }
        let txs_processed = bank_forks.working_bank().transaction_count();
        debug!("processed: {} base: {}", txs_processed, base_tx_count);
        eprintln!(
            "{{'name': 'banking_bench_total', 'median': '{}'}}",
            "{{'name': 'banking_bench_total', 'median': '{:.2}'}}",
            (1000.0 * 1000.0 * total_sent as f64) / (total_us as f64),
        );
        eprintln!(
            "{{'name': 'banking_bench_tx_total', 'median': '{}'}}",
            "{{'name': 'banking_bench_tx_total', 'median': '{:.2}'}}",
            (1000.0 * 1000.0 * total_sent as f64) / (tx_total_us as f64),
        );
        eprintln!(
            "{{'name': 'banking_bench_success_tx_total', 'median': '{:.2}'}}",
            (1000.0 * 1000.0 * (txs_processed - base_tx_count) as f64) / (total_us as f64),
        );

        drop(verified_sender);
        drop(vote_sender);
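As a worked example of the sizing math above: with the defaults (num_chunks = 16, packets_per_chunk = 192) and, say, 4 banking threads, the bench generates 16 x 4 x 192 = 12,288 transactions. The final eprintln! lines then report throughput in transactions per second, since total_us is measured in microseconds: 1,000,000 x total_sent / total_us.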
@@ -2,33 +2,37 @@
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-bench-exchange"
version = "1.1.0"
version = "1.2.8"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
publish = false

[dependencies]
clap = "2.32.0"
clap = "2.33.1"
itertools = "0.9.0"
log = "0.4.8"
num-derive = "0.3"
num-traits = "0.2"
rand = "0.6.5"
rand = "0.7.0"
rayon = "1.3.0"
serde_json = "1.0.48"
serde_yaml = "0.8.11"
solana-clap-utils = { path = "../clap-utils", version = "1.1.0" }
solana-core = { path = "../core", version = "1.1.0" }
solana-genesis = { path = "../genesis", version = "1.1.0" }
solana-client = { path = "../client", version = "1.1.0" }
solana-faucet = { path = "../faucet", version = "1.1.0" }
solana-exchange-program = { path = "../programs/exchange", version = "1.1.0" }
solana-logger = { path = "../logger", version = "1.1.0" }
solana-metrics = { path = "../metrics", version = "1.1.0" }
solana-net-utils = { path = "../net-utils", version = "1.1.0" }
solana-runtime = { path = "../runtime", version = "1.1.0" }
solana-sdk = { path = "../sdk", version = "1.1.0" }
serde_json = "1.0.53"
serde_yaml = "0.8.12"
solana-clap-utils = { path = "../clap-utils", version = "1.2.8" }
solana-core = { path = "../core", version = "1.2.8" }
solana-genesis = { path = "../genesis", version = "1.2.8" }
solana-client = { path = "../client", version = "1.2.8" }
solana-faucet = { path = "../faucet", version = "1.2.8" }
solana-exchange-program = { path = "../programs/exchange", version = "1.2.8" }
solana-logger = { path = "../logger", version = "1.2.8" }
solana-metrics = { path = "../metrics", version = "1.2.8" }
solana-net-utils = { path = "../net-utils", version = "1.2.8" }
solana-runtime = { path = "../runtime", version = "1.2.8" }
solana-sdk = { path = "../sdk", version = "1.2.8" }
solana-version = { path = "../version", version = "1.2.8" }

[dev-dependencies]
solana-local-cluster = { path = "../local-cluster", version = "1.1.0" }
solana-local-cluster = { path = "../local-cluster", version = "1.2.8" }

[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]
@@ -14,6 +14,7 @@ use solana_metrics::datapoint_info;
use solana_sdk::{
    client::{Client, SyncClient},
    commitment_config::CommitmentConfig,
    message::Message,
    pubkey::Pubkey,
    signature::{Keypair, Signer},
    timing::{duration_as_ms, duration_as_s},
@@ -449,7 +450,7 @@ fn swapper<T>(
        }
        account_group = (account_group + 1) % account_groups as usize;

        let (blockhash, _fee_calculator) = client
        let (blockhash, _fee_calculator, _last_valid_slot) = client
            .get_recent_blockhash_with_commitment(CommitmentConfig::recent())
            .expect("Failed to get blockhash");
        let to_swap_txs: Vec<_> = to_swap
@@ -457,16 +458,14 @@ fn swapper<T>(
            .map(|(signer, swap, profit)| {
                let s: &Keypair = &signer;
                let owner = &signer.pubkey();
                Transaction::new_signed_instructions(
                    &[s],
                    vec![exchange_instruction::swap_request(
                        owner,
                        &swap.0.pubkey,
                        &swap.1.pubkey,
                        &profit,
                    )],
                    blockhash,
                )
                let instruction = exchange_instruction::swap_request(
                    owner,
                    &swap.0.pubkey,
                    &swap.1.pubkey,
                    &profit,
                );
                let message = Message::new(&[instruction], Some(&s.pubkey()));
                Transaction::new(&[s], message, blockhash)
            })
            .collect();
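The recurring refactor in this diff replaces Transaction::new_signed_instructions with an explicit Message::new, which names the fee payer, followed by Transaction::new. A minimal self-contained sketch of the new construction path, using a hypothetical system transfer rather than code from this repository:

    use solana_sdk::{
        hash::Hash,
        message::Message,
        pubkey::Pubkey,
        signature::{Keypair, Signer},
        system_instruction,
        transaction::Transaction,
    };

    fn build_transfer(payer: &Keypair, to: &Pubkey, lamports: u64, blockhash: Hash) -> Transaction {
        let ix = system_instruction::transfer(&payer.pubkey(), to, lamports);
        // The fee payer is now stated explicitly when the message is built.
        let message = Message::new(&[ix], Some(&payer.pubkey()));
        Transaction::new(&[payer], message, blockhash)
    }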
@@ -577,7 +576,7 @@ fn trader<T>(
        }
        account_group = (account_group + 1) % account_groups as usize;

        let (blockhash, _fee_calculator) = client
        let (blockhash, _fee_calculator, _last_valid_slot) = client
            .get_recent_blockhash_with_commitment(CommitmentConfig::recent())
            .expect("Failed to get blockhash");

@@ -588,28 +587,26 @@
                let owner_pubkey = &owner.pubkey();
                let trade_pubkey = &trade.pubkey();
                let space = mem::size_of::<ExchangeState>() as u64;
                Transaction::new_signed_instructions(
                    &[owner.as_ref(), trade],
                    vec![
                        system_instruction::create_account(
                            owner_pubkey,
                            trade_pubkey,
                            1,
                            space,
                            &id(),
                        ),
                        exchange_instruction::trade_request(
                            owner_pubkey,
                            trade_pubkey,
                            *side,
                            pair,
                            tokens,
                            price,
                            src,
                        ),
                    ],
                    blockhash,
                )
                let instructions = [
                    system_instruction::create_account(
                        owner_pubkey,
                        trade_pubkey,
                        1,
                        space,
                        &id(),
                    ),
                    exchange_instruction::trade_request(
                        owner_pubkey,
                        trade_pubkey,
                        *side,
                        pair,
                        tokens,
                        price,
                        src,
                    ),
                ];
                let message = Message::new(&instructions, Some(&owner_pubkey));
                Transaction::new(&[owner.as_ref(), trade], message, blockhash)
            })
            .collect();
@@ -747,22 +744,19 @@ pub fn fund_keys<T: Client>(client: &T, source: &Keypair, dests: &[Arc<Keypair>]
        let mut to_fund_txs: Vec<_> = chunk
            .par_iter()
            .map(|(k, m)| {
                (
                    k.clone(),
                    Transaction::new_unsigned_instructions(system_instruction::transfer_many(
                        &k.pubkey(),
                        &m,
                    )),
                )
                let instructions = system_instruction::transfer_many(&k.pubkey(), &m);
                let message = Message::new(&instructions, Some(&k.pubkey()));
                (k.clone(), Transaction::new_unsigned(message))
            })
            .collect();

        let mut retries = 0;
        let amount = chunk[0].1[0].1;
        while !to_fund_txs.is_empty() {
            let receivers = to_fund_txs
            let receivers: usize = to_fund_txs
                .iter()
                .fold(0, |len, (_, tx)| len + tx.message().instructions.len());
                .map(|(_, tx)| tx.message().instructions.len())
                .sum();

            debug!(
                " {} to {} in {} txs",
@@ -775,7 +769,7 @@ pub fn fund_keys<T: Client>(client: &T, source: &Keypair, dests: &[Arc<Keypair>]
                to_fund_txs.len(),
            );

            let (blockhash, _fee_calculator) = client
            let (blockhash, _fee_calculator, _last_valid_slot) = client
                .get_recent_blockhash_with_commitment(CommitmentConfig::recent())
                .expect("blockhash");
            to_fund_txs.par_iter_mut().for_each(|(k, tx)| {
@@ -847,16 +841,18 @@ pub fn create_token_accounts<T: Client>(
            );
            let request_ix =
                exchange_instruction::account_request(owner_pubkey, &new_keypair.pubkey());
            let message = Message::new(&[create_ix, request_ix], Some(&owner_pubkey));
            (
                (from_keypair, new_keypair),
                Transaction::new_unsigned_instructions(vec![create_ix, request_ix]),
                Transaction::new_unsigned(message),
            )
        })
        .collect();

    let accounts = to_create_txs
    let accounts: usize = to_create_txs
        .iter()
        .fold(0, |len, (_, tx)| len + tx.message().instructions.len() / 2);
        .map(|(_, tx)| tx.message().instructions.len() / 2)
        .sum();

    debug!(
        " Creating {} accounts in {} txs",
@@ -866,7 +862,7 @@ pub fn create_token_accounts<T: Client>(

    let mut retries = 0;
    while !to_create_txs.is_empty() {
        let (blockhash, _fee_calculator) = client
        let (blockhash, _fee_calculator, _last_valid_slot) = client
            .get_recent_blockhash_with_commitment(CommitmentConfig::recent())
            .expect("Failed to get blockhash");
        to_create_txs
@@ -995,7 +991,7 @@ pub fn airdrop_lamports<T: Client>(

    let mut tries = 0;
    loop {
        let (blockhash, _fee_calculator) = client
        let (blockhash, _fee_calculator, _last_valid_slot) = client
            .get_recent_blockhash_with_commitment(CommitmentConfig::recent())
            .expect("Failed to get blockhash");
        match request_airdrop_transaction(&faucet_addr, &id.pubkey(), amount_to_drop, blockhash) {
@@ -11,7 +11,7 @@ fn main() {
    solana_logger::setup();
    solana_metrics::set_panic_hook("bench-exchange");

    let matches = cli::build_args(solana_clap_utils::version!()).get_matches();
    let matches = cli::build_args(solana_version::version!()).get_matches();
    let cli_config = cli::extract_args(&matches);

    let cli::Config {
@@ -54,10 +54,9 @@ fn main() {
        );
    } else {
        info!("Connecting to the cluster");
        let (nodes, _archivers) =
            discover_cluster(&entrypoint_addr, num_nodes).unwrap_or_else(|_| {
                panic!("Failed to discover nodes");
            });
        let nodes = discover_cluster(&entrypoint_addr, num_nodes).unwrap_or_else(|_| {
            panic!("Failed to discover nodes");
        });

        let (client, num_clients) = get_multi_client(&nodes);

@@ -59,7 +59,7 @@ fn test_exchange_local_cluster() {
    let faucet_addr = addr_receiver.recv_timeout(Duration::from_secs(2)).unwrap();

    info!("Connecting to the cluster");
    let (nodes, _) =
    let nodes =
        discover_cluster(&cluster.entry_point_info.gossip, NUM_NODES).unwrap_or_else(|err| {
            error!("Failed to discover {} nodes: {:?}", NUM_NODES, err);
            exit(1);
@@ -86,7 +86,7 @@ fn test_exchange_bank_client() {
    solana_logger::setup();
    let (genesis_config, identity) = create_genesis_config(100_000_000_000_000);
    let mut bank = Bank::new(&genesis_config);
    bank.add_instruction_processor(id(), process_instruction);
    bank.add_builtin_program("exchange_program", id(), process_instruction);
    let clients = vec![BankClient::new(bank)];

    let mut config = Config::default();
@@ -2,14 +2,18 @@
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-bench-streamer"
version = "1.1.0"
version = "1.2.8"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"

[dependencies]
clap = "2.33.0"
solana-clap-utils = { path = "../clap-utils", version = "1.1.0" }
solana-streamer = { path = "../streamer", version = "1.1.0" }
solana-logger = { path = "../logger", version = "1.1.0" }
solana-net-utils = { path = "../net-utils", version = "1.1.0" }
clap = "2.33.1"
solana-clap-utils = { path = "../clap-utils", version = "1.2.8" }
solana-streamer = { path = "../streamer", version = "1.2.8" }
solana-logger = { path = "../logger", version = "1.2.8" }
solana-net-utils = { path = "../net-utils", version = "1.2.8" }
solana-version = { path = "../version", version = "1.2.8" }

[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]
@@ -52,7 +52,7 @@ fn main() -> Result<()> {

    let matches = App::new(crate_name!())
        .about(crate_description!())
        .version(solana_clap_utils::version!())
        .version(solana_version::version!())
        .arg(
            Arg::with_name("num-recv-sockets")
                .long("num-recv-sockets")
@@ -2,36 +2,40 @@
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-bench-tps"
version = "1.1.0"
version = "1.2.8"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"

[dependencies]
bincode = "1.2.1"
clap = "2.33.0"
clap = "2.33.1"
log = "0.4.8"
rayon = "1.3.0"
serde_json = "1.0.48"
serde_yaml = "0.8.11"
solana-clap-utils = { path = "../clap-utils", version = "1.1.0" }
solana-core = { path = "../core", version = "1.1.0" }
solana-genesis = { path = "../genesis", version = "1.1.0" }
solana-client = { path = "../client", version = "1.1.0" }
solana-faucet = { path = "../faucet", version = "1.1.0" }
solana-librapay = { path = "../programs/librapay", version = "1.1.0", optional = true }
solana-logger = { path = "../logger", version = "1.1.0" }
solana-metrics = { path = "../metrics", version = "1.1.0" }
solana-measure = { path = "../measure", version = "1.1.0" }
solana-net-utils = { path = "../net-utils", version = "1.1.0" }
solana-runtime = { path = "../runtime", version = "1.1.0" }
solana-sdk = { path = "../sdk", version = "1.1.0" }
solana-move-loader-program = { path = "../programs/move_loader", version = "1.1.0", optional = true }
serde_json = "1.0.53"
serde_yaml = "0.8.12"
solana-clap-utils = { path = "../clap-utils", version = "1.2.8" }
solana-core = { path = "../core", version = "1.2.8" }
solana-genesis = { path = "../genesis", version = "1.2.8" }
solana-client = { path = "../client", version = "1.2.8" }
solana-faucet = { path = "../faucet", version = "1.2.8" }
solana-librapay = { path = "../programs/librapay", version = "1.2.8", optional = true }
solana-logger = { path = "../logger", version = "1.2.8" }
solana-metrics = { path = "../metrics", version = "1.2.8" }
solana-measure = { path = "../measure", version = "1.2.8" }
solana-net-utils = { path = "../net-utils", version = "1.2.8" }
solana-runtime = { path = "../runtime", version = "1.2.8" }
solana-sdk = { path = "../sdk", version = "1.2.8" }
solana-move-loader-program = { path = "../programs/move_loader", version = "1.2.8", optional = true }
solana-version = { path = "../version", version = "1.2.8" }

[dev-dependencies]
serial_test = "0.4.0"
serial_test_derive = "0.4.0"
solana-local-cluster = { path = "../local-cluster", version = "1.1.0" }
solana-local-cluster = { path = "../local-cluster", version = "1.2.8" }

[features]
move = ["solana-librapay", "solana-move-loader-program"]

[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]
@@ -14,6 +14,7 @@ use solana_sdk::{
    commitment_config::CommitmentConfig,
    fee_calculator::FeeCalculator,
    hash::Hash,
    message::Message,
    pubkey::Pubkey,
    signature::{Keypair, Signer},
    system_instruction, system_transaction,
@@ -26,9 +27,9 @@ use std::{
    process::exit,
    sync::{
        atomic::{AtomicBool, AtomicIsize, AtomicUsize, Ordering},
        Arc, RwLock,
        Arc, Mutex, RwLock,
    },
    thread::{sleep, Builder},
    thread::{sleep, Builder, JoinHandle},
    time::{Duration, Instant},
};
@@ -55,7 +56,9 @@ type LibraKeys = (Keypair, Pubkey, Pubkey, Vec<Keypair>);
fn get_recent_blockhash<T: Client>(client: &T) -> (Hash, FeeCalculator) {
    loop {
        match client.get_recent_blockhash_with_commitment(CommitmentConfig::recent()) {
            Ok((blockhash, fee_calculator)) => return (blockhash, fee_calculator),
            Ok((blockhash, fee_calculator, _last_valid_slot)) => {
                return (blockhash, fee_calculator)
            }
            Err(err) => {
                info!("Couldn't get recent blockhash: {:?}", err);
                sleep(Duration::from_secs(1));
@@ -64,105 +67,63 @@ fn get_recent_blockhash<T: Client>(client: &T) -> (Hash, FeeCalculator) {
    }
}

pub fn do_bench_tps<T>(
    client: Arc<T>,
    config: Config,
    gen_keypairs: Vec<Keypair>,
    libra_args: Option<LibraKeys>,
) -> u64
fn wait_for_target_slots_per_epoch<T>(target_slots_per_epoch: u64, client: &Arc<T>)
where
    T: 'static + Client + Send + Sync,
{
    let Config {
        id,
        threads,
        thread_batch_sleep_ms,
        duration,
        tx_count,
        sustained,
        ..
    } = config;

    let mut source_keypair_chunks: Vec<Vec<&Keypair>> = Vec::new();
    let mut dest_keypair_chunks: Vec<VecDeque<&Keypair>> = Vec::new();
    assert!(gen_keypairs.len() >= 2 * tx_count);
    for chunk in gen_keypairs.chunks_exact(2 * tx_count) {
        source_keypair_chunks.push(chunk[..tx_count].iter().collect());
        dest_keypair_chunks.push(chunk[tx_count..].iter().collect());
    }

    let first_tx_count = loop {
        match client.get_transaction_count() {
            Ok(count) => break count,
            Err(err) => {
                info!("Couldn't get transaction count: {:?}", err);
                sleep(Duration::from_secs(1));
    if target_slots_per_epoch != 0 {
        info!(
            "Waiting until epochs are {} slots long..",
            target_slots_per_epoch
        );
        loop {
            if let Ok(epoch_info) = client.get_epoch_info() {
                if epoch_info.slots_in_epoch >= target_slots_per_epoch {
                    info!("Done epoch_info: {:?}", epoch_info);
                    break;
                }
                info!(
                    "Waiting for epoch: {} now: {}",
                    target_slots_per_epoch, epoch_info.slots_in_epoch
                );
            }
            sleep(Duration::from_secs(3));
        }
    };
    info!("Initial transaction count {}", first_tx_count);
    }
}

    let exit_signal = Arc::new(AtomicBool::new(false));

    // Set up a thread per validator to sample every period
    // collect the max transaction rate and total tx count seen
    let maxes = Arc::new(RwLock::new(Vec::new()));
    let sample_period = 1; // in seconds
fn create_sampler_thread<T>(
    client: &Arc<T>,
    exit_signal: &Arc<AtomicBool>,
    sample_period: u64,
    maxes: &Arc<RwLock<Vec<(String, SampleStats)>>>,
) -> JoinHandle<()>
where
    T: 'static + Client + Send + Sync,
{
    info!("Sampling TPS every {} second...", sample_period);
    let sample_thread = {
        let exit_signal = exit_signal.clone();
        let maxes = maxes.clone();
        let client = client.clone();
        Builder::new()
            .name("solana-client-sample".to_string())
            .spawn(move || {
                sample_txs(&exit_signal, &maxes, sample_period, &client);
            })
            .unwrap()
    };

    let shared_txs: SharedTransactions = Arc::new(RwLock::new(VecDeque::new()));

    let recent_blockhash = Arc::new(RwLock::new(get_recent_blockhash(client.as_ref()).0));
    let shared_tx_active_thread_count = Arc::new(AtomicIsize::new(0));
    let total_tx_sent_count = Arc::new(AtomicUsize::new(0));

    let blockhash_thread = {
        let exit_signal = exit_signal.clone();
        let recent_blockhash = recent_blockhash.clone();
        let client = client.clone();
        let id = id.pubkey();
        Builder::new()
            .name("solana-blockhash-poller".to_string())
            .spawn(move || {
                poll_blockhash(&exit_signal, &recent_blockhash, &client, &id);
            })
            .unwrap()
    };

    let s_threads: Vec<_> = (0..threads)
        .map(|_| {
            let exit_signal = exit_signal.clone();
            let shared_txs = shared_txs.clone();
            let shared_tx_active_thread_count = shared_tx_active_thread_count.clone();
            let total_tx_sent_count = total_tx_sent_count.clone();
            let client = client.clone();
            Builder::new()
                .name("solana-client-sender".to_string())
                .spawn(move || {
                    do_tx_transfers(
                        &exit_signal,
                        &shared_txs,
                        &shared_tx_active_thread_count,
                        &total_tx_sent_count,
                        thread_batch_sleep_ms,
                        &client,
                    );
                })
                .unwrap()
    let exit_signal = exit_signal.clone();
    let maxes = maxes.clone();
    let client = client.clone();
    Builder::new()
        .name("solana-client-sample".to_string())
        .spawn(move || {
            sample_txs(&exit_signal, &maxes, sample_period, &client);
        })
        .collect();
        .unwrap()
}

fn generate_chunked_transfers(
    recent_blockhash: Arc<RwLock<Hash>>,
    shared_txs: &SharedTransactions,
    shared_tx_active_thread_count: Arc<AtomicIsize>,
    source_keypair_chunks: Vec<Vec<&Keypair>>,
    dest_keypair_chunks: &mut Vec<VecDeque<&Keypair>>,
    threads: usize,
    duration: Duration,
    sustained: bool,
    libra_args: Option<LibraKeys>,
) {
    // generate and send transactions for the specified duration
    let start = Instant::now();
    let keypair_chunks = source_keypair_chunks.len();
@@ -170,7 +131,7 @@ where
    let mut chunk_index = 0;
    while start.elapsed() < duration {
        generate_txs(
            &shared_txs,
            shared_txs,
            &recent_blockhash,
            &source_keypair_chunks[chunk_index],
            &dest_keypair_chunks[chunk_index],
@@ -206,6 +167,135 @@ where
            reclaim_lamports_back_to_source_account = !reclaim_lamports_back_to_source_account;
        }
    }
}

fn create_sender_threads<T>(
    client: &Arc<T>,
    shared_txs: &SharedTransactions,
    thread_batch_sleep_ms: usize,
    total_tx_sent_count: &Arc<AtomicUsize>,
    threads: usize,
    exit_signal: &Arc<AtomicBool>,
    shared_tx_active_thread_count: &Arc<AtomicIsize>,
) -> Vec<JoinHandle<()>>
where
    T: 'static + Client + Send + Sync,
{
    (0..threads)
        .map(|_| {
            let exit_signal = exit_signal.clone();
            let shared_txs = shared_txs.clone();
            let shared_tx_active_thread_count = shared_tx_active_thread_count.clone();
            let total_tx_sent_count = total_tx_sent_count.clone();
            let client = client.clone();
            Builder::new()
                .name("solana-client-sender".to_string())
                .spawn(move || {
                    do_tx_transfers(
                        &exit_signal,
                        &shared_txs,
                        &shared_tx_active_thread_count,
                        &total_tx_sent_count,
                        thread_batch_sleep_ms,
                        &client,
                    );
                })
                .unwrap()
        })
        .collect()
}

pub fn do_bench_tps<T>(
    client: Arc<T>,
    config: Config,
    gen_keypairs: Vec<Keypair>,
    libra_args: Option<LibraKeys>,
) -> u64
where
    T: 'static + Client + Send + Sync,
{
    let Config {
        id,
        threads,
        thread_batch_sleep_ms,
        duration,
        tx_count,
        sustained,
        target_slots_per_epoch,
        ..
    } = config;

    let mut source_keypair_chunks: Vec<Vec<&Keypair>> = Vec::new();
    let mut dest_keypair_chunks: Vec<VecDeque<&Keypair>> = Vec::new();
    assert!(gen_keypairs.len() >= 2 * tx_count);
    for chunk in gen_keypairs.chunks_exact(2 * tx_count) {
        source_keypair_chunks.push(chunk[..tx_count].iter().collect());
        dest_keypair_chunks.push(chunk[tx_count..].iter().collect());
    }

    let first_tx_count = loop {
        match client.get_transaction_count() {
            Ok(count) => break count,
            Err(err) => {
                info!("Couldn't get transaction count: {:?}", err);
                sleep(Duration::from_secs(1));
            }
        }
    };
    info!("Initial transaction count {}", first_tx_count);

    let exit_signal = Arc::new(AtomicBool::new(false));

    // Set up a thread per validator to sample every period
    // collect the max transaction rate and total tx count seen
    let maxes = Arc::new(RwLock::new(Vec::new()));
    let sample_period = 1; // in seconds
    let sample_thread = create_sampler_thread(&client, &exit_signal, sample_period, &maxes);

    let shared_txs: SharedTransactions = Arc::new(RwLock::new(VecDeque::new()));

    let recent_blockhash = Arc::new(RwLock::new(get_recent_blockhash(client.as_ref()).0));
    let shared_tx_active_thread_count = Arc::new(AtomicIsize::new(0));
    let total_tx_sent_count = Arc::new(AtomicUsize::new(0));

    let blockhash_thread = {
        let exit_signal = exit_signal.clone();
        let recent_blockhash = recent_blockhash.clone();
        let client = client.clone();
        let id = id.pubkey();
        Builder::new()
            .name("solana-blockhash-poller".to_string())
            .spawn(move || {
                poll_blockhash(&exit_signal, &recent_blockhash, &client, &id);
            })
            .unwrap()
    };

    let s_threads = create_sender_threads(
        &client,
        &shared_txs,
        thread_batch_sleep_ms,
        &total_tx_sent_count,
        threads,
        &exit_signal,
        &shared_tx_active_thread_count,
    );

    wait_for_target_slots_per_epoch(target_slots_per_epoch, &client);

    let start = Instant::now();

    generate_chunked_transfers(
        recent_blockhash,
        &shared_txs,
        shared_tx_active_thread_count,
        source_keypair_chunks,
        &mut dest_keypair_chunks,
        threads,
        duration,
        sustained,
        libra_args,
    );

    // Stop the sampling threads so they can collect the stats
    exit_signal.store(true, Ordering::Relaxed);
@@ -563,11 +653,9 @@ impl<'a> FundingTransactions<'a> for Vec<(&'a Keypair, Transaction)> {
        let to_fund_txs: Vec<(&Keypair, Transaction)> = to_fund
            .par_iter()
            .map(|(k, t)| {
                let tx = Transaction::new_unsigned_instructions(system_instruction::transfer_many(
                    &k.pubkey(),
                    &t,
                ));
                (*k, tx)
                let instructions = system_instruction::transfer_many(&k.pubkey(), &t);
                let message = Message::new(&instructions, Some(&k.pubkey()));
                (*k, Transaction::new_unsigned(message))
            })
            .collect();
        make_txs.stop();
@@ -603,7 +691,9 @@ impl<'a> FundingTransactions<'a> for Vec<(&'a Keypair, Transaction)> {
        let too_many_failures = Arc::new(AtomicBool::new(false));
        let loops = if starting_txs < 1000 { 3 } else { 1 };
        // Only loop multiple times for small (quick) transaction batches
        let time = Arc::new(Mutex::new(Instant::now()));
        for _ in 0..loops {
            let time = time.clone();
            let failed_verify = Arc::new(AtomicUsize::new(0));
            let client = client.clone();
            let verified_txs = &verified_txs;
@@ -634,11 +724,15 @@ impl<'a> FundingTransactions<'a> for Vec<(&'a Keypair, Transaction)> {
                        remaining_count, verified_txs, failed_verify
                    );
                }
                if remaining_count % 100 == 0 {
                    info!(
                        "Verifying transfers... {} remaining, {} verified, {} failures",
                        remaining_count, verified_txs, failed_verify
                    );
                if remaining_count > 0 {
                    let mut time_l = time.lock().unwrap();
                    if time_l.elapsed().as_secs() > 2 {
                        info!(
                            "Verifying transfers... {} remaining, {} verified, {} failures",
                            remaining_count, verified_txs, failed_verify
                        );
                        *time_l = Instant::now();
                    }
                }

                verified
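The new branch above throttles progress logging with a shared Mutex<Instant>, printing at most once every two seconds across worker threads instead of on every 100th item. A standalone sketch of that pattern (message text hypothetical):

    use std::sync::{Arc, Mutex};
    use std::time::Instant;

    // Shared by all workers: the time of the last log line.
    let last_log = Arc::new(Mutex::new(Instant::now()));
    // Inside each worker's loop:
    let mut last = last_log.lock().unwrap();
    if last.elapsed().as_secs() > 2 {
        println!("still verifying...");
        *last = Instant::now(); // reset the shared timer
    }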
@@ -876,7 +970,7 @@ fn fund_move_keys<T: Client>(
    let libra_funding_key = Keypair::new();
    let tx = librapay_transaction::create_account(funding_key, &libra_funding_key, 1, blockhash);
    client
        .send_message(&[funding_key, &libra_funding_key], tx.message)
        .send_and_confirm_message(&[funding_key, &libra_funding_key], tx.message)
        .unwrap();

    info!("minting to funding keypair");
@@ -889,7 +983,7 @@ fn fund_move_keys<T: Client>(
        blockhash,
    );
    client
        .send_message(&[funding_key, libra_genesis_key], tx.message)
        .send_and_confirm_message(&[funding_key, libra_genesis_key], tx.message)
        .unwrap();

    info!("creating {} move accounts...", keypairs.len());
@@ -911,7 +1005,7 @@ fn fund_move_keys<T: Client>(
        let ser_size = bincode::serialized_size(&tx).unwrap();
        let mut keys = vec![funding_key];
        keys.extend(&keypairs);
        client.send_message(&keys, tx.message).unwrap();
        client.send_and_confirm_message(&keys, tx.message).unwrap();

        if i % 10 == 0 {
            info!(
@@ -929,12 +1023,12 @@ fn fund_move_keys<T: Client>(
        .iter()
        .map(|key| (key.pubkey(), total / NUM_FUNDING_KEYS as u64))
        .collect();
    let tx = Transaction::new_signed_instructions(
        &[funding_key],
        system_instruction::transfer_many(&funding_key.pubkey(), &pubkey_amounts),
        blockhash,
    );
    client.send_message(&[funding_key], tx.message).unwrap();
    let instructions = system_instruction::transfer_many(&funding_key.pubkey(), &pubkey_amounts);
    let message = Message::new(&instructions, Some(&funding_key.pubkey()));
    let tx = Transaction::new(&[funding_key], message, blockhash);
    client
        .send_and_confirm_message(&[funding_key], tx.message)
        .unwrap();
    let mut balance = 0;
    for _ in 0..20 {
        if let Ok(balance_) = client
@@ -957,7 +1051,7 @@ fn fund_move_keys<T: Client>(
    for (i, key) in libra_funding_keys.iter().enumerate() {
        let tx = librapay_transaction::create_account(&funding_keys[i], &key, 1, blockhash);
        client
            .send_message(&[&funding_keys[i], &key], tx.message)
            .send_and_confirm_message(&[&funding_keys[i], &key], tx.message)
            .unwrap();

        let tx = librapay_transaction::transfer(
@@ -970,7 +1064,7 @@ fn fund_move_keys<T: Client>(
            blockhash,
        );
        client
            .send_message(&[&funding_keys[i], &libra_funding_key], tx.message)
            .send_and_confirm_message(&[&funding_keys[i], &libra_funding_key], tx.message)
            .unwrap();

        info!("funded libra funding key {}", i);
@@ -25,6 +25,7 @@ pub struct Config {
    pub multi_client: bool,
    pub use_move: bool,
    pub num_lamports_per_account: u64,
    pub target_slots_per_epoch: u64,
}

impl Default for Config {
@@ -47,6 +48,7 @@ impl Default for Config {
            multi_client: true,
            use_move: false,
            num_lamports_per_account: NUM_LAMPORTS_PER_ACCOUNT_DEFAULT,
            target_slots_per_epoch: 0,
        }
    }
}
@@ -172,6 +174,15 @@ pub fn build_args<'a, 'b>(version: &'b str) -> App<'a, 'b> {
                "Number of lamports per account.",
            ),
        )
        .arg(
            Arg::with_name("target_slots_per_epoch")
                .long("target-slots-per-epoch")
                .value_name("SLOTS")
                .takes_value(true)
                .help(
                    "Wait until epochs are this many slots long.",
                ),
        )
}

/// Parses a clap `ArgMatches` structure into a `Config`
@@ -259,5 +270,12 @@ pub fn extract_args<'a>(matches: &ArgMatches<'a>) -> Config {
        args.num_lamports_per_account = v.to_string().parse().expect("can't parse lamports");
    }

    if let Some(t) = matches.value_of("target_slots_per_epoch") {
        args.target_slots_per_epoch = t
            .to_string()
            .parse()
            .expect("can't parse target slots per epoch");
    }

    args
}
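The new flag is parsed with the same pattern as the others. For example, an invocation such as `solana-bench-tps --target-slots-per-epoch 512 ...` (values hypothetical) makes do_bench_tps block in wait_for_target_slots_per_epoch until get_epoch_info reports epochs at least 512 slots long before it starts generating transfers.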
@@ -15,7 +15,7 @@ fn main() {
    solana_logger::setup_with_default("solana=info");
    solana_metrics::set_panic_hook("bench-tps");

    let matches = cli::build_args(solana_clap_utils::version!()).get_matches();
    let matches = cli::build_args(solana_version::version!()).get_matches();
    let cli_config = cli::extract_args(&matches);

    let cli::Config {
@ -67,11 +67,10 @@ fn main() {
|
||||
}
|
||||
|
||||
info!("Connecting to the cluster");
|
||||
let (nodes, _archivers) =
|
||||
discover_cluster(&entrypoint_addr, *num_nodes).unwrap_or_else(|err| {
|
||||
eprintln!("Failed to discover {} nodes: {:?}", num_nodes, err);
|
||||
exit(1);
|
||||
});
|
||||
let nodes = discover_cluster(&entrypoint_addr, *num_nodes).unwrap_or_else(|err| {
|
||||
eprintln!("Failed to discover {} nodes: {:?}", num_nodes, err);
|
||||
exit(1);
|
||||
});
|
||||
|
||||
let client = if *multi_client {
|
||||
let (client, num_clients) = get_multi_client(&nodes);
|
||||
|
@@ -1,24 +0,0 @@
[package]
name = "solana-chacha-cuda"
version = "1.1.0"
description = "Solana Chacha Cuda APIs"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
edition = "2018"

[dependencies]
log = "0.4.8"
solana-archiver-utils = { path = "../archiver-utils", version = "1.1.0" }
solana-chacha = { path = "../chacha", version = "1.1.0" }
solana-ledger = { path = "../ledger", version = "1.1.0" }
solana-logger = { path = "../logger", version = "1.1.0" }
solana-perf = { path = "../perf", version = "1.1.0" }
solana-sdk = { path = "../sdk", version = "1.1.0" }

[dev-dependencies]
hex-literal = "0.2.1"

[lib]
name = "solana_chacha_cuda"
@@ -1,280 +0,0 @@
// Module used by validators to approve storage mining proofs in parallel using the GPU

use solana_chacha::chacha::{CHACHA_BLOCK_SIZE, CHACHA_KEY_SIZE};
use solana_ledger::blockstore::Blockstore;
use solana_perf::perf_libs;
use solana_sdk::hash::Hash;
use std::io;
use std::mem::size_of;
use std::sync::Arc;

// Encrypt a file with multiple starting IV states, determined by ivecs.len()
//
// Then sample each block at the offsets provided by samples argument with sha256
// and return the vec of sha states
pub fn chacha_cbc_encrypt_file_many_keys(
    blockstore: &Arc<Blockstore>,
    segment: u64,
    slots_per_segment: u64,
    ivecs: &mut [u8],
    samples: &[u64],
) -> io::Result<Vec<Hash>> {
    let api = perf_libs::api().expect("no perf libs");
    if ivecs.len() % CHACHA_BLOCK_SIZE != 0 {
        return Err(io::Error::new(
            io::ErrorKind::Other,
            format!(
                "bad IV length({}) not divisible by {} ",
                ivecs.len(),
                CHACHA_BLOCK_SIZE,
            ),
        ));
    }

    const BUFFER_SIZE: usize = 8 * 1024;
    let mut buffer = [0; BUFFER_SIZE];
    let num_keys = ivecs.len() / CHACHA_BLOCK_SIZE;
    let mut sha_states = vec![0; num_keys * size_of::<Hash>()];
    let mut int_sha_states = vec![0; num_keys * 112];
    let keys: Vec<u8> = vec![0; num_keys * CHACHA_KEY_SIZE]; // keys not used ATM, uniqueness comes from IV
    let mut current_slot = segment * slots_per_segment;
    let mut start_index = 0;
    let start_slot = current_slot;
    let mut total_size = 0;
    let mut time: f32 = 0.0;
    unsafe {
        (api.chacha_init_sha_state)(int_sha_states.as_mut_ptr(), num_keys as u32);
    }
    loop {
        match blockstore.get_data_shreds(current_slot, start_index, std::u64::MAX, &mut buffer) {
            Ok((last_index, mut size)) => {
                debug!(
                    "chacha_cuda: encrypting segment: {} num_shreds: {} data_len: {}",
                    segment,
                    last_index.saturating_sub(start_index),
                    size
                );

                if size == 0 {
                    if current_slot.saturating_sub(start_slot) < slots_per_segment {
                        current_slot += 1;
                        start_index = 0;
                        continue;
                    } else {
                        break;
                    }
                }

                if size < BUFFER_SIZE {
                    // round to the nearest key_size boundary
                    size = (size + CHACHA_KEY_SIZE - 1) & !(CHACHA_KEY_SIZE - 1);
                }

                unsafe {
                    (api.chacha_cbc_encrypt_many_sample)(
                        buffer[..size].as_ptr(),
                        int_sha_states.as_mut_ptr(),
                        size,
                        keys.as_ptr(),
                        ivecs.as_mut_ptr(),
                        num_keys as u32,
                        samples.as_ptr(),
                        samples.len() as u32,
                        total_size,
                        &mut time,
                    );
                }

                total_size += size as u64;
                start_index = last_index + 1;
            }
            Err(e) => {
                info!("Error encrypting file: {:?}", e);
                break;
            }
        }
    }
    unsafe {
        (api.chacha_end_sha_state)(
            int_sha_states.as_ptr(),
            sha_states.as_mut_ptr(),
            num_keys as u32,
        );
    }
    let mut res = Vec::new();
    for x in 0..num_keys {
        let start = x * size_of::<Hash>();
        let end = start + size_of::<Hash>();
        res.push(Hash::new(&sha_states[start..end]));
    }
    Ok(res)
}

#[cfg(test)]
mod tests {
    use super::*;
    use solana_archiver_utils::sample_file;
    use solana_chacha::chacha::chacha_cbc_encrypt_ledger;
    use solana_ledger::entry::create_ticks;
    use solana_ledger::get_tmp_ledger_path;
    use solana_sdk::clock::DEFAULT_SLOTS_PER_SEGMENT;
    use solana_sdk::signature::Keypair;
    use std::fs::{remove_dir_all, remove_file};
    use std::path::Path;

    #[test]
    fn test_encrypt_file_many_keys_single() {
        solana_logger::setup();
        if perf_libs::api().is_none() {
            info!("perf-libs unavailable, skipped");
            return;
        }

        let slots_per_segment = 32;
        let entries = create_ticks(slots_per_segment, 0, Hash::default());
        let ledger_path = get_tmp_ledger_path!();
        let ticks_per_slot = 16;
        let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());

        blockstore
            .write_entries(
                0,
                0,
                0,
                ticks_per_slot,
                Some(0),
                true,
                &Arc::new(Keypair::new()),
                entries,
                0,
            )
            .unwrap();

        let out_path = Path::new("test_chacha_encrypt_file_many_keys_single_output.txt.enc");

        let samples = [0];
        let mut ivecs = hex!(
            "abcd1234abcd1234abcd1234abcd1234 abcd1234abcd1234abcd1234abcd1234
             abcd1234abcd1234abcd1234abcd1234 abcd1234abcd1234abcd1234abcd1234"
        );

        let mut cpu_iv = ivecs.clone();
        chacha_cbc_encrypt_ledger(
            &blockstore,
            0,
            slots_per_segment as u64,
            out_path,
            &mut cpu_iv,
        )
        .unwrap();

        let ref_hash = sample_file(&out_path, &samples).unwrap();

        let hashes = chacha_cbc_encrypt_file_many_keys(
            &blockstore,
            0,
            slots_per_segment as u64,
            &mut ivecs,
            &samples,
        )
        .unwrap();

        assert_eq!(hashes[0], ref_hash);

        let _ignored = remove_dir_all(&ledger_path);
        let _ignored = remove_file(out_path);
    }

    #[test]
    fn test_encrypt_file_many_keys_multiple_keys() {
        solana_logger::setup();
        if perf_libs::api().is_none() {
            info!("perf-libs unavailable, skipped");
            return;
        }

        let ledger_path = get_tmp_ledger_path!();
        let ticks_per_slot = 90;
        let entries = create_ticks(2 * ticks_per_slot, 0, Hash::default());
        let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
        blockstore
            .write_entries(
                0,
                0,
                0,
                ticks_per_slot,
                Some(0),
                true,
                &Arc::new(Keypair::new()),
                entries,
                0,
            )
            .unwrap();

        let out_path = Path::new("test_chacha_encrypt_file_many_keys_multiple_output.txt.enc");

        let samples = [0, 1, 3, 4, 5, 150];
        let mut ivecs = Vec::new();
        let mut ref_hashes: Vec<Hash> = vec![];
        for i in 0..2 {
            let mut ivec = hex!(
                "abc123abc123abc123abc123abc123abc123abababababababababababababab
                 abc123abc123abc123abc123abc123abc123abababababababababababababab"
            );
            ivec[0] = i;
            ivecs.extend(ivec.clone().iter());
            chacha_cbc_encrypt_ledger(
                &blockstore.clone(),
                0,
                DEFAULT_SLOTS_PER_SEGMENT,
                out_path,
                &mut ivec,
            )
            .unwrap();

            ref_hashes.push(sample_file(&out_path, &samples).unwrap());
            info!(
                "ivec: {:?} hash: {:?} ivecs: {:?}",
                ivec.to_vec(),
                ref_hashes.last(),
                ivecs
            );
        }

        let hashes = chacha_cbc_encrypt_file_many_keys(
            &blockstore,
            0,
            DEFAULT_SLOTS_PER_SEGMENT,
            &mut ivecs,
            &samples,
        )
        .unwrap();

        assert_eq!(hashes, ref_hashes);

        let _ignored = remove_dir_all(&ledger_path);
        let _ignored = remove_file(out_path);
    }

    #[test]
    fn test_encrypt_file_many_keys_bad_key_length() {
        solana_logger::setup();
        if perf_libs::api().is_none() {
            info!("perf-libs unavailable, skipped");
            return;
        }

        let mut keys = hex!("abc123");
        let ledger_path = get_tmp_ledger_path!();
        let samples = [0];
        let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
        assert!(chacha_cbc_encrypt_file_many_keys(
            &blockstore,
            0,
            DEFAULT_SLOTS_PER_SEGMENT,
            &mut keys,
            &samples,
        )
        .is_err());
    }
}
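As a quick illustration of the IV-length contract enforced above, a minimal sketch (hypothetical buffer sizes, not part of the original file):

```rust
// Each starting IV state occupies one CHACHA_BLOCK_SIZE (64-byte) chunk of
// `ivecs`, so its length must be an exact multiple of the block size.
const CHACHA_BLOCK_SIZE: usize = 64;

fn main() {
    let ivecs = vec![0u8; 2 * CHACHA_BLOCK_SIZE]; // two IV states
    assert_eq!(ivecs.len() % CHACHA_BLOCK_SIZE, 0); // passes the divisibility check
    let num_keys = ivecs.len() / CHACHA_BLOCK_SIZE;
    assert_eq!(num_keys, 2); // the GPU call hashes two states in parallel
}
```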
@@ -1,8 +0,0 @@
#[macro_use]
extern crate log;

#[cfg(test)]
#[macro_use]
extern crate hex_literal;

pub mod chacha_cuda;
@@ -1,12 +0,0 @@
[package]
name = "solana-chacha-sys"
version = "1.1.0"
description = "Solana chacha-sys"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
homepage = "https://solana.com/"
license = "Apache-2.0"
edition = "2018"

[build-dependencies]
cc = "1.0.49"
@@ -1,8 +0,0 @@
extern crate cc;

fn main() {
    cc::Build::new()
        .file("cpu-crypt/chacha20_core.c")
        .file("cpu-crypt/chacha_cbc.c")
        .compile("libcpu-crypt");
}
chacha-sys/cpu-crypt/.gitignore (vendored)
@@ -1 +0,0 @@
release/
@@ -1,25 +0,0 @@
V:=debug

LIB:=cpu-crypt

CFLAGS_common:=-Wall -Werror -pedantic -fPIC
CFLAGS_release:=-march=native -O3 $(CFLAGS_common)
CFLAGS_debug:=-g $(CFLAGS_common)
CFLAGS:=$(CFLAGS_$V)

all: $V/lib$(LIB).a

$V/chacha20_core.o: chacha20_core.c chacha.h
	@mkdir -p $(@D)
	$(CC) $(CFLAGS) -c $< -o $@

$V/chacha_cbc.o: chacha_cbc.c chacha.h
	@mkdir -p $(@D)
	$(CC) $(CFLAGS) -c $< -o $@

$V/lib$(LIB).a: $V/chacha20_core.o $V/chacha_cbc.o
	$(AR) rcs $@ $^

.PHONY:clean
clean:
	rm -rf $V
@@ -1,35 +0,0 @@
#ifndef HEADER_CHACHA_H
# define HEADER_CHACHA_H

#include <string.h>
#include <inttypes.h>
# include <stddef.h>
# ifdef __cplusplus
extern "C" {
# endif

typedef unsigned int u32;

#define CHACHA_KEY_SIZE 32
#define CHACHA_NONCE_SIZE 12
#define CHACHA_BLOCK_SIZE 64
#define CHACHA_ROUNDS 500

void chacha20_encrypt(const u32 input[16],
                      unsigned char output[64],
                      int num_rounds);

void chacha20_encrypt_ctr(const uint8_t *in, uint8_t *out, size_t in_len,
                          const uint8_t key[CHACHA_KEY_SIZE], const uint8_t nonce[CHACHA_NONCE_SIZE],
                          uint32_t counter);

void chacha20_cbc128_encrypt(const unsigned char* in, unsigned char* out,
                             uint32_t len, const uint8_t* key,
                             unsigned char* ivec);


# ifdef __cplusplus
}
# endif

#endif
@@ -1,102 +0,0 @@
#include "chacha.h"

#define ROTL32(v, n) (((v) << (n)) | ((v) >> (32 - (n))))

#define ROTATE(v, c) ROTL32((v), (c))

#define XOR(v, w) ((v) ^ (w))

#define PLUS(x, y) ((x) + (y))

#define U32TO8_LITTLE(p, v) \
{ (p)[0] = ((v)      ) & 0xff; (p)[1] = ((v) >>  8) & 0xff; \
  (p)[2] = ((v) >> 16) & 0xff; (p)[3] = ((v) >> 24) & 0xff; }

#define U8TO32_LITTLE(p) \
(((u32)((p)[0])      ) | ((u32)((p)[1]) <<  8) | \
 ((u32)((p)[2]) << 16) | ((u32)((p)[3]) << 24)  )

#define QUARTERROUND(a,b,c,d) \
x[a] = PLUS(x[a],x[b]); x[d] = ROTATE(XOR(x[d],x[a]),16); \
x[c] = PLUS(x[c],x[d]); x[b] = ROTATE(XOR(x[b],x[c]),12); \
x[a] = PLUS(x[a],x[b]); x[d] = ROTATE(XOR(x[d],x[a]), 8); \
x[c] = PLUS(x[c],x[d]); x[b] = ROTATE(XOR(x[b],x[c]), 7);

// sigma contains the ChaCha constants, which happen to be an ASCII string.
static const uint8_t sigma[16] = { 'e', 'x', 'p', 'a', 'n', 'd', ' ', '3',
                                   '2', '-', 'b', 'y', 't', 'e', ' ', 'k' };

void chacha20_encrypt(const u32 input[16],
                      unsigned char output[64],
                      int num_rounds)
{
    u32 x[16];
    int i;
    memcpy(x, input, sizeof(u32) * 16);
    for (i = num_rounds; i > 0; i -= 2) {
        QUARTERROUND( 0, 4, 8,12)
        QUARTERROUND( 1, 5, 9,13)
        QUARTERROUND( 2, 6,10,14)
        QUARTERROUND( 3, 7,11,15)
        QUARTERROUND( 0, 5,10,15)
        QUARTERROUND( 1, 6,11,12)
        QUARTERROUND( 2, 7, 8,13)
        QUARTERROUND( 3, 4, 9,14)
    }
    for (i = 0; i < 16; ++i) {
        x[i] = PLUS(x[i], input[i]);
    }
    for (i = 0; i < 16; ++i) {
        U32TO8_LITTLE(output + 4 * i, x[i]);
    }
}

void chacha20_encrypt_ctr(const uint8_t *in, uint8_t *out, size_t in_len,
                          const uint8_t key[CHACHA_KEY_SIZE],
                          const uint8_t nonce[CHACHA_NONCE_SIZE],
                          uint32_t counter)
{
    uint32_t input[16];
    uint8_t buf[64];
    size_t todo, i;

    input[0] = U8TO32_LITTLE(sigma + 0);
    input[1] = U8TO32_LITTLE(sigma + 4);
    input[2] = U8TO32_LITTLE(sigma + 8);
    input[3] = U8TO32_LITTLE(sigma + 12);

    input[4] = U8TO32_LITTLE(key + 0);
    input[5] = U8TO32_LITTLE(key + 4);
    input[6] = U8TO32_LITTLE(key + 8);
    input[7] = U8TO32_LITTLE(key + 12);

    input[8] = U8TO32_LITTLE(key + 16);
    input[9] = U8TO32_LITTLE(key + 20);
    input[10] = U8TO32_LITTLE(key + 24);
    input[11] = U8TO32_LITTLE(key + 28);

    input[12] = counter;
    input[13] = U8TO32_LITTLE(nonce + 0);
    input[14] = U8TO32_LITTLE(nonce + 4);
    input[15] = U8TO32_LITTLE(nonce + 8);

    while (in_len > 0) {
        todo = sizeof(buf);
        if (in_len < todo) {
            todo = in_len;
        }

        chacha20_encrypt(input, buf, 20);
        for (i = 0; i < todo; i++) {
            out[i] = in[i] ^ buf[i];
        }

        out += todo;
        in += todo;
        in_len -= todo;

        input[12]++;
    }
}
@@ -1,72 +0,0 @@
#include "chacha.h"

#if !defined(STRICT_ALIGNMENT) && !defined(PEDANTIC)
# define STRICT_ALIGNMENT 0
#endif

void chacha20_cbc128_encrypt(const unsigned char* in, unsigned char* out,
                             uint32_t len, const uint8_t* key,
                             unsigned char* ivec)
{
    size_t n;
    unsigned char *iv = ivec;
    (void)key;

    if (len == 0) {
        return;
    }

#if !defined(OPENSSL_SMALL_FOOTPRINT)
    if (STRICT_ALIGNMENT &&
        ((size_t)in | (size_t)out | (size_t)ivec) % sizeof(size_t) != 0) {
        while (len >= CHACHA_BLOCK_SIZE) {
            for (n = 0; n < CHACHA_BLOCK_SIZE; ++n) {
                out[n] = in[n] ^ iv[n];
                //printf("%x ", out[n]);
            }
            chacha20_encrypt((const u32*)out, out, CHACHA_ROUNDS);
            iv = out;
            len -= CHACHA_BLOCK_SIZE;
            in += CHACHA_BLOCK_SIZE;
            out += CHACHA_BLOCK_SIZE;
        }
    } else {
        while (len >= CHACHA_BLOCK_SIZE) {
            for (n = 0; n < CHACHA_BLOCK_SIZE; n += sizeof(size_t)) {
                *(size_t *)(out + n) =
                    *(size_t *)(in + n) ^ *(size_t *)(iv + n);
                //printf("%zu ", *(size_t *)(iv + n));
            }
            chacha20_encrypt((const u32*)out, out, CHACHA_ROUNDS);
            iv = out;
            len -= CHACHA_BLOCK_SIZE;
            in += CHACHA_BLOCK_SIZE;
            out += CHACHA_BLOCK_SIZE;
        }
    }
#endif
    while (len) {
        for (n = 0; n < CHACHA_BLOCK_SIZE && n < len; ++n) {
            out[n] = in[n] ^ iv[n];
        }
        for (; n < CHACHA_BLOCK_SIZE; ++n) {
            out[n] = iv[n];
        }
        chacha20_encrypt((const u32*)out, out, CHACHA_ROUNDS);
        iv = out;
        if (len <= CHACHA_BLOCK_SIZE) {
            break;
        }
        len -= CHACHA_BLOCK_SIZE;
        in += CHACHA_BLOCK_SIZE;
        out += CHACHA_BLOCK_SIZE;
    }
    memcpy(ivec, iv, CHACHA_BLOCK_SIZE);

}

void chacha20_cbc_encrypt(const uint8_t *in, uint8_t *out, size_t in_len,
                          const uint8_t key[CHACHA_KEY_SIZE], uint8_t* ivec)
{
    chacha20_cbc128_encrypt(in, out, in_len, key, ivec);
}
@@ -1,21 +0,0 @@
extern "C" {
    fn chacha20_cbc_encrypt(
        input: *const u8,
        output: *mut u8,
        in_len: usize,
        key: *const u8,
        ivec: *mut u8,
    );
}

pub fn chacha_cbc_encrypt(input: &[u8], output: &mut [u8], key: &[u8], ivec: &mut [u8]) {
    unsafe {
        chacha20_cbc_encrypt(
            input.as_ptr(),
            output.as_mut_ptr(),
            input.len(),
            key.as_ptr(),
            ivec.as_mut_ptr(),
        );
    }
}
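For context, a minimal usage sketch of the `chacha_cbc_encrypt` wrapper above (hypothetical all-zero buffers; the 32- and 64-byte sizes follow the `CHACHA_KEY_SIZE` and `CHACHA_BLOCK_SIZE` constants defined in the `chacha` crate below):

```rust
fn main() {
    let input = [0u8; 64]; // plaintext, a multiple of the 64-byte block size
    let mut output = [0u8; 64]; // ciphertext buffer of the same length
    let key = [0u8; 32]; // CHACHA_KEY_SIZE bytes (ignored by the CBC core above)
    let mut ivec = [0u8; 64]; // CHACHA_BLOCK_SIZE starting IV, updated in place
    solana_chacha_sys::chacha_cbc_encrypt(&input, &mut output, &key, &mut ivec);
}
```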
chacha/.gitignore (vendored)
@@ -1 +0,0 @@
/farf/
@@ -1,25 +0,0 @@
[package]
name = "solana-chacha"
version = "1.1.0"
description = "Solana Chacha APIs"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
edition = "2018"

[dependencies]
log = "0.4.8"
rand = "0.6.5"
rand_chacha = "0.1.1"
solana-chacha-sys = { path = "../chacha-sys", version = "1.1.0" }
solana-ledger = { path = "../ledger", version = "1.1.0" }
solana-logger = { path = "../logger", version = "1.1.0" }
solana-perf = { path = "../perf", version = "1.1.0" }
solana-sdk = { path = "../sdk", version = "1.1.0" }

[dev-dependencies]
hex-literal = "0.2.1"

[lib]
name = "solana_chacha"
@@ -1,185 +0,0 @@
use solana_ledger::blockstore::Blockstore;
use solana_sdk::clock::Slot;
use std::fs::File;
use std::io;
use std::io::{BufWriter, Write};
use std::path::Path;
use std::sync::Arc;

pub use solana_chacha_sys::chacha_cbc_encrypt;

pub const CHACHA_BLOCK_SIZE: usize = 64;
pub const CHACHA_KEY_SIZE: usize = 32;

pub fn chacha_cbc_encrypt_ledger(
    blockstore: &Arc<Blockstore>,
    start_slot: Slot,
    slots_per_segment: u64,
    out_path: &Path,
    ivec: &mut [u8; CHACHA_BLOCK_SIZE],
) -> io::Result<usize> {
    let mut out_file =
        BufWriter::new(File::create(out_path).expect("Can't open ledger encrypted data file"));
    const BUFFER_SIZE: usize = 8 * 1024;
    let mut buffer = [0; BUFFER_SIZE];
    let mut encrypted_buffer = [0; BUFFER_SIZE];
    let key = [0; CHACHA_KEY_SIZE];
    let mut total_size = 0;
    let mut current_slot = start_slot;
    let mut start_index = 0;
    loop {
        match blockstore.get_data_shreds(current_slot, start_index, std::u64::MAX, &mut buffer) {
            Ok((last_index, mut size)) => {
                debug!(
                    "chacha: encrypting slice: {} num_shreds: {} data_len: {}",
                    current_slot,
                    last_index.saturating_sub(start_index),
                    size
                );
                debug!("read {} bytes", size);

                if size == 0 {
                    if current_slot.saturating_sub(start_slot) < slots_per_segment {
                        current_slot += 1;
                        start_index = 0;
                        continue;
                    } else {
                        break;
                    }
                }

                if size < BUFFER_SIZE {
                    // round to the nearest key_size boundary
                    size = (size + CHACHA_KEY_SIZE - 1) & !(CHACHA_KEY_SIZE - 1);
                }
                total_size += size;

                chacha_cbc_encrypt(&buffer[..size], &mut encrypted_buffer[..size], &key, ivec);
                if let Err(res) = out_file.write(&encrypted_buffer[..size]) {
                    warn!("Error writing file! {:?}", res);
                    return Err(res);
                }

                start_index = last_index + 1;
            }
            Err(e) => {
                info!("Error encrypting file: {:?}", e);
                break;
            }
        }
    }
    Ok(total_size)
}

#[cfg(test)]
mod tests {
    use crate::chacha::chacha_cbc_encrypt_ledger;
    use rand::SeedableRng;
    use rand_chacha::ChaChaRng;
    use solana_ledger::blockstore::Blockstore;
    use solana_ledger::entry::Entry;
    use solana_ledger::get_tmp_ledger_path;
    use solana_sdk::hash::{hash, Hash, Hasher};
    use solana_sdk::pubkey::Pubkey;
    use solana_sdk::signature::{Keypair, Signer};
    use solana_sdk::system_transaction;
    use std::fs::remove_file;
    use std::fs::File;
    use std::io::Read;
    use std::sync::Arc;

    fn make_tiny_deterministic_test_entries(num: usize) -> Vec<Entry> {
        let zero = Hash::default();
        let one = hash(&zero.as_ref());

        let seed = [2u8; 32];

        let mut generator = ChaChaRng::from_seed(seed);
        let keypair = Keypair::generate(&mut generator);

        let mut id = one;
        let mut num_hashes = 0;
        (0..num)
            .map(|_| {
                Entry::new_mut(
                    &mut id,
                    &mut num_hashes,
                    vec![system_transaction::transfer(
                        &keypair,
                        &keypair.pubkey(),
                        1,
                        one,
                    )],
                )
            })
            .collect()
    }

    use std::{env, fs::create_dir_all, path::PathBuf};
    fn tmp_file_path(name: &str) -> PathBuf {
        let out_dir = env::var("FARF_DIR").unwrap_or_else(|_| "farf".to_string());
        let mut path = PathBuf::new();
        path.push(out_dir);
        path.push("tmp");
        create_dir_all(&path).unwrap();

        path.push(format!("{}-{}", name, Pubkey::new_rand()));
        path
    }

    #[test]
    fn test_encrypt_ledger() {
        solana_logger::setup();
        let ledger_path = get_tmp_ledger_path!();
        let ticks_per_slot = 16;
        let slots_per_segment = 32;
        let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
        let out_path = tmp_file_path("test_encrypt_ledger");

        let seed = [2u8; 32];

        let mut generator = ChaChaRng::from_seed(seed);
        let keypair = Keypair::generate(&mut generator);

        let entries = make_tiny_deterministic_test_entries(slots_per_segment);
        blockstore
            .write_entries(
                0,
                0,
                0,
                ticks_per_slot,
                None,
                true,
                &Arc::new(keypair),
                entries,
                0,
            )
            .unwrap();

        let mut key = hex!(
            "abcd1234abcd1234abcd1234abcd1234 abcd1234abcd1234abcd1234abcd1234
             abcd1234abcd1234abcd1234abcd1234 abcd1234abcd1234abcd1234abcd1234"
        );
        chacha_cbc_encrypt_ledger(
            &blockstore,
            0,
            slots_per_segment as u64,
            &out_path,
            &mut key,
        )
        .unwrap();
        let mut out_file = File::open(&out_path).unwrap();
        let mut buf = vec![];
        let size = out_file.read_to_end(&mut buf).unwrap();
        let mut hasher = Hasher::default();
        hasher.hash(&buf[..size]);

        // golden needs to be updated if shred structure changes....
        let golden: Hash = "2rq8nR6rns2T5zsbQAGBDZb41NVtacneLgkCH17CVxZm"
            .parse()
            .unwrap();

        assert_eq!(hasher.result(), golden);
        remove_file(&out_path).unwrap();
    }
}
@@ -1,8 +0,0 @@
#[macro_use]
extern crate log;

#[cfg(test)]
#[macro_use]
extern crate hex_literal;

pub mod chacha;
ci/README.md
@@ -2,7 +2,7 @@
 Our CI infrastructure is built around [BuildKite](https://buildkite.com) with some
 additional GitHub integration provided by https://github.com/mvines/ci-gate

-## Agent Queues
+# Agent Queues

 We define two [Agent Queues](https://buildkite.com/docs/agent/v3/queues):
 `queue=default` and `queue=cuda`. The `default` queue should be favored and
@@ -12,9 +12,52 @@ be run on the `default` queue, and the [buildkite artifact
 system](https://buildkite.com/docs/builds/artifacts) used to transfer build
 products over to a GPU instance for testing.

-## Buildkite Agent Management
+# Buildkite Agent Management

-### Buildkite Azure Setup
+## Manual Node Setup for Colocated Hardware
+
+This section describes how to set up a new machine that does not have a
+pre-configured image with all the requirements installed. Used for custom-built
+hardware at a colocation or office facility. Also works for vanilla Ubuntu cloud
+instances.
+
+### Pre-Requisites
+
+- Install Ubuntu 18.04 LTS Server
+- Log in as a local or remote user with `sudo` privileges
+
+### Install Core Requirements
+
+##### Non-GPU enabled machines
+```bash
+sudo ./setup-new-buildkite-agent/setup-new-machine.sh
+```
+
+##### GPU-enabled machines
+- 1 or more NVIDIA GPUs should be installed in the machine (tested with 2080Ti)
+```bash
+sudo CUDA=1 ./setup-new-buildkite-agent/setup-new-machine.sh
+```
+
+### Configure Node for Buildkite-agent based CI
+
+- Install `buildkite-agent` and set up its user environment with:
+```bash
+sudo ./setup-new-buildkite-agent/setup-buildkite.sh
+```
+- Copy the pubkey contents from `~buildkite-agent/.ssh/id_ecdsa.pub` and
+  add the pubkey as an authorized SSH key on github.
+- Edit `/etc/buildkite-agent/buildkite-agent.cfg` and/or `/etc/systemd/system/buildkite-agent@*` to the desired configuration of the agent(s)
+- Copy `ejson` keys from another CI node at `/opt/ejson/keys/`
+  to the same location on the new node.
+- Start the new agent(s) with `sudo systemctl enable --now buildkite-agent`
+
+# Reference
+
+This section contains details regarding previous CI setups that have been used,
+and that we may return to one day.
+
+## Buildkite Azure Setup

 Create a new Azure-based "queue=default" agent by running the following command:
 ```
@@ -35,7 +78,7 @@ Creating a "queue=cuda" agent follows the same process but additionally:
 2. Edit the tags field in /etc/buildkite-agent/buildkite-agent.cfg to `tags="queue=cuda,queue=default"`
    and decrease the value of the priority field by one

-#### Updating the CI Disk Image
+### Updating the CI Disk Image

 1. Create a new VM Instance as described above
 1. Modify it as required
@@ -48,12 +91,7 @@ Creating a "queue=cuda" agent follows the same process but additionally:
 1. Goto the `ci` resource group in the Azure portal and remove all resources
    with the XYZ name in them

-## Reference
-
-This section contains details regarding previous CI setups that have been used,
-and that we may return to one day.
-
-### Buildkite AWS CloudFormation Setup
+## Buildkite AWS CloudFormation Setup

 **AWS CloudFormation is currently inactive, although it may be restored in the
 future**
@@ -62,7 +100,7 @@ AWS CloudFormation can be used to scale machines up and down based on the
 current CI load. If no machine is currently running it can take up to 60
 seconds to spin up a new instance, please remain calm during this time.

-#### AMI
+### AMI
 We use a custom AWS AMI built via https://github.com/solana-labs/elastic-ci-stack-for-aws/tree/solana/cuda.

 Use the following process to update this AMI as dependencies change:
@@ -84,13 +122,13 @@ The new AMI should also now be visible in your EC2 Dashboard. Go to the desired
 AWS CloudFormation stack, update the **ImageId** field to the new AMI id, and
 *apply* the stack changes.

-### Buildkite GCP Setup
+## Buildkite GCP Setup

 CI runs on Google Cloud Platform via two Compute Engine Instance groups:
 `ci-default` and `ci-cuda`. Autoscaling is currently disabled and the number of
 VM Instances in each group is manually adjusted.

-#### Updating a CI Disk Image
+### Updating a CI Disk Image

 Each Instance group has its own disk image, `ci-default-vX` and
 `ci-cuda-vY`, where *X* and *Y* are incremented each time the image is changed.
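To make the queue split concrete, here is a minimal sketch of pinning a step to the `cuda` queue (a hypothetical step name; the `agents` syntax matches the Buildkite YAML used elsewhere in this changeset):

```yaml
steps:
  # Hypothetical GPU-bound step: only agents tagged queue=cuda will pick it up
  - command: "ci/test-stable-perf.sh"
    name: "stable-perf-example"
    timeout_in_minutes: 40
    agents:
      - "queue=cuda"
```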
ci/buildkite-pipeline.sh (new executable file)
@@ -0,0 +1,259 @@
#!/usr/bin/env bash
#
# Builds a buildkite pipeline based on the environment variables
#

set -e
cd "$(dirname "$0")"/..

output_file=${1:-/dev/stderr}

if [[ -n $CI_PULL_REQUEST ]]; then
  IFS=':' read -ra affected_files <<< "$(buildkite-agent meta-data get affected_files)"
  if [[ ${#affected_files[*]} -eq 0 ]]; then
    echo "Unable to determine the files affected by this PR"
    exit 1
  fi
else
  affected_files=()
fi

annotate() {
  if [[ -n $BUILDKITE ]]; then
    buildkite-agent annotate "$@"
  fi
}

# Checks if a CI pull request affects one or more path patterns. Each
# pattern argument is checked in series. If one of them is found to be affected,
# return immediately as such.
#
# Bash regular expressions are permitted in the pattern:
#     affects .rs$    -- any file or directory ending in .rs
#     affects .rs     -- also matches foo.rs.bar
#     affects ^snap/  -- anything under the snap/ subdirectory
#     affects snap/   -- also matches foo/snap/
# Any pattern starting with the ! character will be negated:
#     affects !^docs/  -- anything *not* under the docs/ subdirectory
#
affects() {
  if [[ -z $CI_PULL_REQUEST ]]; then
    # affected_files metadata is not currently available for non-PR builds so assume
    # the worst (affected)
    return 0
  fi
  # Assume everything needs to be tested when any Dockerfile changes
  for pattern in ^ci/docker-rust/Dockerfile ^ci/docker-rust-nightly/Dockerfile "$@"; do
    if [[ ${pattern:0:1} = "!" ]]; then
      for file in "${affected_files[@]}"; do
        if [[ ! $file =~ ${pattern:1} ]]; then
          return 0 # affected
        fi
      done
    else
      for file in "${affected_files[@]}"; do
        if [[ $file =~ $pattern ]]; then
          return 0 # affected
        fi
      done
    fi
  done

  return 1 # not affected
}


# Checks if a CI pull request affects anything other than the provided path patterns
#
# Syntax is the same as `affects()` except that the negation prefix is not
# supported
#
affects_other_than() {
  if [[ -z $CI_PULL_REQUEST ]]; then
    # affected_files metadata is not currently available for non-PR builds so assume
    # the worst (affected)
    return 0
  fi

  for file in "${affected_files[@]}"; do
    declare matched=false
    for pattern in "$@"; do
      if [[ $file =~ $pattern ]]; then
        matched=true
      fi
    done
    if ! $matched; then
      return 0 # affected
    fi
  done

  return 1 # not affected
}


start_pipeline() {
  echo "# $*" > "$output_file"
  echo "steps:" >> "$output_file"
}

command_step() {
  cat >> "$output_file" <<EOF
  - name: "$1"
    command: "$2"
    timeout_in_minutes: $3
    artifact_paths: "log-*.txt"
EOF
}


trigger_secondary_step() {
  cat >> "$output_file" <<"EOF"
  - trigger: "solana-secondary"
    branches: "!pull/*"
    async: true
    build:
      message: "${BUILDKITE_MESSAGE}"
      commit: "${BUILDKITE_COMMIT}"
      branch: "${BUILDKITE_BRANCH}"
      env:
        TRIGGERED_BUILDKITE_TAG: "${BUILDKITE_TAG}"
EOF
}

wait_step() {
  echo "  - wait" >> "$output_file"
}

all_test_steps() {
  command_step checks ". ci/rust-version.sh; ci/docker-run.sh \$\$rust_nightly_docker_image ci/test-checks.sh" 20
  wait_step

  # Coverage...
  if affects \
    .rs$ \
    Cargo.lock$ \
    Cargo.toml$ \
    ^ci/rust-version.sh \
    ^ci/test-coverage.sh \
    ^scripts/coverage.sh \
  ; then
    command_step coverage ". ci/rust-version.sh; ci/docker-run.sh \$\$rust_nightly_docker_image ci/test-coverage.sh" 30
    wait_step
  else
    annotate --style info --context test-coverage \
      "Coverage skipped as no .rs files were modified"
  fi

  # Full test suite
  command_step stable ". ci/rust-version.sh; ci/docker-run.sh \$\$rust_stable_docker_image ci/test-stable.sh" 60
  wait_step

  # Perf test suite
  if affects \
    .rs$ \
    Cargo.lock$ \
    Cargo.toml$ \
    ^ci/rust-version.sh \
    ^ci/test-stable-perf.sh \
    ^ci/test-stable.sh \
    ^ci/test-local-cluster.sh \
    ^core/build.rs \
    ^fetch-perf-libs.sh \
    ^programs/ \
    ^sdk/ \
  ; then
    cat >> "$output_file" <<"EOF"
  - command: "ci/test-stable-perf.sh"
    name: "stable-perf"
    timeout_in_minutes: 40
    artifact_paths: "log-*.txt"
    agents:
      - "queue=cuda"
EOF
  else
    annotate --style info \
      "Stable-perf skipped as no relevant files were modified"
  fi

  # Benches...
  if affects \
    .rs$ \
    Cargo.lock$ \
    Cargo.toml$ \
    ^ci/rust-version.sh \
    ^ci/test-coverage.sh \
    ^ci/test-bench.sh \
  ; then
    command_step bench "ci/test-bench.sh" 30
  else
    annotate --style info --context test-bench \
      "Bench skipped as no .rs files were modified"
  fi

  command_step "local-cluster" \
    ". ci/rust-version.sh; ci/docker-run.sh \$\$rust_stable_docker_image ci/test-local-cluster.sh" \
    45
}

pull_or_push_steps() {
  command_step sanity "ci/test-sanity.sh" 5
  wait_step

  # Check for any .sh file changes
  if affects .sh$; then
    command_step shellcheck "ci/shellcheck.sh" 5
    wait_step
  fi

  # Run the full test suite by default, skipping only if modifications are local
  # to some particular areas of the tree
  if affects_other_than ^.buildkite ^.travis .md$ ^docs/ ^web3.js/ ^explorer/ ^.gitbook; then
    all_test_steps
  fi

  # docs/ changes:
  if affects ^docs/; then
    command_step docs ". ci/rust-version.sh; ci/docker-run.sh \$\$rust_nightly_docker_image docs/build.sh" 5
  fi

  # web3.js and explorer changes run on Travis...
}


if [[ -n $BUILDKITE_TAG ]]; then
  start_pipeline "Tag pipeline for $BUILDKITE_TAG"

  annotate --style info --context release-tag \
    "https://github.com/solana-labs/solana/releases/$BUILDKITE_TAG"

  # Jump directly to the secondary build to publish release artifacts quickly
  trigger_secondary_step
  exit 0
fi


if [[ $BUILDKITE_BRANCH =~ ^pull ]]; then
  echo "+++ Affected files in this PR"
  for file in "${affected_files[@]}"; do
    echo "- $file"
  done

  start_pipeline "Pull request pipeline for $BUILDKITE_BRANCH"

  # Add helpful link back to the corresponding Github Pull Request
  annotate --style info --context pr-backlink \
    "Github Pull Request: https://github.com/solana-labs/solana/$BUILDKITE_BRANCH"

  if [[ $GITHUB_USER = "dependabot-preview[bot]" ]]; then
    command_step dependabot "ci/dependabot-pr.sh" 5
    wait_step
  fi
  pull_or_push_steps
  exit 0
fi

start_pipeline "Push pipeline for ${BUILDKITE_BRANCH:-?unknown branch?}"
pull_or_push_steps
wait_step
trigger_secondary_step
exit 0
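As a usage sketch of the helpers defined above (hypothetical patterns and step name, following the calling conventions in this script):

```bash
# Emit a pipeline that runs a step only when Rust sources or lock files changed,
# mirroring how all_test_steps() gates its stages.
start_pipeline "Example pipeline"
if affects .rs$ Cargo.lock$; then
  command_step example-tests "ci/test-stable.sh" 60
  wait_step
fi
```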
@@ -2,6 +2,16 @@
 # Build steps that run after the primary pipeline on pushes and tags.
 # Pull requests do not run these steps.
 steps:
+  - command: "ci/publish-tarball.sh"
+    timeout_in_minutes: 60
+    name: "publish tarball"
+  - command: "ci/publish-docs.sh"
+    timeout_in_minutes: 15
+    name: "publish docs"
+  - command: "ci/publish-bpf-sdk.sh"
+    timeout_in_minutes: 5
+    name: "publish bpf sdk"
+  - wait
   - command: "sdk/docker-solana/build.sh"
     timeout_in_minutes: 60
     name: "publish docker"
@@ -9,12 +19,6 @@ steps:
     timeout_in_minutes: 240
     name: "publish crate"
     branches: "!master"
-  - command: "ci/publish-bpf-sdk.sh"
-    timeout_in_minutes: 5
-    name: "publish bpf sdk"
-  - command: "ci/publish-tarball.sh"
-    timeout_in_minutes: 60
-    name: "publish tarball"
-  - command: "ci/publish-docs.sh"
-    timeout_in_minutes: 15
-    name: "publish docs"
 #  - command: ". ci/rust-version.sh; ci/docker-run.sh $$rust_stable_docker_image ci/test-move.sh"
 #    name: "move"
 #    timeout_in_minutes: 20
ci/buildkite-tests.yml (new file)
@@ -0,0 +1,26 @@
# These steps are conditionally triggered by ci/buildkite.yml when files
# other than those in docs/ are modified

steps:
  - command: ". ci/rust-version.sh; ci/docker-run.sh $$rust_nightly_docker_image ci/test-coverage.sh"
    name: "coverage"
    timeout_in_minutes: 30
  - wait
  - command: ". ci/rust-version.sh; ci/docker-run.sh $$rust_stable_docker_image ci/test-stable.sh"
    name: "stable"
    timeout_in_minutes: 60
    artifact_paths: "log-*.txt"
  - wait
  - command: "ci/test-stable-perf.sh"
    name: "stable-perf"
    timeout_in_minutes: 40
    artifact_paths: "log-*.txt"
    agents:
      - "queue=cuda"
  - command: "ci/test-bench.sh"
    name: "bench"
    timeout_in_minutes: 30
  - command: ". ci/rust-version.sh; ci/docker-run.sh $$rust_stable_docker_image ci/test-local-cluster.sh"
    name: "local-cluster"
    timeout_in_minutes: 45
    artifact_paths: "log-*.txt"
@@ -1,42 +1,35 @@
# Build steps that run on pushes and pull requests.
# If files other than those in docs/ were modified, this will be followed up by
# ci/buildkite-tests.yml
#
# Release tags use buildkite-release.yml instead

steps:
  - command: "ci/shellcheck.sh"
    name: "shellcheck"
  - command: "ci/test-sanity.sh"
    name: "sanity"
    timeout_in_minutes: 5
  - command: "ci/dependabot-pr.sh"
    name: "dependabot"
    timeout_in_minutes: 5
    if: build.env("GITHUB_USER") == "dependabot-preview[bot]"

  - wait

  - command: ". ci/rust-version.sh; ci/docker-run.sh $$rust_nightly_docker_image ci/test-checks.sh"
    name: "checks"
    timeout_in_minutes: 20
  - command: "ci/shellcheck.sh"
    name: "shellcheck"
    timeout_in_minutes: 5

  - wait
  - command: "ci/test-stable-perf.sh"
    name: "stable-perf"
    timeout_in_minutes: 40
    artifact_paths: "log-*.txt"
    agents:
      - "queue=cuda"
  - command: "ci/test-bench.sh"
    name: "bench"
    timeout_in_minutes: 30
  - command: ". ci/rust-version.sh; ci/docker-run.sh $$rust_stable_docker_image ci/test-stable.sh"
    name: "stable"
    timeout_in_minutes: 60
    artifact_paths: "log-*.txt"
    agents:
      - "queue=rpc-test-capable"
  - command: ". ci/rust-version.sh; ci/docker-run.sh $$rust_stable_docker_image ci/test-move.sh"
    name: "move"
    timeout_in_minutes: 20
  - command: ". ci/rust-version.sh; ci/docker-run.sh $$rust_stable_docker_image ci/test-local-cluster.sh"
    name: "local-cluster"
    timeout_in_minutes: 45
    artifact_paths: "log-*.txt"
  - command: ". ci/rust-version.sh; ci/docker-run.sh $$rust_nightly_docker_image ci/test-coverage.sh"
    name: "coverage"
    timeout_in_minutes: 30
    agents:
      - "queue=rpc-test-capable"

  - command: "ci/maybe-trigger-tests.sh"
    name: "maybe-trigger-tests"
    timeout_in_minutes: 2

  - wait

  - trigger: "solana-secondary"
    branches: "!pull/*"
    async: true
ci/dependabot-pr.sh (new executable file)
@@ -0,0 +1,36 @@
#!/usr/bin/env bash

set -ex

cd "$(dirname "$0")/.."

if ! echo "$BUILDKITE_BRANCH" | grep -E '^pull/[0-9]+/head$'; then
  echo "not pull request!?" >&2
  exit 1
fi

source ci/rust-version.sh stable

ci/docker-run.sh $rust_nightly_docker_image ci/dependabot-updater.sh

if [[ $(git status --short :**/Cargo.lock | wc -l) -eq 0 ]]; then
  echo --- ok
  exit 0
fi

echo --- "(FAILING) Backpropagating dependabot-triggered Cargo.lock updates"

name="dependabot-buildkite"
api_base="https://api.github.com/repos/solana-labs/solana/pulls"
pr_num=$(echo "$BUILDKITE_BRANCH" | grep -Eo '[0-9]+')
branch=$(curl -s "$api_base/$pr_num" | python -c 'import json,sys;print json.load(sys.stdin)["head"]["ref"]')

git add :**/Cargo.lock
EMAIL="dependabot-buildkite@noreply.solana.com" \
  GIT_AUTHOR_NAME="$name" \
  GIT_COMMITTER_NAME="$name" \
  git commit -m "[auto-commit] Update all Cargo lock files"
git push origin "HEAD:$branch"

echo "Source branch is updated; failing this build for the next"
exit 1
ci/dependabot-updater.sh (new executable file)
@@ -0,0 +1,35 @@
#!/usr/bin/env bash

set -ex
cd "$(dirname "$0")/.."
source ci/_

commit_range="$(git merge-base HEAD origin/master)..HEAD"
parsed_update_args="$(
  git log "$commit_range" --author "dependabot-preview" --oneline -n1 |
    grep -o 'Bump.*$' |
    sed -r 's/Bump ([^ ]+) from ([^ ]+) to ([^ ]+)/-p \1:\2 --precise \3/'
)"
# relaxed_parsed_update_args is a temporary measure...
relaxed_parsed_update_args="$(
  git log "$commit_range" --author "dependabot-preview" --oneline -n1 |
    grep -o 'Bump.*$' |
    sed -r 's/Bump ([^ ]+) from [^ ]+ to ([^ ]+)/-p \1 --precise \2/'
)"
package=$(echo "$parsed_update_args" | awk '{print $2}' | grep -o "^[^:]*")
if [[ -n $parsed_update_args ]]; then
  # find other Cargo.lock files and update them, excluding the default Cargo.lock
  # shellcheck disable=SC2086
  for lock in $(git grep --files-with-matches '^name = "'$package'"$' :**/Cargo.lock); do
    # it's possible our current versions are out of sync across lock files,
    # in that case try to sync them up with $relaxed_parsed_update_args
    _ scripts/cargo-for-all-lock-files.sh \
      "$lock" -- \
      update $parsed_update_args ||
      _ scripts/cargo-for-all-lock-files.sh \
        "$lock" -- \
        update $relaxed_parsed_update_args
  done
fi

echo --- ok
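For intuition, a sketch of what the two `sed` expressions above produce for a hypothetical dependabot commit subject (the crate name and versions are made up):

```bash
echo 'Bump serde from 1.0.104 to 1.0.106' |
  sed -r 's/Bump ([^ ]+) from ([^ ]+) to ([^ ]+)/-p \1:\2 --precise \3/'
# => -p serde:1.0.104 --precise 1.0.106   (strict form: pins the current version)

echo 'Bump serde from 1.0.104 to 1.0.106' |
  sed -r 's/Bump ([^ ]+) from [^ ]+ to ([^ ]+)/-p \1 --precise \2/'
# => -p serde --precise 1.0.106           (relaxed form for out-of-sync lock files)
```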
@@ -49,7 +49,7 @@ else
   # ~/.cargo
   ARGS+=(--volume "$PWD:/home")
 fi
-ARGS+=(--env "CARGO_HOME=/home/.cargo")
+ARGS+=(--env "HOME=/home" --env "CARGO_HOME=/home/.cargo")

 # kcov tries to set the personality of the binary which docker
 # doesn't allow by default.
@@ -67,6 +67,7 @@ ARGS+=(
   --env BUILDKITE_JOB_ID
   --env CI
   --env CI_BRANCH
+  --env CI_BASE_BRANCH
   --env CI_TAG
   --env CI_BUILD_ID
   --env CI_COMMIT
@@ -1,4 +1,4 @@
-FROM solanalabs/rust:1.42.0
+FROM solanalabs/rust:1.43.0
 ARG date

 RUN set -x \
@@ -1,6 +1,6 @@
 # Note: when the rust version is changed also modify
 # ci/rust-version.sh to pick up the new image tag
-FROM rust:1.42.0
+FROM rust:1.43.0

 # Add Google Protocol Buffers for Libra's metrics library.
 ENV PROTOC_VERSION 3.8.0
@@ -8,6 +8,7 @@ if [[ -n $CI ]]; then
   export CI=1
   if [[ -n $TRAVIS ]]; then
     export CI_BRANCH=$TRAVIS_BRANCH
+    export CI_BASE_BRANCH=$TRAVIS_BRANCH
     export CI_BUILD_ID=$TRAVIS_BUILD_ID
     export CI_COMMIT=$TRAVIS_COMMIT
     export CI_JOB_ID=$TRAVIS_JOB_ID
@@ -28,8 +29,10 @@ if [[ -n $CI ]]; then
     # to how solana-ci-gate is used to trigger PR builds rather than using the
     # standard Buildkite PR trigger.
     if [[ $CI_BRANCH =~ pull/* ]]; then
+      export CI_BASE_BRANCH=$BUILDKITE_PULL_REQUEST_BASE_BRANCH
       export CI_PULL_REQUEST=true
     else
+      export CI_BASE_BRANCH=$BUILDKITE_BRANCH
       export CI_PULL_REQUEST=
     fi
     export CI_OS_NAME=linux
@@ -1,61 +0,0 @@
#!/usr/bin/env bash
set -e

testCmd="$*"
genPipeline=false

cd "$(dirname "$0")/.."

# Clear cached json keypair files
rm -rf "$HOME/.config/solana"

source ci/_
export RUST_BACKTRACE=1
export RUSTFLAGS="-D warnings"
export PATH=$PWD/target/debug:$PATH
export USE_INSTALL=1

if [[ -n $BUILDKITE && -z $testCmd ]]; then
  genPipeline=true
  echo "
steps:
  "
fi

build() {
  $genPipeline && return
  source ci/rust-version.sh stable
  source scripts/ulimit-n.sh
  _ cargo +$rust_stable build
}

runTest() {
  declare runTestName="$1"
  declare runTestCmd="$2"
  if $genPipeline; then
    echo "
  - command: \"$0 '$runTestCmd'\"
    name: \"$runTestName\"
    timeout_in_minutes: 45
"
    return
  fi

  if [[ -n $testCmd && "$testCmd" != "$runTestCmd" ]]; then
    echo Skipped "$runTestName"...
    return
  fi
  #shellcheck disable=SC2068 # Don't want to double quote $runTestCmd
  $runTestCmd
}

build

runTest "basic" \
  "ci/localnet-sanity.sh -i 128"

runTest "restart" \
  "ci/localnet-sanity.sh -i 128 -k 16"

runTest "incremental restart, extra node" \
  "ci/localnet-sanity.sh -i 128 -k 16 -R -x"
@@ -73,16 +73,15 @@ source scripts/configure-metrics.sh
 source multinode-demo/common.sh

 nodes=(
-  "multinode-demo/faucet.sh"
   "multinode-demo/bootstrap-validator.sh \
     --no-restart \
-    --init-complete-file init-complete-node1.log \
+    --init-complete-file init-complete-node0.log \
     --dynamic-port-range 8000-8050"
   "multinode-demo/validator.sh \
     --enable-rpc-exit \
     --no-restart \
     --dynamic-port-range 8050-8100
-    --init-complete-file init-complete-node2.log \
+    --init-complete-file init-complete-node1.log \
     --rpc-port 18899"
 )

@@ -95,7 +94,7 @@ if [[ extraNodes -gt 0 ]]; then
       --no-restart \
       --dynamic-port-range $portStart-$portEnd
       --label dyn$i \
-      --init-complete-file init-complete-node$((2 + i)).log"
+      --init-complete-file init-complete-node$((1 + i)).log"
   )
 done
 fi
@@ -160,11 +159,10 @@ startNodes() {
   for i in $(seq 0 $((${#nodes[@]} - 1))); do
     declare cmd=${nodes[$i]}

-    if [[ "$i" -ne 0 ]]; then # 0 == faucet, skip it
-      declare initCompleteFile="init-complete-node$i.log"
-      rm -f "$initCompleteFile"
-      initCompleteFiles+=("$initCompleteFile")
-    fi
+    declare initCompleteFile="init-complete-node$i.log"
+    rm -f "$initCompleteFile"
+    initCompleteFiles+=("$initCompleteFile")

     startNode "$i" "$cmd $maybeExpectedGenesisHash"
     if $addLogs; then
       logs+=("$(getNodeLogFile "$i" "$cmd")")
ci/maybe-trigger-tests.sh (new executable file)
@@ -0,0 +1,21 @@
#!/usr/bin/env bash
set -e
cd "$(dirname "$0")/.."

annotate() {
  ${BUILDKITE:-false} && {
    buildkite-agent annotate "$@"
  }
}

# Skip if only the docs have been modified
ci/affects-files.sh \
  \!^docs/ \
|| {
  annotate --style info \
    "Skipping all further tests as only docs/ files were modified"
  exit 0
}

annotate --style info "Triggering tests"
buildkite-agent pipeline upload ci/buildkite-tests.yml
@@ -3,21 +3,22 @@ set -e

 cd "$(dirname "$0")/.."

-me=$(basename "$0")
 echo --- build docs
 (
   set -x
+  . ci/rust-version.sh stable
+  ci/docker-run.sh "$rust_stable_docker_image" docs/build.sh
 )

 echo --- update gitbook-cage
 if [[ -n $CI_BRANCH ]]; then
   (
     set -x
-    (
-      . ci/rust-version.sh stable
-      ci/docker-run.sh "$rust_stable_docker_image" make -C docs
-    )
     # make a local commit for the svgs and generated/updated markdown
-    set -x
     git add -f docs/src
     if ! git diff-index --quiet HEAD; then
       git config user.email maintainers@solana.com
-      git config user.name "$me"
+      git config user.name "$(basename "$0")"
       git commit -m "gitbook-cage update $(date -Is)"
       git push -f git@github.com:solana-labs/solana-gitbook-cage.git HEAD:refs/heads/"$CI_BRANCH"
       # pop off the local commit
@@ -71,7 +71,7 @@ echo --- Creating release tarball
 export CHANNEL

 source ci/rust-version.sh stable
-scripts/cargo-install-all.sh +"$rust_stable" --use-move solana-release
+scripts/cargo-install-all.sh +"$rust_stable" solana-release

 tar cvf solana-release-$TARGET.tar solana-release
 bzip2 solana-release-$TARGET.tar
@@ -95,9 +95,8 @@ fi
 source ci/upload-ci-artifact.sh

 for file in solana-release-$TARGET.tar.bz2 solana-release-$TARGET.yml solana-install-init-"$TARGET"* $MAYBE_TARBALLS; do
-  upload-ci-artifact "$file"
-
   if [[ -n $DO_NOT_PUBLISH_TAR ]]; then
+    upload-ci-artifact "$file"
     echo "Skipped $file due to DO_NOT_PUBLISH_TAR"
     continue
   fi
@@ -2,8 +2,10 @@
 set -e

 cd "$(dirname "$0")/.."
+# shellcheck source=multinode-demo/common.sh
+source multinode-demo/common.sh

-rm -f config/run/init-completed
+rm -rf config/run/init-completed config/ledger config/snapshot-ledger

 timeout 15 ./run.sh &
 pid=$!
@@ -17,6 +19,16 @@ while [[ ! -f config/run/init-completed ]]; do
   fi
 done

+snapshot_slot=1
+
+# wait a bit longer than snapshot_slot
+while [[ $($solana_cli --url http://localhost:8899 slot --commitment recent) -le $((snapshot_slot + 1)) ]]; do
+  sleep 1
+done
+curl -X POST -H 'Content-Type: application/json' -d '{"jsonrpc":"2.0","id":1, "method":"validatorExit"}' http://localhost:8899
+
 wait $pid
+
+$solana_ledger_tool create-snapshot --ledger config/ledger "$snapshot_slot" config/snapshot-ledger
+cp config/ledger/genesis.tar.bz2 config/snapshot-ledger
+$solana_ledger_tool verify --ledger config/snapshot-ledger
@@ -1,28 +1,30 @@
 #
 # This file maintains the rust versions for use by CI.
 #
-# Build with stable rust, updating the stable toolchain if necessary:
-#   $ source ci/rust-version.sh stable
-#   $ cargo +"$rust_stable" build
-#
-# Build with nightly rust, updating the nightly toolchain if necessary:
-#   $ source ci/rust-version.sh nightly
-#   $ cargo +"$rust_nightly" build
-#
 # Obtain the environment variables without any automatic toolchain updating:
 #   $ source ci/rust-version.sh
 #
+# Obtain the environment variables updating both stable and nightly, only stable, or
+# only nightly:
+#   $ source ci/rust-version.sh all
+#   $ source ci/rust-version.sh stable
+#   $ source ci/rust-version.sh nightly
+#
+# Then to build with either stable or nightly:
+#   $ cargo +"$rust_stable" build
+#   $ cargo +"$rust_nightly" build
+#

 if [[ -n $RUST_STABLE_VERSION ]]; then
   stable_version="$RUST_STABLE_VERSION"
 else
-  stable_version=1.42.0
+  stable_version=1.43.0
 fi

 if [[ -n $RUST_NIGHTLY_VERSION ]]; then
   nightly_version="$RUST_NIGHTLY_VERSION"
 else
-  nightly_version=2020-03-12
+  nightly_version=2020-04-23
 fi


@@ -51,6 +53,10 @@ export rust_nightly_docker_image=solanalabs/rust-nightly:"$nightly_version"
   nightly)
     rustup_install "$rust_nightly"
     ;;
+  all)
+    rustup_install "$rust_stable"
+    rustup_install "$rust_nightly"
+    ;;
   *)
     echo "Note: ignoring unknown argument: $1"
     ;;
@@ -27,5 +27,5 @@ Alternatively, you can source it from within a script:
     local PATCH=0
     local SPECIAL=""

-    semverParseInto "1.2.3" MAJOR MINOR PATCH SPECIAL
+    semverParseInto "3.2.1" MAJOR MINOR PATCH SPECIAL
-    semverParseInto "1.2.8" MAJOR MINOR PATCH SPECIAL
+    semverParseInto "3.2.1" MAJOR MINOR PATCH SPECIAL
@@ -2,7 +2,7 @@

 HERE="$(dirname "$0")"

-# shellcheck source=net/datacenter-node-install/utils.sh
+# shellcheck source=ci/setup-new-buildkite-agent/utils.sh
 source "$HERE"/utils.sh

 ensure_env || exit 1
net/datacenter-node-install/disable-nouveau.sh → ci/setup-new-buildkite-agent/disable-nouveau.sh (now executable)
@@ -2,7 +2,7 @@

 HERE="$(dirname "$0")"

-# shellcheck source=net/datacenter-node-install/utils.sh
+# shellcheck source=ci/setup-new-buildkite-agent/utils.sh
 source "$HERE"/utils.sh

 ensure_env || exit 1
ci/setup-new-buildkite-agent/enable-buildkite.sh (new executable file)
@@ -0,0 +1,4 @@
#!/usr/bin/env bash

sudo systemctl daemon-reload
sudo systemctl enable --now buildkite-agent
net/datacenter-node-install/set-hostname.sh → ci/setup-new-buildkite-agent/set-hostname.sh (now executable)
@@ -2,7 +2,7 @@

 HERE="$(dirname "$0")"

-# shellcheck source=net/datacenter-node-install/utils.sh
+# shellcheck source=ci/setup-new-buildkite-agent/utils.sh
 source "$HERE"/utils.sh

 ensure_env || exit 1
84
ci/setup-new-buildkite-agent/setup-buildkite.sh
Executable file
84
ci/setup-new-buildkite-agent/setup-buildkite.sh
Executable file
@@ -0,0 +1,84 @@
#!/usr/bin/env bash

HERE="$(dirname "$0")"

# shellcheck source=ci/setup-new-buildkite-agent/utils.sh
source "$HERE"/utils.sh

ensure_env || exit 1

set -e

# Install buildkite-agent
echo "deb https://apt.buildkite.com/buildkite-agent stable main" | tee /etc/apt/sources.list.d/buildkite-agent.list
apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 32A37959C2FA5C3C99EFBC32A79206696452D198
apt-get update
apt-get install -y buildkite-agent


# Configure the installation
echo "Go to https://buildkite.com/organizations/solana-labs/agents"
echo "Click Reveal Agent Token"
echo "Paste the Agent Token, then press Enter:"

read -r agent_token
sudo sed -i "s/xxx/$agent_token/g" /etc/buildkite-agent/buildkite-agent.cfg

cat > /etc/buildkite-agent/hooks/environment <<EOF
set -e

export BUILDKITE_GIT_CLEAN_FLAGS="-ffdqx"

# Hack for non-docker rust builds
export PATH='$PATH':~buildkite-agent/.cargo/bin

# Add path to snaps
source /etc/profile.d/apps-bin-path.sh

if [[ '$BUILDKITE_BRANCH' =~ pull/* ]]; then
  export BUILDKITE_REFSPEC="+'$BUILDKITE_BRANCH':refs/remotes/origin/'$BUILDKITE_BRANCH'"
fi
EOF

chown buildkite-agent:buildkite-agent /etc/buildkite-agent/hooks/environment

# Create SSH key
sudo -u buildkite-agent mkdir -p ~buildkite-agent/.ssh
sudo -u buildkite-agent ssh-keygen -t ecdsa -q -N "" -f ~buildkite-agent/.ssh/id_ecdsa

# Set buildkite-agent user's shell
sudo usermod --shell /bin/bash buildkite-agent

# Install Rust for buildkite-agent
curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs -o /tmp/rustup-init.sh
sudo -u buildkite-agent HOME=~buildkite-agent sh /tmp/rustup-init.sh -y

# Add to docker and sudoers group
addgroup buildkite-agent docker
addgroup buildkite-agent sudo

# Edit the systemd unit file to include LimitNOFILE
cat > /lib/systemd/system/buildkite-agent.service <<EOF
[Unit]
Description=Buildkite Agent
Documentation=https://buildkite.com/agent
After=syslog.target
After=network.target

[Service]
Type=simple
User=buildkite-agent
Environment=HOME=/var/lib/buildkite-agent
ExecStart=/usr/bin/buildkite-agent start
RestartSec=5
Restart=on-failure
RestartForceExitStatus=SIGPIPE
TimeoutStartSec=10
TimeoutStopSec=0
KillMode=process
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
DefaultInstance=1
EOF

net/datacenter-node-install/setup-cuda.sh → ci/setup-new-buildkite-agent/setup-cuda.sh
Normal file → Executable file
@@ -2,12 +2,13 @@

# https://developer.nvidia.com/cuda-toolkit-archive
VERSIONS=()
VERSIONS+=("https://developer.nvidia.com/compute/cuda/10.0/Prod/local_installers/cuda_10.0.130_410.48_linux")
VERSIONS+=("https://developer.nvidia.com/compute/cuda/10.1/Prod/local_installers/cuda_10.1.168_418.67_linux.run")
#VERSIONS+=("https://developer.nvidia.com/compute/cuda/10.0/Prod/local_installers/cuda_10.0.130_410.48_linux")
#VERSIONS+=("https://developer.nvidia.com/compute/cuda/10.1/Prod/local_installers/cuda_10.1.168_418.67_linux.run")
VERSIONS+=("http://developer.download.nvidia.com/compute/cuda/10.2/Prod/local_installers/cuda_10.2.89_440.33.01_linux.run")

HERE="$(dirname "$0")"

# shellcheck source=net/datacenter-node-install/utils.sh
# shellcheck source=ci/setup-new-buildkite-agent/utils.sh
source "$HERE"/utils.sh

ensure_env || exit 1
@@ -51,3 +52,14 @@ done

# Allow normal users to use CUDA profiler
echo 'options nvidia "NVreg_RestrictProfilingToAdminUsers=0"' > /etc/modprobe.d/nvidia-enable-user-profiling.conf

# setup persistence mode across reboots
TMPDIR="$(mktemp -d)"
if pushd "$TMPDIR"; then
  tar -xvf /usr/share/doc/NVIDIA_GLX-1.0/samples/nvidia-persistenced-init.tar.bz2
  ./nvidia-persistenced-init/install.sh systemd
  popd
  rm -rf "$TMPDIR"
fi

nvidia-smi -pm ENABLED

net/datacenter-node-install/setup-limits.sh → ci/setup-new-buildkite-agent/setup-limits.sh
Normal file → Executable file
@@ -2,7 +2,7 @@

HERE="$(dirname "$0")"

# shellcheck source=net/datacenter-node-install/utils.sh
# shellcheck source=ci/setup-new-buildkite-agent/utils.sh
source "$HERE"/utils.sh

ensure_env || exit 1

ci/setup-new-buildkite-agent/setup-new-machine.sh
Executable file
@@ -0,0 +1,49 @@
#!/usr/bin/env bash

HERE="$(dirname "$0")"
SOLANA_ROOT="$HERE"/../..

# shellcheck source=ci/setup-new-buildkite-agent/utils.sh
source "$HERE"/utils.sh

ensure_env || exit 1

set -ex

apt update
apt upgrade -y

cat >/etc/apt/apt.conf.d/99-solana <<'EOF'
// Set and persist extra caps on iftop binary
Dpkg::Post-Invoke { "which iftop 2>&1 >/dev/null && setcap cap_net_raw=eip $(which iftop) || true"; };
EOF

apt install -y build-essential pkg-config clang cmake sysstat linux-tools-common \
  linux-generic-hwe-18.04-edge linux-tools-generic-hwe-18.04-edge \
  iftop heaptrack jq ruby python3-venv gcc-multilib libudev-dev

gem install ejson ejson2env
mkdir -p /opt/ejson/keys

"$SOLANA_ROOT"/net/scripts/install-docker.sh
usermod -aG docker "$SETUP_USER"
"$SOLANA_ROOT"/net/scripts/install-certbot.sh
"$HERE"/setup-sudoers.sh
"$HERE"/setup-ssh.sh

"$HERE"/disable-nouveau.sh
"$HERE"/disable-networkd-wait.sh

"$SOLANA_ROOT"/net/scripts/install-earlyoom.sh
"$SOLANA_ROOT"/net/scripts/install-nodejs.sh
"$SOLANA_ROOT"/net/scripts/localtime.sh
"$SOLANA_ROOT"/net/scripts/install-redis.sh
"$SOLANA_ROOT"/net/scripts/install-rsync.sh
"$SOLANA_ROOT"/net/scripts/install-libssl-compatability.sh

"$HERE"/setup-procfs-knobs.sh
"$HERE"/setup-limits.sh

[[ -n $CUDA ]] && "$HERE"/setup-cuda.sh

exit 0

net/datacenter-node-install/setup-partner-node.sh → ci/setup-new-buildkite-agent/setup-partner-node.sh
Normal file → Executable file
@@ -2,12 +2,12 @@

HERE="$(dirname "$0")"

# shellcheck source=net/datacenter-node-install/utils.sh
# shellcheck source=ci/setup-new-buildkite-agent/utils.sh
source "$HERE"/utils.sh

ensure_env || exit 1

set -xe
set -ex

"$HERE"/disable-nouveau.sh
"$HERE"/disable-networkd-wait.sh

net/datacenter-node-install/setup-procfs-knobs.sh → ci/setup-new-buildkite-agent/setup-procfs-knobs.sh
Normal file → Executable file
@@ -2,7 +2,7 @@

HERE="$(dirname "$0")"

# shellcheck source=net/datacenter-node-install/utils.sh
# shellcheck source=ci/setup-new-buildkite-agent/utils.sh
source "$HERE"/utils.sh

ensure_env || exit 1

net/datacenter-node-install/setup-ssh.sh → ci/setup-new-buildkite-agent/setup-ssh.sh
Normal file → Executable file
@@ -2,7 +2,7 @@

HERE="$(dirname "$0")"

# shellcheck source=net/datacenter-node-install/utils.sh
# shellcheck source=ci/setup-new-buildkite-agent/utils.sh
source "$HERE"/utils.sh

ensure_env || exit 1

net/datacenter-node-install/setup-sudoers.sh → ci/setup-new-buildkite-agent/setup-sudoers.sh
Normal file → Executable file
@@ -2,7 +2,7 @@

HERE="$(dirname "$0")"

# shellcheck source=net/datacenter-node-install/utils.sh
# shellcheck source=ci/setup-new-buildkite-agent/utils.sh
source "$HERE"/utils.sh

ensure_env || exit 1

net/datacenter-node-install/utils.sh → ci/setup-new-buildkite-agent/utils.sh
Normal file → Executable file
@@ -25,7 +25,7 @@ source ci/_
source ci/upload-ci-artifact.sh

eval "$(ci/channel-info.sh)"
source ci/rust-version.sh nightly
source ci/rust-version.sh all

set -o pipefail
export RUST_BACKTRACE=1
@@ -6,27 +6,35 @@ cd "$(dirname "$0")/.."
source ci/_
source ci/rust-version.sh stable
source ci/rust-version.sh nightly
eval "$(ci/channel-info.sh)"

export RUST_BACKTRACE=1
export RUSTFLAGS="-D warnings"

# Look for failed mergify.io backports
_ git show HEAD --check --oneline

# Only force up-to-date lock files on edge
if [[ $CI_BASE_BRANCH = "$EDGE_CHANNEL" ]]; then
  if _ scripts/cargo-for-all-lock-files.sh +"$rust_nightly" check --locked --all-targets; then
    true
  else
    check_status=$?
    echo "Some Cargo.lock is outdated; please update them as well"
    echo "protip: you can use ./scripts/cargo-for-all-lock-files.sh update ..."
    exit "$check_status"
  fi
else
  echo "Note: cargo-for-all-lock-files.sh skipped because $CI_BASE_BRANCH != $EDGE_CHANNEL"
fi

_ cargo +"$rust_stable" fmt --all -- --check

# Clippy gets stuck for unknown reasons if sdk-c is included in the build, so check it separately.
# See https://github.com/solana-labs/solana/issues/5503
_ cargo +"$rust_stable" clippy --version
_ cargo +"$rust_stable" clippy --all --exclude solana-sdk-c -- --deny=warnings
_ cargo +"$rust_stable" clippy --manifest-path sdk-c/Cargo.toml -- --deny=warnings
_ cargo +"$rust_stable" clippy --workspace -- --deny=warnings

_ cargo +"$rust_stable" audit --version
_ cargo +"$rust_stable" audit --ignore RUSTSEC-2020-0002 --ignore RUSTSEC-2020-0006
_ ci/nits.sh
_ scripts/cargo-for-all-lock-files.sh +"$rust_stable" audit --ignore RUSTSEC-2020-0002 --ignore RUSTSEC-2020-0008
_ ci/order-crates-for-publishing.py
_ docs/build.sh
_ ci/check-ssh-keys.sh

{
  cd programs/bpf

ci/test-sanity.sh
Executable file
@@ -0,0 +1,22 @@
#!/usr/bin/env bash
set -e

cd "$(dirname "$0")/.."

source ci/_

(
  echo --- git diff --check
  set -x
  # Look for failed mergify.io backports by searching leftover conflict markers
  # Also check for any trailing whitespaces!
  git fetch origin "$CI_BASE_BRANCH"
  git diff "$(git merge-base HEAD "origin/$CI_BASE_BRANCH")..HEAD" --check --oneline
)

echo

_ ci/nits.sh
_ ci/check-ssh-keys.sh

echo --- ok
@@ -38,10 +38,15 @@ test -d target/release/bpf && find target/release/bpf -name '*.d' -delete
# Clear the BPF sysroot files, they are not automatically rebuilt
rm -rf target/xargo # Issue #3105

# Limit compiler jobs to reduce memory usage
# on machines with 2gb/thread of memory
NPROC=$(nproc)
NPROC=$((NPROC>14 ? 14 : NPROC))

echo "Executing $testName"
case $testName in
test-stable)
  _ cargo +"$rust_stable" test --all --exclude solana-local-cluster ${V:+--verbose} -- --nocapture
  _ cargo +"$rust_stable" test --jobs "$NPROC" --all --exclude solana-local-cluster ${V:+--verbose} -- --nocapture
  _ cargo +"$rust_stable" test --manifest-path bench-tps/Cargo.toml --features=move ${V:+--verbose} test_bench_tps_local_cluster_move -- --nocapture
  ;;
test-stable-perf)
@@ -86,7 +91,7 @@ test-stable-perf)
  fi

  _ cargo +"$rust_stable" build --bins ${V:+--verbose}
  _ cargo +"$rust_stable" test --package solana-chacha-cuda --package solana-perf --package solana-ledger --package solana-core --lib ${V:+--verbose} -- --nocapture
  _ cargo +"$rust_stable" test --package solana-perf --package solana-ledger --package solana-core --lib ${V:+--verbose} -- --nocapture
  ;;
test-move)
  ci/affects-files.sh \
@@ -1,429 +0,0 @@
#!/usr/bin/env bash
set -e

cd "$(dirname "$0")"/..
source ci/upload-ci-artifact.sh

zone=
bootstrapValidatorAddress=
bootstrapValidatorMachineType=
clientNodeCount=0
idleClients=false
additionalValidatorCount=10
publicNetwork=false
stopNetwork=false
reuseLedger=false
skipCreate=false
skipStart=false
externalNode=false
failOnValidatorBootupFailure=true
tarChannelOrTag=edge
delete=false
enableGpu=false
bootDiskType=""
blockstreamer=false
fetchLogs=true
maybeHashesPerTick=
maybeDisableAirdrops=
maybeInternalNodesStakeLamports=
maybeInternalNodesLamports=
maybeExternalPrimordialAccountsFile=
maybeSlotsPerEpoch=
maybeTargetLamportsPerSignature=
maybeSlotsPerEpoch=
maybeLetsEncrypt=
maybeValidatorAdditionalDiskSize=
maybeNoSnapshot=
maybeLimitLedgerSize=

usage() {
  exitcode=0
  if [[ -n "$1" ]]; then
    exitcode=1
    echo "Error: $*"
  fi
  cat <<EOF
usage: $0 -p network-name -C cloud -z zone1 [-z zone2] ... [-z zoneN] [options...]

Deploys a CD testnet

mandatory arguments:
  -p [network-name] - name of the network
  -C [cloud] - cloud provider to use (gce, ec2)
  -z [zone] - cloud provider zone to deploy the network into. Must specify at least one zone

options:
  -t edge|beta|stable|vX.Y.Z - Deploy the latest tarball release for the
      specified release channel (edge|beta|stable) or release tag
      (vX.Y.Z)
      (default: $tarChannelOrTag)
  -n [number] - Number of additional validators (default: $additionalValidatorCount)
  -c [number] - Number of client bencher nodes (default: $clientNodeCount)
  -u - Include a Blockstreamer (default: $blockstreamer)
  -P - Use public network IP addresses (default: $publicNetwork)
  -G - Enable GPU, and set count/type of GPUs to use (e.g n1-standard-16 --accelerator count=2,type=nvidia-tesla-v100)
  -g - Enable GPU (default: $enableGpu)
  -a [address] - Set the bootstrap validator's external IP address to this GCE address
  -d [disk-type] - Specify a boot disk type (default None) Use pd-ssd to get ssd on GCE.
  -D - Delete the network
  -r - Reuse existing node/ledger configuration from a
      previous |start| (ie, don't run ./multinode-demo/setup.sh).
  -x - External node. Default: false
  -e - Skip create. Assume the nodes have already been created
  -s - Skip start. Nodes will still be created or configured, but network software will not be started.
  -S - Stop network software without tearing down nodes.
  -f - Discard validator nodes that didn't bootup successfully
  --no-airdrop
      - If set, disables airdrops. Nodes must be funded in genesis config when airdrops are disabled.
  --internal-nodes-stake-lamports NUM_LAMPORTS
      - Amount to stake internal nodes.
  --internal-nodes-lamports NUM_LAMPORTS
      - Amount to fund internal nodes in genesis config
  --external-accounts-file FILE_PATH
      - Path to external Primordial Accounts file, if it exists.
  --hashes-per-tick NUM_HASHES|sleep|auto
      - Override the default --hashes-per-tick for the cluster
  --lamports NUM_LAMPORTS
      - Specify the number of lamports to mint (default 500000000000000000)
  --skip-deploy-update
      - If set, will skip software update deployment
  --skip-remote-log-retrieval
      - If set, will not fetch logs from remote nodes
  --letsencrypt [dns name]
      - Attempt to generate a TLS certificate using this DNS name
  --validator-additional-disk-size-gb [number]
      - Size of additional disk in GB for all validators
  --no-snapshot-fetch
      - If set, disables booting validators from a snapshot

Note: the SOLANA_METRICS_CONFIG environment variable is used to configure
metrics
EOF
  exit $exitcode
}

zone=()

shortArgs=()
while [[ -n $1 ]]; do
  if [[ ${1:0:2} = -- ]]; then
    if [[ $1 = --hashes-per-tick ]]; then
      maybeHashesPerTick="$1 $2"
      shift 2
    elif [[ $1 = --slots-per-epoch ]]; then
      maybeSlotsPerEpoch="$1 $2"
      shift 2
    elif [[ $1 = --target-lamports-per-signature ]]; then
      maybeTargetLamportsPerSignature="$1 $2"
      shift 2
    elif [[ $1 = --no-airdrop ]]; then
      maybeDisableAirdrops=$1
      shift 1
    elif [[ $1 = --internal-nodes-stake-lamports ]]; then
      maybeInternalNodesStakeLamports="$1 $2"
      shift 2
    elif [[ $1 = --internal-nodes-lamports ]]; then
      maybeInternalNodesLamports="$1 $2"
      shift 2
    elif [[ $1 = --external-accounts-file ]]; then
      maybeExternalPrimordialAccountsFile="$1 $2"
      shift 2
    elif [[ $1 = --skip-remote-log-retrieval ]]; then
      fetchLogs=false
      shift 1
    elif [[ $1 = --letsencrypt ]]; then
      maybeLetsEncrypt="$1 $2"
      shift 2
    elif [[ $1 = --validator-additional-disk-size-gb ]]; then
      maybeValidatorAdditionalDiskSize="$1 $2"
      shift 2
    elif [[ $1 == --machine-type* ]]; then # Bypass quoted long args for GPUs
      shortArgs+=("$1")
      shift
    elif [[ $1 = --no-snapshot-fetch ]]; then
      maybeNoSnapshot=$1
      shift 1
    elif [[ $1 = --limit-ledger-size ]]; then
      maybeLimitLedgerSize=$1
      shift 1
    elif [[ $1 = --idle-clients ]]; then
      idleClients=true
      shift 1
    else
      usage "Unknown long option: $1"
    fi
  else
    shortArgs+=("$1")
    shift
  fi
done

while getopts "h?p:Pn:c:t:gG:a:Dd:rusxz:p:C:Sfe" opt "${shortArgs[@]}"; do
  case $opt in
  h | \?)
    usage
    ;;
  p)
    netName=$OPTARG
    ;;
  C)
    cloudProvider=$OPTARG
    ;;
  z)
    zone+=("$OPTARG")
    ;;
  P)
    publicNetwork=true
    ;;
  n)
    additionalValidatorCount=$OPTARG
    ;;
  c)
    clientNodeCount=$OPTARG
    ;;
  t)
    case $OPTARG in
    edge|beta|stable|v*)
      tarChannelOrTag=$OPTARG
      ;;
    *)
      usage "Invalid release channel: $OPTARG"
      ;;
    esac
    ;;
  g)
    enableGpu=true
    ;;
  G)
    enableGpu=true
    bootstrapValidatorMachineType=$OPTARG
    ;;
  a)
    bootstrapValidatorAddress=$OPTARG
    ;;
  d)
    bootDiskType=$OPTARG
    ;;
  D)
    delete=true
    ;;
  r)
    reuseLedger=true
    ;;
  e)
    skipCreate=true
    ;;
  s)
    skipStart=true
    ;;
  x)
    externalNode=true
    ;;
  f)
    failOnValidatorBootupFailure=false
    ;;
  u)
    blockstreamer=true
    ;;
  S)
    stopNetwork=true
    ;;
  *)
    usage "Unknown option: $opt"
    ;;
  esac
done

[[ -n $netName ]] || usage
[[ -n $cloudProvider ]] || usage "Cloud provider not specified"
[[ -n ${zone[*]} ]] || usage "At least one zone must be specified"

shutdown() {
  exitcode=$?

  set +e
  if [[ -d net/log ]]; then
    mv net/log net/log-deploy
    for logfile in net/log-deploy/*; do
      if [[ -f $logfile ]]; then
        upload-ci-artifact "$logfile"
        tail "$logfile"
      fi
    done
  fi
  exit $exitcode
}
rm -rf net/{log,-deploy}
trap shutdown EXIT INT

set -x

# Fetch reusable testnet keypairs
if [[ ! -d net/keypairs ]]; then
  git clone git@github.com:solana-labs/testnet-keypairs.git net/keypairs
fi

# Build a string to pass zone opts to $cloudProvider.sh: "-z zone1 -z zone2 ..."
zone_args=()
for val in "${zone[@]}"; do
  zone_args+=("-z $val")
done

if $stopNetwork; then
  skipCreate=true
fi

if $delete; then
  skipCreate=false
fi

# Create the network
if ! $skipCreate; then
  echo "--- $cloudProvider.sh delete"
  # shellcheck disable=SC2068
  time net/"$cloudProvider".sh delete ${zone_args[@]} -p "$netName" ${externalNode:+-x}
  if $delete; then
    exit 0
  fi

  echo "--- $cloudProvider.sh create"
  create_args=(
    -p "$netName"
    -c "$clientNodeCount"
    -n "$additionalValidatorCount"
    --dedicated
    --self-destruct-hours 0
  )

  if [[ -n $bootstrapValidatorAddress ]]; then
    create_args+=(-a "$bootstrapValidatorAddress")
  fi

  # shellcheck disable=SC2206
  create_args+=(${zone_args[@]})

  if [[ -n $maybeLetsEncrypt ]]; then
    # shellcheck disable=SC2206 # Do not want to quote $maybeLetsEncrypt
    create_args+=($maybeLetsEncrypt)
  fi

  if $blockstreamer; then
    create_args+=(-u)
  fi

  if [[ -n $bootDiskType ]]; then
    create_args+=(-d "$bootDiskType")
  fi

  if $enableGpu; then
    if [[ -z $bootstrapValidatorMachineType ]]; then
      create_args+=(-g)
    else
      create_args+=(-G "$bootstrapValidatorMachineType")
    fi
  fi

  if $publicNetwork; then
    create_args+=(-P)
  fi

  if $externalNode; then
    create_args+=(-x)
  fi

  if ! $failOnValidatorBootupFailure; then
    create_args+=(-f)
  fi

  if [[ -n $maybeValidatorAdditionalDiskSize ]]; then
    # shellcheck disable=SC2206 # Do not want to quote
    create_args+=($maybeValidatorAdditionalDiskSize)
  fi

  time net/"$cloudProvider".sh create "${create_args[@]}"
else
  echo "--- $cloudProvider.sh config"
  config_args=(
    -p "$netName"
  )
  # shellcheck disable=SC2206
  config_args+=(${zone_args[@]})
  if $publicNetwork; then
    config_args+=(-P)
  fi

  if $externalNode; then
    config_args+=(-x)
  fi

  if ! $failOnValidatorBootupFailure; then
    config_args+=(-f)
  fi

  time net/"$cloudProvider".sh config "${config_args[@]}"
fi
net/init-metrics.sh -e

echo "+++ $cloudProvider.sh info"
net/"$cloudProvider".sh info

if $stopNetwork; then
  echo --- net.sh stop
  time net/net.sh stop
  exit 0
fi

ok=true
if ! $skipStart; then
  (
    if $skipCreate; then
      op=restart
    else
      op=start
    fi
    echo "--- net.sh $op"
    args=(
      "$op"
      -t "$tarChannelOrTag"
    )

    if ! $publicNetwork; then
      args+=(-o rejectExtraNodes)
    fi
    if [[ -n $NO_INSTALL_CHECK ]]; then
      args+=(-o noInstallCheck)
    fi
    if $reuseLedger; then
      args+=(-r)
    fi

    if ! $failOnValidatorBootupFailure; then
      args+=(-F)
    fi

    # shellcheck disable=SC2206 # Do not want to quote
    args+=(
      $maybeHashesPerTick
      $maybeDisableAirdrops
      $maybeInternalNodesStakeLamports
      $maybeInternalNodesLamports
      $maybeExternalPrimordialAccountsFile
      $maybeSlotsPerEpoch
      $maybeTargetLamportsPerSignature
      $maybeNoSnapshot
      $maybeLimitLedgerSize
    )

    if $idleClients; then
      args+=(-c "idle=$clientNodeCount=")
    fi

    time net/net.sh "${args[@]}"
  ) || ok=false

  if $fetchLogs; then
    net/net.sh logs
  fi
fi

$ok
@@ -1,401 +0,0 @@
#!/usr/bin/env bash
set -e

cd "$(dirname "$0")"/..

if [[ -z $BUILDKITE ]]; then
  echo BUILDKITE not defined
  exit 1
fi

if [[ -z $SOLANA_METRICS_PARTIAL_CONFIG ]]; then
  echo SOLANA_METRICS_PARTIAL_CONFIG not defined
  exit 1
fi

if [[ -z $TESTNET ]]; then
  TESTNET=$(buildkite-agent meta-data get "testnet" --default "")
fi

if [[ -z $TESTNET_OP ]]; then
  TESTNET_OP=$(buildkite-agent meta-data get "testnet-operation" --default "")
fi

if [[ -z $TESTNET || -z $TESTNET_OP ]]; then
  (
    cat <<EOF
steps:
  - block: "Manage Testnet"
    fields:
      - select: "Network"
        key: "testnet"
        options:
          - label: "testnet"
            value: "testnet"
          - label: "testnet-edge"
            value: "testnet-edge"
          - label: "testnet-beta"
            value: "testnet-beta"
      - select: "Operation"
        key: "testnet-operation"
        default: "sanity-or-restart"
        options:
          - label: "Create testnet and then start software. If the testnet already exists it will be deleted and re-created"
            value: "create-and-start"
          - label: "Create testnet, but do not start software. If the testnet already exists it will be deleted and re-created"
            value: "create"
          - label: "Start network software on an existing testnet. If software is already running it will be restarted"
            value: "start"
          - label: "Stop network software without deleting testnet nodes"
            value: "stop"
          - label: "Update the network software. Restart network software on failure"
            value: "update-or-restart"
          - label: "Sanity check. Restart network software on failure"
            value: "sanity-or-restart"
          - label: "Sanity check only"
            value: "sanity"
          - label: "Delete the testnet"
            value: "delete"
          - label: "Enable/unlock the testnet"
            value: "enable"
          - label: "Delete and then lock the testnet from further operation until it is re-enabled"
            value: "disable"
  - command: "ci/$(basename "$0")"
    agents:
      - "queue=$BUILDKITE_AGENT_META_DATA_QUEUE"
EOF
  ) | buildkite-agent pipeline upload
  exit 0
fi

ci/channel-info.sh
eval "$(ci/channel-info.sh)"


EC2_ZONES=(
  us-west-1a
  us-west-2a
  us-east-1a
  us-east-2a
  sa-east-1a
  eu-west-1a
  eu-west-2a
  eu-central-1a
  ap-northeast-2a
  ap-southeast-2a
  ap-south-1a
  ca-central-1a
)

# GCE zones with _lots_ of quota
GCE_ZONES=(
  us-west1-a
  us-central1-a
  us-east1-b
  europe-west4-a

  us-west1-b
  us-central1-b
  us-east1-c
  europe-west4-b

  us-west1-c
  us-east1-d
  europe-west4-c
)

# GCE zones with enough quota for one CPU-only validator
GCE_LOW_QUOTA_ZONES=(
  asia-east2-a
  asia-northeast1-b
  asia-northeast2-b
  asia-south1-c
  asia-southeast1-b
  australia-southeast1-b
  europe-north1-a
  europe-west2-b
  europe-west3-c
  europe-west6-a
  northamerica-northeast1-a
  southamerica-east1-b
)

case $TESTNET in
testnet-edge)
  CHANNEL_OR_TAG=edge
  CHANNEL_BRANCH=$EDGE_CHANNEL
  ;;
testnet-beta)
  CHANNEL_OR_TAG=beta
  CHANNEL_BRANCH=$BETA_CHANNEL
  ;;
testnet)
  CHANNEL_OR_TAG=$STABLE_CHANNEL_LATEST_TAG
  CHANNEL_BRANCH=$STABLE_CHANNEL
  export CLOUDSDK_CORE_PROJECT=testnet-solana-com
  ;;
*)
  echo "Error: Invalid TESTNET=$TESTNET"
  exit 1
  ;;
esac

EC2_ZONE_ARGS=()
for val in "${EC2_ZONES[@]}"; do
  EC2_ZONE_ARGS+=("-z $val")
done
GCE_ZONE_ARGS=()
for val in "${GCE_ZONES[@]}"; do
  GCE_ZONE_ARGS+=("-z $val")
done
GCE_LOW_QUOTA_ZONE_ARGS=()
for val in "${GCE_LOW_QUOTA_ZONES[@]}"; do
  GCE_LOW_QUOTA_ZONE_ARGS+=("-z $val")
done

if [[ -z $TESTNET_DB_HOST ]]; then
  TESTNET_DB_HOST="https://metrics.solana.com:8086"
fi

export SOLANA_METRICS_CONFIG="db=$TESTNET,host=$TESTNET_DB_HOST,$SOLANA_METRICS_PARTIAL_CONFIG"
echo "SOLANA_METRICS_CONFIG: $SOLANA_METRICS_CONFIG"
source scripts/configure-metrics.sh

if [[ -n $TESTNET_TAG ]]; then
  CHANNEL_OR_TAG=$TESTNET_TAG
else

  if [[ $CI_BRANCH != "$CHANNEL_BRANCH" ]]; then
    (
      cat <<EOF
steps:
  - trigger: "$BUILDKITE_PIPELINE_SLUG"
    async: true
    build:
      message: "$BUILDKITE_MESSAGE"
      branch: "$CHANNEL_BRANCH"
      env:
        TESTNET: "$TESTNET"
        TESTNET_OP: "$TESTNET_OP"
        TESTNET_DB_HOST: "$TESTNET_DB_HOST"
        GCE_NODE_COUNT: "$GCE_NODE_COUNT"
        GCE_LOW_QUOTA_NODE_COUNT: "$GCE_LOW_QUOTA_NODE_COUNT"
        RUST_LOG: "$RUST_LOG"
EOF
    ) | buildkite-agent pipeline upload
    exit 0
  fi
fi

maybe_deploy_software() {
  declare arg=$1
  declare ok=true
  (
    echo "--- net.sh restart"
    set -x
    time net/net.sh restart --skip-setup -t "$CHANNEL_OR_TAG" --skip-poh-verify "$arg"
  ) || ok=false
  if ! $ok; then
    net/net.sh logs
  fi
  $ok
}

sanity() {
  echo "--- sanity $TESTNET"
  case $TESTNET in
  testnet-edge)
    (
      set -x
      NO_INSTALL_CHECK=1 \
        ci/testnet-sanity.sh edge-devnet-solana-com gce -P us-west1-b
      maybe_deploy_software
    )
    ;;
  testnet-beta)
    (
      set -x
      NO_INSTALL_CHECK=1 \
        ci/testnet-sanity.sh beta-devnet-solana-com gce -P us-west1-b
      maybe_deploy_software --deploy-if-newer
    )
    ;;
  testnet)
    (
      set -x
      ci/testnet-sanity.sh devnet-solana-com gce -P us-west1-b
    )
    ;;
  *)
    echo "Error: Invalid TESTNET=$TESTNET"
    exit 1
    ;;
  esac
}

deploy() {
  declare maybeCreate=$1
  declare maybeStart=$2
  declare maybeStop=$3
  declare maybeDelete=$4

  echo "--- deploy \"$maybeCreate\" \"$maybeStart\" \"$maybeStop\" \"$maybeDelete\""

  # Create or recreate the nodes
  if [[ -z $maybeCreate ]]; then
    skipCreate=skip
  else
    skipCreate=""
  fi

  # Start or restart the network software on the nodes
  if [[ -z $maybeStart ]]; then
    skipStart=skip
  else
    skipStart=""
  fi

  case $TESTNET in
  testnet-edge)
    (
      set -x
      ci/testnet-deploy.sh -p edge-devnet-solana-com -C gce -z us-west1-b \
        -t "$CHANNEL_OR_TAG" -n 3 -c 0 -u -P \
        -a edge-devnet-solana-com --letsencrypt edge.devnet.solana.com \
        --limit-ledger-size \
        ${skipCreate:+-e} \
        ${skipStart:+-s} \
        ${maybeStop:+-S} \
        ${maybeDelete:+-D}
    )
    ;;
  testnet-beta)
    (
      set -x
      ci/testnet-deploy.sh -p beta-devnet-solana-com -C gce -z us-west1-b \
        -t "$CHANNEL_OR_TAG" -n 3 -c 0 -u -P \
        -a beta-devnet-solana-com --letsencrypt beta.devnet.solana.com \
        --limit-ledger-size \
        ${skipCreate:+-e} \
        ${skipStart:+-s} \
        ${maybeStop:+-S} \
        ${maybeDelete:+-D}
    )
    ;;
  testnet)
    (
      set -x
      ci/testnet-deploy.sh -p devnet-solana-com -C gce -z us-west1-b \
        -t "$CHANNEL_OR_TAG" -n 0 -c 0 -u -P \
        -a testnet-solana-com --letsencrypt devnet.solana.com \
        --limit-ledger-size \
        ${skipCreate:+-e} \
        ${skipStart:+-s} \
        ${maybeStop:+-S} \
        ${maybeDelete:+-D}
    )
    (
      echo "--- net.sh update"
      set -x
      time net/net.sh update -t "$CHANNEL_OR_TAG" --platform linux --platform osx #--platform windows
    )
    ;;
  *)
    echo "Error: Invalid TESTNET=$TESTNET"
    exit 1
    ;;
  esac
}

ENABLED_LOCKFILE="${HOME}/${TESTNET}.is_enabled"

create-and-start() {
  deploy create start
}
create() {
  deploy create
}
start() {
  deploy "" start
}
stop() {
  deploy "" ""
}
delete() {
  deploy "" "" "" delete
}
enable_testnet() {
  touch "${ENABLED_LOCKFILE}"
  echo "+++ $TESTNET now enabled"
}
disable_testnet() {
  rm -f "${ENABLED_LOCKFILE}"
  echo "+++ $TESTNET now disabled"
}
is_testnet_enabled() {
  if [[ ! -f ${ENABLED_LOCKFILE} ]]; then
    echo "+++ ${TESTNET} is currently disabled. Enable ${TESTNET} by running ci/testnet-manager.sh with \$TESTNET_OP=enable, then re-run with current settings."
    exit 0
  fi
}

case $TESTNET_OP in
enable)
  enable_testnet
  ;;
disable)
  disable_testnet
  delete
  ;;
create-and-start)
  is_testnet_enabled
  create-and-start
  ;;
create)
  is_testnet_enabled
  create
  ;;
start)
  is_testnet_enabled
  start
  ;;
stop)
  is_testnet_enabled
  stop
  ;;
sanity)
  is_testnet_enabled
  sanity
  ;;
delete)
  is_testnet_enabled
  delete
  ;;
update-or-restart)
  is_testnet_enabled
  if start; then
    echo Update successful
  else
    echo "+++ Update failed, restarting the network"
    $metricsWriteDatapoint "testnet-manager update-failure=1"
    create-and-start
  fi
  ;;
sanity-or-restart)
  is_testnet_enabled
  if sanity; then
    echo Pass
  else
    echo "+++ Sanity failed, updating the network"
    $metricsWriteDatapoint "testnet-manager sanity-failure=1"
    create-and-start
  fi
  ;;
*)
  echo "Error: Invalid TESTNET_OP=$TESTNET_OP"
  exit 1
  ;;
esac

echo --- fin
exit 0
@@ -1,78 +0,0 @@
#!/usr/bin/env bash
set -e

cd "$(dirname "$0")/.."
source ci/upload-ci-artifact.sh

usage() {
  exitcode=0
  if [[ -n "$1" ]]; then
    exitcode=1
    echo "Error: $*"
  fi
  cat <<EOF
usage: $0 [name] [cloud] [zone1] ... [zoneN]

Sanity check a testnet

  name - name of the network
  cloud - cloud provider to use (gce, ec2)
  zone1 .. zoneN - cloud provider zones to check

Note: the SOLANA_METRICS_CONFIG environment variable is used to configure
metrics
EOF
  exit $exitcode
}

netName=$1
cloudProvider=$2
[[ -n $netName ]] || usage ""
[[ -n $cloudProvider ]] || usage "Cloud provider not specified"
shift 2

maybePublicNetwork=
if [[ $1 = -P ]]; then
  maybePublicNetwork=-P
  shift
fi
[[ -n $1 ]] || usage "zone1 not specified"

shutdown() {
  exitcode=$?

  set +e
  if [[ -d net/log ]]; then
    mv net/log net/log-sanity
    for logfile in net/log-sanity/*; do
      if [[ -f $logfile ]]; then
        upload-ci-artifact "$logfile"
        tail "$logfile"
      fi
    done
  fi
  exit $exitcode
}
rm -rf net/{log,-sanity}
rm -f net/config/config
trap shutdown EXIT INT

set -x
for zone in "$@"; do
  echo "--- $cloudProvider config [$zone]"
  timeout 5m net/"$cloudProvider".sh config $maybePublicNetwork -n 1 -p "$netName" -z "$zone"
  net/init-metrics.sh -e
  echo "+++ $cloudProvider.sh info"
  net/"$cloudProvider".sh info
  echo "--- net.sh sanity [$cloudProvider:$zone]"
  ok=true
  timeout 5m net/net.sh sanity \
    ${REJECT_EXTRA_NODES:+-o rejectExtraNodes} \
    ${NO_INSTALL_CHECK:+-o noInstallCheck} \
    $zone || ok=false

  if ! $ok; then
    net/net.sh logs
  fi
  $ok
done
@@ -23,10 +23,14 @@ if [[ -z $CI_TAG ]]; then
  exit 1
fi

if [[ -z $CI_REPO_SLUG ]]; then
  echo Error: CI_REPO_SLUG not defined
  exit 1
fi
# Force CI_REPO_SLUG since sometimes
# BUILDKITE_TRIGGERED_FROM_BUILD_PIPELINE_SLUG is not set correctly, causing the
# artifact upload to fail
CI_REPO_SLUG=solana-labs/solana
#if [[ -z $CI_REPO_SLUG ]]; then
#  echo Error: CI_REPO_SLUG not defined
#  exit 1
#fi

releaseId=$( \
  curl -s "https://api.github.com/repos/$CI_REPO_SLUG/releases/tags/$CI_TAG" \
@@ -38,6 +42,7 @@ echo "Github release id for $CI_TAG is $releaseId"
for file in "$@"; do
  echo "--- Uploading $file to tag $CI_TAG of $CI_REPO_SLUG"
  curl \
    --verbose \
    --data-binary @"$file" \
    -H "Authorization: token $GITHUB_TOKEN" \
    -H "Content-Type: application/octet-stream" \
@@ -1,6 +1,6 @@
[package]
name = "solana-clap-utils"
version = "1.1.0"
version = "1.2.8"
description = "Solana utilities for the clap"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -11,8 +11,8 @@ edition = "2018"
[dependencies]
clap = "2.33.0"
rpassword = "4.0"
solana-remote-wallet = { path = "../remote-wallet", version = "1.1.0" }
solana-sdk = { path = "../sdk", version = "1.1.0" }
solana-remote-wallet = { path = "../remote-wallet", version = "1.2.8" }
solana-sdk = { path = "../sdk", version = "1.2.8" }
thiserror = "1.0.11"
tiny-bip39 = "0.7.0"
url = "2.1.0"
@@ -20,3 +20,6 @@ chrono = "0.4"

[lib]
name = "solana_clap_utils"

[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]

clap-utils/src/commitment.rs
Normal file
@@ -0,0 +1,22 @@
use crate::ArgConstant;
use clap::Arg;

pub const COMMITMENT_ARG: ArgConstant<'static> = ArgConstant {
    name: "commitment",
    long: "commitment",
    help: "Return information at the selected commitment level",
};

pub fn commitment_arg<'a, 'b>() -> Arg<'a, 'b> {
    commitment_arg_with_default("recent")
}

pub fn commitment_arg_with_default<'a, 'b>(default_value: &'static str) -> Arg<'a, 'b> {
    Arg::with_name(COMMITMENT_ARG.name)
        .long(COMMITMENT_ARG.long)
        .takes_value(true)
        .possible_values(&["recent", "single", "root", "max"])
        .default_value(default_value)
        .value_name("COMMITMENT_LEVEL")
        .help(COMMITMENT_ARG.help)
}
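
The new commitment_arg() helper registers a --commitment flag with a constrained value set. A minimal sketch of wiring it into a clap App; the "demo" binary name and the hard-coded argv are hypothetical, for illustration only:

    use clap::App;
    use solana_clap_utils::commitment::{commitment_arg, COMMITMENT_ARG};

    fn main() {
        // commitment_arg() defaults --commitment to "recent" and restricts
        // values to recent|single|root|max, per the definition above.
        let matches = App::new("demo")
            .arg(commitment_arg())
            .get_matches_from(vec!["demo", "--commitment", "max"]);
        assert_eq!(matches.value_of(COMMITMENT_ARG.name), Some("max"));
    }
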
@@ -7,6 +7,7 @@ use clap::ArgMatches;
use solana_remote_wallet::remote_wallet::RemoteWalletManager;
use solana_sdk::{
    clock::UnixTimestamp,
    commitment_config::CommitmentConfig,
    native_token::sol_to_lamports,
    pubkey::Pubkey,
    signature::{read_keypair_file, Keypair, Signature, Signer},
@@ -62,6 +63,21 @@ pub fn keypair_of(matches: &ArgMatches<'_>, name: &str) -> Option<Keypair> {
    }
}

pub fn keypairs_of(matches: &ArgMatches<'_>, name: &str) -> Option<Vec<Keypair>> {
    matches.values_of(name).map(|values| {
        values
            .filter_map(|value| {
                if value == ASK_KEYWORD {
                    let skip_validation = matches.is_present(SKIP_SEED_PHRASE_VALIDATION_ARG.name);
                    keypair_from_seed_phrase(name, skip_validation, true).ok()
                } else {
                    read_keypair_file(value).ok()
                }
            })
            .collect()
    })
}

// Return a pubkey for an argument that can itself be parsed into a pubkey,
// or is a filename that can be read as a keypair
pub fn pubkey_of(matches: &ArgMatches<'_>, name: &str) -> Option<Pubkey> {
@@ -101,7 +117,7 @@ pub fn pubkeys_sigs_of(matches: &ArgMatches<'_>, name: &str) -> Option<Vec<(Pubk
pub fn signer_of(
    matches: &ArgMatches<'_>,
    name: &str,
    wallet_manager: Option<&Arc<RemoteWalletManager>>,
    wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
) -> Result<(Option<Box<dyn Signer>>, Option<Pubkey>), Box<dyn std::error::Error>> {
    if let Some(location) = matches.value_of(name) {
        let signer = signer_from_path(matches, location, name, wallet_manager)?;
@@ -115,7 +131,7 @@ pub fn signer_of(
pub fn pubkey_of_signer(
    matches: &ArgMatches<'_>,
    name: &str,
    wallet_manager: Option<&Arc<RemoteWalletManager>>,
    wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
) -> Result<Option<Pubkey>, Box<dyn std::error::Error>> {
    if let Some(location) = matches.value_of(name) {
        Ok(Some(pubkey_from_path(
@@ -132,7 +148,7 @@ pub fn pubkey_of_signer(
pub fn pubkeys_of_multiple_signers(
    matches: &ArgMatches<'_>,
    name: &str,
    wallet_manager: Option<&Arc<RemoteWalletManager>>,
    wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
) -> Result<Option<Vec<Pubkey>>, Box<dyn std::error::Error>> {
    if let Some(pubkey_matches) = matches.values_of(name) {
        let mut pubkeys: Vec<Pubkey> = vec![];
@@ -148,7 +164,7 @@ pub fn pubkeys_of_multiple_signers(
pub fn resolve_signer(
    matches: &ArgMatches<'_>,
    name: &str,
    wallet_manager: Option<&Arc<RemoteWalletManager>>,
    wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
) -> Result<Option<String>, Box<dyn std::error::Error>> {
    Ok(resolve_signer_from_path(
        matches,
@@ -162,6 +178,16 @@ pub fn lamports_of_sol(matches: &ArgMatches<'_>, name: &str) -> Option<u64> {
    value_of(matches, name).map(sol_to_lamports)
}

pub fn commitment_of(matches: &ArgMatches<'_>, name: &str) -> Option<CommitmentConfig> {
    matches.value_of(name).map(|value| match value {
        "max" => CommitmentConfig::max(),
        "recent" => CommitmentConfig::recent(),
        "root" => CommitmentConfig::root(),
        "single" => CommitmentConfig::single(),
        _ => CommitmentConfig::default(),
    })
}

#[cfg(test)]
mod tests {
    use super::*;
@@ -324,16 +350,16 @@ mod tests {
        let matches = app()
            .clone()
            .get_matches_from(vec!["test", "--single", "50"]);
        assert_eq!(lamports_of_sol(&matches, "single"), Some(50000000000));
        assert_eq!(lamports_of_sol(&matches, "single"), Some(50_000_000_000));
        assert_eq!(lamports_of_sol(&matches, "multiple"), None);
        let matches = app()
            .clone()
            .get_matches_from(vec!["test", "--single", "1.5"]);
        assert_eq!(lamports_of_sol(&matches, "single"), Some(1500000000));
        assert_eq!(lamports_of_sol(&matches, "single"), Some(1_500_000_000));
        assert_eq!(lamports_of_sol(&matches, "multiple"), None);
        let matches = app()
            .clone()
            .get_matches_from(vec!["test", "--single", "0.03"]);
        assert_eq!(lamports_of_sol(&matches, "single"), Some(30000000));
        assert_eq!(lamports_of_sol(&matches, "single"), Some(30_000_000));
    }
}
@@ -6,50 +6,86 @@ use solana_sdk::{
    pubkey::Pubkey,
    signature::{read_keypair_file, Signature},
};
use std::fmt::Display;
use std::str::FromStr;

fn is_parsable_generic<U, T>(string: T) -> Result<(), String>
where
    T: AsRef<str> + Display,
    U: FromStr,
    U::Err: Display,
{
    string
        .as_ref()
        .parse::<U>()
        .map(|_| ())
        .map_err(|err| format!("error parsing '{}': {}", string, err))
}

// Return an error if string cannot be parsed as type T.
// Takes a String to avoid second type parameter when used as a clap validator
pub fn is_parsable<T>(string: String) -> Result<(), String>
where
    T: FromStr,
    T::Err: Display,
{
    is_parsable_generic::<T, String>(string)
}

// Return an error if a pubkey cannot be parsed.
pub fn is_pubkey(string: String) -> Result<(), String> {
    match string.parse::<Pubkey>() {
        Ok(_) => Ok(()),
        Err(err) => Err(format!("{}", err)),
    }
pub fn is_pubkey<T>(string: T) -> Result<(), String>
where
    T: AsRef<str> + Display,
{
    is_parsable_generic::<Pubkey, _>(string)
}

// Return an error if a hash cannot be parsed.
pub fn is_hash(string: String) -> Result<(), String> {
    match string.parse::<Hash>() {
        Ok(_) => Ok(()),
        Err(err) => Err(format!("{}", err)),
    }
pub fn is_hash<T>(string: T) -> Result<(), String>
where
    T: AsRef<str> + Display,
{
    is_parsable_generic::<Hash, _>(string)
}

// Return an error if a keypair file cannot be parsed.
pub fn is_keypair(string: String) -> Result<(), String> {
    read_keypair_file(&string)
pub fn is_keypair<T>(string: T) -> Result<(), String>
where
    T: AsRef<str> + Display,
{
    read_keypair_file(string.as_ref())
        .map(|_| ())
        .map_err(|err| format!("{}", err))
}

// Return an error if a keypair file cannot be parsed
pub fn is_keypair_or_ask_keyword(string: String) -> Result<(), String> {
    if string.as_str() == ASK_KEYWORD {
pub fn is_keypair_or_ask_keyword<T>(string: T) -> Result<(), String>
where
    T: AsRef<str> + Display,
{
    if string.as_ref() == ASK_KEYWORD {
        return Ok(());
    }
    read_keypair_file(&string)
    read_keypair_file(string.as_ref())
        .map(|_| ())
        .map_err(|err| format!("{}", err))
}

// Return an error if string cannot be parsed as pubkey string or keypair file location
pub fn is_pubkey_or_keypair(string: String) -> Result<(), String> {
    is_pubkey(string.clone()).or_else(|_| is_keypair(string))
pub fn is_pubkey_or_keypair<T>(string: T) -> Result<(), String>
where
    T: AsRef<str> + Display,
{
    is_pubkey(string.as_ref()).or_else(|_| is_keypair(string))
}

// Return an error if string cannot be parsed as a pubkey string, or a valid Signer that can
// produce a pubkey()
pub fn is_valid_pubkey(string: String) -> Result<(), String> {
    match parse_keypair_path(&string) {
pub fn is_valid_pubkey<T>(string: T) -> Result<(), String>
where
    T: AsRef<str> + Display,
{
    match parse_keypair_path(string.as_ref()) {
        KeypairUrl::Filepath(path) => is_keypair(path),
        _ => Ok(()),
    }
@@ -63,13 +99,19 @@ pub fn is_valid_pubkey(string: String) -> Result<(), String> {
// when paired with an offline `--signer` argument to provide a Presigner (pubkey + signature).
// Clap validators can't check multiple fields at once, so the verification that a `--signer` is
// also provided and correct happens in parsing, not in validation.
pub fn is_valid_signer(string: String) -> Result<(), String> {
pub fn is_valid_signer<T>(string: T) -> Result<(), String>
where
    T: AsRef<str> + Display,
{
    is_valid_pubkey(string)
}

// Return an error if string cannot be parsed as pubkey=signature string
pub fn is_pubkey_sig(string: String) -> Result<(), String> {
    let mut signer = string.split('=');
pub fn is_pubkey_sig<T>(string: T) -> Result<(), String>
where
    T: AsRef<str> + Display,
{
    let mut signer = string.as_ref().split('=');
    match Pubkey::from_str(
        signer
            .next()
@@ -90,8 +132,11 @@ pub fn is_pubkey_sig(string: String) -> Result<(), String> {
}

// Return an error if a url cannot be parsed.
pub fn is_url(string: String) -> Result<(), String> {
    match url::Url::parse(&string) {
pub fn is_url<T>(string: T) -> Result<(), String>
where
    T: AsRef<str> + Display,
{
    match url::Url::parse(string.as_ref()) {
        Ok(url) => {
            if url.has_host() {
                Ok(())
@@ -103,20 +148,26 @@ pub fn is_url(string: String) -> Result<(), String> {
    }
}

pub fn is_slot(slot: String) -> Result<(), String> {
    slot.parse::<Slot>()
        .map(|_| ())
        .map_err(|e| format!("{}", e))
pub fn is_slot<T>(slot: T) -> Result<(), String>
where
    T: AsRef<str> + Display,
{
    is_parsable_generic::<Slot, _>(slot)
}

pub fn is_port(port: String) -> Result<(), String> {
    port.parse::<u16>()
        .map(|_| ())
        .map_err(|e| format!("{}", e))
pub fn is_port<T>(port: T) -> Result<(), String>
where
    T: AsRef<str> + Display,
{
    is_parsable_generic::<u16, _>(port)
}

pub fn is_valid_percentage(percentage: String) -> Result<(), String> {
pub fn is_valid_percentage<T>(percentage: T) -> Result<(), String>
where
    T: AsRef<str> + Display,
{
    percentage
        .as_ref()
        .parse::<u8>()
        .map_err(|e| {
            format!(
@@ -136,8 +187,11 @@ pub fn is_valid_percentage(percentage: String) -> Result<(), String> {
        })
}

pub fn is_amount(amount: String) -> Result<(), String> {
    if amount.parse::<u64>().is_ok() || amount.parse::<f64>().is_ok() {
pub fn is_amount<T>(amount: T) -> Result<(), String>
where
    T: AsRef<str> + Display,
{
    if amount.as_ref().parse::<u64>().is_ok() || amount.as_ref().parse::<f64>().is_ok() {
        Ok(())
    } else {
        Err(format!(
@@ -147,14 +201,37 @@ pub fn is_amount(amount: String) -> Result<(), String> {
    }
}

pub fn is_rfc3339_datetime(value: String) -> Result<(), String> {
    DateTime::parse_from_rfc3339(&value)
pub fn is_amount_or_all<T>(amount: T) -> Result<(), String>
where
    T: AsRef<str> + Display,
{
    if amount.as_ref().parse::<u64>().is_ok()
        || amount.as_ref().parse::<f64>().is_ok()
        || amount.as_ref() == "ALL"
    {
        Ok(())
    } else {
        Err(format!(
            "Unable to parse input amount as integer or float, provided: {}",
            amount
        ))
    }
}

pub fn is_rfc3339_datetime<T>(value: T) -> Result<(), String>
where
    T: AsRef<str> + Display,
{
    DateTime::parse_from_rfc3339(value.as_ref())
        .map(|_| ())
        .map_err(|e| format!("{}", e))
}

pub fn is_derivation(value: String) -> Result<(), String> {
    let value = value.replace("'", "");
pub fn is_derivation<T>(value: T) -> Result<(), String>
where
    T: AsRef<str> + Display,
{
    let value = value.as_ref().replace("'", "");
    let mut parts = value.split('/');
    let account = parts.next().unwrap();
    account
@@ -186,14 +263,14 @@ mod tests {

    #[test]
    fn test_is_derivation() {
        assert_eq!(is_derivation("2".to_string()), Ok(()));
        assert_eq!(is_derivation("0".to_string()), Ok(()));
        assert_eq!(is_derivation("65537".to_string()), Ok(()));
        assert_eq!(is_derivation("0/2".to_string()), Ok(()));
        assert_eq!(is_derivation("0'/2'".to_string()), Ok(()));
        assert!(is_derivation("a".to_string()).is_err());
        assert!(is_derivation("4294967296".to_string()).is_err());
        assert!(is_derivation("a/b".to_string()).is_err());
        assert!(is_derivation("0/4294967296".to_string()).is_err());
        assert_eq!(is_derivation("2"), Ok(()));
        assert_eq!(is_derivation("0"), Ok(()));
        assert_eq!(is_derivation("65537"), Ok(()));
        assert_eq!(is_derivation("0/2"), Ok(()));
        assert_eq!(is_derivation("0'/2'"), Ok(()));
        assert!(is_derivation("a").is_err());
        assert!(is_derivation("4294967296").is_err());
        assert!(is_derivation("a/b").is_err());
        assert!(is_derivation("0/4294967296").is_err());
    }
}
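
The refactor above makes every validator generic over T: AsRef<str> + Display, so the same function can be handed to clap (which passes validators a String) and also be called directly with a &str, as test_is_derivation now does. A sketch under that assumption; the "demo" binary and its --amount flag are hypothetical:

    use clap::{App, Arg};
    use solana_clap_utils::input_validators::is_amount_or_all;

    fn main() {
        // As a clap validator, T is inferred as String...
        let matches = App::new("demo")
            .arg(
                Arg::with_name("amount")
                    .long("amount")
                    .takes_value(true)
                    .validator(is_amount_or_all),
            )
            .get_matches_from(vec!["demo", "--amount", "ALL"]);
        assert_eq!(matches.value_of("amount"), Some("ALL"));

        // ...while a direct call can borrow a &str, with no .to_string() noise.
        assert!(is_amount_or_all("1.5").is_ok());
        assert!(is_amount_or_all("every").is_err());
    }
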
@@ -8,7 +8,7 @@ use clap::ArgMatches;
use rpassword::prompt_password_stderr;
use solana_remote_wallet::{
    remote_keypair::generate_remote_keypair,
    remote_wallet::{RemoteWalletError, RemoteWalletManager},
    remote_wallet::{maybe_wallet_manager, RemoteWalletError, RemoteWalletManager},
};
use solana_sdk::{
    pubkey::Pubkey,
@@ -64,7 +64,7 @@ pub fn signer_from_path(
    matches: &ArgMatches,
    path: &str,
    keypair_name: &str,
    wallet_manager: Option<&Arc<RemoteWalletManager>>,
    wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
) -> Result<Box<dyn Signer>, Box<dyn error::Error>> {
    match parse_keypair_path(path) {
        KeypairUrl::Ask => {
@@ -88,6 +88,9 @@ pub fn signer_from_path(
            Ok(Box::new(read_keypair(&mut stdin)?))
        }
        KeypairUrl::Usb(path) => {
            if wallet_manager.is_none() {
                *wallet_manager = maybe_wallet_manager()?;
            }
            if let Some(wallet_manager) = wallet_manager {
                Ok(Box::new(generate_remote_keypair(
                    path,
@@ -122,7 +125,7 @@ pub fn pubkey_from_path(
    matches: &ArgMatches,
    path: &str,
    keypair_name: &str,
    wallet_manager: Option<&Arc<RemoteWalletManager>>,
    wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
) -> Result<Pubkey, Box<dyn error::Error>> {
    match parse_keypair_path(path) {
        KeypairUrl::Pubkey(pubkey) => Ok(pubkey),
@@ -134,7 +137,7 @@ pub fn resolve_signer_from_path(
    matches: &ArgMatches,
    path: &str,
    keypair_name: &str,
    wallet_manager: Option<&Arc<RemoteWalletManager>>,
    wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
) -> Result<Option<String>, Box<dyn error::Error>> {
    match parse_keypair_path(path) {
        KeypairUrl::Ask => {
@@ -158,6 +161,9 @@ pub fn resolve_signer_from_path(
            read_keypair(&mut stdin).map(|_| None)
        }
        KeypairUrl::Usb(path) => {
            if wallet_manager.is_none() {
                *wallet_manager = maybe_wallet_manager()?;
            }
            if let Some(wallet_manager) = wallet_manager {
                let path = generate_remote_keypair(
                    path,
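
With wallet_manager now a &mut Option<Arc<RemoteWalletManager>>, callers start with None and the helpers above populate it via maybe_wallet_manager() only when a usb:// path forces them to. A sketch of the calling convention; the --keypair flag and the file path are hypothetical, and a nonexistent path will simply surface an error:

    use clap::{App, Arg};
    use solana_clap_utils::input_parsers::signer_of;
    use solana_remote_wallet::remote_wallet::RemoteWalletManager;
    use std::sync::Arc;

    fn main() -> Result<(), Box<dyn std::error::Error>> {
        let matches = App::new("demo")
            .arg(Arg::with_name("keypair").long("keypair").takes_value(true))
            .get_matches_from(vec!["demo", "--keypair", "/path/to/id.json"]);

        // No device enumeration happens here; for plain file paths the
        // wallet manager is never initialized at all.
        let mut wallet_manager: Option<Arc<RemoteWalletManager>> = None;
        let (_signer, pubkey) = signer_of(&matches, "keypair", &mut wallet_manager)?;
        println!("pubkey: {:?}", pubkey);
        Ok(())
    }
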
@@ -1,24 +1,5 @@
use thiserror::Error;

#[macro_export]
macro_rules! version {
    () => {
        &*format!(
            "{}{}",
            env!("CARGO_PKG_VERSION"),
            if option_env!("CI_TAG").unwrap_or("").is_empty() {
                format!(
                    " [channel={} commit={}]",
                    option_env!("CHANNEL").unwrap_or("unknown"),
                    option_env!("CI_COMMIT").unwrap_or("unknown"),
                )
            } else {
                "".to_string()
            },
        )
    };
}

pub struct ArgConstant<'a> {
    pub long: &'a str,
    pub name: &'a str,
@@ -42,6 +23,7 @@ impl std::fmt::Debug for DisplayError {
    }
}

pub mod commitment;
pub mod input_parsers;
pub mod input_validators;
pub mod keypair;
@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
 edition = "2018"
 name = "solana-cli-config"
 description = "Blockchain, Rebuilt for Scale"
-version = "1.1.0"
+version = "1.2.8"
 repository = "https://github.com/solana-labs/solana"
 license = "Apache-2.0"
 homepage = "https://solana.com/"
@@ -11,7 +11,10 @@ homepage = "https://solana.com/"
 [dependencies]
 dirs = "2.0.2"
 lazy_static = "1.4.0"
-serde = "1.0.105"
+serde = "1.0.110"
 serde_derive = "1.0.103"
-serde_yaml = "0.8.11"
+serde_yaml = "0.8.12"
 url = "2.1.1"
+
+[package.metadata.docs.rs]
+targets = ["x86_64-unknown-linux-gnu"]
@@ -1,6 +1,6 @@
 // Wallet settings that can be configured for long-term use
 use serde_derive::{Deserialize, Serialize};
-use std::io;
+use std::{collections::HashMap, io};
 use url::Url;

 lazy_static! {
@@ -17,6 +17,8 @@ pub struct Config {
     pub json_rpc_url: String,
     pub websocket_url: String,
     pub keypair_path: String,
+    #[serde(default)]
+    pub address_labels: HashMap<String, String>,
 }

 impl Default for Config {
@@ -26,7 +28,7 @@ impl Default for Config {
             keypair_path.extend(&[".config", "solana", "id.json"]);
             keypair_path.to_str().unwrap().to_string()
         };
-        let json_rpc_url = "http://127.0.0.1:8899".to_string();
+        let json_rpc_url = "https://api.mainnet-beta.solana.com".to_string();

         // Empty websocket_url string indicates the client should
         // `Config::compute_websocket_url(&json_rpc_url)`
@@ -36,6 +38,7 @@ impl Default for Config {
             json_rpc_url,
             websocket_url,
             keypair_path,
+            address_labels: HashMap::new(),
         }
     }
 }
@@ -60,17 +63,38 @@ impl Config {
         ws_url
             .set_scheme(if is_secure { "wss" } else { "ws" })
             .expect("unable to set scheme");
-        let ws_port = match json_rpc_url.port() {
-            Some(port) => port + 1,
-            None => {
-                if is_secure {
-                    8901
-                } else {
-                    8900
-                }
-            }
-        };
-        ws_url.set_port(Some(ws_port)).expect("unable to set port");
+        if let Some(port) = json_rpc_url.port() {
+            ws_url.set_port(Some(port + 1)).expect("unable to set port");
+        }
         ws_url.to_string()
     }
 }
+
+#[cfg(test)]
+mod test {
+    use super::*;
+
+    #[test]
+    fn compute_websocket_url() {
+        assert_eq!(
+            Config::compute_websocket_url(&"http://devnet.solana.com"),
+            "ws://devnet.solana.com/".to_string()
+        );
+
+        assert_eq!(
+            Config::compute_websocket_url(&"https://devnet.solana.com"),
+            "wss://devnet.solana.com/".to_string()
+        );
+
+        assert_eq!(
+            Config::compute_websocket_url(&"http://example.com:8899"),
+            "ws://example.com:8900/".to_string()
+        );
+        assert_eq!(
+            Config::compute_websocket_url(&"https://example.com:1234"),
+            "wss://example.com:1235/".to_string()
+        );
+
+        assert_eq!(Config::compute_websocket_url(&"garbage"), String::new());
+    }
+}
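The rewritten `compute_websocket_url` keeps the scheme upgrade (`http` to `ws`, `https` to `wss`) but drops the old 8900/8901 fallbacks: the websocket port is now derived only when the RPC URL names a port explicitly. A self-contained sketch with the `url` crate that reproduces the test expectations above; the parse guard around the input is an assumption, since the hunk does not show those lines:

```rust
use url::Url;

// Sketch of the new behavior: swap the scheme, and bump the port by one
// only when the RPC URL carries an explicit port.
fn compute_websocket_url(json_rpc_url: &str) -> String {
    let json_rpc_url = match json_rpc_url.parse::<Url>() {
        Ok(url) => url,
        Err(_) => return String::new(), // e.g. "garbage" in the tests
    };
    let is_secure = json_rpc_url.scheme().to_ascii_lowercase() == "https";
    let mut ws_url = json_rpc_url.clone();
    ws_url
        .set_scheme(if is_secure { "wss" } else { "ws" })
        .expect("unable to set scheme"); // http->ws and https->wss are both allowed
    if let Some(port) = json_rpc_url.port() {
        ws_url.set_port(Some(port + 1)).expect("unable to set port");
    }
    ws_url.to_string()
}

fn main() {
    assert_eq!(
        compute_websocket_url("https://example.com:1234"),
        "wss://example.com:1235/".to_string()
    );
    // No explicit port: no port in the derived websocket URL either.
    assert_eq!(
        compute_websocket_url("http://devnet.solana.com"),
        "ws://devnet.solana.com/".to_string()
    );
}
```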
@@ -3,53 +3,57 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
 edition = "2018"
 name = "solana-cli"
 description = "Blockchain, Rebuilt for Scale"
-version = "1.1.0"
+version = "1.2.8"
 repository = "https://github.com/solana-labs/solana"
 license = "Apache-2.0"
 homepage = "https://solana.com/"

 [dependencies]
 bincode = "1.2.1"
-bs58 = "0.3.0"
+bs58 = "0.3.1"
 chrono = { version = "0.4.11", features = ["serde"] }
-clap = "2.33.0"
+clap = "2.33.1"
 criterion-stats = "0.3.0"
 ctrlc = { version = "3.1.4", features = ["termination"] }
-console = "0.10.0"
+console = "0.10.1"
 dirs = "2.0.2"
 log = "0.4.8"
 Inflector = "0.11.4"
 indicatif = "0.14.0"
 humantime = "2.0.0"
 num-traits = "0.2"
 pretty-hex = "0.1.1"
 reqwest = { version = "0.10.4", default-features = false, features = ["blocking", "rustls-tls", "json"] }
-serde = "1.0.105"
+serde = "1.0.110"
 serde_derive = "1.0.103"
-serde_json = "1.0.48"
-solana-budget-program = { path = "../programs/budget", version = "1.1.0" }
-solana-clap-utils = { path = "../clap-utils", version = "1.1.0" }
-solana-cli-config = { path = "../cli-config", version = "1.1.0" }
-solana-client = { path = "../client", version = "1.1.0" }
-solana-config-program = { path = "../programs/config", version = "1.1.0" }
-solana-faucet = { path = "../faucet", version = "1.1.0" }
-solana-logger = { path = "../logger", version = "1.1.0" }
-solana-net-utils = { path = "../net-utils", version = "1.1.0" }
-solana-remote-wallet = { path = "../remote-wallet", version = "1.1.0" }
-solana-runtime = { path = "../runtime", version = "1.1.0" }
-solana-sdk = { path = "../sdk", version = "1.1.0" }
-solana-stake-program = { path = "../programs/stake", version = "1.1.0" }
-solana-storage-program = { path = "../programs/storage", version = "1.1.0" }
-solana-vote-program = { path = "../programs/vote", version = "1.1.0" }
-solana-vote-signer = { path = "../vote-signer", version = "1.1.0" }
-titlecase = "1.1.0"
-thiserror = "1.0.13"
+serde_json = "1.0.53"
+solana-budget-program = { path = "../programs/budget", version = "1.2.8" }
+solana-clap-utils = { path = "../clap-utils", version = "1.2.8" }
+solana-cli-config = { path = "../cli-config", version = "1.2.8" }
+solana-client = { path = "../client", version = "1.2.8" }
+solana-config-program = { path = "../programs/config", version = "1.2.8" }
+solana-faucet = { path = "../faucet", version = "1.2.8" }
+solana-logger = { path = "../logger", version = "1.2.8" }
+solana-net-utils = { path = "../net-utils", version = "1.2.8" }
+solana-remote-wallet = { path = "../remote-wallet", version = "1.2.8" }
+solana-runtime = { path = "../runtime", version = "1.2.8" }
+solana-sdk = { path = "../sdk", version = "1.2.8" }
+solana-stake-program = { path = "../programs/stake", version = "1.2.8" }
+solana-transaction-status = { path = "../transaction-status", version = "1.2.8" }
+solana-version = { path = "../version", version = "1.2.8" }
+solana-vote-program = { path = "../programs/vote", version = "1.2.8" }
+solana-vote-signer = { path = "../vote-signer", version = "1.2.8" }
+thiserror = "1.0.19"
 url = "2.1.1"

 [dev-dependencies]
-solana-core = { path = "../core", version = "1.1.0" }
-solana-budget-program = { path = "../programs/budget", version = "1.1.0" }
+solana-core = { path = "../core", version = "1.2.8" }
+solana-budget-program = { path = "../programs/budget", version = "1.2.8" }
 tempfile = "3.1.0"

 [[bin]]
 name = "solana"
 path = "src/main.rs"
+
+[package.metadata.docs.rs]
+targets = ["x86_64-unknown-linux-gnu"]
cli/src/checks.rs (new file, 206 lines)
@@ -0,0 +1,206 @@
+use crate::cli::CliError;
+use solana_client::{
+    client_error::{ClientError, Result as ClientResult},
+    rpc_client::RpcClient,
+};
+use solana_sdk::{
+    fee_calculator::FeeCalculator, message::Message, native_token::lamports_to_sol, pubkey::Pubkey,
+};
+
+pub fn check_account_for_fee(
+    rpc_client: &RpcClient,
+    account_pubkey: &Pubkey,
+    fee_calculator: &FeeCalculator,
+    message: &Message,
+) -> Result<(), CliError> {
+    check_account_for_multiple_fees(rpc_client, account_pubkey, fee_calculator, &[message])
+}
+
+pub fn check_account_for_multiple_fees(
+    rpc_client: &RpcClient,
+    account_pubkey: &Pubkey,
+    fee_calculator: &FeeCalculator,
+    messages: &[&Message],
+) -> Result<(), CliError> {
+    let fee = calculate_fee(fee_calculator, messages);
+    if !check_account_for_balance(rpc_client, account_pubkey, fee)
+        .map_err(Into::<ClientError>::into)?
+    {
+        return Err(CliError::InsufficientFundsForFee(lamports_to_sol(fee)));
+    }
+    Ok(())
+}
+
+pub fn calculate_fee(fee_calculator: &FeeCalculator, messages: &[&Message]) -> u64 {
+    messages
+        .iter()
+        .map(|message| fee_calculator.calculate_fee(message))
+        .sum()
+}
+
+pub fn check_account_for_balance(
+    rpc_client: &RpcClient,
+    account_pubkey: &Pubkey,
+    balance: u64,
+) -> ClientResult<bool> {
+    let lamports = rpc_client.get_balance(account_pubkey)?;
+    if lamports != 0 && lamports >= balance {
+        return Ok(true);
+    }
+    Ok(false)
+}
+
+pub fn check_unique_pubkeys(
+    pubkey0: (&Pubkey, String),
+    pubkey1: (&Pubkey, String),
+) -> Result<(), CliError> {
+    if pubkey0.0 == pubkey1.0 {
+        Err(CliError::BadParameter(format!(
+            "Identical pubkeys found: `{}` and `{}` must be unique",
+            pubkey0.1, pubkey1.1
+        )))
+    } else {
+        Ok(())
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use serde_json::json;
+    use solana_client::{
+        rpc_request::RpcRequest,
+        rpc_response::{Response, RpcResponseContext},
+    };
+    use solana_sdk::system_instruction;
+    use std::collections::HashMap;
+
+    #[test]
+    fn test_check_account_for_fees() {
+        let account_balance = 1;
+        let account_balance_response = json!(Response {
+            context: RpcResponseContext { slot: 1 },
+            value: json!(account_balance),
+        });
+        let pubkey = Pubkey::new_rand();
+        let fee_calculator = FeeCalculator::new(1);
+
+        let pubkey0 = Pubkey::new(&[0; 32]);
+        let pubkey1 = Pubkey::new(&[1; 32]);
+        let ix0 = system_instruction::transfer(&pubkey0, &pubkey1, 1);
+        let message0 = Message::new(&[ix0], Some(&pubkey0));
+
+        let ix0 = system_instruction::transfer(&pubkey0, &pubkey1, 1);
+        let ix1 = system_instruction::transfer(&pubkey1, &pubkey0, 1);
+        let message1 = Message::new(&[ix0, ix1], Some(&pubkey0));
+
+        let mut mocks = HashMap::new();
+        mocks.insert(RpcRequest::GetBalance, account_balance_response.clone());
+        let rpc_client = RpcClient::new_mock_with_mocks("".to_string(), mocks);
+        check_account_for_fee(&rpc_client, &pubkey, &fee_calculator, &message0)
+            .expect("unexpected result");
+
+        let mut mocks = HashMap::new();
+        mocks.insert(RpcRequest::GetBalance, account_balance_response.clone());
+        let rpc_client = RpcClient::new_mock_with_mocks("".to_string(), mocks);
+        assert!(check_account_for_fee(&rpc_client, &pubkey, &fee_calculator, &message1).is_err());
+
+        let mut mocks = HashMap::new();
+        mocks.insert(RpcRequest::GetBalance, account_balance_response);
+        let rpc_client = RpcClient::new_mock_with_mocks("".to_string(), mocks);
+        assert!(check_account_for_multiple_fees(
+            &rpc_client,
+            &pubkey,
+            &fee_calculator,
+            &[&message0, &message0]
+        )
+        .is_err());
+
+        let account_balance = 2;
+        let account_balance_response = json!(Response {
+            context: RpcResponseContext { slot: 1 },
+            value: json!(account_balance),
+        });
+
+        let mut mocks = HashMap::new();
+        mocks.insert(RpcRequest::GetBalance, account_balance_response);
+        let rpc_client = RpcClient::new_mock_with_mocks("".to_string(), mocks);
+
+        check_account_for_multiple_fees(
+            &rpc_client,
+            &pubkey,
+            &fee_calculator,
+            &[&message0, &message0],
+        )
+        .expect("unexpected result");
+    }
+
+    #[test]
+    fn test_check_account_for_balance() {
+        let account_balance = 50;
+        let account_balance_response = json!(Response {
+            context: RpcResponseContext { slot: 1 },
+            value: json!(account_balance),
+        });
+        let pubkey = Pubkey::new_rand();
+
+        let mut mocks = HashMap::new();
+        mocks.insert(RpcRequest::GetBalance, account_balance_response);
+        let rpc_client = RpcClient::new_mock_with_mocks("".to_string(), mocks);
+
+        assert_eq!(
+            check_account_for_balance(&rpc_client, &pubkey, 1).unwrap(),
+            true
+        );
+        assert_eq!(
+            check_account_for_balance(&rpc_client, &pubkey, account_balance).unwrap(),
+            true
+        );
+        assert_eq!(
+            check_account_for_balance(&rpc_client, &pubkey, account_balance + 1).unwrap(),
+            false
+        );
+    }
+
+    #[test]
+    fn test_calculate_fee() {
+        let fee_calculator = FeeCalculator::new(1);
+        // No messages, no fee.
+        assert_eq!(calculate_fee(&fee_calculator, &[]), 0);
+
+        // No signatures, no fee.
+        let message = Message::default();
+        assert_eq!(calculate_fee(&fee_calculator, &[&message, &message]), 0);
+
+        // One message w/ one signature, a fee.
+        let pubkey0 = Pubkey::new(&[0; 32]);
+        let pubkey1 = Pubkey::new(&[1; 32]);
+        let ix0 = system_instruction::transfer(&pubkey0, &pubkey1, 1);
+        let message0 = Message::new(&[ix0], Some(&pubkey0));
+        assert_eq!(calculate_fee(&fee_calculator, &[&message0]), 1);
+
+        // Two messages, additive fees.
+        let ix0 = system_instruction::transfer(&pubkey0, &pubkey1, 1);
+        let ix1 = system_instruction::transfer(&pubkey1, &pubkey0, 1);
+        let message1 = Message::new(&[ix0, ix1], Some(&pubkey0));
+        assert_eq!(calculate_fee(&fee_calculator, &[&message0, &message1]), 3);
+    }
+
+    #[test]
+    fn test_check_unique_pubkeys() {
+        let pubkey0 = Pubkey::new_rand();
+        let pubkey_clone = pubkey0;
+        let pubkey1 = Pubkey::new_rand();
+
+        check_unique_pubkeys((&pubkey0, "foo".to_string()), (&pubkey1, "bar".to_string()))
+            .expect("unexpected result");
+        check_unique_pubkeys((&pubkey0, "foo".to_string()), (&pubkey1, "foo".to_string()))
+            .expect("unexpected result");
+
+        assert!(check_unique_pubkeys(
+            (&pubkey0, "foo".to_string()),
+            (&pubkey_clone, "bar".to_string())
+        )
+        .is_err());
+    }
+}
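The fee logic in this new file is additive: the required balance is the sum of `fee_calculator.calculate_fee(message)` over all messages, and the balance check passes only for a nonzero balance at least that large. A hedged arithmetic sketch of that rule, with a flat per-signature fee standing in for `FeeCalculator` (the stand-in types are assumptions, not the SDK API):

```rust
// Stand-in for FeeCalculator: a flat price per required signature.
struct FlatFee {
    lamports_per_signature: u64,
}

impl FlatFee {
    fn calculate_fee(&self, num_required_signatures: u64) -> u64 {
        self.lamports_per_signature * num_required_signatures
    }
}

// Mirrors calculate_fee() above: sum the fee of every message.
fn total_fee(fee: &FlatFee, signature_counts: &[u64]) -> u64 {
    signature_counts.iter().map(|n| fee.calculate_fee(*n)).sum()
}

fn main() {
    let fee = FlatFee { lamports_per_signature: 1 };
    // One single-signature message plus one two-signature message:
    // 1 + 2 = 3, matching test_calculate_fee above.
    assert_eq!(total_fee(&fee, &[1, 2]), 3);

    // An account holding 1 lamport covers a 1-lamport fee but not 3,
    // which is why test_check_account_for_fees expects an error there.
    let balance = 1u64;
    assert!(balance != 0 && balance >= total_fee(&fee, &[1]));
    assert!(!(balance != 0 && balance >= total_fee(&fee, &[1, 2])));
}
```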
cli/src/cli.rs (1247 lines changed)
File diff suppressed because it is too large
cli/src/cli_output.rs (new file, 1088 lines)
File diff suppressed because it is too large
@@ -1,6 +1,12 @@
 use crate::cli::SettingType;
 use console::style;
-use solana_sdk::hash::Hash;
+use indicatif::{ProgressBar, ProgressStyle};
+use solana_sdk::{
+    hash::Hash, native_token::lamports_to_sol, program_utils::limited_deserialize,
+    transaction::Transaction,
+};
+use solana_transaction_status::RpcTransactionStatusMeta;
 use std::{fmt, io};

 // Pretty print a "name value"
 pub fn println_name_value(name: &str, value: &str) {
@@ -12,6 +18,15 @@ pub fn println_name_value(name: &str, value: &str) {
     println!("{} {}", style(name).bold(), styled_value);
 }

+pub fn writeln_name_value(f: &mut fmt::Formatter, name: &str, value: &str) -> fmt::Result {
+    let styled_value = if value == "" {
+        style("(not set)").italic()
+    } else {
+        style(value)
+    };
+    writeln!(f, "{} {}", style(name).bold(), styled_value)
+}
+
 pub fn println_name_value_or(name: &str, value: &str, setting_type: SettingType) {
     let description = match setting_type {
         SettingType::Explicit => "",
@@ -49,3 +64,149 @@ pub fn println_signers(
     }
     println!();
 }
+
+pub fn write_transaction<W: io::Write>(
+    w: &mut W,
+    transaction: &Transaction,
+    transaction_status: &Option<RpcTransactionStatusMeta>,
+    prefix: &str,
+) -> io::Result<()> {
+    let message = &transaction.message;
+    writeln!(
+        w,
+        "{}Recent Blockhash: {:?}",
+        prefix, message.recent_blockhash
+    )?;
+    for (signature_index, signature) in transaction.signatures.iter().enumerate() {
+        writeln!(
+            w,
+            "{}Signature {}: {:?}",
+            prefix, signature_index, signature
+        )?;
+    }
+    writeln!(w, "{}{:?}", prefix, message.header)?;
+    for (account_index, account) in message.account_keys.iter().enumerate() {
+        writeln!(w, "{}Account {}: {:?}", prefix, account_index, account)?;
+    }
+    for (instruction_index, instruction) in message.instructions.iter().enumerate() {
+        let program_pubkey = message.account_keys[instruction.program_id_index as usize];
+        writeln!(w, "{}Instruction {}", prefix, instruction_index)?;
+        writeln!(
+            w,
+            "{}  Program: {} ({})",
+            prefix, program_pubkey, instruction.program_id_index
+        )?;
+        for (account_index, account) in instruction.accounts.iter().enumerate() {
+            let account_pubkey = message.account_keys[*account as usize];
+            writeln!(
+                w,
+                "{}  Account {}: {} ({})",
+                prefix, account_index, account_pubkey, account
+            )?;
+        }
+
+        let mut raw = true;
+        if program_pubkey == solana_vote_program::id() {
+            if let Ok(vote_instruction) = limited_deserialize::<
+                solana_vote_program::vote_instruction::VoteInstruction,
+            >(&instruction.data)
+            {
+                writeln!(w, "{}  {:?}", prefix, vote_instruction)?;
+                raw = false;
+            }
+        } else if program_pubkey == solana_stake_program::id() {
+            if let Ok(stake_instruction) = limited_deserialize::<
+                solana_stake_program::stake_instruction::StakeInstruction,
+            >(&instruction.data)
+            {
+                writeln!(w, "{}  {:?}", prefix, stake_instruction)?;
+                raw = false;
+            }
+        } else if program_pubkey == solana_sdk::system_program::id() {
+            if let Ok(system_instruction) = limited_deserialize::<
+                solana_sdk::system_instruction::SystemInstruction,
+            >(&instruction.data)
+            {
+                writeln!(w, "{}  {:?}", prefix, system_instruction)?;
+                raw = false;
+            }
+        }
+
+        if raw {
+            writeln!(w, "{}  Data: {:?}", prefix, instruction.data)?;
+        }
+    }
+
+    if let Some(transaction_status) = transaction_status {
+        writeln!(
+            w,
+            "{}Status: {}",
+            prefix,
+            match &transaction_status.status {
+                Ok(_) => "Ok".into(),
+                Err(err) => err.to_string(),
+            }
+        )?;
+        writeln!(
+            w,
+            "{}  Fee: {} SOL",
+            prefix,
+            lamports_to_sol(transaction_status.fee)
+        )?;
+        assert_eq!(
+            transaction_status.pre_balances.len(),
+            transaction_status.post_balances.len()
+        );
+        for (i, (pre, post)) in transaction_status
+            .pre_balances
+            .iter()
+            .zip(transaction_status.post_balances.iter())
+            .enumerate()
+        {
+            if pre == post {
+                writeln!(
+                    w,
+                    "{}  Account {} balance: {} SOL",
+                    prefix,
+                    i,
+                    lamports_to_sol(*pre)
+                )?;
+            } else {
+                writeln!(
+                    w,
+                    "{}  Account {} balance: {} SOL -> {} SOL",
+                    prefix,
+                    i,
+                    lamports_to_sol(*pre),
+                    lamports_to_sol(*post)
+                )?;
+            }
+        }
+    } else {
+        writeln!(w, "{}Status: Unavailable", prefix)?;
+    }
+
+    Ok(())
+}
+
+pub fn println_transaction(
+    transaction: &Transaction,
+    transaction_status: &Option<RpcTransactionStatusMeta>,
+    prefix: &str,
+) {
+    let mut w = Vec::new();
+    if write_transaction(&mut w, transaction, transaction_status, prefix).is_ok() {
+        if let Ok(s) = String::from_utf8(w) {
+            print!("{}", s);
+        }
+    }
+}
+
+/// Creates a new process bar for processing that will take an unknown amount of time
+pub fn new_spinner_progress_bar() -> ProgressBar {
+    let progress_bar = ProgressBar::new(42);
+    progress_bar
+        .set_style(ProgressStyle::default_spinner().template("{spinner:.green} {wide_msg}"));
+    progress_bar.enable_steady_tick(100);
+    progress_bar
+}
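`println_transaction` renders through `write_transaction` into an in-memory buffer and prints only if the whole render succeeds and forms valid UTF-8, so a partially written report never reaches stdout. The pattern in isolation, with a toy report function standing in for `write_transaction`:

```rust
use std::io::{self, Write};

// Anything written against io::Write works for files, sockets, and
// in-memory buffers alike; this stands in for write_transaction.
fn write_report<W: Write>(w: &mut W, prefix: &str) -> io::Result<()> {
    writeln!(w, "{}Status: Ok", prefix)?;
    writeln!(w, "{}  Fee: 0.000005 SOL", prefix)?;
    Ok(())
}

fn print_report(prefix: &str) {
    let mut w = Vec::new(); // Vec<u8> implements io::Write
    if write_report(&mut w, prefix).is_ok() {
        if let Ok(s) = String::from_utf8(w) {
            print!("{}", s); // emit the whole report at once
        }
    }
}

fn main() {
    print_report("  ");
}
```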
@@ -1,9 +1,35 @@
+macro_rules! ACCOUNT_STRING {
+    () => {
+        r#", one of:
+* a base58-encoded public key
+* a path to a keypair file
+* a hyphen; signals a JSON-encoded keypair on stdin
+* the 'ASK' keyword; to recover a keypair via its seed phrase
+* a hardware wallet keypair URL (i.e. usb://ledger)"#
+    };
+}
+
+#[macro_use]
+macro_rules! pubkey {
+    ($arg:expr, $help:expr) => {
+        $arg.takes_value(true)
+            .validator(is_valid_pubkey)
+            .help(concat!($help, ACCOUNT_STRING!()))
+    };
+}
+
 #[macro_use]
 extern crate serde_derive;

+pub mod checks;
 pub mod cli;
+pub mod cli_output;
 pub mod cluster_query;
 pub mod display;
 pub mod nonce;
 pub mod offline;
+pub mod spend_utils;
 pub mod stake;
 pub mod storage;
+pub mod test_utils;
 pub mod validator_info;
 pub mod vote;
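`pubkey!` appends the shared `ACCOUNT_STRING!` help text at compile time; `concat!` only accepts literals, which is why the suffix is a macro expanding to a string literal rather than a `const`. A stripped-down sketch of the same composition trick (names and help text here are illustrative, not the repo's):

```rust
// concat! needs string literals, so the shared suffix is itself a macro
// that expands to one (the ACCOUNT_STRING! trick from the diff above).
macro_rules! account_string {
    () => {
        ", one of: a base58-encoded public key or a path to a keypair file"
    };
}

// Composes a full help string at compile time, like pubkey! does.
macro_rules! with_account_help {
    ($help:expr) => {
        concat!($help, account_string!())
    };
}

fn main() {
    println!("{}", with_account_help!("Stake account to delegate"));
}
```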
@@ -2,15 +2,15 @@ use clap::{crate_description, crate_name, AppSettings, Arg, ArgGroup, ArgMatches
 use console::style;

 use solana_clap_utils::{
-    input_validators::is_url, keypair::SKIP_SEED_PHRASE_VALIDATION_ARG, offline::SIGN_ONLY_ARG,
-    DisplayError,
+    input_validators::is_url, keypair::SKIP_SEED_PHRASE_VALIDATION_ARG, DisplayError,
 };
 use solana_cli::{
     cli::{app, parse_command, process_command, CliCommandInfo, CliConfig, CliSigners},
+    cli_output::OutputFormat,
     display::{println_name_value, println_name_value_or},
 };
 use solana_cli_config::{Config, CONFIG_FILE};
-use solana_remote_wallet::remote_wallet::{maybe_wallet_manager, RemoteWalletManager};
+use solana_remote_wallet::remote_wallet::RemoteWalletManager;
 use std::{error, sync::Arc};

 fn parse_settings(matches: &ArgMatches<'_>) -> Result<bool, Box<dyn error::Error>> {
@@ -102,7 +102,7 @@ fn parse_settings(matches: &ArgMatches<'_>) -> Result<bool, Box<dyn error::Error

 pub fn parse_args<'a>(
     matches: &ArgMatches<'_>,
-    wallet_manager: Option<Arc<RemoteWalletManager>>,
+    mut wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
 ) -> Result<(CliConfig<'a>, CliSigners), Box<dyn error::Error>> {
     let config = if let Some(config_file) = matches.value_of("config_file") {
         Config::load(config_file).unwrap_or_default()
@@ -125,7 +125,16 @@ pub fn parse_args<'a>(
     );

     let CliCommandInfo { command, signers } =
-        parse_command(&matches, &default_signer_path, wallet_manager.as_ref())?;
+        parse_command(&matches, &default_signer_path, &mut wallet_manager)?;

+    let output_format = matches
+        .value_of("output_format")
+        .map(|value| match value {
+            "json" => OutputFormat::Json,
+            "json-compact" => OutputFormat::JsonCompact,
+            _ => unreachable!(),
+        })
+        .unwrap_or(OutputFormat::Display);
+
     Ok((
         CliConfig {
@@ -136,6 +145,7 @@ pub fn parse_args<'a>(
             keypair_path: default_signer_path,
             rpc_client: None,
             verbose: matches.is_present("verbose"),
+            output_format,
         },
         signers,
     ))
@@ -146,7 +156,7 @@ fn main() -> Result<(), Box<dyn error::Error>> {
     let matches = app(
         crate_name!(),
         crate_description!(),
-        solana_clap_utils::version!(),
+        solana_version::version!(),
     )
     .arg({
         let arg = Arg::with_name("config_file")
@@ -197,6 +207,15 @@ fn main() -> Result<(), Box<dyn error::Error>> {
             .global(true)
             .help("Show additional information"),
     )
+    .arg(
+        Arg::with_name("output_format")
+            .long("output")
+            .value_name("FORMAT")
+            .global(true)
+            .takes_value(true)
+            .possible_values(&["json", "json-compact"])
+            .help("Return information in specified output format"),
+    )
     .arg(
         Arg::with_name(SKIP_SEED_PHRASE_VALIDATION_ARG.name)
             .long(SKIP_SEED_PHRASE_VALIDATION_ARG.long)
@@ -238,18 +257,12 @@ fn main() -> Result<(), Box<dyn error::Error>> {

 fn do_main(matches: &ArgMatches<'_>) -> Result<(), Box<dyn error::Error>> {
     if parse_settings(&matches)? {
-        let wallet_manager = maybe_wallet_manager()?;
+        let mut wallet_manager = None;

-        let (mut config, signers) = parse_args(&matches, wallet_manager)?;
+        let (mut config, signers) = parse_args(&matches, &mut wallet_manager)?;
         config.signers = signers.iter().map(|s| s.as_ref()).collect();
         let result = process_command(&config)?;
-        let (_, submatches) = matches.subcommand();
-        let sign_only = submatches
-            .map(|m| m.is_present(SIGN_ONLY_ARG.name))
-            .unwrap_or(false);
-        if !sign_only {
-            println!("{}", result);
-        }
+        println!("{}", result);
     };
     Ok(())
 }
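The new `--output` flag pairs `possible_values` with an `unreachable!()` match arm: clap rejects any value other than `json` or `json-compact` before `parse_args` runs, so the fallthrough arm can never fire. A minimal clap 2.x sketch of that pairing (flag and enum names copied from the diff; the surrounding app is invented for the example):

```rust
use clap::{App, Arg};

#[derive(Debug)]
enum OutputFormat {
    Display,
    Json,
    JsonCompact,
}

fn main() {
    let matches = App::new("demo")
        .arg(
            Arg::with_name("output_format")
                .long("output")
                .value_name("FORMAT")
                .takes_value(true)
                .possible_values(&["json", "json-compact"])
                .help("Return information in specified output format"),
        )
        .get_matches();

    // possible_values() above guarantees this match is exhaustive,
    // which is what makes the unreachable!() arm safe.
    let output_format = matches
        .value_of("output_format")
        .map(|value| match value {
            "json" => OutputFormat::Json,
            "json-compact" => OutputFormat::JsonCompact,
            _ => unreachable!(),
        })
        .unwrap_or(OutputFormat::Display);

    println!("{:?}", output_format);
}
```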
Some files were not shown because too many files have changed in this diff.