Compare commits
1111 Commits
SHA1 | Author | Date | |
---|---|---|---|
dfa27b04d7 | |||
880b04906e | |||
1fe0b1e516 | |||
f9fd4bd24c | |||
c55a11d160 | |||
92118de0e1 | |||
0d9802a2cd | |||
f6beede01b | |||
ff48ea20de | |||
dd9cb18d65 | |||
71932aed0a | |||
24dc6680e1 | |||
61d9d40e48 | |||
e9b40db319 | |||
316356861d | |||
e07c00710a | |||
bc47c80610 | |||
14baa511f0 | |||
e773faeb24 | |||
42847516a2 | |||
47e9a1ae4f | |||
549a154394 | |||
dca00d1bde | |||
45ce1b4f96 | |||
a9232c0633 | |||
3da254c745 | |||
9ba3ee9683 | |||
b0addba2a9 | |||
bb59525ff8 | |||
acd25124d4 | |||
d718ab2491 | |||
1860aacd1f | |||
d4bbb7f516 | |||
d1c0f4b4f1 | |||
b72b837ba2 | |||
fde85c96c0 | |||
121418dad2 | |||
f44f94fe23 | |||
55a4481022 | |||
e859ad37a8 | |||
1a28c7fc12 | |||
c706a07764 | |||
59568e5776 | |||
33ca8fa72a | |||
4bb66a81fb | |||
468c14b14f | |||
03e505897a | |||
5205eb382e | |||
b07b6e56fa | |||
bcc890e705 | |||
07d14f6f07 | |||
03b213e296 | |||
1bfce24c9f | |||
94b2565969 | |||
2896fdb603 | |||
50970bc8f9 | |||
10df45b173 | |||
d3b8129593 | |||
f7fb5aebac | |||
9311a6e356 | |||
8c706892df | |||
7f2b11756c | |||
f324547600 | |||
36e8977f1d | |||
b88db2689e | |||
1584ec220c | |||
fb366a7236 | |||
b903158543 | |||
9dad9c6333 | |||
a6658b9d75 | |||
a97feedcc1 | |||
8021bce41f | |||
d8fa19336c | |||
191483cf9f | |||
1eb8314d42 | |||
88eeb817e4 | |||
b777126bd2 | |||
89d78dcfcf | |||
1cf142c193 | |||
3e29325410 | |||
4dc98c3dbd | |||
9caad645e2 | |||
6cb76ac326 | |||
0001e5c0a1 | |||
ab32d13da1 | |||
cefe46e981 | |||
f4d70e78b6 | |||
d130adf582 | |||
1e6285e64e | |||
e3c90c3807 | |||
85750307aa | |||
0ee4a5e799 | |||
55cb9cf681 | |||
d3af7e0653 | |||
729a24d557 | |||
55b92c16da | |||
835bacce4f | |||
ccb7b1a698 | |||
85dbdeb4c3 | |||
397f9f11c5 | |||
a11986ad1d | |||
a4d373f0af | |||
52eea215ce | |||
6f48aafd3a | |||
2d94c09aee | |||
9699b61679 | |||
8865bfbd59 | |||
5f80c1d37d | |||
f616f5dec6 | |||
db1003b5f8 | |||
f52ff777b7 | |||
4314a29953 | |||
e560fff840 | |||
5ac747ea7d | |||
f522dc1e18 | |||
486812bf54 | |||
7df8f76df1 | |||
bbe4990e80 | |||
a5baaf790d | |||
0a36ed1b8c | |||
b7ad240375 | |||
2cc71f2d55 | |||
3125c74681 | |||
d5b1dee8d6 | |||
4b33a2a1b8 | |||
58e6a5c281 | |||
7eb61074ab | |||
9b2edbaa9b | |||
e8659b45c7 | |||
a9553cb401 | |||
800c409698 | |||
b6f484ddee | |||
3c39fee5a8 | |||
560f34d1f6 | |||
dbda50941a | |||
f1e68ac25c | |||
95029b9b05 | |||
a789bf4761 | |||
d2e7ffa8b9 | |||
0914519f6a | |||
43cd5f3730 | |||
d396a5f45a | |||
76a7071dba | |||
133baa8ce6 | |||
5df3510fde | |||
357339273f | |||
2500881e0b | |||
0013bfff4e | |||
f13498b428 | |||
b567138170 | |||
653982cae5 | |||
605f4906ba | |||
d27f24e312 | |||
c9c1cb5c9c | |||
1cc6493ccf | |||
ae47862be2 | |||
8590184df7 | |||
d840bbab08 | |||
63314de516 | |||
c47a6e12c7 | |||
7937c45ba4 | |||
813b11ac56 | |||
ad6883b66a | |||
a8f4c4e297 | |||
6d68e94e4e | |||
5dd40d7d88 | |||
3f58177670 | |||
edfd65b115 | |||
51da66ec84 | |||
ba36308d69 | |||
ee450b2dd0 | |||
84b28fb261 | |||
1586b86797 | |||
8f065e487e | |||
953eadd983 | |||
a4a792facd | |||
055f808f98 | |||
0404878445 | |||
053907f8a4 | |||
f76dcc1f05 | |||
823bc138cd | |||
18f746b025 | |||
c81adaf901 | |||
2d12ddd0f6 | |||
bee36cc8d0 | |||
f7aee67023 | |||
c021727009 | |||
6653136e1d | |||
06c40c807c | |||
9b262b4915 | |||
cc2d3ecfd7 | |||
92743499bf | |||
aa6a00a03e | |||
bd19f7c4cb | |||
988bf65ba4 | |||
d5b03bd824 | |||
6a72dab111 | |||
56e8319a6d | |||
aed1e51ef1 | |||
f4278d61df | |||
a5c3ae3cef | |||
05c052e212 | |||
dc05bb648a | |||
800b65b2f6 | |||
ae1a0f57c5 | |||
df7c44bd0c | |||
3e29cfd712 | |||
202031538f | |||
29ff1b925d | |||
5a91db6e62 | |||
94ba700e58 | |||
1964c6ec29 | |||
4dd6591bfd | |||
163217815b | |||
37c182cd5d | |||
0c68f27ac3 | |||
5fb8da9b35 | |||
74d9fd1e4f | |||
e71206c578 | |||
0141c80238 | |||
ed928cfdf7 | |||
2fd319ab7a | |||
7813a1decd | |||
93e4ed1f75 | |||
a70f31b3da | |||
2d25227d0a | |||
fc7bfd0f67 | |||
2996291b37 | |||
3e80b9231c | |||
78231a8682 | |||
ace711e7f1 | |||
c9cbc39ec9 | |||
606a392d50 | |||
c67596ceb4 | |||
9a42cc7555 | |||
2e5ef2a802 | |||
8c8e2c4b2b | |||
0578801f99 | |||
6141e1410a | |||
4fc86807ff | |||
d2a2eba69e | |||
156387aba4 | |||
8a8384e674 | |||
58ae9ab34f | |||
3dfef813bf | |||
3aae98c8be | |||
8d32441b96 | |||
26acd6aafa | |||
7373163bed | |||
a21409e97e | |||
9fae5aacc2 | |||
42aaacf520 | |||
36a36d1c83 | |||
2d3a906d55 | |||
48c0845359 | |||
10b1895357 | |||
f1e932c90a | |||
269db1710e | |||
e2b5cd6d47 | |||
2928c5d103 | |||
2324eb9ff9 | |||
b7a32f01c0 | |||
967320a091 | |||
4779858dd4 | |||
c7cdbc98e5 | |||
c78fd2b36d | |||
12a3b1ba6a | |||
18be7a7966 | |||
56c7e4a66c | |||
486168b796 | |||
074c41556f | |||
10d60288e8 | |||
77d42654dc | |||
07243dc87f | |||
429802a138 | |||
8da2e1b2f7 | |||
324cfd40f0 | |||
64cec764b9 | |||
ce17de7d25 | |||
417f0e41fa | |||
d6d032dd49 | |||
357a00d2bc | |||
276815bd33 | |||
4a72c2b054 | |||
9d89fb5c35 | |||
ad7b113944 | |||
f33688361c | |||
36627fb8b3 | |||
f27d001b7a | |||
d9919b99d2 | |||
439fd30840 | |||
e66b5d09db | |||
d5d06e6be0 | |||
97f2bcff69 | |||
427c78d891 | |||
5e43304eca | |||
d34b9ba306 | |||
fac854eb9d | |||
431a228402 | |||
300b33a20e | |||
759c0e0b03 | |||
bbc549f592 | |||
bac4aec16f | |||
4ca352a344 | |||
bfcfbab818 | |||
9222bc2b35 | |||
f562ed4cc8 | |||
c4a096d8d4 | |||
5e89bd8868 | |||
7080fb9b37 | |||
a32f34f131 | |||
f342a50a76 | |||
58ef02f02b | |||
1da1667920 | |||
b762319fc5 | |||
6a6c5f196a | |||
22cddcb1a6 | |||
63813fe69f | |||
adcd2f14a5 | |||
eb1acaf927 | |||
9ef9969d29 | |||
40b7c11262 | |||
d195dce5d1 | |||
816bf6ebdd | |||
ed53a70b5c | |||
4e4a21f9b7 | |||
c5460e7fee | |||
cf8eb7700b | |||
13bc3f8094 | |||
9575afc8fa | |||
1e80044e93 | |||
e09f517094 | |||
1eb40c3fe0 | |||
ee7f15eff1 | |||
a9b82cf95b | |||
5cc252d471 | |||
a75086287c | |||
a5fb3fc220 | |||
28d1f7c5e7 | |||
59de1b3b62 | |||
84b6120983 | |||
3b9dc50541 | |||
2521f75c18 | |||
965204b8e0 | |||
6660e93c39 | |||
4fd7526852 | |||
903a8a3196 | |||
7e364d01c2 | |||
bfe179e911 | |||
af84dff9ef | |||
97e17f9b32 | |||
b4b4d6b00d | |||
1f9d0fc284 | |||
1a47b1cd86 | |||
9c0b80ea1b | |||
288c9751c1 | |||
ad3c8fb812 | |||
19722fceb3 | |||
0541431ea8 | |||
dd78184f8f | |||
af6a8f5fac | |||
405e39fb9f | |||
3ee702a922 | |||
cb50877bbf | |||
84885d79d5 | |||
57a9996921 | |||
00e45ec935 | |||
f98bfda6f9 | |||
01ab1d1369 | |||
e970c58330 | |||
c970bbea4f | |||
f12c6c1ed1 | |||
2ac50177a6 | |||
3757754c89 | |||
754c65c066 | |||
f6e26f6c8c | |||
d08d9322d2 | |||
65a52a4145 | |||
d5c889d6b0 | |||
445e6668c2 | |||
766062b2cc | |||
068666b0e3 | |||
09ae61651a | |||
e951f8d0ed | |||
e078ba1dde | |||
16ddd001f6 | |||
72312ad615 | |||
3442f36f8a | |||
b2672fd623 | |||
16af67d5e1 | |||
627bc7e3a9 | |||
f5b0d13f08 | |||
3aedb81d48 | |||
f8ad3aca25 | |||
a8394317c7 | |||
ffbbdd46e8 | |||
f37f83fd12 | |||
de04563f18 | |||
894549f002 | |||
fc46a0d441 | |||
6eb50450ec | |||
79a6b4b596 | |||
db8011f4f3 | |||
8dfe0affd4 | |||
450f1d2867 | |||
7678af6300 | |||
217931479b | |||
e5bad7594f | |||
de9d8cd849 | |||
6deaf649ef | |||
a91236012d | |||
a0514eb2ae | |||
230df0ec0c | |||
2f08b12753 | |||
efb4988d10 | |||
6ed29b3653 | |||
1018807db9 | |||
0954ea19e8 | |||
b26c07b788 | |||
eb24f3df84 | |||
3d40ca86b0 | |||
d836dfff14 | |||
a4fe11fad2 | |||
9d91cca73c | |||
0a16d09e1f | |||
068f12fd6f | |||
063f616a19 | |||
87827b2330 | |||
f5aaf7ff28 | |||
6e42989309 | |||
d67ad70443 | |||
655e3bc418 | |||
659e87703b | |||
2de999fb61 | |||
a12428a5b8 | |||
3d8fc8a4a8 | |||
ef7196cec2 | |||
a61904b2dc | |||
aac580686f | |||
efad193180 | |||
193dbb1794 | |||
c11abf88b7 | |||
2f705b5b55 | |||
839ff51b9a | |||
8ef097bf6f | |||
c372a39dd3 | |||
c5a7df9221 | |||
f71a23a72a | |||
41eba7d1c7 | |||
9918539229 | |||
e907c0e650 | |||
d3e3f51330 | |||
c9d6c39c31 | |||
05acd4b29f | |||
a7f33b5014 | |||
d7f37a703e | |||
c92f95e0b8 | |||
fa20963b93 | |||
50f1ec0374 | |||
76b1c2baf0 | |||
767a0f9384 | |||
d44e0b7cd8 | |||
3670d3fd7a | |||
cb2efd530f | |||
79829c98db | |||
d2cef8ed9b | |||
17a8b0f783 | |||
3acfe42622 | |||
d1cbccd9ba | |||
504160b11f | |||
b21fd27360 | |||
8df79a3559 | |||
ecb343c23b | |||
7e48e5859d | |||
57a25de910 | |||
24354ccd6a | |||
71f7a7243b | |||
17e7667da4 | |||
5d2f488004 | |||
ab4bdd59db | |||
d5abff82e0 | |||
611d2fa75d | |||
89b30b4853 | |||
9b71573965 | |||
77c3a1f372 | |||
08e73e5366 | |||
2e8349196e | |||
2a935ec15f | |||
7bf1720a76 | |||
ba58589656 | |||
5b8d963ee2 | |||
45ff1f2379 | |||
0d24e758b2 | |||
cbc7b3b0b7 | |||
111a86f3ec | |||
bab3502260 | |||
ad186b8652 | |||
3023691487 | |||
92afe9020f | |||
7d6cdf83dc | |||
64e5684d45 | |||
4d97d3bdb1 | |||
18cba86f77 | |||
914b022663 | |||
6e908a1be8 | |||
5402434218 | |||
6793c10860 | |||
c856d8bdbd | |||
a6ad660e5e | |||
b1a0abc7a6 | |||
3fbe7f0bb3 | |||
41fec5bd5b | |||
44cced3ffc | |||
498d025bd3 | |||
8a69ea971f | |||
b1ca74ed30 | |||
6d941c82fd | |||
77fb4230d6 | |||
a5419fe79e | |||
1607891b29 | |||
75b25e33f6 | |||
d08517db8c | |||
65a9658b13 | |||
3205361163 | |||
58887c591b | |||
657fbfbefa | |||
36bf7ad694 | |||
679e7863cb | |||
a7aa7e172b | |||
729cb5eec6 | |||
addbdcb660 | |||
f142451a33 | |||
124287a0ea | |||
9da366c193 | |||
cb0a1a94a7 | |||
dbaebe101c | |||
8509dcb8a0 | |||
7b5cdf6adf | |||
7207a91aa5 | |||
55ed52a71d | |||
cd4927053e | |||
982e6c4916 | |||
b58338b066 | |||
a9c38fb0df | |||
9bba27a3aa | |||
e655cba5bd | |||
bcfd379f32 | |||
47ae57610a | |||
5ed39de8c5 | |||
66abe45ea1 | |||
425b4fe6dd | |||
93669ab1fc | |||
16b2d41dd6 | |||
30b3862770 | |||
7e7cbec8a1 | |||
4ac15e68cf | |||
a7ed33b552 | |||
9cc7265b05 | |||
d567799d43 | |||
530c542002 | |||
7aa4d401f7 | |||
a8b8c2f438 | |||
241a05fc52 | |||
40737e9efa | |||
217828a849 | |||
69f1e487b3 | |||
2b2b2cac1f | |||
ee72714c08 | |||
83a96c557d | |||
892e425d87 | |||
5298e3872c | |||
c77ed82caa | |||
2d0224b64e | |||
68b099c277 | |||
283f3ff620 | |||
9a95257c40 | |||
bcfadd6085 | |||
d4ea1ec6ad | |||
a0f0e199b7 | |||
5a0c2a0c1d | |||
ce027da236 | |||
37b048effb | |||
92a5a51632 | |||
230f014b9e | |||
3f33f4d3a9 | |||
47fc0a5cfa | |||
c86b0d8a85 | |||
8cda974552 | |||
3f1399cb0d | |||
99655206c8 | |||
3037eb8d4f | |||
31ebdbc77f | |||
6e1ce5ab6c | |||
aa8dfac313 | |||
c6da2ab0de | |||
f0291dc5d3 | |||
994f8c325a | |||
ae5a6419d4 | |||
85feca305b | |||
7b71a331c6 | |||
032127b591 | |||
91159ea8e3 | |||
d5a9ee97f2 | |||
900933bbcc | |||
aeddd8c95a | |||
be77bdef12 | |||
f3afe5c99c | |||
aab9d9229c | |||
a714b8052d | |||
e873c93be3 | |||
cb5c337540 | |||
4d14372d5e | |||
4b8d1abb5d | |||
d63ada489a | |||
d4e284b7c5 | |||
21cb56d808 | |||
e1aa247548 | |||
638108e9d5 | |||
f655b3f0fd | |||
6a2be8b0ca | |||
ad0482be73 | |||
4522e85ac4 | |||
36e73cada4 | |||
8e5ac1338f | |||
cb6cf189b4 | |||
8ed05c27f2 | |||
9883ca8549 | |||
dc91698b3a | |||
b4e00275b2 | |||
03978ac5a5 | |||
33a68ec9c3 | |||
c78b658a92 | |||
6b988155e1 | |||
4677cdb4c2 | |||
96c23110ae | |||
a4e2ee99d3 | |||
9a9fa5594d | |||
1c73f3e100 | |||
75234e28e5 | |||
b20edaca26 | |||
62cb2cd13c | |||
bfea3572ea | |||
acf64f8476 | |||
b28ec430e4 | |||
7b68628e6c | |||
b584174d67 | |||
49e2cc6593 | |||
36ab7e0600 | |||
ad0997e15f | |||
8cdf406dd3 | |||
2d618722e6 | |||
c0afbae940 | |||
ed86d8d1fc | |||
c1441a2a8f | |||
b557b3170e | |||
9493de4443 | |||
175ffd9054 | |||
66c78cb819 | |||
962e41f9ca | |||
fd5f8a8046 | |||
d61191db40 | |||
0139236464 | |||
c5b2db72a2 | |||
303a1207c1 | |||
1078c86100 | |||
c67e9fabc4 | |||
ad98f14fc1 | |||
ec4745d174 | |||
0e53939e00 | |||
8d1cd3ae5c | |||
18fe0f0c44 | |||
3b89708653 | |||
23bf7b8d63 | |||
a8817fb973 | |||
8b14eb9020 | |||
25ee36bbba | |||
19693a85cd | |||
c7ba1994ac | |||
9aab0b9388 | |||
492b7d5ef9 | |||
352de7929b | |||
9f5d3f0ee5 | |||
691a3c6087 | |||
268e04cb4a | |||
7605f1f540 | |||
b543aee24e | |||
a74a64084d | |||
743b8cddf9 | |||
74774dd44f | |||
a8d4b1c90a | |||
3a6cdf02e5 | |||
56667e17c9 | |||
1e6b789bfa | |||
a61ddb6f61 | |||
62e12e3af5 | |||
93be7370d9 | |||
130c0b484d | |||
974848310c | |||
49494be653 | |||
0e2722c638 | |||
66946a4680 | |||
24d887a38a | |||
73e99cc513 | |||
e6db701c17 | |||
50fa577af8 | |||
8636ef5e24 | |||
62040cef56 | |||
8731b6279f | |||
ae66c0e497 | |||
c67703e7a3 | |||
b1771b92ec | |||
5f31444300 | |||
729cc4e04f | |||
2ed3e2160d | |||
8bbf6e3f54 | |||
d7fa40087c | |||
3ae6e0b8ab | |||
4b7da6e60d | |||
2863f8ec65 | |||
e2491c6322 | |||
4a8b1d9b2c | |||
74aed5cb58 | |||
b130c298df | |||
e5a6f8c2de | |||
87e5f8acbf | |||
c1a3b6ecc2 | |||
c242d66130 | |||
864d212c64 | |||
a9564d207b | |||
b82a9c832b | |||
5d9298543f | |||
4e9ae61044 | |||
d47262d233 | |||
8fdcf9f968 | |||
c82d37f6c3 | |||
5a8658283a | |||
4b97e58cba | |||
48031651a0 | |||
f3d556e3f9 | |||
8d4cecdb77 | |||
39a622f66e | |||
dae28b9cfe | |||
b7b4aa5d4d | |||
ed036b978d | |||
284920433f | |||
30bed18b77 | |||
6678dd10a5 | |||
296d740f83 | |||
b8fda9d730 | |||
2623c71ed3 | |||
e4472db33f | |||
076fef5e57 | |||
40eba48109 | |||
095c79e863 | |||
959c1ea857 | |||
ef3af104ae | |||
9dc69d9843 | |||
45348b2c83 | |||
c558db2a48 | |||
f987c18a7e | |||
5d3f43c10b | |||
216b01b224 | |||
35dd52e9ba | |||
b0c83921be | |||
e744b15ad2 | |||
1fd695d337 | |||
8f38bc7dc0 | |||
7d6ea6c17e | |||
56dc958116 | |||
19dfb87b1f | |||
a5287f56fc | |||
eed8087d87 | |||
4115d73b9a | |||
064b95c16a | |||
70c167182a | |||
fee002382e | |||
d75a470ffa | |||
c530fbd22b | |||
1b8f9e75dd | |||
1a5b01676d | |||
4b397d15b3 | |||
4d2b83d01f | |||
87096f13d2 | |||
a0ffcc61ae | |||
4b4819cd07 | |||
ca791a0378 | |||
b08f8d3103 | |||
88ba8439fc | |||
4dd0367136 | |||
ff2c183ac1 | |||
aa24181a53 | |||
1f83c56e05 | |||
2592894958 | |||
85027caf42 | |||
3ea556bc24 | |||
ca4a22d4ba | |||
18c1f0dfe9 | |||
734afee5e0 | |||
271e17547a | |||
e28368ff1b | |||
1aab959d4e | |||
bca769111f | |||
909321928c | |||
8b0a7f6838 | |||
5fa36bbab3 | |||
d65a7a3c30 | |||
453f5ce8f2 | |||
dc1db33ec9 | |||
c68e80c93b | |||
6b9a0935c1 | |||
b84468ecd3 | |||
ff4ba54553 | |||
f78a90bce2 | |||
24d871b529 | |||
e547f38589 | |||
6fb16f9879 | |||
2dc50cff5b | |||
98228c392e | |||
aeb7278b00 | |||
42d7609d54 | |||
a70008cc5c | |||
306a5c849e | |||
bb92184085 | |||
90c9462dd4 | |||
21b287ef0b | |||
b0c524765e | |||
6d0318cbe6 | |||
8f5ee6832f | |||
38fe766fa7 | |||
74866882f2 | |||
c638e83bf5 | |||
de6ef68571 | |||
c51049a59b | |||
9cedeb0a8d | |||
e37a4823f1 | |||
bf60345b7a | |||
cb29b8dd2a | |||
3a501ad69e | |||
e6e43d236f | |||
142601d4b6 | |||
f192e4f08f | |||
f020370ae7 | |||
24935af867 | |||
6a213bc8f5 | |||
f0414711b7 | |||
d087ed5bf6 | |||
d14dea4660 | |||
29abfebb68 | |||
668dfc40c7 | |||
61514e3b0e | |||
46fcab14dd | |||
2435c3ce0c | |||
55907b2167 | |||
a03eff51af | |||
10175618d2 | |||
4ff033852d | |||
2237f47b90 | |||
bfca226964 | |||
6077458ad8 | |||
7079559c2d | |||
0641244378 | |||
563da2bb18 | |||
dc347dd3d7 | |||
eab4fe50a3 | |||
ead6dc553a | |||
009c124fac | |||
7029c88305 | |||
9411fc00b8 | |||
5a93a4c466 | |||
9afc5da2e1 | |||
49706172f3 | |||
b2a0cdaa38 | |||
5481d1a039 | |||
dd5e320aa1 | |||
3c2aff2b5b | |||
c3c4c9326b | |||
ae70f4ea92 | |||
29fb79382c | |||
5c2cf04e10 | |||
9e0a26628b | |||
ce88602ced | |||
53b8d0d528 | |||
96a61cc4e4 | |||
b7b36bb0a4 | |||
52b254071c | |||
fbf2dd1672 | |||
4bbf09f582 | |||
952cd38b7b | |||
9a79be5ca0 | |||
2182521a8b | |||
fe65c2ae02 | |||
554d36c74b | |||
29ef0916db | |||
f93c8290f4 | |||
a69293df24 | |||
48ac038f7a | |||
5a7d2560c9 | |||
d91027f771 | |||
deaf3cb416 | |||
f95e1ea40f | |||
f64ab49307 | |||
fe1c99c0cf | |||
bdb7b73b8a | |||
293fff90d3 | |||
6eb4973780 | |||
5f5824d78d | |||
0ef9d79056 | |||
215650f6e7 | |||
a0d0d4c0e9 | |||
0422af2aae | |||
cef8e42938 | |||
0eeeec38fa | |||
75a84ecdae | |||
87c507fdbe | |||
3783ae823d | |||
f3ed00e28e | |||
307d023b2e | |||
775ce3a03f | |||
f655372b08 | |||
2c4079f4c8 | |||
ac1f90f1a9 | |||
4bb55b1622 | |||
23c5bb17c7 | |||
a0ed3261c9 | |||
261732f140 | |||
595c96b262 | |||
496999beba | |||
bb50881346 | |||
948902eae0 | |||
e41ff2df66 | |||
f88b79d42b | |||
1a0dd53450 | |||
9872430bd2 | |||
ae8badb141 | |||
36fa3a1a0a | |||
df8a69d15f | |||
fad08a19cc | |||
6527d05d77 | |||
d303e6b94e | |||
5fa397ceed | |||
c0fd017906 | |||
74e7da214a | |||
756ba07b16 | |||
5c236fd06c | |||
f671be814e | |||
e277437bd2 | |||
beead7e54d | |||
ea010be5cb | |||
97b6c41d42 | |||
6d0f3762b2 | |||
132a2a73af | |||
eab80d0aea | |||
88b1383eed | |||
ff74452ef3 | |||
bf8e9b3d71 | |||
de34187db0 | |||
acb23e8ef0 | |||
f992ee3140 | |||
97986a5241 | |||
a7d1346d51 | |||
983ec5debc | |||
cb28ac3aed | |||
a817a7c889 | |||
a5f2444ad2 | |||
cea8067219 | |||
4db074a5aa | |||
3eb00ef60f | |||
ca8bf8f964 | |||
39b3ce9bd3 | |||
4caa313aef | |||
a78a339407 | |||
0919b13c87 | |||
f2b0e2f418 | |||
cb6848aa80 | |||
542691c4e4 | |||
8ad6a8767f | |||
2242b1b4a5 | |||
8df4d8b905 | |||
7fad53b112 | |||
9d667db634 | |||
f47a789b15 | |||
5e3ce30d02 | |||
97c5fb8141 | |||
0e3a8fa6d9 | |||
5eae76c66e | |||
849f79e4ed | |||
ff7cf839d8 | |||
f3cbd243cc | |||
f146c92e88 | |||
fb2620b3a5 | |||
fd00e5cb35 | |||
44fde2d964 | |||
448b957a13 | |||
01607b9860 | |||
23d8c7ff0e | |||
b321da00b4 | |||
dec3da8f9d | |||
80aae18794 | |||
1f2aaf3f98 | |||
2534a028c0 | |||
fc409d9262 | |||
b70d195473 | |||
7eedff2714 | |||
6d9185d121 | |||
f89c22b5ee | |||
f23dc11a86 | |||
09a0325534 | |||
408d5da50f | |||
561808cf90 | |||
25df95be6f | |||
b85d7c1f70 | |||
642720a2fe | |||
1cc7131bb7 | |||
8f60f1093a | |||
d3b458dd9b | |||
a08e2cc434 | |||
b83a0434a4 | |||
b68b74ac32 | |||
b084c1d437 | |||
63ed892502 | |||
1cb6101c6a | |||
be0cc0273f | |||
abf33b3b3b | |||
d9b0490f72 | |||
caa70d2bca | |||
4f05f08f5d | |||
0c76b89e55 | |||
08ab4b93ea | |||
f0028b6972 | |||
b6553357f9 | |||
d86103383a | |||
1265afebbb | |||
306783c661 | |||
8ec8204a30 | |||
8cf3ef895d | |||
e4498adb1f | |||
42c5c59800 | |||
8ef8c9094a | |||
8dc4724340 | |||
13551885c2 | |||
d677e83ed4 | |||
5d9130a3c4 | |||
1ca4913328 | |||
b7614abb9e | |||
862a4a243f | |||
db291234ed | |||
2a5605db24 | |||
b4362cc18b | |||
6a5a6387e2 | |||
0f31adeafb | |||
ae817722d8 | |||
90bedd7e06 | |||
7d27be2a73 | |||
74da2de3b7 | |||
35db70a56c | |||
7dac8e2dde | |||
82c6992d6f | |||
4831c7b9af | |||
113db8d656 | |||
de6679ea95 | |||
0b66ae5c53 | |||
61a20febb9 | |||
29f81577e9 | |||
3acf956f6f | |||
87b13bef8e | |||
0d4cb252c4 | |||
fcabc6f799 | |||
848c43a9ab | |||
5f766cd20b | |||
8c07ba635e | |||
bb07aecfec | |||
27c5ec0149 | |||
4f01db0482 | |||
f2f8a7a90e | |||
e743414908 | |||
f6f0f94e17 | |||
d47a47924a | |||
7a2bf7e7eb | |||
d5a7867087 | |||
fbf78b83c4 | |||
2c63cf3cbd | |||
3b648e71e6 | |||
021d0a46f8 | |||
8839dbfe5b | |||
407d058611 | |||
c6a7f499ce | |||
d821fd29d6 | |||
6b99ab3a57 | |||
004f1d5aed | |||
1caeea8bc2 | |||
6ce4a1a18d | |||
0b48c8eb35 | |||
fef913085e | |||
2059af822d | |||
0fe74e95fe | |||
b7755123c1 | |||
39282be486 | |||
b18e4057bb | |||
12a9b5f35e | |||
89baa94002 | |||
1ef3478709 | |||
73063544bd | |||
90240bf11d | |||
5c5a06198c | |||
394933e53c | |||
b106d3ba60 | |||
947a339714 | |||
edb18349c9 | |||
9dcb965959 | |||
72ae82fe47 | |||
2d9d2f1e99 |
appveyor.yml (deleted, 42 lines)

```diff
@@ -1,42 +0,0 @@
-version: '{build}'
-
-branches:
-  only:
-    - master
-    - /^v[0-9.]+\.[0-9.]+/
-
-cache:
-  - '%USERPROFILE%\.cargo'
-  - '%APPVEYOR_BUILD_FOLDER%\target'
-
-clone_folder: d:\projects\solana
-
-build_script:
-  - bash ci/publish-tarball.sh
-
-notifications:
-  - provider: Slack
-    incoming_webhook:
-      secure: GJsBey+F5apAtUm86MHVJ68Uqa6WN1SImcuIc4TsTZrDhA8K1QWUNw9FFQPybUWDyOcS5dly3kubnUqlGt9ux6Ad2efsfRIQYWv0tOVXKeY=
-    channel: ci-status
-    on_build_success: false
-    on_build_failure: true
-    on_build_status_changed: true
-
-deploy:
-  - provider: S3
-    access_key_id:
-      secure: fTbJl6JpFebR40J7cOWZ2mXBa3kIvEiXgzxAj6L3N7A=
-    secret_access_key:
-      secure: vItsBXb2rEFLvkWtVn/Rcxu5a5+2EwC+b7GsA0waJy9hXh6XuBAD0lnHd9re3g/4
-    bucket: release.solana.com
-    region: us-west-1
-    set_public: true
-
-  - provider: GitHub
-    auth_token:
-      secure: 81fEmPZ0cV1wLtNuUrcmtgxKF6ROQF1+/ft5m+fHX21z6PoeCbaNo8cTyLioWBj7
-    draft: false
-    prerelease: false
-    on:
-      appveyor_repo_tag: true
```
.buildkite/env/secrets.ejson (17 changes, vendored)

```diff
@@ -1,15 +1,12 @@
 {
   "_public_key": "ae29f4f7ad2fc92de70d470e411c8426d5d48db8817c9e3dae574b122192335f",
   "environment": {
-    "CODECOV_TOKEN": "EJ[1:yGpTmjdbyjW2kjgIHkFoJv7Ue7EbUvUbqHyw6anGgWg=:JnxhrIxh09AvqdJgrVSYmb7PxSrh19aE:07WzVExCHEd1lJ1m8QizRRthGri+WBNeZRKjjEvsy5eo4gv3HD7zVEm42tVTGkqITKkBNQ==]",
-    "CRATES_IO_TOKEN": "EJ[1:yGpTmjdbyjW2kjgIHkFoJv7Ue7EbUvUbqHyw6anGgWg=:d0jJqC32/axwzq/N7kMRmpxKhnRrhtpt:zvcPHwkOzGnjhNkAQSejwdy1Jkr9wR1qXFFCnfIjyt/XQYubzB1tLkoly/qdmeb5]",
-    "GEOLOCATION_API_KEY": "EJ[1:yGpTmjdbyjW2kjgIHkFoJv7Ue7EbUvUbqHyw6anGgWg=:R4gfB6Ey4i50HyfLt4UZDLBqg3qHEUye:UfZCOgt8XI6Y2g+ivCRVoS1fjFycFs7/GSevvCqh1B50mG0+hzpEyzXQLuKG5OeI]",
-    "GITHUB_TOKEN": "EJ[1:yGpTmjdbyjW2kjgIHkFoJv7Ue7EbUvUbqHyw6anGgWg=:Vq2dkGTOzfEpRht0BAGHFp/hDogMvXJe:tFXHg1epVt2mq9hkuc5sRHe+KAnVREi/p8S+IZu67XRyzdiA/nGak1k860FXYuuzuaE0QWekaEc=]",
-    "INFLUX_DATABASE": "EJ[1:yGpTmjdbyjW2kjgIHkFoJv7Ue7EbUvUbqHyw6anGgWg=:5KI9WBkXx3R/W4m256mU5MJOE7N8aAT9:Cb8QFELZ9I60t5zhJ9h55Kcs]",
-    "INFLUX_PASSWORD": "EJ[1:yGpTmjdbyjW2kjgIHkFoJv7Ue7EbUvUbqHyw6anGgWg=:hQRMpLCrav+OYkNphkeM4hagdVoZv5Iw:AUO76rr6+gF1OLJA8ZLSG8wHKXgYCPNk6gRCV8rBhZBJ4KwDaxpvOhMl7bxxXG6jol7v4aRa/Lk=]",
-    "INFLUX_USERNAME": "EJ[1:yGpTmjdbyjW2kjgIHkFoJv7Ue7EbUvUbqHyw6anGgWg=:R7BNmQjfeqoGDAFTJu9bYTGHol2NgnYN:Q2tOT/EBcFvhFk+DKLKmVU7tLCpVC3Ui]",
-    "SOLANA_INSTALL_UPDATE_MANIFEST_KEYPAIR_x86_64_unknown_linux_gnu": "EJ[1:yGpTmjdbyjW2kjgIHkFoJv7Ue7EbUvUbqHyw6anGgWg=:Egc2dMrHDU0NcZ71LwGv/V66shUhwYUE:04VoIb8CKy7KYhQ5W4cEW9SDKZltxWBL5Hob106lMBbUOD/yUvKYcG3Ep8JfTMwO3K8zowW5HpU/IdGoilX0XWLiJJ6t+p05WWK0TA16nOEtwrEG+UK8wm3sN+xCO20i4jDhpNpgg3FYFHT5rKTHW8+zaBTNUX/SFxkN67Lm+92IM28CXYE43SU1WV6H99hGFFVpTK5JVM3JuYU1ex/dHRE+xCzTr4MYUB/F+nGoNFW8HUDV/y0e1jxT9to3x0SmnytEEuk+5RUzFuEt9cKNFeNml3fOCi4qL+sfj/Y5pjH9xDiUxsvH/8NL35jbLP244aFHgWcp]",
-    "SOLANA_INSTALL_UPDATE_MANIFEST_KEYPAIR_x86_64_apple_darwin": "EJ[1:yGpTmjdbyjW2kjgIHkFoJv7Ue7EbUvUbqHyw6anGgWg=:NeOxSoWCvXB9AL4H6OK26l/7bmsKd/oz:Ijfoxtvk2CHlN1ZXHup3Gg/914kbbAkEGWJfvozA8UIe+aUzUObMyTrKkVOeNAH8Q8YH9tNzk7RRnrTcpnzeCCBLlWcVEeruMxHox3mPRzmSeDLxtbzCl9VePlRO3T7jg90K5hW+ZAkd5J/WJNzpAcmr93ts/of3MbvGHSujId/efCTzJEcP6JInnBb8Vrj7TlgKbzUlnqpq1+NjYPSXN3maKa9pKeo2JWxZlGBMoy6QWUUY5GbYEylw9smwh1LJcHZjlaZNMuOl4gNKtaSr38IXQkAXaRUJDPAmPras00YObKzXU8RkTrP4EoP/jx5LPR7f]",
-    "SOLANA_INSTALL_UPDATE_MANIFEST_KEYPAIR_x86_64_pc_windows_msvc": "EJ[1:yGpTmjdbyjW2kjgIHkFoJv7Ue7EbUvUbqHyw6anGgWg=:7t+56twjW+jR7fpFNNeRFLPd7E4lbmyN:JuviDpkQrfVcNUGRGsa2e/UhvH6tTYyk1s4cHHE5xZH1NByL7Kpqx36VG/+o1AUGEeSQdsBnKgzYdMoFYbO8o50DoRPc86QIEVXCupD6J9avxLFtQgOWgJp+/mCdUVXlqXiFs/vQgS/L4psrcKdF6WHd77BeUr6ll8DjH+9m5FC9Rcai2pXno6VbPpunHQ0oUdYzhFR64+LiRacBaefQ9igZ+nSEWDLqbaZSyfm9viWkijoVFTq8gAgdXXEh7g0QdxVE5T6bPristJhT6jWBhWunPUCDNFFErWIsbRGctepl4pbCWqh2hNTw9btSgVfeY6uGCOsdy9E=]"
+    "CODECOV_TOKEN": "EJ[1:Z7OneT3RdJJ0DipCHQ7rC84snQ+FPbgHwZADQiz54wk=:3K68mE38LJ2RB98VWmjuNLFBNn1XTGR4:cR4r05/TOZQKmEZp1v4CSgUJtC6QJiOaL85QjXW0qZ061fMnsBA8AtAPMDoDq4WCGOZM1A==]",
+    "CRATES_IO_TOKEN": "EJ[1:Z7OneT3RdJJ0DipCHQ7rC84snQ+FPbgHwZADQiz54wk=:GGRTYDjMXksevzR6kq4Jx+FaIQZz50RU:xkbwDxcgoCyU+aT2tiI9mymigrEl6YiOr3axe3aX70ELIBKbCdPGilXP/wixvKi94g2u]",
+    "GEOLOCATION_API_KEY": "EJ[1:Z7OneT3RdJJ0DipCHQ7rC84snQ+FPbgHwZADQiz54wk=:U2PZLi5MU3Ru/zK1SilianEeizcMvxml:AJKf2OAtDHmJh0KyXrBnNnistItZvVVP3cZ7ZLtrVupjmWN/PzmKwSsXeCNObWS+]",
+    "GITHUB_TOKEN": "EJ[1:Z7OneT3RdJJ0DipCHQ7rC84snQ+FPbgHwZADQiz54wk=:0NJNlpD/O19mvOakCGBYDhIDfySxWFSC:Dz4NXv9x6ncRQ1u9sVoWOcqmkg0sI09qmefghB0GXZgPcFGgn6T0mw7ynNnbUvjyH8dLruKHauk=]",
+    "INFLUX_DATABASE": "EJ[1:Z7OneT3RdJJ0DipCHQ7rC84snQ+FPbgHwZADQiz54wk=:SzwHIeOVpmbTcGQOGngoFgYumsLZJUGq:t7Rpk49njsWvoM+ztv5Uwuiz]",
+    "INFLUX_PASSWORD": "EJ[1:Z7OneT3RdJJ0DipCHQ7rC84snQ+FPbgHwZADQiz54wk=:/MUs+q7pdGrUjzwcq+6pgIFxur4hxdqu:am22z2E2dtmw1f1J1Mq5JLcUHZsrEjQAJ0pp21M4AZeJbNO6bVb44d9zSkHj7xdN6U+GNlCk+wU=]",
+    "INFLUX_USERNAME": "EJ[1:Z7OneT3RdJJ0DipCHQ7rC84snQ+FPbgHwZADQiz54wk=:XjghH20xGVWro9B+epGlJaJcW8Wze0Bi:ZIdOtXudTY5TqKseDU7gVvQXfmXV99Xh]"
   }
 }
```
.gitbook.yaml (deleted, 5 lines)

```diff
@@ -1,5 +0,0 @@
-root: ./book/src
-
-structure:
-  readme: introduction.md
-  summary: SUMMARY.md
```
.gitignore (7 changes, vendored)

```diff
@@ -1,6 +1,7 @@
-/book/html/
-/book/src/tests.ok
-/book/src/.gitbook/assets/*.svg
+/docs/html/
+/docs/src/tests.ok
+/docs/src/cli/usage.md
+/docs/src/.gitbook/assets/*.svg
 /farf/
 /solana-release/
 /solana-release.tar.bz2
```
.mergify.yml (47 changes)

```diff
@@ -1,9 +1,40 @@
 # Validate your changes with:
 #
-#   $ curl -F 'data=@.mergify.yml' https://gh.mergify.io/validate
+#   $ curl -F 'data=@.mergify.yml' https://gh.mergify.io/validate/
 #
 # https://doc.mergify.io/
 pull_request_rules:
+  - name: automatic merge (squash) on CI success
+    conditions:
+      - status-success=buildkite/solana
+      #- status-success=Travis CI - Pull Request
+      - status-success=ci-gate
+      - label=automerge
+      - author≠@dont-squash-my-commits
+    actions:
+      merge:
+        method: squash
+  # Join the dont-squash-my-commits group if you won't like your commits squashed
+  - name: automatic merge (rebase) on CI success
+    conditions:
+      - status-success=buildkite/solana
+      #- status-success=Travis CI - Pull Request
+      - status-success=ci-gate
+      - label=automerge
+      - author=@dont-squash-my-commits
+    actions:
+      merge:
+        method: rebase
+  - name: remove automerge label on CI failure
+    conditions:
+      - label=automerge
+      - "#status-failure!=0"
+    actions:
+      label:
+        remove:
+          - automerge
+      comment:
+        message: automerge label removed due to a CI failure
   - name: remove outdated reviews
     conditions:
       - base=master
@@ -19,35 +50,27 @@ pull_request_rules:
     label:
       add:
         - automerge
-  - name: v0.23 backport
-    conditions:
-      - base=master
-      - label=v0.23
-    actions:
-      backport:
-        branches:
-          - v0.23
-  - name: v1.0 backport
-    conditions:
-      - base=master
-      - label=v1.0
-    actions:
-      backport:
-        ignore_conflicts: true
-        branches:
-          - v1.0
   - name: v1.1 backport
     conditions:
      - base=master
      - label=v1.1
    actions:
      backport:
        ignore_conflicts: true
        branches:
          - v1.1
+  - name: v1.2 backport
+    conditions:
+      - base=master
+      - label=v1.2
+    actions:
+      backport:
+        ignore_conflicts: true
+        branches:
+          - v1.2
```
.travis.yml (93 changes)

```diff
@@ -1,17 +1,3 @@
-os:
-  - osx
-
-language: rust
-rust:
-  - stable
-
-install:
-  - source ci/rust-version.sh
-
-script:
-  - source ci/env.sh
-  - ci/publish-tarball.sh
-
 branches:
   only:
     - master
@@ -22,21 +8,64 @@ notifications:
     on_success: change
     secure: F4IjOE05MyaMOdPRL+r8qhs7jBvv4yDM3RmFKE1zNXnfUOqV4X38oQM1EI+YVsgpMQLj/pxnEB7wcTE4Bf86N6moLssEULCpvAuMVoXj4QbWdomLX+01WbFa6fLVeNQIg45NHrz2XzVBhoKOrMNnl+QI5mbR2AlS5oqsudHsXDnyLzZtd4Y5SDMdYG1zVWM01+oNNjgNfjcCGmOE/K0CnOMl6GPi3X9C34tJ19P2XT7MTDsz1/IfEF7fro2Q8DHEYL9dchJMoisXSkem5z7IDQkGzXsWdWT4NnndUvmd1MlTCE9qgoXDqRf95Qh8sB1Dz08HtvgfaosP2XjtNTfDI9BBYS15Ibw9y7PchAJE1luteNjF35EOy6OgmCLw/YpnweqfuNViBZz+yOPWXVC0kxnPIXKZ1wyH9ibeH6E4hr7a8o9SV/6SiWIlbYF+IR9jPXyTCLP/cc3sYljPWxDnhWFwFdRVIi3PbVAhVu7uWtVUO17Oc9gtGPgs/GrhOMkJfwQPXaudRJDpVZowxTX4x9kefNotlMAMRgq+Drbmgt4eEBiCNp0ITWgh17BiE1U09WS3myuduhoct85+FoVeaUkp1sxzHVtGsNQH0hcz7WcpZyOM+AwistJA/qzeEDQao5zi1eKWPbO2xAhi2rV1bDH6bPf/4lDBwLRqSiwvlWU=

-deploy:
-  - provider: s3
-    access_key_id: $AWS_ACCESS_KEY_ID
-    secret_access_key: $AWS_SECRET_ACCESS_KEY
-    bucket: release.solana.com
-    region: us-west-1
-    skip_cleanup: true
-    acl: public_read
-    local_dir: travis-s3-upload
-    on:
-      all_branches: true
-  - provider: releases
-    api_key: $GITHUB_TOKEN
-    skip_cleanup: true
-    file_glob: true
-    file: travis-release-upload/*
-    on:
-      tags: true
+os: linux
+dist: bionic
+language: minimal
+
+jobs:
+  include:
+    - &release-artifacts
+      if: type = push
+      name: "macOS release artifacts"
+      os: osx
+      language: rust
+      rust:
+        - stable
+      install:
+        - source ci/rust-version.sh
+      script:
+        - source ci/env.sh
+        - ci/publish-tarball.sh
+      deploy:
+        - provider: s3
+          access_key_id: $AWS_ACCESS_KEY_ID
+          secret_access_key: $AWS_SECRET_ACCESS_KEY
+          bucket: release.solana.com
+          region: us-west-1
+          skip_cleanup: true
+          acl: public_read
+          local_dir: travis-s3-upload
+          on:
+            all_branches: true
+        - provider: releases
+          token: $GITHUB_TOKEN
+          skip_cleanup: true
+          file_glob: true
+          file: travis-release-upload/*
+          on:
+            tags: true
+    - <<: *release-artifacts
+      name: "Windows release artifacts"
+      os: windows
+
+    # docs pull request or commit
+    - name: "docs"
+      if: type IN (push, pull_request) OR tag IS present
+      language: node_js
+      node_js:
+        - "node"
+
+      services:
+        - docker
+
+      cache:
+        directories:
+          - ~/.npm
+
+      before_install:
+        - .travis/affects.sh docs/ .travis || travis_terminate 0
+        - cd docs/
+        - source .travis/before_install.sh
+
+      script:
+        - source .travis/script.sh
```
.travis/affects.sh (new executable file, 25 lines)

```bash
#!/usr/bin/env bash
#
# Check if files in the commit range match one or more prefixes
#

# Always run the job if we are on a tagged release
if [[ -n "$TRAVIS_TAG" ]]; then
  exit 0
fi

(
  set -x
  git diff --name-only "$TRAVIS_COMMIT_RANGE"
)

for file in $(git diff --name-only "$TRAVIS_COMMIT_RANGE"); do
  for prefix in "$@"; do
    if [[ $file =~ ^"$prefix" ]]; then
      exit 0
    fi
  done
done

echo "No modifications to $*"
exit 1
```
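As wired into the `.travis.yml` docs job above (`- .travis/affects.sh docs/ .travis || travis_terminate 0`), the script exits 0, letting the job proceed, when the build is for a tag or when any file in `$TRAVIS_COMMIT_RANGE` starts with one of the given prefixes; otherwise it exits 1 and the `|| travis_terminate 0` ends the job early as a successful no-op.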
CONTRIBUTING.md

```diff
@@ -224,21 +224,20 @@ Inventing new terms is allowed, but should only be done when the term is widely
 used and understood. Avoid introducing new 3-letter terms, which can be
 confused with 3-letter acronyms.

-[Terms currently in use](book/src/terminology.md)
+[Terms currently in use](docs/src/terminology.md)


 ## Design Proposals

-Solana's architecture is described by a book generated from markdown files in
-the `book/src/` directory, maintained by an *editor* (currently @garious). To
-add a design proposal, you'll need to at least propose a change the content
-under the [Accepted Design
-Proposals](https://docs.solana.com/book/v/master/proposals) chapter. Here's
-the full process:
+Solana's architecture is described by docs generated from markdown files in
+the `docs/src/` directory, maintained by an *editor* (currently @garious). To
+add a design proposal, you'll need to include it in the
+[Accepted Design Proposals](https://docs.solana.com/proposals)
+section of the Solana docs. Here's the full process:

 1. Propose a design by creating a PR that adds a markdown document to the
-   directory `book/src/` and references it from the [table of
-   contents](book/src/SUMMARY.md). Add any relevant *maintainers* to the PR
+   `docs/src/proposals` directory and references it from the [table of
+   contents](docs/src/SUMMARY.md). Add any relevant *maintainers* to the PR
    review.
 2. The PR being merged indicates your proposed change was accepted and that the
    maintainers support your plan of attack.
```
Cargo.lock (7069 changes, generated): file diff suppressed because it is too large.
Cargo.toml (21 changes; the capture did not preserve which workspace members were added or removed, so the changed regions are shown without +/- markers)

```diff
@@ -3,13 +3,13 @@ members = [
     "bench-exchange",
     "bench-streamer",
     "bench-tps",
     "accounts-bench",
     "banking-bench",
     "chacha",
     "chacha-cuda",
     "chacha-sys",
     "cli-config",
     "client",
     "core",
     "dos",
     "download-utils",
     "faucet",
     "perf",
     "validator",
@@ -24,9 +24,12 @@
     "logger",
     "log-analyzer",
     "merkle-tree",
     "stake-o-matic",
     "streamer",
     "measure",
     "metrics",
     "net-shaper",
     "notifier",
     "programs/bpf_loader",
     "programs/budget",
     "programs/btc_spv",
@@ -37,20 +40,22 @@
     "programs/noop",
     "programs/ownable",
     "programs/stake",
     "programs/storage",
     "programs/vest",
     "programs/vote",
     "archiver",
     "archiver-lib",
     "archiver-utils",
     "remote-wallet",
     "ramp-tps",
     "runtime",
     "sdk",
     "sdk-c",
     "scripts",
     "stake-accounts",
     "stake-monitor",
     "sys-tuner",
     "tokens",
     "transaction-status",
     "account-decoder",
     "upload-perf",
     "net-utils",
     "version",
     "vote-signer",
     "cli",
     "rayon-threadlimit",
```
README.md (179 changes)

````diff
@@ -1,76 +1,17 @@
 <p align="center">
   <a href="https://solana.com">
     <img alt="Solana" src="https://i.imgur.com/OMnvVEz.png" width="250" />
   </a>
 </p>

 [](https://crates.io/crates/solana-core)
 [](https://docs.rs/solana-core)
 [](https://buildkite.com/solana-labs/solana/builds?branch=master)
 [](https://codecov.io/gh/solana-labs/solana)

-Blockchain Rebuilt for Scale
-===
+# Building

-Solana™ is a new blockchain architecture built from the ground up for scale. The architecture supports
-up to 710 thousand transactions per second on a gigabit network.
-
-Disclaimer
-===
-
-All claims, content, designs, algorithms, estimates, roadmaps, specifications, and performance measurements described in this project are done with the author's best effort. It is up to the reader to check and validate their accuracy and truthfulness. Furthermore nothing in this project constitutes a solicitation for investment.
-
-Introduction
-===
-
-It's possible for a centralized database to process 710,000 transactions per second on a standard gigabit network if the transactions are, on average, no more than 176 bytes. A centralized database can also replicate itself and maintain high availability without significantly compromising that transaction rate using the distributed system technique known as Optimistic Concurrency Control [\[H.T.Kung, J.T.Robinson (1981)\]](http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.65.4735). At Solana, we're demonstrating that these same theoretical limits apply just as well to blockchain on an adversarial network. The key ingredient? Finding a way to share time when nodes can't trust one-another. Once nodes can trust time, suddenly ~40 years of distributed systems research becomes applicable to blockchain!
-
-> Perhaps the most striking difference between algorithms obtained by our method and ones based upon timeout is that using timeout produces a traditional distributed algorithm in which the processes operate asynchronously, while our method produces a globally synchronous one in which every process does the same thing at (approximately) the same time. Our method seems to contradict the whole purpose of distributed processing, which is to permit different processes to operate independently and perform different functions. However, if a distributed system is really a single system, then the processes must be synchronized in some way. Conceptually, the easiest way to synchronize processes is to get them all to do the same thing at the same time. Therefore, our method is used to implement a kernel that performs the necessary synchronization--for example, making sure that two different processes do not try to modify a file at the same time. Processes might spend only a small fraction of their time executing the synchronizing kernel; the rest of the time, they can operate independently--e.g., accessing different files. This is an approach we have advocated even when fault-tolerance is not required. The method's basic simplicity makes it easier to understand the precise properties of a system, which is crucial if one is to know just how fault-tolerant the system is. [\[L.Lamport (1984)\]](http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.71.1078)
-
-Furthermore, and much to our surprise, it can be implemented using a mechanism that has existed in Bitcoin since day one. The Bitcoin feature is called nLocktime and it can be used to postdate transactions using block height instead of a timestamp. As a Bitcoin client, you'd use block height instead of a timestamp if you don't trust the network. Block height turns out to be an instance of what's being called a Verifiable Delay Function in cryptography circles. It's a cryptographically secure way to say time has passed. In Solana, we use a far more granular verifiable delay function, a SHA 256 hash chain, to checkpoint the ledger and coordinate consensus. With it, we implement Optimistic Concurrency Control and are now well en route towards that theoretical limit of 710,000 transactions per second.
-
-Architecture
-===
-
-Before you jump into the code, review the online book [Solana: Blockchain Rebuilt for Scale](https://docs.solana.com/book/).
-
-(The _latest_ development version of the online book is also [available here](https://docs.solana.com/book/v/master/).)
-
-Release Binaries
-===
-Official release binaries are available at [Github Releases](https://github.com/solana-labs/solana/releases).
-
-Additionally we provide pre-release binaries for the latest code on the edge and
-beta channels. Note that these pre-release binaries may be less stable than an
-official release.
-
-### Edge channel
-#### Linux (x86_64-unknown-linux-gnu)
-* [solana.tar.bz2](http://release.solana.com/edge/solana-release-x86_64-unknown-linux-gnu.tar.bz2)
-* [solana-install-init](http://release.solana.com/edge/solana-install-init-x86_64-unknown-linux-gnu) as a stand-alone executable
-#### mac OS (x86_64-apple-darwin)
-* [solana.tar.bz2](http://release.solana.com/edge/solana-release-x86_64-apple-darwin.tar.bz2)
-* [solana-install-init](http://release.solana.com/edge/solana-install-init-x86_64-apple-darwin) as a stand-alone executable
-#### Windows (x86_64-pc-windows-msvc)
-* [solana.tar.bz2](http://release.solana.com/edge/solana-release-x86_64-pc-windows-msvc.tar.bz2)
-* [solana-install-init.exe](http://release.solana.com/edge/solana-install-init-x86_64-pc-windows-msvc.exe) as a stand-alone executable
-#### All platforms
-* [solana-metrics.tar.bz2](http://release.solana.com.s3.amazonaws.com/edge/solana-metrics.tar.bz2)
-
-### Beta channel
-#### Linux (x86_64-unknown-linux-gnu)
-* [solana.tar.bz2](http://release.solana.com/beta/solana-release-x86_64-unknown-linux-gnu.tar.bz2)
-* [solana-install-init](http://release.solana.com/beta/solana-install-init-x86_64-unknown-linux-gnu) as a stand-alone executable
-#### mac OS (x86_64-apple-darwin)
-* [solana.tar.bz2](http://release.solana.com/beta/solana-release-x86_64-apple-darwin.tar.bz2)
-* [solana-install-init](http://release.solana.com/beta/solana-install-init-x86_64-apple-darwin) as a stand-alone executable
-#### Windows (x86_64-pc-windows-msvc)
-* [solana.tar.bz2](http://release.solana.com/beta/solana-release-x86_64-pc-windows-msvc.tar.bz2)
-* [solana-install-init.exe](http://release.solana.com/beta/solana-install-init-x86_64-pc-windows-msvc.exe) as a stand-alone executable
-#### All platforms
-* [solana-metrics.tar.bz2](http://release.solana.com.s3.amazonaws.com/beta/solana-metrics.tar.bz2)
-
-Developing
-===
-
-Building
----
-
-Install rustc, cargo and rustfmt:
+## **1. Install rustc, cargo and rustfmt.**

 ```bash
 $ curl https://sh.rustup.rs -sSf | sh
@@ -91,112 +32,39 @@ $ sudo apt-get update
 $ sudo apt-get install libssl-dev libudev-dev pkg-config zlib1g-dev llvm clang
 ```

-Download the source code:
+## **2. Download the source code.**

 ```bash
 $ git clone https://github.com/solana-labs/solana.git
 $ cd solana
 ```

-Build
+## **3. Build.**

 ```bash
 $ cargo build
 ```

-Then to run a minimal local cluster
+## **4. Run a minimal local cluster.**
 ```bash
 $ ./run.sh
 ```

-Testing
----
+# Testing

-Run the test suite:
+**Run the test suite:**

 ```bash
 $ cargo test
 ```

-Local Testnet
----
-
-Start your own testnet locally, instructions are in the book [Solana: Blockchain Rebuild for Scale: Getting Started](https://docs.solana.com/book/building-from-source).
-
-Remote Testnets
----
+### Starting a local testnet
+Start your own testnet locally, instructions are in the [online docs](https://docs.solana.com/bench-tps).
+
+### Accessing the remote testnet
 * `testnet` - public stable testnet accessible via devnet.solana.com. Runs 24/7
-
-
-## Deploy process
-
-They are deployed with the `ci/testnet-manager.sh` script through a list of [scheduled
-buildkite jobs](https://buildkite.com/solana-labs/testnet-management/settings/schedules).
-Each testnet can be manually manipulated from buildkite as well.
-
-## How do I reset the testnet?
-Manually trigger the [testnet-management](https://buildkite.com/solana-labs/testnet-management) pipeline
-and when prompted select the desired testnet
-
-## How can I scale the tx generation rate?
-
-Increase the TX rate by increasing the number of cores on the client machine which is running
-`bench-tps` or run multiple clients. Decrease by lowering cores or using the rayon env
-variable `RAYON_NUM_THREADS=<xx>`
-
-## How can I test a change on the testnet?
-
-Currently, a merged PR is the only way to test a change on the testnet. But you
-can run your own testnet using the scripts in the `net/` directory.
-
-## Adjusting the number of clients or validators on the testnet
-Edit `ci/testnet-manager.sh`
-
-
-## Metrics Server Maintenance
-Sometimes the dashboard becomes unresponsive. This happens due to glitch in the metrics server.
-The current solution is to reset the metrics server. Use the following steps.
-
-1. The server is hosted in a GCP VM instance. Check if the VM instance is down by trying to SSH
-   into it from the GCP console. The name of the VM is ```metrics-solana-com```.
-2. If the VM is inaccessible, reset it from the GCP console.
-3. Once VM is up (or, was already up), the metrics services can be restarted from build automation.
-    1. Navigate to https://buildkite.com/solana-labs/metrics-dot-solana-dot-com in your web browser
-    2. Click on ```New Build```
-    3. This will show a pop up dialog. Click on ```options``` drop down.
-    4. Type in ```FORCE_START=true``` in ```Environment Variables``` text box.
-    5. Click ```Create Build```
-    6. This will restart the metrics services, and the dashboards should be accessible afterwards.
-
-## Debugging Testnet
-Testnet may exhibit different symptoms of failures. Primary statistics to check are
-1. Rise in Confirmation Time
-2. Nodes are not voting
-3. Panics, and OOM notifications
-
-Check the following if there are any signs of failure.
-1. Did testnet deployment fail?
-   1. View buildkite logs for the last deployment: https://buildkite.com/solana-labs/testnet-management
-   2. Use the relevant branch
-   3. If the deployment failed, look at the build logs. The build artifacts for each remote node is uploaded.
-      It's a good first step to triage from these logs.
-2. You may have to log into remote node if the deployment succeeded, but something failed during runtime.
-   1. Get the private key for the testnet deployment from ```metrics-solana-com``` GCP instance.
-   2. SSH into ```metrics-solana-com``` using GCP console and do the following.
-   ```bash
-   sudo bash
-   cd ~buildkite-agent/.ssh
-   ls
-   ```
-   3. Copy the relevant private key to your local machine
-   4. Find the public IP address of the AWS instance for the remote node using AWS console
-   5. ```ssh -i <private key file> ubuntu@<ip address of remote node>```
-   6. The logs are in ```~solana\solana``` folder
-
-
-Benchmarking
----
+# Benchmarking

 First install the nightly build of rustc. `cargo bench` requires use of the
 unstable features only available in the nightly build.
@@ -211,13 +79,11 @@ Run the benchmarks:
 $ cargo +nightly bench
 ```

-Release Process
----
+# Release Process

 The release process for this project is described [here](RELEASE.md).


-Code coverage
----
+# Code coverage

 To generate code coverage statistics:
@@ -226,7 +92,6 @@ $ scripts/coverage.sh
 $ open target/cov/lcov-local/index.html
 ```

-
 Why coverage? While most see coverage as a code quality metric, we see it primarily as a developer
 productivity metric. When a developer makes a change to the codebase, presumably it's a *solution* to
 some problem. Our unit-test suite is how we encode the set of *problems* the codebase solves. Running
@@ -238,3 +103,7 @@ problem is solved by this code?" On the other hand, if a test does fail and you
 better way to solve the same problem, a Pull Request with your solution would most certainly be
 welcome! Likewise, if rewriting a test can better communicate what code it's protecting, please
 send us that patch!
+
+# Disclaimer
+
+All claims, content, designs, algorithms, estimates, roadmaps, specifications, and performance measurements described in this project are done with the author's best effort. It is up to the reader to check and validate their accuracy and truthfulness. Furthermore nothing in this project constitutes a solicitation for investment.
````
RELEASE.md

````diff
@@ -116,7 +116,8 @@ There are three release channels that map to branches as follows:

 1. After the new release has been tagged, update the Cargo.toml files on **release branch** to the next semantic version (e.g. 0.9.0 -> 0.9.1) with:
    ```
-   scripts/increment-cargo-version.sh patch
+   $ scripts/increment-cargo-version.sh patch
+   $ ./scripts/cargo-for-all-lock-files.sh tree
    ```
 1. Rebuild to get an updated version of `Cargo.lock`:
    ```
@@ -138,7 +139,7 @@ There are three release channels that map to branches as follows:
 ### Update documentation
 TODO: Documentation update procedure is WIP as we move to gitbook

-Document the new recommended version by updating `book/src/running-archiver.md` and `book/src/validator-testnet.md` on the release (beta) branch to point at the `solana-install` for the upcoming release version.
+Document the new recommended version by updating `docs/src/running-archiver.md` and `docs/src/validator-testnet.md` on the release (beta) branch to point at the `solana-install` for the upcoming release version.

 ### Update software on devnet.solana.com
````
account-decoder/Cargo.toml (new file, 25 lines)

```toml
[package]
name = "solana-account-decoder"
version = "1.2.13"
description = "Solana account decoder"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
homepage = "https://solana.com/"
license = "Apache-2.0"
edition = "2018"

[dependencies]
bincode = "1.2.1"
bs58 = "0.3.1"
Inflector = "0.11.4"
lazy_static = "1.4.0"
solana-sdk = { path = "../sdk", version = "1.2.13" }
solana-vote-program = { path = "../programs/vote", version = "1.2.13" }
spl-memo = "1.0.1"
serde = "1.0.112"
serde_derive = "1.0.103"
serde_json = "1.0.54"
thiserror = "1.0"

[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]
```
account-decoder/src/lib.rs (new file, 80 lines)

```rust
#[macro_use]
extern crate lazy_static;
#[macro_use]
extern crate serde_derive;

pub mod parse_account_data;
pub mod parse_nonce;
pub mod parse_vote;

use crate::parse_account_data::parse_account_data;
use serde_json::Value;
use solana_sdk::{account::Account, clock::Epoch, pubkey::Pubkey};
use std::str::FromStr;

/// A duplicate representation of an Account for pretty JSON serialization
#[derive(Serialize, Deserialize, Clone, Debug)]
#[serde(rename_all = "camelCase")]
pub struct UiAccount {
    pub lamports: u64,
    pub data: UiAccountData,
    pub owner: String,
    pub executable: bool,
    pub rent_epoch: Epoch,
}

#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase", untagged)]
pub enum UiAccountData {
    Binary(String),
    Json(Value),
}

impl From<Vec<u8>> for UiAccountData {
    fn from(data: Vec<u8>) -> Self {
        Self::Binary(bs58::encode(data).into_string())
    }
}

#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
#[serde(rename_all = "camelCase")]
pub enum UiAccountEncoding {
    Binary,
    JsonParsed,
}

impl UiAccount {
    pub fn encode(account: Account, encoding: UiAccountEncoding) -> Self {
        let data = match encoding {
            UiAccountEncoding::Binary => account.data.into(),
            UiAccountEncoding::JsonParsed => {
                if let Ok(parsed_data) = parse_account_data(&account.owner, &account.data) {
                    UiAccountData::Json(parsed_data)
                } else {
                    account.data.into()
                }
            }
        };
        UiAccount {
            lamports: account.lamports,
            data,
            owner: account.owner.to_string(),
            executable: account.executable,
            rent_epoch: account.rent_epoch,
        }
    }

    pub fn decode(&self) -> Option<Account> {
        let data = match &self.data {
            UiAccountData::Json(_) => None,
            UiAccountData::Binary(blob) => bs58::decode(blob).into_vec().ok(),
        }?;
        Some(Account {
            lamports: self.lamports,
            data,
            owner: Pubkey::from_str(&self.owner).ok()?,
            executable: self.executable,
            rent_epoch: self.rent_epoch,
        })
    }
}
```
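As a usage sketch of the `UiAccount` API introduced above: the round trip below assumes `solana-account-decoder` and `solana-sdk` are available as dependencies (per the Cargo.toml earlier in this diff); the `main` harness is illustrative, not part of the crate.

```rust
use solana_account_decoder::{UiAccount, UiAccountEncoding};
use solana_sdk::{account::Account, pubkey::Pubkey};

fn main() {
    // A dummy account owned by a program the decoder has no parser for.
    let owner = Pubkey::new_rand();
    let account = Account::new(42, 16, &owner);

    // Binary encoding base58-encodes the raw account bytes for JSON transport.
    let ui_account = UiAccount::encode(account.clone(), UiAccountEncoding::Binary);
    assert_eq!(ui_account.lamports, 42);

    // decode() reverses only the Binary representation; JsonParsed data is
    // lossy, so decode() returns None for it.
    let round_tripped = ui_account.decode().expect("binary data round-trips");
    assert_eq!(round_tripped, account);
}
```

Note that `JsonParsed` encoding falls back to the binary form whenever `parse_account_data` does not recognize the owner program, per the match in `encode`.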
account-decoder/src/parse_account_data.rs (new file, 80 lines)

```rust
use crate::{parse_nonce::parse_nonce, parse_vote::parse_vote};
use inflector::Inflector;
use serde_json::{json, Value};
use solana_sdk::{instruction::InstructionError, pubkey::Pubkey, system_program};
use std::{collections::HashMap, str::FromStr};
use thiserror::Error;

lazy_static! {
    static ref SYSTEM_PROGRAM_ID: Pubkey =
        Pubkey::from_str(&system_program::id().to_string()).unwrap();
    static ref VOTE_PROGRAM_ID: Pubkey =
        Pubkey::from_str(&solana_vote_program::id().to_string()).unwrap();
    pub static ref PARSABLE_PROGRAM_IDS: HashMap<Pubkey, ParsableAccount> = {
        let mut m = HashMap::new();
        m.insert(*SYSTEM_PROGRAM_ID, ParsableAccount::Nonce);
        m.insert(*VOTE_PROGRAM_ID, ParsableAccount::Vote);
        m
    };
}

#[derive(Error, Debug)]
pub enum ParseAccountError {
    #[error("Program not parsable")]
    ProgramNotParsable,

    #[error("Instruction error")]
    InstructionError(#[from] InstructionError),

    #[error("Serde json error")]
    SerdeJsonError(#[from] serde_json::error::Error),
}

#[derive(Debug, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub enum ParsableAccount {
    Nonce,
    Vote,
}

pub fn parse_account_data(program_id: &Pubkey, data: &[u8]) -> Result<Value, ParseAccountError> {
    let program_name = PARSABLE_PROGRAM_IDS
        .get(program_id)
        .ok_or_else(|| ParseAccountError::ProgramNotParsable)?;
    let parsed_json = match program_name {
        ParsableAccount::Nonce => serde_json::to_value(parse_nonce(data)?)?,
        ParsableAccount::Vote => serde_json::to_value(parse_vote(data)?)?,
    };
    Ok(json!({
        format!("{:?}", program_name).to_kebab_case(): parsed_json
    }))
}

#[cfg(test)]
mod test {
    use super::*;
    use solana_sdk::nonce::{
        state::{Data, Versions},
        State,
    };
    use solana_vote_program::vote_state::{VoteState, VoteStateVersions};

    #[test]
    fn test_parse_account_data() {
        let other_program = Pubkey::new_rand();
        let data = vec![0; 4];
        assert!(parse_account_data(&other_program, &data).is_err());

        let vote_state = VoteState::default();
        let mut vote_account_data: Vec<u8> = vec![0; VoteState::size_of()];
        let versioned = VoteStateVersions::Current(Box::new(vote_state));
        VoteState::serialize(&versioned, &mut vote_account_data).unwrap();
        let parsed = parse_account_data(&solana_vote_program::id(), &vote_account_data).unwrap();
        assert!(parsed.as_object().unwrap().contains_key("vote"));

        let nonce_data = Versions::new_current(State::Initialized(Data::default()));
        let nonce_account_data = bincode::serialize(&nonce_data).unwrap();
        let parsed = parse_account_data(&system_program::id(), &nonce_account_data).unwrap();
        assert!(parsed.as_object().unwrap().contains_key("nonce"));
    }
}
```
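A hedged sketch of the dispatch above: `parse_account_data` keys off the owner program in `PARSABLE_PROGRAM_IDS` and nests the parsed value under the kebab-cased variant name, so a system-owned nonce account comes back under a `"nonce"` key. The serialization below mirrors the module's own test; `bincode` and `serde_json` are assumed as dependencies, and the `main` harness is illustrative.

```rust
use solana_account_decoder::parse_account_data::parse_account_data;
use solana_sdk::{
    nonce::{
        state::{Data, Versions},
        State,
    },
    system_program,
};

fn main() {
    // Serialize an initialized nonce account the way the runtime stores it.
    let nonce_versions = Versions::new_current(State::Initialized(Data::default()));
    let account_data = bincode::serialize(&nonce_versions).unwrap();

    // System-program ownership routes the bytes through parse_nonce.
    let json = parse_account_data(&system_program::id(), &account_data).unwrap();
    println!("{}", serde_json::to_string_pretty(&json).unwrap());
}
```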
66
account-decoder/src/parse_nonce.rs
Normal file
66
account-decoder/src/parse_nonce.rs
Normal file
@ -0,0 +1,66 @@
|
||||
use crate::parse_account_data::ParseAccountError;
use solana_sdk::{
    fee_calculator::FeeCalculator,
    instruction::InstructionError,
    nonce::{state::Versions, State},
};

pub fn parse_nonce(data: &[u8]) -> Result<UiNonceState, ParseAccountError> {
    let nonce_state: Versions = bincode::deserialize(data)
        .map_err(|_| ParseAccountError::from(InstructionError::InvalidAccountData))?;
    let nonce_state = nonce_state.convert_to_current();
    match nonce_state {
        State::Uninitialized => Ok(UiNonceState::Uninitialized),
        State::Initialized(data) => Ok(UiNonceState::Initialized(UiNonceData {
            authority: data.authority.to_string(),
            blockhash: data.blockhash.to_string(),
            fee_calculator: data.fee_calculator,
        })),
    }
}

/// A duplicate representation of NonceState for pretty JSON serialization
#[derive(Debug, Serialize, Deserialize, PartialEq)]
#[serde(rename_all = "camelCase")]
pub enum UiNonceState {
    Uninitialized,
    Initialized(UiNonceData),
}

#[derive(Debug, Serialize, Deserialize, PartialEq)]
#[serde(rename_all = "camelCase")]
pub struct UiNonceData {
    pub authority: String,
    pub blockhash: String,
    pub fee_calculator: FeeCalculator,
}

#[cfg(test)]
mod test {
    use super::*;
    use solana_sdk::{
        hash::Hash,
        nonce::{
            state::{Data, Versions},
            State,
        },
        pubkey::Pubkey,
    };

    #[test]
    fn test_parse_nonce() {
        let nonce_data = Versions::new_current(State::Initialized(Data::default()));
        let nonce_account_data = bincode::serialize(&nonce_data).unwrap();
        assert_eq!(
            parse_nonce(&nonce_account_data).unwrap(),
            UiNonceState::Initialized(UiNonceData {
                authority: Pubkey::default().to_string(),
                blockhash: Hash::default().to_string(),
                fee_calculator: FeeCalculator::default(),
            }),
        );

        let bad_data = vec![0; 4];
        assert!(parse_nonce(&bad_data).is_err());
    }
}
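For orientation, serializing UiNonceState with serde's camelCase rename yields externally tagged JSON, roughly {"initialized":{"authority":"<base-58>","blockhash":"<base-58>","feeCalculator":{...}}} (the exact FeeCalculator field names are not shown in this diff). A minimal pretty-printer sketch, assuming this module is in scope:

// Sketch only. ParseAccountError is a thiserror enum above, so `?` can
// convert it into Box<dyn Error>.
fn print_nonce(data: &[u8]) -> Result<(), Box<dyn std::error::Error>> {
    let state = parse_nonce(data)?;
    println!("{}", serde_json::to_string_pretty(&state)?);
    Ok(())
}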
134 account-decoder/src/parse_vote.rs Normal file
@@ -0,0 +1,134 @@
use crate::parse_account_data::ParseAccountError;
use solana_sdk::{
    clock::{Epoch, Slot},
    pubkey::Pubkey,
};
use solana_vote_program::vote_state::{BlockTimestamp, Lockout, VoteState};

pub fn parse_vote(data: &[u8]) -> Result<UiVoteState, ParseAccountError> {
    let mut vote_state = VoteState::deserialize(data).map_err(ParseAccountError::from)?;
    let epoch_credits = vote_state
        .epoch_credits()
        .iter()
        .map(|(epoch, credits, previous_credits)| UiEpochCredits {
            epoch: *epoch,
            credits: *credits,
            previous_credits: *previous_credits,
        })
        .collect();
    let votes = vote_state
        .votes
        .iter()
        .map(|lockout| UiLockout {
            slot: lockout.slot,
            confirmation_count: lockout.confirmation_count,
        })
        .collect();
    let authorized_voters = vote_state
        .authorized_voters()
        .iter()
        .map(|(epoch, authorized_voter)| UiAuthorizedVoters {
            epoch: *epoch,
            authorized_voter: authorized_voter.to_string(),
        })
        .collect();
    let prior_voters = vote_state
        .prior_voters()
        .buf()
        .iter()
        .filter(|(pubkey, _, _)| pubkey != &Pubkey::default())
        .map(
            |(authorized_pubkey, epoch_of_last_authorized_switch, target_epoch)| UiPriorVoters {
                authorized_pubkey: authorized_pubkey.to_string(),
                epoch_of_last_authorized_switch: *epoch_of_last_authorized_switch,
                target_epoch: *target_epoch,
            },
        )
        .collect();
    Ok(UiVoteState {
        node_pubkey: vote_state.node_pubkey.to_string(),
        authorized_withdrawer: vote_state.authorized_withdrawer.to_string(),
        commission: vote_state.commission,
        votes,
        root_slot: vote_state.root_slot,
        authorized_voters,
        prior_voters,
        epoch_credits,
        last_timestamp: vote_state.last_timestamp,
    })
}

/// A duplicate representation of VoteState for pretty JSON serialization
#[derive(Debug, Serialize, Deserialize, Default, PartialEq)]
#[serde(rename_all = "camelCase")]
pub struct UiVoteState {
    node_pubkey: String,
    authorized_withdrawer: String,
    commission: u8,
    votes: Vec<UiLockout>,
    root_slot: Option<Slot>,
    authorized_voters: Vec<UiAuthorizedVoters>,
    prior_voters: Vec<UiPriorVoters>,
    epoch_credits: Vec<UiEpochCredits>,
    last_timestamp: BlockTimestamp,
}

#[derive(Debug, Serialize, Deserialize, PartialEq)]
#[serde(rename_all = "camelCase")]
struct UiLockout {
    slot: Slot,
    confirmation_count: u32,
}

impl From<&Lockout> for UiLockout {
    fn from(lockout: &Lockout) -> Self {
        Self {
            slot: lockout.slot,
            confirmation_count: lockout.confirmation_count,
        }
    }
}

#[derive(Debug, Serialize, Deserialize, PartialEq)]
#[serde(rename_all = "camelCase")]
struct UiAuthorizedVoters {
    epoch: Epoch,
    authorized_voter: String,
}

#[derive(Debug, Serialize, Deserialize, PartialEq)]
#[serde(rename_all = "camelCase")]
struct UiPriorVoters {
    authorized_pubkey: String,
    epoch_of_last_authorized_switch: Epoch,
    target_epoch: Epoch,
}

#[derive(Debug, Serialize, Deserialize, PartialEq)]
#[serde(rename_all = "camelCase")]
struct UiEpochCredits {
    epoch: Epoch,
    credits: u64,
    previous_credits: u64,
}

#[cfg(test)]
mod test {
    use super::*;
    use solana_vote_program::vote_state::VoteStateVersions;

    #[test]
    fn test_parse_vote() {
        let vote_state = VoteState::default();
        let mut vote_account_data: Vec<u8> = vec![0; VoteState::size_of()];
        let versioned = VoteStateVersions::Current(Box::new(vote_state));
        VoteState::serialize(&versioned, &mut vote_account_data).unwrap();
        let mut expected_vote_state = UiVoteState::default();
        expected_vote_state.node_pubkey = Pubkey::default().to_string();
        expected_vote_state.authorized_withdrawer = Pubkey::default().to_string();
        assert_eq!(parse_vote(&vote_account_data).unwrap(), expected_vote_state,);

        let bad_data = vec![0; 4];
        assert!(parse_vote(&bad_data).is_err());
    }
}
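One design note: each Ui struct re-encodes Pubkey values as base-58 Strings so the JSON stays human-readable rather than an array of bytes. The From<&Lockout> impl above also permits a more compact formulation of the votes mapping in parse_vote; a sketch, equivalent to the explicit closure:

// Equivalent to the manual UiLockout construction in parse_vote (sketch).
let votes: Vec<UiLockout> = vote_state.votes.iter().map(UiLockout::from).collect();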
22 accounts-bench/Cargo.toml Normal file
@@ -0,0 +1,22 @@
[package]
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-accounts-bench"
version = "1.2.13"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"

[dependencies]
log = "0.4.6"
rayon = "1.3.0"
solana-logger = { path = "../logger", version = "1.2.13" }
solana-runtime = { path = "../runtime", version = "1.2.13" }
solana-measure = { path = "../measure", version = "1.2.13" }
solana-sdk = { path = "../sdk", version = "1.2.13" }
rand = "0.7.0"
clap = "2.33.1"
crossbeam-channel = "0.4"

[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]
105 accounts-bench/src/main.rs Normal file
@@ -0,0 +1,105 @@
use clap::{value_t, App, Arg};
use rayon::prelude::*;
use solana_measure::measure::Measure;
use solana_runtime::{
    accounts::{create_test_accounts, update_accounts, Accounts},
    accounts_index::Ancestors,
};
use solana_sdk::pubkey::Pubkey;
use std::fs;
use std::path::PathBuf;

fn main() {
    solana_logger::setup();

    let matches = App::new("crate")
        .about("about")
        .version("version")
        .arg(
            Arg::with_name("num_slots")
                .long("num_slots")
                .takes_value(true)
                .value_name("SLOTS")
                .help("Number of slots to store to."),
        )
        .arg(
            Arg::with_name("num_accounts")
                .long("num_accounts")
                .takes_value(true)
                .value_name("NUM_ACCOUNTS")
                .help("Total number of accounts"),
        )
        .arg(
            Arg::with_name("iterations")
                .long("iterations")
                .takes_value(true)
                .value_name("ITERATIONS")
                .help("Number of bench iterations"),
        )
        .arg(
            Arg::with_name("clean")
                .long("clean")
                .takes_value(false)
                .help("Run clean"),
        )
        .get_matches();

    let num_slots = value_t!(matches, "num_slots", usize).unwrap_or(4);
    let num_accounts = value_t!(matches, "num_accounts", usize).unwrap_or(10_000);
    let iterations = value_t!(matches, "iterations", usize).unwrap_or(20);
    let clean = matches.is_present("clean");
    println!("clean: {:?}", clean);

    let path = PathBuf::from("farf/accounts-bench");
    if fs::remove_dir_all(path.clone()).is_err() {
        println!("Warning: Couldn't remove {:?}", path);
    }
    let accounts = Accounts::new(vec![path]);
    println!("Creating {} accounts", num_accounts);
    let mut create_time = Measure::start("create accounts");
    let pubkeys: Vec<_> = (0..num_slots)
        .into_par_iter()
        .map(|slot| {
            let mut pubkeys: Vec<Pubkey> = vec![];
            create_test_accounts(
                &accounts,
                &mut pubkeys,
                num_accounts / num_slots,
                slot as u64,
            );
            pubkeys
        })
        .collect();
    let pubkeys: Vec<_> = pubkeys.into_iter().flatten().collect();
    create_time.stop();
    println!(
        "created {} accounts in {} slots {}",
        (num_accounts / num_slots) * num_slots,
        num_slots,
        create_time
    );
    let mut ancestors: Ancestors = vec![(0, 0)].into_iter().collect();
    for i in 1..num_slots {
        ancestors.insert(i as u64, i - 1);
        accounts.add_root(i as u64);
    }
    for x in 0..iterations {
        if clean {
            let mut time = Measure::start("clean");
            accounts.accounts_db.clean_accounts();
            time.stop();
            println!("{}", time);
            for slot in 0..num_slots {
                update_accounts(&accounts, &pubkeys, ((x + 1) * num_slots + slot) as u64);
                accounts.add_root((x * num_slots + slot) as u64);
            }
        } else {
            let mut pubkeys: Vec<Pubkey> = vec![];
            let mut time = Measure::start("hash");
            let hash = accounts.accounts_db.update_accounts_hash(0, &ancestors);
            time.stop();
            println!("hash: {} {}", hash, time);
            create_test_accounts(&accounts, &mut pubkeys, 1, 0);
        }
    }
}
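A plausible invocation of this bench from the workspace root; the flag spellings match the Arg definitions above (note they use underscores, not hyphens), and the numbers are just the code's fallback defaults made explicit:

cargo run --release -p solana-accounts-bench -- --num_slots 4 --num_accounts 10000 --iterations 20 --clean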
@@ -1,39 +0,0 @@
[package]
name = "solana-archiver-lib"
version = "1.0.0"
description = "Solana Archiver Library"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
edition = "2018"

[dependencies]
bincode = "1.2.1"
crossbeam-channel = "0.3"
ed25519-dalek = "=1.0.0-pre.1"
log = "0.4.8"
rand = "0.6.5"
rand_chacha = "0.1.1"
solana-client = { path = "../client", version = "1.0.0" }
solana-storage-program = { path = "../programs/storage", version = "1.0.0" }
thiserror = "1.0"
serde = "1.0.104"
serde_json = "1.0.46"
serde_derive = "1.0.103"
solana-net-utils = { path = "../net-utils", version = "1.0.0" }
solana-chacha = { path = "../chacha", version = "1.0.0" }
solana-chacha-sys = { path = "../chacha-sys", version = "1.0.0" }
solana-ledger = { path = "../ledger", version = "1.0.0" }
solana-logger = { path = "../logger", version = "1.0.0" }
solana-perf = { path = "../perf", version = "1.0.0" }
solana-sdk = { path = "../sdk", version = "1.0.0" }
solana-core = { path = "../core", version = "1.0.0" }
solana-archiver-utils = { path = "../archiver-utils", version = "1.0.0" }
solana-metrics = { path = "../metrics", version = "1.0.0" }

[dev-dependencies]
hex = "0.4.0"

[lib]
name = "solana_archiver_lib"
@@ -1,944 +0,0 @@
use crate::result::ArchiverError;
use crossbeam_channel::unbounded;
use rand::{thread_rng, Rng, SeedableRng};
use rand_chacha::ChaChaRng;
use solana_archiver_utils::sample_file;
use solana_chacha::chacha::{chacha_cbc_encrypt_ledger, CHACHA_BLOCK_SIZE};
use solana_client::{
    rpc_client::RpcClient, rpc_request::RpcRequest, rpc_response::RpcStorageTurn,
    thin_client::ThinClient,
};
use solana_core::{
    cluster_info::{ClusterInfo, Node, VALIDATOR_PORT_RANGE},
    contact_info::ContactInfo,
    gossip_service::GossipService,
    packet::{limited_deserialize, PACKET_DATA_SIZE},
    repair_service,
    repair_service::{RepairService, RepairSlotRange, RepairStrategy},
    serve_repair::ServeRepair,
    shred_fetch_stage::ShredFetchStage,
    sigverify_stage::{DisabledSigVerifier, SigVerifyStage},
    storage_stage::NUM_STORAGE_SAMPLES,
    streamer::{receiver, responder, PacketReceiver},
    window_service::WindowService,
};
use solana_ledger::{
    blockstore::Blockstore, leader_schedule_cache::LeaderScheduleCache, shred::Shred,
};
use solana_net_utils::bind_in_range;
use solana_perf::packet::Packets;
use solana_perf::recycler::Recycler;
use solana_sdk::packet::Packet;
use solana_sdk::{
    account_utils::StateMut,
    client::{AsyncClient, SyncClient},
    clock::{get_complete_segment_from_slot, get_segment_from_slot, Slot},
    commitment_config::CommitmentConfig,
    hash::Hash,
    message::Message,
    signature::{Keypair, Signature, Signer},
    timing::timestamp,
    transaction::Transaction,
    transport::TransportError,
};
use solana_storage_program::{
    storage_contract::StorageContract,
    storage_instruction::{self, StorageAccountType},
};
use std::{
    io::{self, ErrorKind},
    net::{SocketAddr, UdpSocket},
    path::{Path, PathBuf},
    result,
    sync::atomic::{AtomicBool, Ordering},
    sync::mpsc::{channel, Receiver, Sender},
    sync::{Arc, RwLock},
    thread::{sleep, spawn, JoinHandle},
    time::Duration,
};

type Result<T> = std::result::Result<T, ArchiverError>;

static ENCRYPTED_FILENAME: &str = "ledger.enc";

#[derive(Serialize, Deserialize)]
pub enum ArchiverRequest {
    GetSlotHeight(SocketAddr),
}

pub struct Archiver {
    thread_handles: Vec<JoinHandle<()>>,
    exit: Arc<AtomicBool>,
}

// Shared Archiver Meta struct used internally
#[derive(Default)]
struct ArchiverMeta {
    slot: Slot,
    slots_per_segment: u64,
    ledger_path: PathBuf,
    signature: Signature,
    ledger_data_file_encrypted: PathBuf,
    sampling_offsets: Vec<u64>,
    blockhash: Hash,
    sha_state: Hash,
    num_chacha_blocks: usize,
    client_commitment: CommitmentConfig,
}

fn get_slot_from_signature(
    signature: &Signature,
    storage_turn: u64,
    slots_per_segment: u64,
) -> u64 {
    let signature_vec = signature.as_ref();
    let mut segment_index = u64::from(signature_vec[0])
        | (u64::from(signature_vec[1]) << 8)
        | (u64::from(signature_vec[1]) << 16)
        | (u64::from(signature_vec[2]) << 24);
    let max_segment_index =
        get_complete_segment_from_slot(storage_turn, slots_per_segment).unwrap();
    segment_index %= max_segment_index as u64;
    segment_index * slots_per_segment
}

fn create_request_processor(
    socket: UdpSocket,
    exit: &Arc<AtomicBool>,
    slot_receiver: Receiver<u64>,
) -> Vec<JoinHandle<()>> {
    let mut thread_handles = vec![];
    let (s_reader, r_reader) = channel();
    let (s_responder, r_responder) = channel();
    let storage_socket = Arc::new(socket);
    let recycler = Recycler::default();
    let t_receiver = receiver(storage_socket.clone(), exit, s_reader, recycler, "archiver");
    thread_handles.push(t_receiver);

    let t_responder = responder("archiver-responder", storage_socket, r_responder);
    thread_handles.push(t_responder);

    let exit = exit.clone();
    let t_processor = spawn(move || {
        let slot = poll_for_slot(slot_receiver, &exit);

        loop {
            if exit.load(Ordering::Relaxed) {
                break;
            }

            let packets = r_reader.recv_timeout(Duration::from_secs(1));

            if let Ok(packets) = packets {
                for packet in &packets.packets {
                    let req: result::Result<ArchiverRequest, Box<bincode::ErrorKind>> =
                        limited_deserialize(&packet.data[..packet.meta.size]);
                    match req {
                        Ok(ArchiverRequest::GetSlotHeight(from)) => {
                            let packet = Packet::from_data(&from, slot);
                            let _ = s_responder.send(Packets::new(vec![packet]));
                        }
                        Err(e) => {
                            info!("invalid request: {:?}", e);
                        }
                    }
                }
            }
        }
    });
    thread_handles.push(t_processor);
    thread_handles
}

fn poll_for_slot(receiver: Receiver<u64>, exit: &Arc<AtomicBool>) -> u64 {
    loop {
        let slot = receiver.recv_timeout(Duration::from_secs(1));
        if let Ok(slot) = slot {
            return slot;
        }
        if exit.load(Ordering::Relaxed) {
            return 0;
        }
    }
}

impl Archiver {
    /// Returns a Result that contains an archiver on success
    ///
    /// # Arguments
    /// * `ledger_path` - path to where the ledger will be stored.
    ///   Causes panic if none
    /// * `node` - The archiver node
    /// * `cluster_entrypoint` - ContactInfo representing an entry into the network
    /// * `keypair` - Keypair for this archiver
    #[allow(clippy::new_ret_no_self)]
    pub fn new(
        ledger_path: &Path,
        node: Node,
        cluster_entrypoint: ContactInfo,
        keypair: Arc<Keypair>,
        storage_keypair: Arc<Keypair>,
        client_commitment: CommitmentConfig,
    ) -> Result<Self> {
        let exit = Arc::new(AtomicBool::new(false));

        info!("Archiver: id: {}", keypair.pubkey());
        info!("Creating cluster info....");
        let mut cluster_info = ClusterInfo::new(node.info.clone(), keypair.clone());
        cluster_info.set_entrypoint(cluster_entrypoint.clone());
        let cluster_info = Arc::new(RwLock::new(cluster_info));

        // Note for now, this ledger will not contain any of the existing entries
        // in the ledger located at ledger_path, and will only append on newly received
        // entries after being passed to window_service
        let blockstore = Arc::new(
            Blockstore::open(ledger_path).expect("Expected to be able to open database ledger"),
        );

        let gossip_service = GossipService::new(&cluster_info, None, node.sockets.gossip, &exit);

        info!("Connecting to the cluster via {:?}", cluster_entrypoint);
        let (nodes, _) =
            match solana_core::gossip_service::discover_cluster(&cluster_entrypoint.gossip, 1) {
                Ok(nodes_and_archivers) => nodes_and_archivers,
                Err(e) => {
                    //shutdown services before exiting
                    exit.store(true, Ordering::Relaxed);
                    gossip_service.join()?;
                    return Err(e.into());
                }
            };
        let client = solana_core::gossip_service::get_client(&nodes);

        info!("Setting up mining account...");
        if let Err(e) = Self::setup_mining_account(
            &client,
            &keypair,
            &storage_keypair,
            client_commitment.clone(),
        ) {
            //shutdown services before exiting
            exit.store(true, Ordering::Relaxed);
            gossip_service.join()?;
            return Err(e);
        };

        let repair_socket = Arc::new(node.sockets.repair);
        let shred_sockets: Vec<Arc<UdpSocket>> =
            node.sockets.tvu.into_iter().map(Arc::new).collect();
        let shred_forward_sockets: Vec<Arc<UdpSocket>> = node
            .sockets
            .tvu_forwards
            .into_iter()
            .map(Arc::new)
            .collect();
        let (shred_fetch_sender, shred_fetch_receiver) = channel();
        let fetch_stage = ShredFetchStage::new(
            shred_sockets,
            shred_forward_sockets,
            repair_socket.clone(),
            &shred_fetch_sender,
            &exit,
        );
        let (slot_sender, slot_receiver) = channel();
        let request_processor =
            create_request_processor(node.sockets.storage.unwrap(), &exit, slot_receiver);

        let t_archiver = {
            let exit = exit.clone();
            let node_info = node.info.clone();
            let mut meta = ArchiverMeta {
                ledger_path: ledger_path.to_path_buf(),
                client_commitment,
                ..ArchiverMeta::default()
            };
            spawn(move || {
                // setup archiver
                let window_service = match Self::setup(
                    &mut meta,
                    cluster_info.clone(),
                    &blockstore,
                    &exit,
                    &node_info,
                    &storage_keypair,
                    repair_socket,
                    shred_fetch_receiver,
                    slot_sender,
                ) {
                    Ok(window_service) => window_service,
                    Err(e) => {
                        //shutdown services before exiting
                        error!("setup failed {:?}; archiver thread exiting...", e);
                        exit.store(true, Ordering::Relaxed);
                        request_processor
                            .into_iter()
                            .for_each(|t| t.join().unwrap());
                        fetch_stage.join().unwrap();
                        gossip_service.join().unwrap();
                        return;
                    }
                };

                info!("setup complete");
                // run archiver
                Self::run(
                    &mut meta,
                    &blockstore,
                    cluster_info,
                    &keypair,
                    &storage_keypair,
                    &exit,
                );
                // wait until exit
                request_processor
                    .into_iter()
                    .for_each(|t| t.join().unwrap());
                fetch_stage.join().unwrap();
                gossip_service.join().unwrap();
                window_service.join().unwrap()
            })
        };

        Ok(Self {
            thread_handles: vec![t_archiver],
            exit,
        })
    }

    fn run(
        meta: &mut ArchiverMeta,
        blockstore: &Arc<Blockstore>,
        cluster_info: Arc<RwLock<ClusterInfo>>,
        archiver_keypair: &Arc<Keypair>,
        storage_keypair: &Arc<Keypair>,
        exit: &Arc<AtomicBool>,
    ) {
        // encrypt segment
        Self::encrypt_ledger(meta, blockstore).expect("ledger encrypt not successful");
        let enc_file_path = meta.ledger_data_file_encrypted.clone();
        // do replicate
        loop {
            if exit.load(Ordering::Relaxed) {
                break;
            }

            // TODO check if more segments are available - based on space constraints
            Self::create_sampling_offsets(meta);
            let sampling_offsets = &meta.sampling_offsets;
            meta.sha_state =
                match Self::sample_file_to_create_mining_hash(&enc_file_path, sampling_offsets) {
                    Ok(hash) => hash,
                    Err(err) => {
                        warn!("Error sampling file, exiting: {:?}", err);
                        break;
                    }
                };

            Self::submit_mining_proof(meta, &cluster_info, archiver_keypair, storage_keypair);

            // TODO make this a lot more frequent by picking a "new" blockhash instead of picking a storage blockhash
            // prep the next proof
            let (storage_blockhash, _) = match Self::poll_for_blockhash_and_slot(
                &cluster_info,
                meta.slots_per_segment,
                &meta.blockhash,
                exit,
            ) {
                Ok(blockhash_and_slot) => blockhash_and_slot,
                Err(e) => {
                    warn!(
                        "Error couldn't get a newer blockhash than {:?}. {:?}",
                        meta.blockhash, e
                    );
                    break;
                }
            };
            meta.blockhash = storage_blockhash;
            Self::redeem_rewards(
                &cluster_info,
                archiver_keypair,
                storage_keypair,
                meta.client_commitment.clone(),
            );
        }
        exit.store(true, Ordering::Relaxed);
    }

    fn redeem_rewards(
        cluster_info: &Arc<RwLock<ClusterInfo>>,
        archiver_keypair: &Arc<Keypair>,
        storage_keypair: &Arc<Keypair>,
        client_commitment: CommitmentConfig,
    ) {
        let nodes = cluster_info.read().unwrap().tvu_peers();
        let client = solana_core::gossip_service::get_client(&nodes);

        if let Ok(Some(account)) =
            client.get_account_with_commitment(&storage_keypair.pubkey(), client_commitment.clone())
        {
            if let Ok(StorageContract::ArchiverStorage { validations, .. }) = account.state() {
                if !validations.is_empty() {
                    let ix = storage_instruction::claim_reward(
                        &archiver_keypair.pubkey(),
                        &storage_keypair.pubkey(),
                    );
                    let message =
                        Message::new_with_payer(vec![ix], Some(&archiver_keypair.pubkey()));
                    if let Err(e) = client.send_message(&[archiver_keypair.as_ref()], message) {
                        error!("unable to redeem reward, tx failed: {:?}", e);
                    } else {
                        info!(
                            "collected mining rewards: Account balance {:?}",
                            client.get_balance_with_commitment(
                                &archiver_keypair.pubkey(),
                                client_commitment
                            )
                        );
                    }
                }
            }
        } else {
            info!("Redeem mining reward: No account data found");
        }
    }

    // Find a segment to replicate and download it.
    fn setup(
        meta: &mut ArchiverMeta,
        cluster_info: Arc<RwLock<ClusterInfo>>,
        blockstore: &Arc<Blockstore>,
        exit: &Arc<AtomicBool>,
        node_info: &ContactInfo,
        storage_keypair: &Arc<Keypair>,
        repair_socket: Arc<UdpSocket>,
        shred_fetch_receiver: PacketReceiver,
        slot_sender: Sender<u64>,
    ) -> Result<WindowService> {
        let slots_per_segment =
            match Self::get_segment_config(&cluster_info, meta.client_commitment.clone()) {
                Ok(slots_per_segment) => slots_per_segment,
                Err(e) => {
                    error!("unable to get segment size configuration, exiting...");
                    //shutdown services before exiting
                    exit.store(true, Ordering::Relaxed);
                    return Err(e);
                }
            };
        let (segment_blockhash, segment_slot) = match Self::poll_for_segment(
            &cluster_info,
            slots_per_segment,
            &Hash::default(),
            exit,
        ) {
            Ok(blockhash_and_slot) => blockhash_and_slot,
            Err(e) => {
                //shutdown services before exiting
                exit.store(true, Ordering::Relaxed);
                return Err(e);
            }
        };
        let signature = storage_keypair.sign_message(segment_blockhash.as_ref());
        let slot = get_slot_from_signature(&signature, segment_slot, slots_per_segment);
        info!("replicating slot: {}", slot);
        slot_sender.send(slot)?;
        meta.slot = slot;
        meta.slots_per_segment = slots_per_segment;
        meta.signature = signature;
        meta.blockhash = segment_blockhash;

        let mut repair_slot_range = RepairSlotRange::default();
        repair_slot_range.end = slot + slots_per_segment;
        repair_slot_range.start = slot;

        let (retransmit_sender, _) = channel();

        let (verified_sender, verified_receiver) = unbounded();

        let _sigverify_stage = SigVerifyStage::new(
            shred_fetch_receiver,
            verified_sender,
            DisabledSigVerifier::default(),
        );

        let window_service = WindowService::new(
            blockstore.clone(),
            cluster_info.clone(),
            verified_receiver,
            retransmit_sender,
            repair_socket,
            &exit,
            RepairStrategy::RepairRange(repair_slot_range),
            &Arc::new(LeaderScheduleCache::default()),
            |_, _, _, _| true,
        );
        info!("waiting for ledger download");
        Self::wait_for_segment_download(
            slot,
            slots_per_segment,
            &blockstore,
            &exit,
            &node_info,
            cluster_info,
        );
        Ok(window_service)
    }

    fn wait_for_segment_download(
        start_slot: Slot,
        slots_per_segment: u64,
        blockstore: &Arc<Blockstore>,
        exit: &Arc<AtomicBool>,
        node_info: &ContactInfo,
        cluster_info: Arc<RwLock<ClusterInfo>>,
    ) {
        info!(
            "window created, waiting for ledger download starting at slot {:?}",
            start_slot
        );
        let mut current_slot = start_slot;
        'outer: loop {
            while blockstore.is_full(current_slot) {
                current_slot += 1;
                info!("current slot: {}", current_slot);
                if current_slot >= start_slot + slots_per_segment {
                    break 'outer;
                }
            }
            if exit.load(Ordering::Relaxed) {
                break;
            }
            sleep(Duration::from_secs(1));
        }

        info!("Done receiving entries from window_service");

        // Remove archiver from the data plane
        let mut contact_info = node_info.clone();
        contact_info.tvu = "0.0.0.0:0".parse().unwrap();
        contact_info.wallclock = timestamp();
        // copy over the adopted shred_version from the entrypoint
        contact_info.shred_version = cluster_info.read().unwrap().my_data().shred_version;
        {
            let mut cluster_info_w = cluster_info.write().unwrap();
            cluster_info_w.insert_self(contact_info);
        }
    }

    fn encrypt_ledger(meta: &mut ArchiverMeta, blockstore: &Arc<Blockstore>) -> Result<()> {
        meta.ledger_data_file_encrypted = meta.ledger_path.join(ENCRYPTED_FILENAME);

        {
            let mut ivec = [0u8; 64];
            ivec.copy_from_slice(&meta.signature.as_ref());

            let num_encrypted_bytes = chacha_cbc_encrypt_ledger(
                blockstore,
                meta.slot,
                meta.slots_per_segment,
                &meta.ledger_data_file_encrypted,
                &mut ivec,
            )?;

            meta.num_chacha_blocks = num_encrypted_bytes / CHACHA_BLOCK_SIZE;
        }

        info!(
            "Done encrypting the ledger: {:?}",
            meta.ledger_data_file_encrypted
        );
        Ok(())
    }

    fn create_sampling_offsets(meta: &mut ArchiverMeta) {
        meta.sampling_offsets.clear();
        let mut rng_seed = [0u8; 32];
        rng_seed.copy_from_slice(&meta.blockhash.as_ref());
        let mut rng = ChaChaRng::from_seed(rng_seed);
        for _ in 0..NUM_STORAGE_SAMPLES {
            meta.sampling_offsets
                .push(rng.gen_range(0, meta.num_chacha_blocks) as u64);
        }
    }

    fn sample_file_to_create_mining_hash(
        enc_file_path: &Path,
        sampling_offsets: &[u64],
    ) -> Result<Hash> {
        let sha_state = sample_file(enc_file_path, sampling_offsets)?;
        info!("sampled sha_state: {}", sha_state);
        Ok(sha_state)
    }

    fn setup_mining_account(
        client: &ThinClient,
        keypair: &Keypair,
        storage_keypair: &Keypair,
        client_commitment: CommitmentConfig,
    ) -> Result<()> {
        // make sure archiver has some balance
        info!("checking archiver keypair...");
        if client.poll_balance_with_timeout_and_commitment(
            &keypair.pubkey(),
            &Duration::from_millis(100),
            &Duration::from_secs(5),
            client_commitment.clone(),
        )? == 0
        {
            return Err(ArchiverError::EmptyStorageAccountBalance);
        }

        info!("checking storage account keypair...");
        // check if the storage account exists
        let balance = client
            .poll_get_balance_with_commitment(&storage_keypair.pubkey(), client_commitment.clone());
        if balance.is_err() || balance.unwrap() == 0 {
            let blockhash =
                match client.get_recent_blockhash_with_commitment(client_commitment.clone()) {
                    Ok((blockhash, _)) => blockhash,
                    Err(e) => {
                        return Err(ArchiverError::TransportError(e));
                    }
                };

            let ix = storage_instruction::create_storage_account(
                &keypair.pubkey(),
                &keypair.pubkey(),
                &storage_keypair.pubkey(),
                1,
                StorageAccountType::Archiver,
            );
            let tx = Transaction::new_signed_instructions(&[keypair], ix, blockhash);
            let signature = client.async_send_transaction(tx)?;
            client
                .poll_for_signature_with_commitment(&signature, client_commitment)
                .map_err(|err| match err {
                    TransportError::IoError(e) => e,
                    TransportError::TransactionError(_) => io::Error::new(
                        ErrorKind::Other,
                        "setup_mining_account: signature not found",
                    ),
                })?;
        }
        Ok(())
    }

    fn submit_mining_proof(
        meta: &ArchiverMeta,
        cluster_info: &Arc<RwLock<ClusterInfo>>,
        archiver_keypair: &Arc<Keypair>,
        storage_keypair: &Arc<Keypair>,
    ) {
        // No point if we've got no storage account...
        let nodes = cluster_info.read().unwrap().tvu_peers();
        let client = solana_core::gossip_service::get_client(&nodes);
        let storage_balance = client.poll_get_balance_with_commitment(
            &storage_keypair.pubkey(),
            meta.client_commitment.clone(),
        );
        if storage_balance.is_err() || storage_balance.unwrap() == 0 {
            error!("Unable to submit mining proof, no storage account");
            return;
        }
        // ...or no lamports for fees
        let balance = client.poll_get_balance_with_commitment(
            &archiver_keypair.pubkey(),
            meta.client_commitment.clone(),
        );
        if balance.is_err() || balance.unwrap() == 0 {
            error!("Unable to submit mining proof, insufficient Archiver Account balance");
            return;
        }

        let blockhash =
            match client.get_recent_blockhash_with_commitment(meta.client_commitment.clone()) {
                Ok((blockhash, _)) => blockhash,
                Err(_) => {
                    error!("unable to get recent blockhash, can't submit proof");
                    return;
                }
            };
        let instruction = storage_instruction::mining_proof(
            &storage_keypair.pubkey(),
            meta.sha_state,
            get_segment_from_slot(meta.slot, meta.slots_per_segment),
            Signature::new(&meta.signature.as_ref()),
            meta.blockhash,
        );
        let message = Message::new_with_payer(vec![instruction], Some(&archiver_keypair.pubkey()));
        let mut transaction = Transaction::new(
            &[archiver_keypair.as_ref(), storage_keypair.as_ref()],
            message,
            blockhash,
        );
        if let Err(err) = client.send_and_confirm_transaction(
            &[archiver_keypair.as_ref(), storage_keypair.as_ref()],
            &mut transaction,
            10,
            0,
        ) {
            error!("Error: {:?}; while sending mining proof", err);
        }
    }

    pub fn close(self) {
        self.exit.store(true, Ordering::Relaxed);
        self.join()
    }

    pub fn join(self) {
        for handle in self.thread_handles {
            handle.join().unwrap();
        }
    }

    fn get_segment_config(
        cluster_info: &Arc<RwLock<ClusterInfo>>,
        client_commitment: CommitmentConfig,
    ) -> Result<u64> {
        let rpc_peers = {
            let cluster_info = cluster_info.read().unwrap();
            cluster_info.all_rpc_peers()
        };
        debug!("rpc peers: {:?}", rpc_peers);
        if !rpc_peers.is_empty() {
            let rpc_client = {
                let node_index = thread_rng().gen_range(0, rpc_peers.len());
                RpcClient::new_socket(rpc_peers[node_index].rpc)
            };
            Ok(rpc_client
                .send(
                    &RpcRequest::GetSlotsPerSegment,
                    serde_json::json!([client_commitment]),
                    0,
                )
                .map_err(|err| {
                    warn!("Error while making rpc request {:?}", err);
                    ArchiverError::ClientError(err)
                })?
                .as_u64()
                .unwrap())
        } else {
            Err(ArchiverError::NoRpcPeers)
        }
    }

    /// Waits until the first segment is ready, and returns the current segment
    fn poll_for_segment(
        cluster_info: &Arc<RwLock<ClusterInfo>>,
        slots_per_segment: u64,
        previous_blockhash: &Hash,
        exit: &Arc<AtomicBool>,
    ) -> Result<(Hash, u64)> {
        loop {
            let (blockhash, turn_slot) = Self::poll_for_blockhash_and_slot(
                cluster_info,
                slots_per_segment,
                previous_blockhash,
                exit,
            )?;
            if get_complete_segment_from_slot(turn_slot, slots_per_segment).is_some() {
                return Ok((blockhash, turn_slot));
            }
        }
    }

    /// Poll for a different blockhash and associated max_slot than `previous_blockhash`
    fn poll_for_blockhash_and_slot(
        cluster_info: &Arc<RwLock<ClusterInfo>>,
        slots_per_segment: u64,
        previous_blockhash: &Hash,
        exit: &Arc<AtomicBool>,
    ) -> Result<(Hash, u64)> {
        info!("waiting for the next turn...");
        loop {
            let rpc_peers = {
                let cluster_info = cluster_info.read().unwrap();
                cluster_info.all_rpc_peers()
            };
            debug!("rpc peers: {:?}", rpc_peers);
            if !rpc_peers.is_empty() {
                let rpc_client = {
                    let node_index = thread_rng().gen_range(0, rpc_peers.len());
                    RpcClient::new_socket(rpc_peers[node_index].rpc)
                };
                let response = rpc_client
                    .send(
                        &RpcRequest::GetStorageTurn,
                        serde_json::value::Value::Null,
                        0,
                    )
                    .map_err(|err| {
                        warn!("Error while making rpc request {:?}", err);
                        ArchiverError::ClientError(err)
                    })?;
                let RpcStorageTurn {
                    blockhash: storage_blockhash,
                    slot: turn_slot,
                } = serde_json::from_value::<RpcStorageTurn>(response)
                    .map_err(ArchiverError::JsonError)?;
                let turn_blockhash = storage_blockhash.parse().map_err(|err| {
                    io::Error::new(
                        io::ErrorKind::Other,
                        format!(
                            "Blockhash parse failure: {:?} on {:?}",
                            err, storage_blockhash
                        ),
                    )
                })?;
                if turn_blockhash != *previous_blockhash {
                    info!("turn slot: {}", turn_slot);
                    if get_segment_from_slot(turn_slot, slots_per_segment) != 0 {
                        return Ok((turn_blockhash, turn_slot));
                    }
                }
            }
            if exit.load(Ordering::Relaxed) {
                return Err(ArchiverError::IO(io::Error::new(
                    ErrorKind::Other,
                    "exit signalled...",
                )));
            }
            sleep(Duration::from_secs(5));
        }
    }

    /// Ask an archiver to populate a given blockstore with its segment.
    /// Return the slot at the start of the archiver's segment
    ///
    /// It is recommended to use a temporary blockstore for this since the download will not verify
    /// shreds received and might impact the chaining of shreds across slots
    pub fn download_from_archiver(
        serve_repair: &ServeRepair,
        archiver_info: &ContactInfo,
        blockstore: &Arc<Blockstore>,
        slots_per_segment: u64,
    ) -> Result<u64> {
        // Create a client which downloads from the archiver and see that it
        // can respond with shreds.
        let start_slot = Self::get_archiver_segment_slot(archiver_info.storage_addr);
        info!("Archiver download: start at {}", start_slot);

        let exit = Arc::new(AtomicBool::new(false));
        let (s_reader, r_reader) = channel();
        let repair_socket = Arc::new(bind_in_range(VALIDATOR_PORT_RANGE).unwrap().1);
        let t_receiver = receiver(
            repair_socket.clone(),
            &exit,
            s_reader,
            Recycler::default(),
            "archiver_reeciver",
        );
        let id = serve_repair.keypair().pubkey();
        info!(
            "Sending repair requests from: {} to: {}",
            serve_repair.my_info().id,
            archiver_info.gossip
        );
        let repair_slot_range = RepairSlotRange {
            start: start_slot,
            end: start_slot + slots_per_segment,
        };
        // try for upto 180 seconds //TODO needs tuning if segments are huge
        for _ in 0..120 {
            // Strategy used by archivers
            let repairs = RepairService::generate_repairs_in_range(
                blockstore,
                repair_service::MAX_REPAIR_LENGTH,
                &repair_slot_range,
            );
            //iter over the repairs and send them
            if let Ok(repairs) = repairs {
                let reqs: Vec<_> = repairs
                    .into_iter()
                    .filter_map(|repair_request| {
                        serve_repair
                            .map_repair_request(&repair_request)
                            .map(|result| ((archiver_info.gossip, result), repair_request))
                            .ok()
                    })
                    .collect();

                for ((to, req), repair_request) in reqs {
                    if let Ok(local_addr) = repair_socket.local_addr() {
                        datapoint_info!(
                            "archiver_download",
                            ("repair_request", format!("{:?}", repair_request), String),
                            ("to", to.to_string(), String),
                            ("from", local_addr.to_string(), String),
                            ("id", id.to_string(), String)
                        );
                    }
                    repair_socket
                        .send_to(&req, archiver_info.gossip)
                        .unwrap_or_else(|e| {
                            error!("{} repair req send_to({}) error {:?}", id, to, e);
                            0
                        });
                }
            }
            let res = r_reader.recv_timeout(Duration::new(1, 0));
            if let Ok(mut packets) = res {
                while let Ok(mut more) = r_reader.try_recv() {
                    packets.packets.append_pinned(&mut more.packets);
                }
                let shreds: Vec<Shred> = packets
                    .packets
                    .into_iter()
                    .filter_map(|p| Shred::new_from_serialized_shred(p.data.to_vec()).ok())
                    .collect();
                blockstore.insert_shreds(shreds, None, false)?;
            }
            // check if all the slots in the segment are complete
            if Self::segment_complete(start_slot, slots_per_segment, blockstore) {
                break;
            }
            sleep(Duration::from_millis(500));
        }
        exit.store(true, Ordering::Relaxed);
        t_receiver.join().unwrap();

        // check if all the slots in the segment are complete
        if !Self::segment_complete(start_slot, slots_per_segment, blockstore) {
            return Err(ArchiverError::SegmentDownloadError);
        }
        Ok(start_slot)
    }

    fn segment_complete(
        start_slot: Slot,
        slots_per_segment: u64,
        blockstore: &Arc<Blockstore>,
    ) -> bool {
        for slot in start_slot..(start_slot + slots_per_segment) {
            if !blockstore.is_full(slot) {
                return false;
            }
        }
        true
    }

    fn get_archiver_segment_slot(to: SocketAddr) -> u64 {
        let (_port, socket) = bind_in_range(VALIDATOR_PORT_RANGE).unwrap();
        socket
            .set_read_timeout(Some(Duration::from_secs(5)))
            .unwrap();

        let req = ArchiverRequest::GetSlotHeight(socket.local_addr().unwrap());
        let serialized_req = bincode::serialize(&req).unwrap();
        for _ in 0..10 {
            socket.send_to(&serialized_req, to).unwrap();
            let mut buf = [0; 1024];
            if let Ok((size, _addr)) = socket.recv_from(&mut buf) {
                // Ignore bad packet and try again
                if let Ok(slot) = bincode::config()
                    .limit(PACKET_DATA_SIZE as u64)
                    .deserialize(&buf[..size])
                {
                    return slot;
                }
            }
            sleep(Duration::from_millis(500));
        }
        panic!("Couldn't get segment slot from archiver!");
    }
}
@@ -1,48 +0,0 @@
use serde_json;
use solana_client::client_error;
use solana_ledger::blockstore;
use solana_sdk::transport;
use std::any::Any;
use thiserror::Error;

#[derive(Error, Debug)]
pub enum ArchiverError {
    #[error("IO error")]
    IO(#[from] std::io::Error),

    #[error("blockstore error")]
    BlockstoreError(#[from] blockstore::BlockstoreError),

    #[error("crossbeam error")]
    CrossbeamSendError(#[from] crossbeam_channel::SendError<u64>),

    #[error("send error")]
    SendError(#[from] std::sync::mpsc::SendError<u64>),

    #[error("join error")]
    JoinError(Box<dyn Any + Send + 'static>),

    #[error("transport error")]
    TransportError(#[from] transport::TransportError),

    #[error("client error")]
    ClientError(#[from] client_error::ClientError),

    #[error("Json parsing error")]
    JsonError(#[from] serde_json::error::Error),

    #[error("Storage account has no balance")]
    EmptyStorageAccountBalance,

    #[error("No RPC peers..")]
    NoRpcPeers,

    #[error("Couldn't download full segment")]
    SegmentDownloadError,
}

impl std::convert::From<Box<dyn Any + Send + 'static>> for ArchiverError {
    fn from(e: Box<dyn Any + Send + 'static>) -> ArchiverError {
        ArchiverError::JoinError(e)
    }
}
@@ -1,25 +0,0 @@
[package]
name = "solana-archiver-utils"
version = "1.0.0"
description = "Solana Archiver Utils"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
edition = "2018"

[dependencies]
log = "0.4.8"
rand = "0.6.5"
solana-chacha = { path = "../chacha", version = "1.0.0" }
solana-chacha-sys = { path = "../chacha-sys", version = "1.0.0" }
solana-ledger = { path = "../ledger", version = "1.0.0" }
solana-logger = { path = "../logger", version = "1.0.0" }
solana-perf = { path = "../perf", version = "1.0.0" }
solana-sdk = { path = "../sdk", version = "1.0.0" }

[dev-dependencies]
hex = "0.4.0"

[lib]
name = "solana_archiver_utils"
@@ -1,120 +0,0 @@
#[macro_use]
extern crate log;

use solana_sdk::hash::{Hash, Hasher};
use std::fs::File;
use std::io::{self, BufReader, ErrorKind, Read, Seek, SeekFrom};
use std::mem::size_of;
use std::path::Path;

pub fn sample_file(in_path: &Path, sample_offsets: &[u64]) -> io::Result<Hash> {
    let in_file = File::open(in_path)?;
    let metadata = in_file.metadata()?;
    let mut buffer_file = BufReader::new(in_file);

    let mut hasher = Hasher::default();
    let sample_size = size_of::<Hash>();
    let sample_size64 = sample_size as u64;
    let mut buf = vec![0; sample_size];

    let file_len = metadata.len();
    if file_len < sample_size64 {
        return Err(io::Error::new(ErrorKind::Other, "file too short!"));
    }
    for offset in sample_offsets {
        if *offset > (file_len - sample_size64) / sample_size64 {
            return Err(io::Error::new(ErrorKind::Other, "offset too large"));
        }
        buffer_file.seek(SeekFrom::Start(*offset * sample_size64))?;
        trace!("sampling @ {} ", *offset);
        match buffer_file.read(&mut buf) {
            Ok(size) => {
                assert_eq!(size, buf.len());
                hasher.hash(&buf);
            }
            Err(e) => {
                warn!("Error sampling file");
                return Err(e);
            }
        }
    }

    Ok(hasher.result())
}

#[cfg(test)]
mod tests {
    use super::*;
    use rand::{thread_rng, Rng};
    use std::fs::{create_dir_all, remove_file};
    use std::io::Write;
    use std::path::PathBuf;

    extern crate hex;

    fn tmp_file_path(name: &str) -> PathBuf {
        use std::env;
        let out_dir = env::var("FARF_DIR").unwrap_or_else(|_| "farf".to_string());
        let mut rand_bits = [0u8; 32];
        thread_rng().fill(&mut rand_bits[..]);

        let mut path = PathBuf::new();
        path.push(out_dir);
        path.push("tmp");
        create_dir_all(&path).unwrap();

        path.push(format!("{}-{:?}", name, hex::encode(rand_bits)));
        println!("path: {:?}", path);
        path
    }

    #[test]
    fn test_sample_file() {
        solana_logger::setup();
        let in_path = tmp_file_path("test_sample_file_input.txt");
        let num_strings = 4096;
        let string = "12foobar";
        {
            let mut in_file = File::create(&in_path).unwrap();
            for _ in 0..num_strings {
                in_file.write(string.as_bytes()).unwrap();
            }
        }
        let num_samples = (string.len() * num_strings / size_of::<Hash>()) as u64;
        let samples: Vec<_> = (0..num_samples).collect();
        let res = sample_file(&in_path, samples.as_slice());
        let ref_hash: Hash = Hash::new(&[
            173, 251, 182, 165, 10, 54, 33, 150, 133, 226, 106, 150, 99, 192, 179, 1, 230, 144,
            151, 126, 18, 191, 54, 67, 249, 140, 230, 160, 56, 30, 170, 52,
        ]);
        let res = res.unwrap();
        assert_eq!(res, ref_hash);

        // Sample just past the end
        assert!(sample_file(&in_path, &[num_samples]).is_err());
        remove_file(&in_path).unwrap();
    }

    #[test]
    fn test_sample_file_invalid_offset() {
        let in_path = tmp_file_path("test_sample_file_invalid_offset_input.txt");
        {
            let mut in_file = File::create(&in_path).unwrap();
            for _ in 0..4096 {
                in_file.write("123456foobar".as_bytes()).unwrap();
            }
        }
        let samples = [0, 200000];
        let res = sample_file(&in_path, &samples);
        assert!(res.is_err());
        remove_file(in_path).unwrap();
    }

    #[test]
    fn test_sample_file_missing_file() {
        let in_path = tmp_file_path("test_sample_file_that_doesnt_exist.txt");
        let samples = [0, 5];
        let res = sample_file(&in_path, &samples);
        assert!(res.is_err());
    }
}
@@ -1,20 +0,0 @@
[package]
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-archiver"
version = "1.0.0"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"

[dependencies]
clap = "2.33.0"
console = "0.9.2"
solana-clap-utils = { path = "../clap-utils", version = "1.0.0" }
solana-core = { path = "../core", version = "1.0.0" }
solana-logger = { path = "../logger", version = "1.0.0" }
solana-metrics = { path = "../metrics", version = "1.0.0" }
solana-archiver-lib = { path = "../archiver-lib", version = "1.0.0" }
solana-net-utils = { path = "../net-utils", version = "1.0.0" }
solana-sdk = { path = "../sdk", version = "1.0.0" }
@@ -1,147 +0,0 @@
use clap::{crate_description, crate_name, App, Arg};
use console::style;
use solana_archiver_lib::archiver::Archiver;
use solana_clap_utils::{
    input_validators::is_keypair,
    keypair::{
        self, keypair_input, KeypairWithSource, ASK_SEED_PHRASE_ARG,
        SKIP_SEED_PHRASE_VALIDATION_ARG,
    },
};
use solana_core::{
    cluster_info::{Node, VALIDATOR_PORT_RANGE},
    contact_info::ContactInfo,
};
use solana_sdk::{commitment_config::CommitmentConfig, signature::Signer};
use std::{net::SocketAddr, path::PathBuf, process::exit, sync::Arc};

fn main() {
    solana_logger::setup();

    let matches = App::new(crate_name!())
        .about(crate_description!())
        .version(solana_clap_utils::version!())
        .arg(
            Arg::with_name("identity_keypair")
                .short("i")
                .long("identity-keypair")
                .value_name("PATH")
                .takes_value(true)
                .validator(is_keypair)
                .help("File containing an identity (keypair)"),
        )
        .arg(
            Arg::with_name("entrypoint")
                .short("n")
                .long("entrypoint")
                .value_name("HOST:PORT")
                .takes_value(true)
                .required(true)
                .validator(solana_net_utils::is_host_port)
                .help("Rendezvous with the cluster at this entry point"),
        )
        .arg(
            Arg::with_name("ledger")
                .short("l")
                .long("ledger")
                .value_name("DIR")
                .takes_value(true)
                .required(true)
                .help("use DIR as persistent ledger location"),
        )
        .arg(
            Arg::with_name("storage_keypair")
                .short("s")
                .long("storage-keypair")
                .value_name("PATH")
                .takes_value(true)
                .validator(is_keypair)
                .help("File containing the storage account keypair"),
        )
        .arg(
            Arg::with_name(ASK_SEED_PHRASE_ARG.name)
                .long(ASK_SEED_PHRASE_ARG.long)
                .value_name("KEYPAIR NAME")
                .multiple(true)
                .takes_value(true)
                .possible_values(&["identity-keypair", "storage-keypair"])
                .help(ASK_SEED_PHRASE_ARG.help),
        )
        .arg(
            Arg::with_name(SKIP_SEED_PHRASE_VALIDATION_ARG.name)
                .long(SKIP_SEED_PHRASE_VALIDATION_ARG.long)
                .requires(ASK_SEED_PHRASE_ARG.name)
                .help(SKIP_SEED_PHRASE_VALIDATION_ARG.help),
        )
        .get_matches();

    let ledger_path = PathBuf::from(matches.value_of("ledger").unwrap());

    let identity_keypair = keypair_input(&matches, "identity_keypair")
        .unwrap_or_else(|err| {
            eprintln!("Identity keypair input failed: {}", err);
            exit(1);
        })
        .keypair;
    let KeypairWithSource {
        keypair: storage_keypair,
        source: storage_keypair_source,
    } = keypair_input(&matches, "storage_keypair").unwrap_or_else(|err| {
        eprintln!("Storage keypair input failed: {}", err);
        exit(1);
    });
    if storage_keypair_source == keypair::Source::Generated {
        clap::Error::with_description(
            "The `storage-keypair` argument was not found",
            clap::ErrorKind::ArgumentNotFound,
        )
        .exit();
    }

    let entrypoint_addr = matches
        .value_of("entrypoint")
        .map(|entrypoint| {
            solana_net_utils::parse_host_port(entrypoint)
                .expect("failed to parse entrypoint address")
        })
        .unwrap();

    let gossip_addr = {
        let ip = solana_net_utils::get_public_ip_addr(&entrypoint_addr).unwrap();
        let mut addr = SocketAddr::new(ip, 0);
        addr.set_ip(solana_net_utils::get_public_ip_addr(&entrypoint_addr).unwrap());
        addr
    };
    let node = Node::new_archiver_with_external_ip(
        &identity_keypair.pubkey(),
        &gossip_addr,
        VALIDATOR_PORT_RANGE,
    );

    println!(
        "{} version {} (branch={}, commit={})",
        style(crate_name!()).bold(),
        solana_clap_utils::version!(),
        option_env!("CI_BRANCH").unwrap_or("unknown"),
        option_env!("CI_COMMIT").unwrap_or("unknown")
    );
    solana_metrics::set_host_id(identity_keypair.pubkey().to_string());
    println!(
        "replicating the data with identity_keypair={:?} gossip_addr={:?}",
        identity_keypair.pubkey(),
        gossip_addr
    );

    let entrypoint_info = ContactInfo::new_gossip_entry_point(&entrypoint_addr);
    let archiver = Archiver::new(
        &ledger_path,
        node,
        entrypoint_info,
        Arc::new(identity_keypair),
        Arc::new(storage_keypair),
        CommitmentConfig::recent(),
    )
    .unwrap();

    archiver.join();
}
@@ -2,19 +2,27 @@
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-banking-bench"
version = "1.0.0"
version = "1.2.13"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"

[dependencies]
clap = "2.33.1"
crossbeam-channel = "0.4"
log = "0.4.6"
rayon = "1.2.0"
solana-core = { path = "../core", version = "1.0.0" }
solana-ledger = { path = "../ledger", version = "1.0.0" }
solana-logger = { path = "../logger", version = "1.0.0" }
solana-runtime = { path = "../runtime", version = "1.0.0" }
solana-measure = { path = "../measure", version = "1.0.0" }
solana-sdk = { path = "../sdk", version = "1.0.0" }
rand = "0.6.5"
crossbeam-channel = "0.3"
rand = "0.7.0"
rayon = "1.3.0"
solana-core = { path = "../core", version = "1.2.13" }
solana-clap-utils = { path = "../clap-utils", version = "1.2.13" }
solana-streamer = { path = "../streamer", version = "1.2.13" }
solana-perf = { path = "../perf", version = "1.2.13" }
solana-ledger = { path = "../ledger", version = "1.2.13" }
solana-logger = { path = "../logger", version = "1.2.13" }
solana-runtime = { path = "../runtime", version = "1.2.13" }
solana-measure = { path = "../measure", version = "1.2.13" }
solana-sdk = { path = "../sdk", version = "1.2.13" }
solana-version = { path = "../version", version = "1.2.13" }

[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]
@@ -1,30 +1,38 @@
use clap::{crate_description, crate_name, value_t, App, Arg};
use crossbeam_channel::unbounded;
use log::*;
use rand::{thread_rng, Rng};
use rayon::prelude::*;
use solana_core::banking_stage::{create_test_recorder, BankingStage};
use solana_core::cluster_info::ClusterInfo;
use solana_core::cluster_info::Node;
use solana_core::genesis_utils::{create_genesis_config, GenesisConfigInfo};
use solana_core::packet::to_packets_chunked;
use solana_core::poh_recorder::PohRecorder;
use solana_core::poh_recorder::WorkingBankEntry;
use solana_ledger::bank_forks::BankForks;
use solana_ledger::{blockstore::Blockstore, get_tmp_ledger_path};
use solana_core::{
    banking_stage::{create_test_recorder, BankingStage},
    cluster_info::ClusterInfo,
    cluster_info::Node,
    poh_recorder::PohRecorder,
    poh_recorder::WorkingBankEntry,
};
use solana_ledger::{
    bank_forks::BankForks,
    blockstore::Blockstore,
    genesis_utils::{create_genesis_config, GenesisConfigInfo},
    get_tmp_ledger_path,
};
use solana_measure::measure::Measure;
use solana_perf::packet::to_packets_chunked;
use solana_runtime::bank::Bank;
use solana_sdk::hash::Hash;
use solana_sdk::pubkey::Pubkey;
use solana_sdk::signature::Keypair;
use solana_sdk::signature::Signature;
use solana_sdk::system_transaction;
use solana_sdk::timing::{duration_as_us, timestamp};
use solana_sdk::transaction::Transaction;
use std::sync::atomic::Ordering;
use std::sync::mpsc::Receiver;
use std::sync::{Arc, Mutex, RwLock};
use std::thread::sleep;
use std::time::{Duration, Instant};
use solana_sdk::{
    hash::Hash,
    pubkey::Pubkey,
    signature::Keypair,
    signature::Signature,
    system_transaction,
    timing::{duration_as_us, timestamp},
    transaction::Transaction,
};
use std::{
    sync::{atomic::Ordering, mpsc::Receiver, Arc, Mutex},
    thread::sleep,
    time::{Duration, Instant},
};

fn check_txs(
    receiver: &Arc<Receiver<WorkingBankEntry>>,
@ -57,15 +65,22 @@ fn check_txs(
|
||||
no_bank
|
||||
}
|
||||
|
||||
fn make_accounts_txs(txes: usize, mint_keypair: &Keypair, hash: Hash) -> Vec<Transaction> {
|
||||
fn make_accounts_txs(
|
||||
total_num_transactions: usize,
|
||||
hash: Hash,
|
||||
same_payer: bool,
|
||||
) -> Vec<Transaction> {
|
||||
let to_pubkey = Pubkey::new_rand();
|
||||
let dummy = system_transaction::transfer(mint_keypair, &to_pubkey, 1, hash);
|
||||
(0..txes)
|
||||
let payer_key = Keypair::new();
|
||||
let dummy = system_transaction::transfer(&payer_key, &to_pubkey, 1, hash);
|
||||
(0..total_num_transactions)
|
||||
.into_par_iter()
|
||||
.map(|_| {
|
||||
let mut new = dummy.clone();
|
||||
let sig: Vec<u8> = (0..64).map(|_| thread_rng().gen()).collect();
|
||||
new.message.account_keys[0] = Pubkey::new_rand();
|
||||
if !same_payer {
|
||||
new.message.account_keys[0] = Pubkey::new_rand();
|
||||
}
|
||||
new.message.account_keys[1] = Pubkey::new_rand();
|
||||
new.signatures = vec![Signature::new(&sig[0..64])];
|
||||
new
|
||||
@ -89,13 +104,61 @@ fn bytes_as_usize(bytes: &[u8]) -> usize {
|
||||
bytes[0] as usize | (bytes[1] as usize) << 8
|
||||
}
|
||||
|
||||
#[allow(clippy::cognitive_complexity)]
|
||||
fn main() {
|
||||
solana_logger::setup();
|
||||
let num_threads = BankingStage::num_threads() as usize;
|
||||
|
||||
let matches = App::new(crate_name!())
|
||||
.about(crate_description!())
|
||||
.version(solana_version::version!())
|
||||
.arg(
|
||||
Arg::with_name("num_chunks")
|
||||
.long("num-chunks")
|
||||
.takes_value(true)
|
||||
.value_name("SIZE")
|
||||
.help("Number of transaction chunks."),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("packets_per_chunk")
|
||||
.long("packets-per-chunk")
|
||||
.takes_value(true)
|
||||
.value_name("SIZE")
|
||||
.help("Packets per chunk"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("skip_sanity")
|
||||
.long("skip-sanity")
|
||||
.takes_value(false)
|
||||
.help("Skip transaction sanity execution"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("same_payer")
|
||||
.long("same-payer")
|
||||
.takes_value(false)
|
||||
.help("Use the same payer for transfers"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("iterations")
|
||||
.long("iterations")
|
||||
.takes_value(true)
|
||||
.help("Number of iterations"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("num_threads")
|
||||
.long("num-threads")
|
||||
.takes_value(true)
|
||||
.help("Number of iterations"),
|
||||
)
|
||||
.get_matches();
|
||||
|
||||
let num_threads =
|
||||
value_t!(matches, "num_threads", usize).unwrap_or(BankingStage::num_threads() as usize);
|
||||
// a multiple of packet chunk duplicates to avoid races
|
||||
const CHUNKS: usize = 8 * 2;
|
||||
const PACKETS_PER_BATCH: usize = 192;
|
||||
let txes = PACKETS_PER_BATCH * num_threads * CHUNKS;
|
||||
let num_chunks = value_t!(matches, "num_chunks", usize).unwrap_or(16);
|
||||
let packets_per_chunk = value_t!(matches, "packets_per_chunk", usize).unwrap_or(192);
|
||||
let iterations = value_t!(matches, "iterations", usize).unwrap_or(1000);
|
||||
|
||||
let total_num_transactions = num_chunks * num_threads * packets_per_chunk;
|
||||
let mint_total = 1_000_000_000_000;
|
||||
let GenesisConfigInfo {
|
||||
genesis_config,
|
||||
@ -106,37 +169,47 @@ fn main() {
|
||||
let (verified_sender, verified_receiver) = unbounded();
|
||||
let (vote_sender, vote_receiver) = unbounded();
|
||||
let bank0 = Bank::new(&genesis_config);
|
||||
let mut bank_forks = BankForks::new(0, bank0);
|
||||
let mut bank_forks = BankForks::new(bank0);
|
||||
let mut bank = bank_forks.working_bank();
|
||||
|
||||
info!("threads: {} txs: {}", num_threads, txes);
|
||||
info!("threads: {} txs: {}", num_threads, total_num_transactions);
|
||||
|
||||
let mut transactions = make_accounts_txs(txes, &mint_keypair, genesis_config.hash());
|
||||
let same_payer = matches.is_present("same_payer");
|
||||
let mut transactions =
|
||||
make_accounts_txs(total_num_transactions, genesis_config.hash(), same_payer);
|
||||
|
||||
// fund all the accounts
|
||||
transactions.iter().for_each(|tx| {
|
||||
let fund = system_transaction::transfer(
|
||||
let mut fund = system_transaction::transfer(
|
||||
&mint_keypair,
|
||||
&tx.message.account_keys[0],
|
||||
mint_total / txes as u64,
|
||||
mint_total / total_num_transactions as u64,
|
||||
genesis_config.hash(),
|
||||
);
|
||||
// Ignore any pesky duplicate signature errors in the case we are using single-payer
|
||||
let sig: Vec<u8> = (0..64).map(|_| thread_rng().gen()).collect();
|
||||
fund.signatures = vec![Signature::new(&sig[0..64])];
|
||||
let x = bank.process_transaction(&fund);
|
||||
x.unwrap();
|
||||
});
|
||||
//sanity check, make sure all the transactions can execute sequentially
|
||||
transactions.iter().for_each(|tx| {
|
||||
let res = bank.process_transaction(&tx);
|
||||
assert!(res.is_ok(), "sanity test transactions");
|
||||
});
|
||||
bank.clear_signatures();
|
||||
//sanity check, make sure all the transactions can execute in parallel
|
||||
let res = bank.process_transactions(&transactions);
|
||||
for r in res {
|
||||
assert!(r.is_ok(), "sanity parallel execution");
|
||||
|
||||
let skip_sanity = matches.is_present("skip_sanity");
|
||||
if !skip_sanity {
|
||||
//sanity check, make sure all the transactions can execute sequentially
|
||||
transactions.iter().for_each(|tx| {
|
||||
let res = bank.process_transaction(&tx);
|
||||
assert!(res.is_ok(), "sanity test transactions error: {:?}", res);
|
||||
});
|
||||
bank.clear_signatures();
|
||||
//sanity check, make sure all the transactions can execute in parallel
|
||||
let res = bank.process_transactions(&transactions);
|
||||
for r in res {
|
||||
assert!(r.is_ok(), "sanity parallel execution error: {:?}", r);
|
||||
}
|
||||
bank.clear_signatures();
|
||||
}
|
||||
bank.clear_signatures();
|
||||
let mut verified: Vec<_> = to_packets_chunked(&transactions.clone(), PACKETS_PER_BATCH);
|
||||
|
||||
let mut verified: Vec<_> = to_packets_chunked(&transactions.clone(), packets_per_chunk);
|
||||
let ledger_path = get_tmp_ledger_path!();
|
||||
{
|
||||
let blockstore = Arc::new(
|
||||
@ -145,7 +218,7 @@ fn main() {
|
||||
let (exit, poh_recorder, poh_service, signal_receiver) =
|
||||
create_test_recorder(&bank, &blockstore, None);
|
||||
let cluster_info = ClusterInfo::new_with_invalid_keypair(Node::new_localhost().info);
|
||||
let cluster_info = Arc::new(RwLock::new(cluster_info));
|
||||
let cluster_info = Arc::new(cluster_info);
|
||||
let banking_stage = BankingStage::new(
|
||||
&cluster_info,
|
||||
&poh_recorder,
|
||||
@ -155,7 +228,7 @@ fn main() {
|
||||
);
|
||||
poh_recorder.lock().unwrap().set_bank(&bank);
|
||||
|
||||
let chunk_len = verified.len() / CHUNKS;
|
||||
let chunk_len = verified.len() / num_chunks;
|
||||
let mut start = 0;
|
||||
|
||||
// This is so that the signal_receiver does not go out of scope after the closure.
|
||||
@ -164,17 +237,17 @@ fn main() {
|
||||
let signal_receiver = Arc::new(signal_receiver);
|
||||
let mut total_us = 0;
|
||||
let mut tx_total_us = 0;
|
||||
let base_tx_count = bank.transaction_count();
|
||||
let mut txs_processed = 0;
|
||||
let mut root = 1;
|
||||
let collector = Pubkey::new_rand();
|
||||
const ITERS: usize = 1_000;
|
||||
let config = Config {
|
||||
packets_per_batch: PACKETS_PER_BATCH,
|
||||
packets_per_batch: packets_per_chunk,
|
||||
chunk_len,
|
||||
num_threads,
|
||||
};
|
||||
let mut total_sent = 0;
|
||||
for _ in 0..ITERS {
|
||||
for _ in 0..iterations {
|
||||
let now = Instant::now();
|
||||
let mut sent = 0;
|
||||
|
||||
@ -215,7 +288,11 @@ fn main() {
|
||||
sleep(Duration::from_millis(5));
|
||||
}
|
||||
}
|
||||
if check_txs(&signal_receiver, txes / CHUNKS, &poh_recorder) {
|
||||
if check_txs(
|
||||
&signal_receiver,
|
||||
total_num_transactions / num_chunks,
|
||||
&poh_recorder,
|
||||
) {
|
||||
debug!(
|
||||
"resetting bank {} tx count: {} txs_proc: {}",
|
||||
bank.slot(),
|
||||
@ -246,7 +323,7 @@ fn main() {
|
||||
poh_recorder.lock().unwrap().set_bank(&bank);
|
||||
assert!(poh_recorder.lock().unwrap().bank().is_some());
|
||||
if bank.slot() > 32 {
|
||||
bank_forks.set_root(root, &None);
|
||||
bank_forks.set_root(root, &None, None);
|
||||
root += 1;
|
||||
}
|
||||
debug!(
|
||||
@ -267,7 +344,7 @@ fn main() {
|
||||
debug!(
|
||||
"time: {} us checked: {} sent: {}",
|
||||
duration_as_us(&now.elapsed()),
|
||||
txes / CHUNKS,
|
||||
total_num_transactions / num_chunks,
|
||||
sent,
|
||||
);
|
||||
total_sent += sent;
|
||||
@ -278,20 +355,26 @@ fn main() {
|
||||
let sig: Vec<u8> = (0..64).map(|_| thread_rng().gen()).collect();
|
||||
tx.signatures[0] = Signature::new(&sig[0..64]);
|
||||
}
|
||||
verified = to_packets_chunked(&transactions.clone(), PACKETS_PER_BATCH);
|
||||
verified = to_packets_chunked(&transactions.clone(), packets_per_chunk);
|
||||
}
|
||||
|
||||
start += chunk_len;
|
||||
start %= verified.len();
|
||||
}
|
||||
let txs_processed = bank_forks.working_bank().transaction_count();
|
||||
debug!("processed: {} base: {}", txs_processed, base_tx_count);
|
||||
eprintln!(
|
||||
"{{'name': 'banking_bench_total', 'median': '{}'}}",
|
||||
"{{'name': 'banking_bench_total', 'median': '{:.2}'}}",
|
||||
(1000.0 * 1000.0 * total_sent as f64) / (total_us as f64),
|
||||
);
|
||||
eprintln!(
|
||||
"{{'name': 'banking_bench_tx_total', 'median': '{}'}}",
|
||||
"{{'name': 'banking_bench_tx_total', 'median': '{:.2}'}}",
|
||||
(1000.0 * 1000.0 * total_sent as f64) / (tx_total_us as f64),
|
||||
);
|
||||
eprintln!(
|
||||
"{{'name': 'banking_bench_success_tx_total', 'median': '{:.2}'}}",
|
||||
(1000.0 * 1000.0 * (txs_processed - base_tx_count) as f64) / (total_us as f64),
|
||||
);
|
||||
|
||||
drop(verified_sender);
|
||||
drop(vote_sender);
|
||||
|
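The hunks above replace the hard-coded `CHUNKS` and `PACKETS_PER_BATCH` constants with `--num-chunks`, `--packets-per-chunk`, `--iterations`, and `--num-threads` flags. A small runnable sketch of the resulting workload arithmetic; the default values shown are the ones the diff falls back to, and with them the old sizing is reproduced exactly:

```rust
// Sketch: the benchmark's transaction count is now a product of three
// CLI-tunable knobs instead of fixed constants.
fn total_transactions(num_chunks: usize, num_threads: usize, packets_per_chunk: usize) -> usize {
    num_chunks * num_threads * packets_per_chunk
}

fn main() {
    // With the defaults (16 chunks, 192 packets per chunk) and, say, 4 banking
    // threads, this matches the old CHUNKS (8 * 2) * PACKETS_PER_BATCH (192)
    // * num_threads workload:
    assert_eq!(total_transactions(16, 4, 192), 12_288);
}
```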
@@ -2,33 +2,37 @@
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-bench-exchange"
version = "1.0.0"
version = "1.2.13"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
publish = false

[dependencies]
clap = "2.32.0"
itertools = "0.8.2"
clap = "2.33.1"
itertools = "0.9.0"
log = "0.4.8"
num-derive = "0.3"
num-traits = "0.2"
rand = "0.6.5"
rayon = "1.2.0"
serde_json = "1.0.46"
serde_yaml = "0.8.11"
solana-clap-utils = { path = "../clap-utils", version = "1.0.0" }
solana-core = { path = "../core", version = "1.0.0" }
solana-genesis = { path = "../genesis", version = "1.0.0" }
solana-client = { path = "../client", version = "1.0.0" }
solana-faucet = { path = "../faucet", version = "1.0.0" }
solana-exchange-program = { path = "../programs/exchange", version = "1.0.0" }
solana-logger = { path = "../logger", version = "1.0.0" }
solana-metrics = { path = "../metrics", version = "1.0.0" }
solana-net-utils = { path = "../net-utils", version = "1.0.0" }
solana-runtime = { path = "../runtime", version = "1.0.0" }
solana-sdk = { path = "../sdk", version = "1.0.0" }
rand = "0.7.0"
rayon = "1.3.0"
serde_json = "1.0.53"
serde_yaml = "0.8.12"
solana-clap-utils = { path = "../clap-utils", version = "1.2.13" }
solana-core = { path = "../core", version = "1.2.13" }
solana-genesis = { path = "../genesis", version = "1.2.13" }
solana-client = { path = "../client", version = "1.2.13" }
solana-faucet = { path = "../faucet", version = "1.2.13" }
solana-exchange-program = { path = "../programs/exchange", version = "1.2.13" }
solana-logger = { path = "../logger", version = "1.2.13" }
solana-metrics = { path = "../metrics", version = "1.2.13" }
solana-net-utils = { path = "../net-utils", version = "1.2.13" }
solana-runtime = { path = "../runtime", version = "1.2.13" }
solana-sdk = { path = "../sdk", version = "1.2.13" }
solana-version = { path = "../version", version = "1.2.13" }

[dev-dependencies]
solana-local-cluster = { path = "../local-cluster", version = "1.0.0" }
solana-local-cluster = { path = "../local-cluster", version = "1.2.13" }

[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]
@@ -14,6 +14,7 @@ use solana_metrics::datapoint_info;
use solana_sdk::{
    client::{Client, SyncClient},
    commitment_config::CommitmentConfig,
    message::Message,
    pubkey::Pubkey,
    signature::{Keypair, Signer},
    timing::{duration_as_ms, duration_as_s},
@@ -449,7 +450,7 @@ fn swapper<T>(
        }
        account_group = (account_group + 1) % account_groups as usize;

        let (blockhash, _fee_calculator) = client
        let (blockhash, _fee_calculator, _last_valid_slot) = client
            .get_recent_blockhash_with_commitment(CommitmentConfig::recent())
            .expect("Failed to get blockhash");
        let to_swap_txs: Vec<_> = to_swap
@@ -457,16 +458,14 @@ fn swapper<T>(
            .map(|(signer, swap, profit)| {
                let s: &Keypair = &signer;
                let owner = &signer.pubkey();
                Transaction::new_signed_instructions(
                    &[s],
                    vec![exchange_instruction::swap_request(
                        owner,
                        &swap.0.pubkey,
                        &swap.1.pubkey,
                        &profit,
                    )],
                    blockhash,
                )
                let instruction = exchange_instruction::swap_request(
                    owner,
                    &swap.0.pubkey,
                    &swap.1.pubkey,
                    &profit,
                );
                let message = Message::new(&[instruction], Some(&s.pubkey()));
                Transaction::new(&[s], message, blockhash)
            })
            .collect();

@@ -577,7 +576,7 @@ fn trader<T>(
        }
        account_group = (account_group + 1) % account_groups as usize;

        let (blockhash, _fee_calculator) = client
        let (blockhash, _fee_calculator, _last_valid_slot) = client
            .get_recent_blockhash_with_commitment(CommitmentConfig::recent())
            .expect("Failed to get blockhash");

@@ -588,28 +587,26 @@ fn trader<T>(
                let owner_pubkey = &owner.pubkey();
                let trade_pubkey = &trade.pubkey();
                let space = mem::size_of::<ExchangeState>() as u64;
                Transaction::new_signed_instructions(
                    &[owner.as_ref(), trade],
                    vec![
                        system_instruction::create_account(
                            owner_pubkey,
                            trade_pubkey,
                            1,
                            space,
                            &id(),
                        ),
                        exchange_instruction::trade_request(
                            owner_pubkey,
                            trade_pubkey,
                            *side,
                            pair,
                            tokens,
                            price,
                            src,
                        ),
                    ],
                    blockhash,
                )
                let instructions = [
                    system_instruction::create_account(
                        owner_pubkey,
                        trade_pubkey,
                        1,
                        space,
                        &id(),
                    ),
                    exchange_instruction::trade_request(
                        owner_pubkey,
                        trade_pubkey,
                        *side,
                        pair,
                        tokens,
                        price,
                        src,
                    ),
                ];
                let message = Message::new(&instructions, Some(&owner_pubkey));
                Transaction::new(&[owner.as_ref(), trade], message, blockhash)
            })
            .collect();

@@ -747,22 +744,19 @@ pub fn fund_keys<T: Client>(client: &T, source: &Keypair, dests: &[Arc<Keypair>]
        let mut to_fund_txs: Vec<_> = chunk
            .par_iter()
            .map(|(k, m)| {
                (
                    k.clone(),
                    Transaction::new_unsigned_instructions(system_instruction::transfer_many(
                        &k.pubkey(),
                        &m,
                    )),
                )
                let instructions = system_instruction::transfer_many(&k.pubkey(), &m);
                let message = Message::new(&instructions, Some(&k.pubkey()));
                (k.clone(), Transaction::new_unsigned(message))
            })
            .collect();

        let mut retries = 0;
        let amount = chunk[0].1[0].1;
        while !to_fund_txs.is_empty() {
            let receivers = to_fund_txs
            let receivers: usize = to_fund_txs
                .iter()
                .fold(0, |len, (_, tx)| len + tx.message().instructions.len());
                .map(|(_, tx)| tx.message().instructions.len())
                .sum();

            debug!(
                " {} to {} in {} txs",
@@ -775,7 +769,7 @@ pub fn fund_keys<T: Client>(client: &T, source: &Keypair, dests: &[Arc<Keypair>]
                to_fund_txs.len(),
            );

            let (blockhash, _fee_calculator) = client
            let (blockhash, _fee_calculator, _last_valid_slot) = client
                .get_recent_blockhash_with_commitment(CommitmentConfig::recent())
                .expect("blockhash");
            to_fund_txs.par_iter_mut().for_each(|(k, tx)| {
@@ -847,16 +841,18 @@ pub fn create_token_accounts<T: Client>(
            );
            let request_ix =
                exchange_instruction::account_request(owner_pubkey, &new_keypair.pubkey());
            let message = Message::new(&[create_ix, request_ix], Some(&owner_pubkey));
            (
                (from_keypair, new_keypair),
                Transaction::new_unsigned_instructions(vec![create_ix, request_ix]),
                Transaction::new_unsigned(message),
            )
        })
        .collect();

        let accounts = to_create_txs
        let accounts: usize = to_create_txs
            .iter()
            .fold(0, |len, (_, tx)| len + tx.message().instructions.len() / 2);
            .map(|(_, tx)| tx.message().instructions.len() / 2)
            .sum();

        debug!(
            " Creating {} accounts in {} txs",
@@ -866,7 +862,7 @@ pub fn create_token_accounts<T: Client>(

        let mut retries = 0;
        while !to_create_txs.is_empty() {
            let (blockhash, _fee_calculator) = client
            let (blockhash, _fee_calculator, _last_valid_slot) = client
                .get_recent_blockhash_with_commitment(CommitmentConfig::recent())
                .expect("Failed to get blockhash");
            to_create_txs
@@ -995,7 +991,7 @@ pub fn airdrop_lamports<T: Client>(

    let mut tries = 0;
    loop {
        let (blockhash, _fee_calculator) = client
        let (blockhash, _fee_calculator, _last_valid_slot) = client
            .get_recent_blockhash_with_commitment(CommitmentConfig::recent())
            .expect("Failed to get blockhash");
        match request_airdrop_transaction(&faucet_addr, &id.pubkey(), amount_to_drop, blockhash) {
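The swapper, trader, and funding hunks above all make the same two mechanical changes: `get_recent_blockhash_with_commitment` now returns a third `_last_valid_slot` element, and the removed `Transaction::new_signed_instructions` / `new_unsigned_instructions` helpers are replaced by building a `Message` with an explicit fee payer. A self-contained sketch of the new construction pattern, using a plain System transfer rather than the exchange instructions (identifiers are from `solana_sdk` as used throughout this diff):

```rust
use solana_sdk::{
    hash::Hash,
    message::Message,
    pubkey::Pubkey,
    signature::{Keypair, Signer},
    system_instruction,
    transaction::Transaction,
};

// Sketch: build the instruction list, wrap it in a Message that names the fee
// payer, then sign as a separate step (or use Transaction::new_unsigned and
// sign later, as fund_keys does).
fn build_transfer(payer: &Keypair, to: &Pubkey, lamports: u64, blockhash: Hash) -> Transaction {
    let instruction = system_instruction::transfer(&payer.pubkey(), to, lamports);
    let message = Message::new(&[instruction], Some(&payer.pubkey()));
    Transaction::new(&[payer], message, blockhash)
}
```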
@@ -11,7 +11,7 @@ fn main() {
    solana_logger::setup();
    solana_metrics::set_panic_hook("bench-exchange");

    let matches = cli::build_args(solana_clap_utils::version!()).get_matches();
    let matches = cli::build_args(solana_version::version!()).get_matches();
    let cli_config = cli::extract_args(&matches);

    let cli::Config {
@@ -54,10 +54,9 @@ fn main() {
        );
    } else {
        info!("Connecting to the cluster");
        let (nodes, _archivers) =
            discover_cluster(&entrypoint_addr, num_nodes).unwrap_or_else(|_| {
                panic!("Failed to discover nodes");
            });
        let nodes = discover_cluster(&entrypoint_addr, num_nodes).unwrap_or_else(|_| {
            panic!("Failed to discover nodes");
        });

        let (client, num_clients) = get_multi_client(&nodes);
@@ -59,7 +59,7 @@ fn test_exchange_local_cluster() {
    let faucet_addr = addr_receiver.recv_timeout(Duration::from_secs(2)).unwrap();

    info!("Connecting to the cluster");
    let (nodes, _) =
    let nodes =
        discover_cluster(&cluster.entry_point_info.gossip, NUM_NODES).unwrap_or_else(|err| {
            error!("Failed to discover {} nodes: {:?}", NUM_NODES, err);
            exit(1);
@@ -86,7 +86,7 @@ fn test_exchange_bank_client() {
    solana_logger::setup();
    let (genesis_config, identity) = create_genesis_config(100_000_000_000_000);
    let mut bank = Bank::new(&genesis_config);
    bank.add_instruction_processor(id(), process_instruction);
    bank.add_builtin_program("exchange_program", id(), process_instruction);
    let clients = vec![BankClient::new(bank)];

    let mut config = Config::default();
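The bank-client test now registers the exchange program under a human-readable name via `add_builtin_program` instead of `add_instruction_processor`. A condensed sketch of the setup; `id` and `process_instruction` are assumed to come from the exchange program crate, as in the test's own imports:

```rust
use solana_runtime::{bank::Bank, bank_client::BankClient};
use solana_sdk::genesis_config::create_genesis_config;

fn setup_client() -> BankClient {
    let (genesis_config, _identity) = create_genesis_config(100_000);
    let mut bank = Bank::new(&genesis_config);
    // Builtins now carry a name alongside the program id and entrypoint.
    // (id() and process_instruction are the exchange program's, assumed in scope.)
    bank.add_builtin_program("exchange_program", id(), process_instruction);
    BankClient::new(bank)
}
```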
@@ -2,14 +2,18 @@
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-bench-streamer"
version = "1.0.0"
version = "1.2.13"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"

[dependencies]
clap = "2.33.0"
solana-clap-utils = { path = "../clap-utils", version = "1.0.0" }
solana-core = { path = "../core", version = "1.0.0" }
solana-logger = { path = "../logger", version = "1.0.0" }
solana-net-utils = { path = "../net-utils", version = "1.0.0" }
clap = "2.33.1"
solana-clap-utils = { path = "../clap-utils", version = "1.2.13" }
solana-streamer = { path = "../streamer", version = "1.2.13" }
solana-logger = { path = "../logger", version = "1.2.13" }
solana-net-utils = { path = "../net-utils", version = "1.2.13" }
solana-version = { path = "../version", version = "1.2.13" }

[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]
@@ -1,6 +1,6 @@
use clap::{crate_description, crate_name, App, Arg};
use solana_core::packet::{Packet, Packets, PacketsRecycler, PACKET_DATA_SIZE};
use solana_core::streamer::{receiver, PacketReceiver};
use solana_streamer::packet::{Packet, Packets, PacketsRecycler, PACKET_DATA_SIZE};
use solana_streamer::streamer::{receiver, PacketReceiver};
use std::cmp::max;
use std::net::{IpAddr, Ipv4Addr, SocketAddr, UdpSocket};
use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
@@ -52,7 +52,7 @@ fn main() -> Result<()> {

    let matches = App::new(crate_name!())
        .about(crate_description!())
        .version(solana_clap_utils::version!())
        .version(solana_version::version!())
        .arg(
            Arg::with_name("num-recv-sockets")
                .long("num-recv-sockets")
@@ -67,7 +67,8 @@ fn main() -> Result<()> {
    }

    let mut port = 0;
    let mut addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), 0);
    let ip_addr = IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0));
    let mut addr = SocketAddr::new(ip_addr, 0);

    let exit = Arc::new(AtomicBool::new(false));

@@ -75,7 +76,7 @@ fn main() -> Result<()> {
    let mut read_threads = Vec::new();
    let recycler = PacketsRecycler::default();
    for _ in 0..num_sockets {
        let read = solana_net_utils::bind_to(port, false).unwrap();
        let read = solana_net_utils::bind_to(ip_addr, port, false).unwrap();
        read.set_read_timeout(Some(Duration::new(1, 0))).unwrap();

        addr = read.local_addr().unwrap();
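`bind_to` now takes the bind address explicitly instead of assuming 0.0.0.0, which is why `main()` hoists `ip_addr` out of the `SocketAddr`. A tiny sketch of the updated call, assuming the `solana_net_utils` crate from this workspace; the boolean flag is passed through unchanged from the old signature:

```rust
use std::net::{IpAddr, Ipv4Addr};

fn main() {
    let ip_addr = IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0));
    // Port 0 lets the OS pick a free port, just as the benchmark does above.
    let socket = solana_net_utils::bind_to(ip_addr, 0, false).unwrap();
    println!("listening on {}", socket.local_addr().unwrap());
}
```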
@@ -2,36 +2,40 @@
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-bench-tps"
version = "1.0.0"
version = "1.2.13"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"

[dependencies]
bincode = "1.2.1"
clap = "2.33.0"
clap = "2.33.1"
log = "0.4.8"
rayon = "1.2.0"
serde_json = "1.0.46"
serde_yaml = "0.8.11"
solana-clap-utils = { path = "../clap-utils", version = "1.0.0" }
solana-core = { path = "../core", version = "1.0.0" }
solana-genesis = { path = "../genesis", version = "1.0.0" }
solana-client = { path = "../client", version = "1.0.0" }
solana-faucet = { path = "../faucet", version = "1.0.0" }
solana-librapay = { path = "../programs/librapay", version = "1.0.0", optional = true }
solana-logger = { path = "../logger", version = "1.0.0" }
solana-metrics = { path = "../metrics", version = "1.0.0" }
solana-measure = { path = "../measure", version = "1.0.0" }
solana-net-utils = { path = "../net-utils", version = "1.0.0" }
solana-runtime = { path = "../runtime", version = "1.0.0" }
solana-sdk = { path = "../sdk", version = "1.0.0" }
solana-move-loader-program = { path = "../programs/move_loader", version = "1.0.0", optional = true }
rayon = "1.3.0"
serde_json = "1.0.53"
serde_yaml = "0.8.12"
solana-clap-utils = { path = "../clap-utils", version = "1.2.13" }
solana-core = { path = "../core", version = "1.2.13" }
solana-genesis = { path = "../genesis", version = "1.2.13" }
solana-client = { path = "../client", version = "1.2.13" }
solana-faucet = { path = "../faucet", version = "1.2.13" }
solana-librapay = { path = "../programs/librapay", version = "1.2.13", optional = true }
solana-logger = { path = "../logger", version = "1.2.13" }
solana-metrics = { path = "../metrics", version = "1.2.13" }
solana-measure = { path = "../measure", version = "1.2.13" }
solana-net-utils = { path = "../net-utils", version = "1.2.13" }
solana-runtime = { path = "../runtime", version = "1.2.13" }
solana-sdk = { path = "../sdk", version = "1.2.13" }
solana-move-loader-program = { path = "../programs/move_loader", version = "1.2.13", optional = true }
solana-version = { path = "../version", version = "1.2.13" }

[dev-dependencies]
serial_test = "0.3.2"
serial_test = "0.4.0"
serial_test_derive = "0.4.0"
solana-local-cluster = { path = "../local-cluster", version = "1.0.0" }
solana-local-cluster = { path = "../local-cluster", version = "1.2.13" }

[features]
move = ["solana-librapay", "solana-move-loader-program"]

[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]
@@ -14,6 +14,7 @@ use solana_sdk::{
    commitment_config::CommitmentConfig,
    fee_calculator::FeeCalculator,
    hash::Hash,
    message::Message,
    pubkey::Pubkey,
    signature::{Keypair, Signer},
    system_instruction, system_transaction,
@@ -26,9 +27,9 @@ use std::{
    process::exit,
    sync::{
        atomic::{AtomicBool, AtomicIsize, AtomicUsize, Ordering},
        Arc, RwLock,
        Arc, Mutex, RwLock,
    },
    thread::{sleep, Builder},
    thread::{sleep, Builder, JoinHandle},
    time::{Duration, Instant},
};

@@ -55,7 +56,9 @@ type LibraKeys = (Keypair, Pubkey, Pubkey, Vec<Keypair>);
fn get_recent_blockhash<T: Client>(client: &T) -> (Hash, FeeCalculator) {
    loop {
        match client.get_recent_blockhash_with_commitment(CommitmentConfig::recent()) {
            Ok((blockhash, fee_calculator)) => return (blockhash, fee_calculator),
            Ok((blockhash, fee_calculator, _last_valid_slot)) => {
                return (blockhash, fee_calculator)
            }
            Err(err) => {
                info!("Couldn't get recent blockhash: {:?}", err);
                sleep(Duration::from_secs(1));
@@ -64,105 +67,63 @@ fn get_recent_blockhash<T: Client>(client: &T) -> (Hash, FeeCalculator) {
    }
}

pub fn do_bench_tps<T>(
    client: Arc<T>,
    config: Config,
    gen_keypairs: Vec<Keypair>,
    libra_args: Option<LibraKeys>,
) -> u64
fn wait_for_target_slots_per_epoch<T>(target_slots_per_epoch: u64, client: &Arc<T>)
where
    T: 'static + Client + Send + Sync,
{
    let Config {
        id,
        threads,
        thread_batch_sleep_ms,
        duration,
        tx_count,
        sustained,
        ..
    } = config;

    let mut source_keypair_chunks: Vec<Vec<&Keypair>> = Vec::new();
    let mut dest_keypair_chunks: Vec<VecDeque<&Keypair>> = Vec::new();
    assert!(gen_keypairs.len() >= 2 * tx_count);
    for chunk in gen_keypairs.chunks_exact(2 * tx_count) {
        source_keypair_chunks.push(chunk[..tx_count].iter().collect());
        dest_keypair_chunks.push(chunk[tx_count..].iter().collect());
    }

    let first_tx_count = loop {
        match client.get_transaction_count() {
            Ok(count) => break count,
            Err(err) => {
                info!("Couldn't get transaction count: {:?}", err);
                sleep(Duration::from_secs(1));
    if target_slots_per_epoch != 0 {
        info!(
            "Waiting until epochs are {} slots long..",
            target_slots_per_epoch
        );
        loop {
            if let Ok(epoch_info) = client.get_epoch_info() {
                if epoch_info.slots_in_epoch >= target_slots_per_epoch {
                    info!("Done epoch_info: {:?}", epoch_info);
                    break;
                }
                info!(
                    "Waiting for epoch: {} now: {}",
                    target_slots_per_epoch, epoch_info.slots_in_epoch
                );
            }
            sleep(Duration::from_secs(3));
        }
    };
    info!("Initial transaction count {}", first_tx_count);
    }
}

    let exit_signal = Arc::new(AtomicBool::new(false));

    // Setup a thread per validator to sample every period
    // collect the max transaction rate and total tx count seen
    let maxes = Arc::new(RwLock::new(Vec::new()));
    let sample_period = 1; // in seconds
fn create_sampler_thread<T>(
    client: &Arc<T>,
    exit_signal: &Arc<AtomicBool>,
    sample_period: u64,
    maxes: &Arc<RwLock<Vec<(String, SampleStats)>>>,
) -> JoinHandle<()>
where
    T: 'static + Client + Send + Sync,
{
    info!("Sampling TPS every {} second...", sample_period);
    let sample_thread = {
        let exit_signal = exit_signal.clone();
        let maxes = maxes.clone();
        let client = client.clone();
        Builder::new()
            .name("solana-client-sample".to_string())
            .spawn(move || {
                sample_txs(&exit_signal, &maxes, sample_period, &client);
            })
            .unwrap()
    };

    let shared_txs: SharedTransactions = Arc::new(RwLock::new(VecDeque::new()));

    let recent_blockhash = Arc::new(RwLock::new(get_recent_blockhash(client.as_ref()).0));
    let shared_tx_active_thread_count = Arc::new(AtomicIsize::new(0));
    let total_tx_sent_count = Arc::new(AtomicUsize::new(0));

    let blockhash_thread = {
        let exit_signal = exit_signal.clone();
        let recent_blockhash = recent_blockhash.clone();
        let client = client.clone();
        let id = id.pubkey();
        Builder::new()
            .name("solana-blockhash-poller".to_string())
            .spawn(move || {
                poll_blockhash(&exit_signal, &recent_blockhash, &client, &id);
            })
            .unwrap()
    };

    let s_threads: Vec<_> = (0..threads)
        .map(|_| {
            let exit_signal = exit_signal.clone();
            let shared_txs = shared_txs.clone();
            let shared_tx_active_thread_count = shared_tx_active_thread_count.clone();
            let total_tx_sent_count = total_tx_sent_count.clone();
            let client = client.clone();
            Builder::new()
                .name("solana-client-sender".to_string())
                .spawn(move || {
                    do_tx_transfers(
                        &exit_signal,
                        &shared_txs,
                        &shared_tx_active_thread_count,
                        &total_tx_sent_count,
                        thread_batch_sleep_ms,
                        &client,
                    );
                })
                .unwrap()
    let exit_signal = exit_signal.clone();
    let maxes = maxes.clone();
    let client = client.clone();
    Builder::new()
        .name("solana-client-sample".to_string())
        .spawn(move || {
            sample_txs(&exit_signal, &maxes, sample_period, &client);
        })
        .collect();
        .unwrap()
}

fn generate_chunked_transfers(
    recent_blockhash: Arc<RwLock<Hash>>,
    shared_txs: &SharedTransactions,
    shared_tx_active_thread_count: Arc<AtomicIsize>,
    source_keypair_chunks: Vec<Vec<&Keypair>>,
    dest_keypair_chunks: &mut Vec<VecDeque<&Keypair>>,
    threads: usize,
    duration: Duration,
    sustained: bool,
    libra_args: Option<LibraKeys>,
) {
    // generate and send transactions for the specified duration
    let start = Instant::now();
    let keypair_chunks = source_keypair_chunks.len();
@@ -170,7 +131,7 @@ where
    let mut chunk_index = 0;
    while start.elapsed() < duration {
        generate_txs(
            &shared_txs,
            shared_txs,
            &recent_blockhash,
            &source_keypair_chunks[chunk_index],
            &dest_keypair_chunks[chunk_index],
@@ -206,6 +167,135 @@ where
            reclaim_lamports_back_to_source_account = !reclaim_lamports_back_to_source_account;
        }
    }
}

fn create_sender_threads<T>(
    client: &Arc<T>,
    shared_txs: &SharedTransactions,
    thread_batch_sleep_ms: usize,
    total_tx_sent_count: &Arc<AtomicUsize>,
    threads: usize,
    exit_signal: &Arc<AtomicBool>,
    shared_tx_active_thread_count: &Arc<AtomicIsize>,
) -> Vec<JoinHandle<()>>
where
    T: 'static + Client + Send + Sync,
{
    (0..threads)
        .map(|_| {
            let exit_signal = exit_signal.clone();
            let shared_txs = shared_txs.clone();
            let shared_tx_active_thread_count = shared_tx_active_thread_count.clone();
            let total_tx_sent_count = total_tx_sent_count.clone();
            let client = client.clone();
            Builder::new()
                .name("solana-client-sender".to_string())
                .spawn(move || {
                    do_tx_transfers(
                        &exit_signal,
                        &shared_txs,
                        &shared_tx_active_thread_count,
                        &total_tx_sent_count,
                        thread_batch_sleep_ms,
                        &client,
                    );
                })
                .unwrap()
        })
        .collect()
}

pub fn do_bench_tps<T>(
    client: Arc<T>,
    config: Config,
    gen_keypairs: Vec<Keypair>,
    libra_args: Option<LibraKeys>,
) -> u64
where
    T: 'static + Client + Send + Sync,
{
    let Config {
        id,
        threads,
        thread_batch_sleep_ms,
        duration,
        tx_count,
        sustained,
        target_slots_per_epoch,
        ..
    } = config;

    let mut source_keypair_chunks: Vec<Vec<&Keypair>> = Vec::new();
    let mut dest_keypair_chunks: Vec<VecDeque<&Keypair>> = Vec::new();
    assert!(gen_keypairs.len() >= 2 * tx_count);
    for chunk in gen_keypairs.chunks_exact(2 * tx_count) {
        source_keypair_chunks.push(chunk[..tx_count].iter().collect());
        dest_keypair_chunks.push(chunk[tx_count..].iter().collect());
    }

    let first_tx_count = loop {
        match client.get_transaction_count() {
            Ok(count) => break count,
            Err(err) => {
                info!("Couldn't get transaction count: {:?}", err);
                sleep(Duration::from_secs(1));
            }
        }
    };
    info!("Initial transaction count {}", first_tx_count);

    let exit_signal = Arc::new(AtomicBool::new(false));

    // Setup a thread per validator to sample every period
    // collect the max transaction rate and total tx count seen
    let maxes = Arc::new(RwLock::new(Vec::new()));
    let sample_period = 1; // in seconds
    let sample_thread = create_sampler_thread(&client, &exit_signal, sample_period, &maxes);

    let shared_txs: SharedTransactions = Arc::new(RwLock::new(VecDeque::new()));

    let recent_blockhash = Arc::new(RwLock::new(get_recent_blockhash(client.as_ref()).0));
    let shared_tx_active_thread_count = Arc::new(AtomicIsize::new(0));
    let total_tx_sent_count = Arc::new(AtomicUsize::new(0));

    let blockhash_thread = {
        let exit_signal = exit_signal.clone();
        let recent_blockhash = recent_blockhash.clone();
        let client = client.clone();
        let id = id.pubkey();
        Builder::new()
            .name("solana-blockhash-poller".to_string())
            .spawn(move || {
                poll_blockhash(&exit_signal, &recent_blockhash, &client, &id);
            })
            .unwrap()
    };

    let s_threads = create_sender_threads(
        &client,
        &shared_txs,
        thread_batch_sleep_ms,
        &total_tx_sent_count,
        threads,
        &exit_signal,
        &shared_tx_active_thread_count,
    );

    wait_for_target_slots_per_epoch(target_slots_per_epoch, &client);

    let start = Instant::now();

    generate_chunked_transfers(
        recent_blockhash,
        &shared_txs,
        shared_tx_active_thread_count,
        source_keypair_chunks,
        &mut dest_keypair_chunks,
        threads,
        duration,
        sustained,
        libra_args,
    );

    // Stop the sampling threads so it will collect the stats
    exit_signal.store(true, Ordering::Relaxed);
@@ -563,11 +653,9 @@ impl<'a> FundingTransactions<'a> for Vec<(&'a Keypair, Transaction)> {
        let to_fund_txs: Vec<(&Keypair, Transaction)> = to_fund
            .par_iter()
            .map(|(k, t)| {
                let tx = Transaction::new_unsigned_instructions(system_instruction::transfer_many(
                    &k.pubkey(),
                    &t,
                ));
                (*k, tx)
                let instructions = system_instruction::transfer_many(&k.pubkey(), &t);
                let message = Message::new(&instructions, Some(&k.pubkey()));
                (*k, Transaction::new_unsigned(message))
            })
            .collect();
        make_txs.stop();
@@ -603,7 +691,9 @@ impl<'a> FundingTransactions<'a> for Vec<(&'a Keypair, Transaction)> {
        let too_many_failures = Arc::new(AtomicBool::new(false));
        let loops = if starting_txs < 1000 { 3 } else { 1 };
        // Only loop multiple times for small (quick) transaction batches
        let time = Arc::new(Mutex::new(Instant::now()));
        for _ in 0..loops {
            let time = time.clone();
            let failed_verify = Arc::new(AtomicUsize::new(0));
            let client = client.clone();
            let verified_txs = &verified_txs;
@@ -634,11 +724,15 @@ impl<'a> FundingTransactions<'a> for Vec<(&'a Keypair, Transaction)> {
                    remaining_count, verified_txs, failed_verify
                );
            }
            if remaining_count % 100 == 0 {
                info!(
                    "Verifying transfers... {} remaining, {} verified, {} failures",
                    remaining_count, verified_txs, failed_verify
                );
            if remaining_count > 0 {
                let mut time_l = time.lock().unwrap();
                if time_l.elapsed().as_secs() > 2 {
                    info!(
                        "Verifying transfers... {} remaining, {} verified, {} failures",
                        remaining_count, verified_txs, failed_verify
                    );
                    *time_l = Instant::now();
                }
            }

            verified
@@ -876,7 +970,7 @@ fn fund_move_keys<T: Client>(
    let libra_funding_key = Keypair::new();
    let tx = librapay_transaction::create_account(funding_key, &libra_funding_key, 1, blockhash);
    client
        .send_message(&[funding_key, &libra_funding_key], tx.message)
        .send_and_confirm_message(&[funding_key, &libra_funding_key], tx.message)
        .unwrap();

    info!("minting to funding keypair");
@@ -889,7 +983,7 @@ fn fund_move_keys<T: Client>(
        blockhash,
    );
    client
        .send_message(&[funding_key, libra_genesis_key], tx.message)
        .send_and_confirm_message(&[funding_key, libra_genesis_key], tx.message)
        .unwrap();

    info!("creating {} move accounts...", keypairs.len());
@@ -911,7 +1005,7 @@ fn fund_move_keys<T: Client>(
        let ser_size = bincode::serialized_size(&tx).unwrap();
        let mut keys = vec![funding_key];
        keys.extend(&keypairs);
        client.send_message(&keys, tx.message).unwrap();
        client.send_and_confirm_message(&keys, tx.message).unwrap();

        if i % 10 == 0 {
            info!(
@@ -929,12 +1023,12 @@ fn fund_move_keys<T: Client>(
        .iter()
        .map(|key| (key.pubkey(), total / NUM_FUNDING_KEYS as u64))
        .collect();
    let tx = Transaction::new_signed_instructions(
        &[funding_key],
        system_instruction::transfer_many(&funding_key.pubkey(), &pubkey_amounts),
        blockhash,
    );
    client.send_message(&[funding_key], tx.message).unwrap();
    let instructions = system_instruction::transfer_many(&funding_key.pubkey(), &pubkey_amounts);
    let message = Message::new(&instructions, Some(&funding_key.pubkey()));
    let tx = Transaction::new(&[funding_key], message, blockhash);
    client
        .send_and_confirm_message(&[funding_key], tx.message)
        .unwrap();
    let mut balance = 0;
    for _ in 0..20 {
        if let Ok(balance_) = client
@@ -957,7 +1051,7 @@ fn fund_move_keys<T: Client>(
    for (i, key) in libra_funding_keys.iter().enumerate() {
        let tx = librapay_transaction::create_account(&funding_keys[i], &key, 1, blockhash);
        client
            .send_message(&[&funding_keys[i], &key], tx.message)
            .send_and_confirm_message(&[&funding_keys[i], &key], tx.message)
            .unwrap();

        let tx = librapay_transaction::transfer(
@@ -970,7 +1064,7 @@ fn fund_move_keys<T: Client>(
            blockhash,
        );
        client
            .send_message(&[&funding_keys[i], &libra_funding_key], tx.message)
            .send_and_confirm_message(&[&funding_keys[i], &libra_funding_key], tx.message)
            .unwrap();

        info!("funded libra funding key {}", i);
@@ -1059,8 +1153,8 @@ pub fn generate_and_fund_keypairs<T: 'static + Client + Send + Sync>(
    // pay for the transaction fees in a new run.
    let enough_lamports = 8 * lamports_per_account / 10;
    if first_keypair_balance < enough_lamports || last_keypair_balance < enough_lamports {
        let (_blockhash, fee_calculator) = get_recent_blockhash(client.as_ref());
        let max_fee = fee_calculator.max_lamports_per_signature;
        let fee_rate_governor = client.get_fee_rate_governor().unwrap();
        let max_fee = fee_rate_governor.max_lamports_per_signature;
        let extra_fees = extra * max_fee;
        let total_keypairs = keypairs.len() as u64 + 1; // Add one for funding keypair
        let mut total = lamports_per_account * total_keypairs + extra_fees;
@@ -1134,7 +1228,7 @@ mod tests {
    use solana_runtime::bank::Bank;
    use solana_runtime::bank_client::BankClient;
    use solana_sdk::client::SyncClient;
    use solana_sdk::fee_calculator::FeeCalculator;
    use solana_sdk::fee_calculator::FeeRateGovernor;
    use solana_sdk::genesis_config::create_genesis_config;

    #[test]
@@ -1181,8 +1275,8 @@ mod tests {
    #[test]
    fn test_bench_tps_fund_keys_with_fees() {
        let (mut genesis_config, id) = create_genesis_config(10_000);
        let fee_calculator = FeeCalculator::new(11, 0);
        genesis_config.fee_calculator = fee_calculator;
        let fee_rate_governor = FeeRateGovernor::new(11, 0);
        genesis_config.fee_rate_governor = fee_rate_governor;
        let bank = Bank::new(&genesis_config);
        let client = Arc::new(BankClient::new(bank));
        let keypair_count = 20;
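The biggest change above is structural: `do_bench_tps` now delegates to the extracted `create_sampler_thread`, `create_sender_threads`, `generate_chunked_transfers`, and `wait_for_target_slots_per_epoch` helpers, and the new epoch wait runs after the sender threads start but before any transfers are generated. A runnable sketch of the wait's control flow with the RPC call stubbed out; `get_epoch_info` is the real client method, and the closure stands in for it here:

```rust
use std::{thread::sleep, time::Duration};

// Sketch: block until the cluster's epochs have grown to the target length.
// A target of 0 keeps the old behavior of not waiting at all.
fn wait_for_target_slots_per_epoch(target: u64, mut slots_in_epoch: impl FnMut() -> u64) {
    if target == 0 {
        return;
    }
    while slots_in_epoch() < target {
        sleep(Duration::from_secs(3));
    }
}

fn main() {
    let mut slots = 32; // epochs start short during warmup and double in length
    wait_for_target_slots_per_epoch(256, move || {
        slots *= 2; // stand-in for client.get_epoch_info().slots_in_epoch
        slots
    });
}
```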
@@ -1,6 +1,6 @@
use clap::{crate_description, crate_name, App, Arg, ArgMatches};
use solana_faucet::faucet::FAUCET_PORT;
use solana_sdk::fee_calculator::FeeCalculator;
use solana_sdk::fee_calculator::FeeRateGovernor;
use solana_sdk::signature::{read_keypair_file, Keypair};
use std::{net::SocketAddr, process::exit, time::Duration};

@@ -25,6 +25,7 @@ pub struct Config {
    pub multi_client: bool,
    pub use_move: bool,
    pub num_lamports_per_account: u64,
    pub target_slots_per_epoch: u64,
}

impl Default for Config {
@@ -43,10 +44,11 @@ impl Default for Config {
            client_ids_and_stake_file: String::new(),
            write_to_client_file: false,
            read_from_client_file: false,
            target_lamports_per_signature: FeeCalculator::default().target_lamports_per_signature,
            target_lamports_per_signature: FeeRateGovernor::default().target_lamports_per_signature,
            multi_client: true,
            use_move: false,
            num_lamports_per_account: NUM_LAMPORTS_PER_ACCOUNT_DEFAULT,
            target_slots_per_epoch: 0,
        }
    }
}
@@ -172,6 +174,15 @@ pub fn build_args<'a, 'b>(version: &'b str) -> App<'a, 'b> {
                    "Number of lamports per account.",
                ),
        )
        .arg(
            Arg::with_name("target_slots_per_epoch")
                .long("target-slots-per-epoch")
                .value_name("SLOTS")
                .takes_value(true)
                .help(
                    "Wait until epochs are this many slots long.",
                ),
        )
}

/// Parses a clap `ArgMatches` structure into a `Config`
@@ -259,5 +270,12 @@ pub fn extract_args<'a>(matches: &ArgMatches<'a>) -> Config {
        args.num_lamports_per_account = v.to_string().parse().expect("can't parse lamports");
    }

    if let Some(t) = matches.value_of("target_slots_per_epoch") {
        args.target_slots_per_epoch = t
            .to_string()
            .parse()
            .expect("can't parse target slots per epoch");
    }

    args
}
@@ -3,7 +3,7 @@ use solana_bench_tps::bench::{do_bench_tps, generate_and_fund_keypairs, generate
use solana_bench_tps::cli;
use solana_core::gossip_service::{discover_cluster, get_client, get_multi_client};
use solana_genesis::Base64Account;
use solana_sdk::fee_calculator::FeeCalculator;
use solana_sdk::fee_calculator::FeeRateGovernor;
use solana_sdk::signature::{Keypair, Signer};
use solana_sdk::system_program;
use std::{collections::HashMap, fs::File, io::prelude::*, path::Path, process::exit, sync::Arc};
@@ -15,7 +15,7 @@ fn main() {
    solana_logger::setup_with_default("solana=info");
    solana_metrics::set_panic_hook("bench-tps");

    let matches = cli::build_args(solana_clap_utils::version!()).get_matches();
    let matches = cli::build_args(solana_version::version!()).get_matches();
    let cli_config = cli::extract_args(&matches);

    let cli::Config {
@@ -41,7 +41,7 @@ fn main() {
        let (keypairs, _) = generate_keypairs(&id, keypair_count as u64);
        let num_accounts = keypairs.len() as u64;
        let max_fee =
            FeeCalculator::new(*target_lamports_per_signature, 0).max_lamports_per_signature;
            FeeRateGovernor::new(*target_lamports_per_signature, 0).max_lamports_per_signature;
        let num_lamports_per_account = (num_accounts - 1 + NUM_SIGNATURES_FOR_TXS * max_fee)
            / num_accounts
            + num_lamports_per_account;
@@ -67,11 +67,10 @@ fn main() {
    }

    info!("Connecting to the cluster");
    let (nodes, _archivers) =
        discover_cluster(&entrypoint_addr, *num_nodes).unwrap_or_else(|err| {
            eprintln!("Failed to discover {} nodes: {:?}", num_nodes, err);
            exit(1);
        });
    let nodes = discover_cluster(&entrypoint_addr, *num_nodes).unwrap_or_else(|err| {
        eprintln!("Failed to discover {} nodes: {:?}", num_nodes, err);
        exit(1);
    });

    let client = if *multi_client {
        let (client, num_clients) = get_multi_client(&nodes);
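A recurring rename across the three bench-tps files above: the fee parameters move from `FeeCalculator` to `FeeRateGovernor`, both in genesis (the `fee_rate_governor` field) and on the client (`get_fee_rate_governor`). A minimal sketch of the renamed type, mirroring the `(11, 0)` constructor arguments the test uses; the field access is the same one the diff relies on:

```rust
use solana_sdk::fee_calculator::FeeRateGovernor;

fn main() {
    // Same (target_lamports_per_signature, target_signatures_per_slot) pair
    // the old FeeCalculator::new took; the derived max_lamports_per_signature
    // is what the benchmarks use as a worst-case fee bound.
    let governor = FeeRateGovernor::new(11, 0);
    println!("max fee per signature: {}", governor.max_lamports_per_signature);
}
```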
book/.gitattributes (vendored): 1 line removed
@@ -1 +0,0 @@
theme/highlight.js binary
@@ -1,26 +0,0 @@
Building the Solana book
---

Install the book's dependencies, build, and test the book:

```bash
$ ./build.sh
```

Run any Rust tests in the markdown:

```bash
$ make test
```

Render markdown as HTML:

```bash
$ make build
```

Render and view the book:

```bash
$ make open
```
@@ -1,20 +0,0 @@
[deleted svgbob diagram; alignment lost in extraction. It showed a Client
calling into the Solana Runtime, where the Verifier loads Accounts
("LoadAccounts") and the Loader hands programs to the Interpreter.]
@@ -1,22 +0,0 @@
[deleted svgbob diagram; alignment lost in extraction. It showed Upstream
Validators feeding the TVU pipeline of Shred Fetch Stage -> Retransmit Stage ->
Replay Stage -> Storage Stage, with connections up to the Leader and out to the
Gossip Service and the Bank.]
@@ -1,10 +0,0 @@
[book]
title = "Solana: Blockchain Rebuilt for Scale"
authors = ["The Solana Team"]

[build]
build-dir = "html"
create-missing = false

[output.html]
theme = "theme"
@@ -1,16 +0,0 @@
#!/usr/bin/env bash
set -e

cd "$(dirname "$0")"

# md check
find src -name '*.md' -a \! -name SUMMARY.md |
while read -r file; do
  if ! grep -q '('"${file#src/}"')' src/SUMMARY.md; then
    echo "Error: $file missing from SUMMARY.md"
    exit 1
  fi
done


make -j"$(nproc)" test
@@ -1,43 +0,0 @@
BOB_SRCS=$(wildcard art/*.bob)
MSC_SRCS=$(wildcard art/*.msc)
MD_SRCS=$(wildcard src/*.md src/*/*.md)

SVG_IMGS=$(BOB_SRCS:art/%.bob=src/.gitbook/assets/%.svg) $(MSC_SRCS:art/%.msc=src/.gitbook/assets/%.svg)

TARGET=html/index.html
TEST_STAMP=src/tests.ok

all: $(TARGET)

svg: $(SVG_IMGS)

test: $(TEST_STAMP)

open: $(TEST_STAMP)
	mdbook build --open

watch: $(SVG_IMGS)
	mdbook watch

src/.gitbook/assets/%.svg: art/%.bob
	@mkdir -p $(@D)
	svgbob < $< > $@

src/.gitbook/assets/%.svg: art/%.msc
	@mkdir -p $(@D)
	mscgen -T svg -i $< -o $@

src/%.md: %.md
	@mkdir -p $(@D)
	@cp $< $@

$(TEST_STAMP): $(TARGET)
	mdbook test
	touch $@

$(TARGET): $(SVG_IMGS) $(MD_SRCS)
	mdbook build

clean:
	rm -f $(SVG_IMGS) src/tests.ok
	rm -rf html
|
||||
# Programming Model
|
||||
|
||||
An _app_ interacts with a Solana cluster by sending it _transactions_ with one or more _instructions_. The Solana _runtime_ passes those instructions to user-contributed _programs_. An instruction might, for example, tell a program to transfer _lamports_ from one _account_ to another or create an interactive contract that governs how lamports are transfered. Instructions are executed sequentially and atomically. If any instruction is invalid, any changes made within the transaction are discarded.
|
||||
|
||||
### Accounts and Signatures
|
||||
|
||||
Each transaction explicitly lists all account public keys referenced by the transaction's instructions. A subset of those public keys are each accompanied by a transaction signature. Those signatures signal on-chain programs that the account holder has authorized the transaction. Typically, the program uses the authorization to permit debiting the account or modifying its data.
|
||||
|
||||
The transaction also marks some accounts as _read-only accounts_. The runtime permits read-only accounts to be read concurrently. If a program attempts to modify a read-only account, the transaction is rejected by the runtime.
|
||||
|
||||
### Recent Blockhash
|
||||
|
||||
A Transaction includes a recent blockhash to prevent duplication and to give transactions lifetimes. Any transaction that is completely identical to a previous one is rejected, so adding a newer blockhash allows multiple transactions to repeat the exact same action. Transactions also have lifetimes that are defined by the blockhash, as any transaction whose blockhash is too old will be rejected.
|
||||
|
||||
### Instructions
|
||||
|
||||
Each instruction specifies a single program account \(which must be marked executable\), a subset of the transaction's accounts that should be passed to the program, and a data byte array instruction that is passed to the program. The program interprets the data array and operates on the accounts specified by the instructions. The program can return successfully, or with an error code. An error return causes the entire transaction to fail immediately.
|
||||
|
||||
## Deploying Programs to a Cluster
|
||||
|
||||

|
||||
|
||||
As shown in the diagram above a client creates a program and compiles it to an ELF shared object containing BPF bytecode and sends it to the Solana cluster. The cluster stores the program locally and makes it available to clients via a _program ID_. The program ID is a _public key_ generated by the client and is used to reference the program in subsequent transactions.
|
||||
|
||||
A program may be written in any programming language that can target the Berkley Packet Filter \(BPF\) safe execution environment. The Solana SDK offers the best support for C programs, which is compiled to BPF using the [LLVM compiler infrastructure](https://llvm.org).
|
||||
|
||||
## Storing State between Transactions
|
||||
|
||||
If the program needs to store state between transactions, it does so using _accounts_. Accounts are similar to files in operating systems such as Linux. Like a file, an account may hold arbitrary data and that data persists beyond the lifetime of a program. Also like a file, an account includes metadata that tells the runtime who is allowed to access the data and how. Unlike a file, the account includes metadata for the lifetime of the file. That lifetime is expressed in "tokens", which is a number of fractional native tokens, called _lamports_. Accounts are held in validator memory and pay "rent" to stay there. Each validator periodically scan all accounts and collects rent. Any account that drops to zero lamports is purged.
|
||||
|
||||
If an account is marked "executable", it will only be used by a _loader_ to run programs. For example, a BPF-compiled program is marked executable and loaded by the BPF loader. No program is allowed to modify the contents of an executable account.
|
||||
|
||||
An account also includes "owner" metadata. The owner is a program ID. The runtime grants the program write access to the account if its ID matches the owner. If an account is not owned by a program, the program is permitted to read its data and credit the account.
|
||||
|
||||
In the same way that a Linux user uses a path to look up a file, a Solana client uses public keys to look up accounts. To create an account, the client generates a _keypair_ and registers its public key using the `CreateAccount` instruction. The account created by `CreateAccount` is called a _system account_ and is owned by a built-in program called the System program. The System program allows clients to transfer lamports and assign account ownership.
The runtime only permits the owner to debit the account or modify its data. The program then defines additional rules for whether the client can modify accounts it owns. In the case of the System program, it allows users to transfer lamports by recognizing transaction signatures. If it sees the client signed the transaction using the keypair's _private key_, it knows the client authorized the token transfer.

After the runtime executes each of the transaction's instructions, it uses the account metadata to verify that none of the access rules were violated. If a program violates an access rule, the runtime discards all account changes made by all instructions and marks the transaction as failed.
## Smart Contracts

Programs don't always require transaction signatures, as the System program does. Instead, the program may manage _smart contracts_. A smart contract is a set of constraints that, once satisfied, signal to a program that a token transfer or account update is permitted. For example, one could use the Budget program to create a smart contract that authorizes a token transfer only after some date. Once presented with evidence that the date has passed, the contract progresses and the token transfer completes.
File diff suppressed because it is too large
@ -1,22 +0,0 @@
# Example: Tic-Tac-Toe

[Click here to play Tic-Tac-Toe](https://solana-example-tictactoe.herokuapp.com/) on the Solana testnet. Open the link and wait for another player to join, or open the link in a second browser tab to play against yourself. You will see that every move a player makes stores a transaction on the ledger.

## Build and run Tic-Tac-Toe locally

First fetch the latest release of the example code:
```bash
$ git clone https://github.com/solana-labs/example-tictactoe.git
$ cd example-tictactoe
$ TAG=$(git describe --tags $(git rev-list --tags --max-count=1))
$ git checkout $TAG
```
Next, follow the steps in the git repository's [README](https://github.com/solana-labs/example-tictactoe/blob/master/README.md).

## Getting lamports to users

You may have noticed you interacted with the Solana cluster without first needing to acquire lamports to pay transaction fees. Under the hood, the web app creates a new ephemeral identity and sends a request to an off-chain service for a signed transaction authorizing a user to start a new game. The service is called a _drone_. When the app sends the signed transaction to the Solana cluster, the drone's lamports are spent to pay the transaction fee and start the game. In a real-world app, the drone might require the user to watch an ad or pass a CAPTCHA before signing over its lamports.
@ -1,5 +0,0 @@
# Using Solana from the Command-line

This chapter describes the command-line tools for interacting with Solana. One could use these tools to send payments, stake validators, and check account balances.
File diff suppressed because it is too large
@ -1,4 +0,0 @@
# Implemented Design Proposals

The following design proposals are fully implemented.
@ -1,15 +0,0 @@
# Cluster Economics

**Subject to change.**

Solana’s crypto-economic system is designed to promote a healthy, long-term, self-sustaining economy with participant incentives aligned to the security and decentralization of the network. The main participants in this economy are validation-clients and replication-clients. Their contributions to the network, state validation and data storage respectively, and their requisite incentive mechanisms are discussed below.

The main channels of participant remittances are referred to as protocol-based rewards and transaction fees. Protocol-based rewards are issuances from a global, protocol-defined inflation rate. These rewards will constitute the bulk of the total reward delivered to replication and validation clients, with the remainder sourced from transaction fees. In the early days of the network, it is likely that protocol-based rewards, deployed based on a predefined issuance schedule, will drive the majority of participant incentives to join the network.
These protocol-based rewards, to be distributed to participating validation and replication clients, are to be a result of a global supply inflation rate, calculated per Solana epoch and distributed amongst the active validator set. As discussed further below, the per-annum inflation rate is based on a pre-determined disinflationary schedule. This provides the network with monetary supply predictability, which supports long-term economic stability and security.

Transaction fees are market-based participant-to-participant transfers, attached to network interactions as a necessary motivation and compensation for the inclusion and execution of a proposed transaction \(be it a state execution or proof-of-replication verification\). A mechanism for long-term economic stability and forking protection through partial burning of each transaction fee is also discussed below.

A high-level schematic of Solana’s crypto-economic design is shown below in **Figure 1**. The specifics of validation-client economics are described in the sections [Validation-client Economics](ed_validation_client_economics/), [State-validation Protocol-based Rewards](ed_validation_client_economics/ed_vce_state_validation_protocol_based_rewards.md), [State-validation Transaction Fees](ed_validation_client_economics/ed_vce_state_validation_transaction_fees.md) and [Replication-validation Transaction Fees](ed_validation_client_economics/ed_vce_replication_validation_transaction_fees.md). Also, the chapter titled [Validation Stake Delegation](ed_validation_client_economics/ed_vce_validation_stake_delegation.md) closes with a discussion of validator delegation opportunities and marketplaces. Additionally, in [Storage Rent Economics](ed_storage_rent_economics.md), we describe an implementation of storage rent to account for the externality costs of maintaining the active state of the ledger. The [Replication-client Economics](ed_replication_client_economics/) chapter will review the Solana network design for global ledger storage/redundancy and archiver-client economics \([Storage-replication rewards](ed_replication_client_economics/ed_rce_storage_replication_rewards.md)\) along with an archiver-to-validator delegation mechanism designed to aid participant on-boarding into the Solana economy, discussed in [Replication-client Reward Auto-delegation](ed_replication_client_economics/ed_rce_replication_client_reward_auto_delegation.md). An outline of features for an MVP economic design is discussed in the [Economic Design MVP](ed_mvp.md) section. Finally, in the [Attack Vectors](ed_attack_vectors.md) chapter, various attack vectors will be described and potential vulnerabilities explored and parameterized.

**Figure 1**: Schematic overview of Solana economic incentive design.
@ -1,14 +0,0 @@
# Attack Vectors

**Subject to change.**

## Colluding validation and replication clients

A colluding validation-client may adopt the strategy of marking PoReps from non-colluding archiver nodes as invalid in an attempt to maximize the rewards for the colluding archiver nodes. In this case, it isn’t feasible for the offended-against archiver nodes to petition the network for resolution, as this would result in a network-wide vote on each offending PoRep and create too much overhead for the network to progress adequately. Also, this mitigation attempt would still be vulnerable to a >= 51% staked colluder.

Alternatively, transaction fees from submitted PoReps are pooled and distributed across validation-clients in proportion to the number of valid PoReps discounted by the number of invalid PoReps as voted by each validator-client. Thus invalid votes are directly dis-incentivized through this reward channel. Invalid votes that are revealed by archiver nodes as fishing PoReps will not be discounted from the payout PoRep count.

Another collusion attack involves a validator-client who adopts the strategy of ignoring invalid PoReps from colluding archivers and voting them as valid. In this case, colluding archiver-clients would not have to store the data while still receiving rewards for validated PoReps. Additionally, colluding validator nodes would also receive rewards for validating these PoReps. To mitigate this attack, validators must randomly sample PoReps corresponding to the ledger block they are validating; because of this, multiple validators will receive the colluding archiver’s invalid submissions. These non-colluding validators will be incentivized to mark these PoReps as invalid, as they have no way to determine whether the proposed invalid PoRep is actually a fishing PoRep, for which a confirmation vote would result in the validator’s stake being slashed.

In this case, the proportion of time a colluding pair will be successful has an upper limit determined by the % of stake of the network claimed by the colluding validator. This also sets bounds on the value of such an attack. For example, if a colluding validator controls 10% of the total validator stake, transaction fees will be lost \(likely sent to the mining pool\) by the colluding archiver 90% of the time, and so the attack vector is only profitable if the per-PoRep reward is at least 90% higher than the average PoRep transaction fee. While, probabilistically, some colluding archiver-client PoReps will find their way to colluding validation-clients, the network can also monitor rates of paired \(validator + archiver\) discrepancies in voting patterns and censor identified colluders in these cases.
@ -1,14 +0,0 @@
# Economic Sustainability

**Subject to change.**

Long-term economic sustainability is one of the guiding principles of Solana’s economic design. While it is impossible to predict how decentralized economies will develop over time, especially economies with flexible decentralized governances, we can arrange economic components such that, under certain conditions, a sustainable economy may take shape in the long term. In the case of Solana’s network, these components take the form of token issuance \(via inflation\) and token burning.

The dominant remittances from the Solana mining pool are validator and archiver rewards. The disinflationary mechanism is a flat, protocol-specified and adjusted percentage of each transaction fee.

The archiver rewards are to be delivered to archivers as a portion of the network inflation after successful PoRep validation. The per-PoRep reward amount is determined as a function of the total network storage redundancy at the time of the PoRep validation and the network goal redundancy. This function is likely to take the form of a discount from a base reward, to be delivered when the network has achieved and maintained its goal redundancy. An example of such a reward function is shown in **Figure 3**.

**Figure 3**: Example PoRep reward design as a function of global network storage redundancy.

In the example shown in Figure 3, multiple per-PoRep base rewards are explored \(as a % of Tx Fee\) to be delivered when the global ledger replication redundancy meets 10X. When the global ledger replication redundancy is less than 10X, the base reward is discounted as a function of the square of the ratio of the actual ledger replication redundancy to the goal redundancy \(i.e. 10X\).
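As a concrete reading of the discount rule above: the base reward is scaled by the square of \(actual redundancy / goal redundancy\), and paid in full once the 10x goal is met. The function name and parameters in this sketch are illustrative assumptions, not protocol code:

```rust
/// Illustrative sketch of the PoRep reward discount described above.
/// Names and signature are assumptions for exposition, not protocol code.
fn porep_reward(base_reward: f64, actual_redundancy: f64, goal_redundancy: f64) -> f64 {
    if actual_redundancy >= goal_redundancy {
        // Goal (e.g. 10x) achieved and maintained: pay the full base reward.
        base_reward
    } else {
        // Below goal: discount by the square of the redundancy ratio.
        let ratio = actual_redundancy / goal_redundancy;
        base_reward * ratio * ratio
    }
}
```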
@ -1,16 +0,0 @@
# Economic Design MVP

**Subject to change.**

The preceding sections, outlined in the [Economic Design Overview](./), describe a long-term vision of a sustainable Solana economy. Of course, we don't expect the final implementation to perfectly match what has been described above. We intend to fully engage with network stakeholders throughout the implementation phases \(i.e. pre-testnet, testnet, mainnet\) to ensure the system supports, and is representative of, the various network participants' interests. The first step toward this goal, however, is outlining some desired MVP economic features to be available for early pre-testnet and testnet participants. Below is a rough sketch outlining basic economic functionality from which a more complete and functional system can be developed.

## MVP Economic Features

* Faucet to deliver testnet SOLs to validators for staking and application development.
* Mechanism by which validators are rewarded via network inflation.
* Ability to delegate tokens to validator nodes.
* Validator-set commission fees on interest from delegated tokens.
* Archivers to receive a fixed, arbitrary reward for submitting validated PoReps. Reward size mechanism \(i.e. PoRep reward as a function of total ledger redundancy\) to come later.
* Pooling of archiver PoRep transaction fees and weighted distribution to validators based on PoRep verification \(see [Replication-validation Transaction Fees](ed_validation_client_economics/ed_vce_replication_validation_transaction_fees.md)\). It will be useful to test this protection against attacks on testnet.
* Nice-to-have: auto-delegation of archiver rewards to validators.
@ -1,6 +0,0 @@
# Replication-client Economics

**Subject to change.**

Replication-clients should be rewarded for providing the network with storage space. Incentivization of the set of archivers provides data security through redundancy of the historical ledger. Replication nodes are rewarded in proportion to the amount of ledger data storage provided, as proved by successfully submitting Proofs of Replication to the cluster. These rewards are captured by generating and entering Proofs of Replication \(PoReps\) into the PoH stream, which can be validated by validation nodes as described in the [Replication-validation Transaction Fees](../ed_validation_client_economics/ed_vce_replication_validation_transaction_fees.md) chapter.
@ -1,8 +0,0 @@
# Replication-client Reward Auto-delegation

**Subject to change.**

The ability for Solana network participants to earn rewards by providing storage service is a unique on-boarding path that requires little hardware overhead and minimal upfront capital. It offers an avenue for individuals with extra storage space on their home laptops or PCs to contribute to the security of the network and become integrated into the Solana economy.

To enhance this on-boarding ramp and facilitate further participation and investment in the Solana economy, replication-clients have the opportunity to auto-delegate their rewards to validation-clients of their choice. Much like the automatic reinvestment of stock dividends, in this scenario an archiver-client can earn Solana tokens by providing some storage capacity to the network \(i.e. via submitting valid PoReps\), have the protocol-based rewards automatically assigned as delegation to a staked validator node of the archiver's choice, and earn interest, less a fee, from the validation-client's network participation.
@ -1,8 +0,0 @@
# Storage-replication Rewards

**Subject to change.**

Archiver-clients download, encrypt and submit PoReps for ledger block sections. PoReps submitted to the PoH stream, and subsequently validated, function as evidence that the submitting archiver client is indeed storing the assigned ledger block sections on local hard drive space as a service to the network. Therefore, archiver clients should earn protocol rewards proportional to the amount of storage, and the number of successfully validated PoReps, that they are verifiably providing to the network.

Additionally, archiver clients have the opportunity to capture a portion of slashed bounties \[TBD\] of dishonest validator clients. This can be accomplished by an archiver client submitting a verifiably false PoRep which a dishonest validator client receives and signs as a valid PoRep. This reward incentive is to prevent lazy validators and minimize validator-archiver collusion attacks; more on this below.
@ -1,18 +0,0 @@
## Storage Rent Economics

Each transaction that is submitted to the Solana ledger imposes costs. Transaction fees paid by the submitter, and collected by a validator, in theory account for the acute, transactional costs of validating and adding that data to the ledger. At the same time, our compensation design for archivers (see [Replication-client Economics](ed_replication_client_economics.md)), in theory, accounts for the long-term storage of the historical ledger. Unaccounted for in this process is the mid-term storage of active ledger state, necessarily maintained by the rotating validator set. This type of storage imposes costs not only on validators but also on the broader network: as active state grows, so do data transmission and validation overhead. To account for these costs, we describe here our preliminary design and implementation of storage rent.

Storage rent can be paid via one of two methods:

Method 1: Set it and forget it

With this approach, accounts with two years' worth of rent deposits secured are exempt from network rent charges. By maintaining this minimum balance, the broader network benefits from reduced liquidity and the account holder can trust that their `Account::data` will be retained for continual access/usage.
Method 2: Pay per byte

If an account has less than two years' worth of deposited rent, the network charges rent on a per-epoch basis, in credit for the next epoch (but in arrears when necessary). This rent is deducted at a rate specified in genesis, in lamports per kilobyte-year.
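As a toy arithmetic check of the two methods (the genesis rate and epoch length below are made-up assumptions for illustration, not real network parameters):

```rust
/// Toy illustration of the two rent-payment methods described above.
/// The rate and epoch length are made-up assumptions, not real parameters.
fn main() {
    let rate_per_kbyte_year: f64 = 1_000_000.0; // assumed genesis rate, lamports
    let account_kbytes: f64 = 4.0; // a hypothetical 4 kB account

    // Method 1: minimum balance for the two-year rent exemption.
    let exempt_minimum = 2.0 * rate_per_kbyte_year * account_kbytes;
    println!("rent-exempt minimum: {} lamports", exempt_minimum as u64);

    // Method 2: per-epoch charge, assuming ~182 epochs per year.
    let per_epoch = rate_per_kbyte_year * account_kbytes / 182.0;
    println!("per-epoch rent: {} lamports", per_epoch as u64);
}
```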
For information on the technical implementation details of this design, see the [Rent](rent.md) section.
@ -1,8 +0,0 @@
# Validation-client Economics

**Subject to change.**

Validator-clients are eligible to receive protocol-based \(i.e. inflation-based\) rewards issued via stake-based annual interest rates \(calculated per epoch\) by providing compute \(CPU+GPU\) resources to validate and vote on a given PoH state. These protocol-based rewards are determined through an algorithmic disinflationary schedule as a function of the total amount of circulating tokens. The network is expected to launch with an annual inflation rate around 15%, set to decrease by 15% per year until a long-term stable rate of 1-2% is reached. These issuances are to be split and distributed to participating validators and archivers, with around 90% of the issued tokens allocated for validator rewards. Because the network will be distributing a fixed amount of inflation rewards across the stake-weighted validator set, any individual validator's interest rate will be a function of the amount of staked SOL in relation to the circulating SOL.

Additionally, validator clients may earn revenue through fees via state-validation transactions and Proof-of-Replication \(PoRep\) transactions. For clarity, we separately describe the design and motivation of these revenue distributions for validation-clients below: state-validation protocol-based rewards, state-validation transaction fees and rent, and PoRep-validation transaction fees.
@ -1,11 +0,0 @@
# Replication-validation Transaction Fees

**Subject to change.**

As previously mentioned, validator-clients will also be responsible for validating PoReps submitted into the PoH stream by archiver-clients. In this case, validators are providing compute \(CPU/GPU\) and light storage resources to confirm that these replication proofs could only be generated by a client that is storing the referenced PoH ledger block.

While replication-clients are incentivized and rewarded through a protocol-based rewards schedule \(see [Replication-client Economics](../ed_replication_client_economics/)\), validator-clients will be incentivized to include and validate PoReps in PoH through collection of transaction fees associated with the submitted PoReps and distribution of protocol rewards proportional to the validated PoReps. As will be described in detail below, replication-client rewards are protocol-based and designed to reward based on a global data redundancy factor. I.e. the protocol will incentivize replication-client participation through rewards based on a target ledger redundancy \(e.g. 10x data redundancy\).

The validation of PoReps by validation-clients is computationally more expensive than state-validation \(detailed in the [Economic Sustainability](../ed_economic_sustainability.md) chapter\), thus the transaction fees are expected to be proportionally higher.

There are various attack vectors available to colluding validation and replication clients, also described in detail in [Economic Sustainability](../ed_economic_sustainability/README.md). To protect against various collusion attack vectors, for a given epoch, validator rewards are distributed across participating validation-clients in proportion to the number of validated PoReps in the epoch less the number of PoReps that mismatch the archivers' challenges. The PoRep challenge game is described in [Ledger Replication](https://github.com/solana-labs/solana/blob/master/book/src/ledger-replication.md#the-porep-game). This design rewards validators proportionally to the number of PoReps they process and validate, while providing negative pressure against validation-clients submitting lazy or malicious invalid votes on submitted PoReps \(note that it is computationally prohibitive to determine whether a validator-client has marked a valid PoRep as invalid\).
@ -1,30 +0,0 @@
# State-validation Protocol-based Rewards

**Subject to change.**

Validator-clients have two functional roles in the Solana network:

* Validate \(vote\) the current global state of the PoH along with any Proofs-of-Replication \(see [Replication Client Economics](../ed_replication_client_economics/)\) that they are eligible to validate.
* Be elected as ‘leader’ on a stake-weighted round-robin schedule, during which time they are responsible for collecting outstanding transactions and Proofs-of-Replication and incorporating them into the PoH, thus updating the global state of the network and providing chain continuity.

Validator-client rewards for these services are to be distributed at the end of each Solana epoch. As previously discussed, compensation for validator-clients is provided via a protocol-based annual inflation rate dispersed in proportion to the stake-weight of each validator \(see below\), along with leader-claimed transaction fees available during each leader rotation. I.e. during the time a given validator-client is elected as leader, it has the opportunity to keep a portion of each transaction fee, less a protocol-specified amount that is destroyed \(see [State-validation Transaction Fees](ed_vce_state_validation_transaction_fees.md)\). PoRep transaction fees are also collected by the leader client, and validator PoRep rewards are distributed in proportion to the number of validated PoReps less the number of PoReps that mismatch an archiver's challenge \(see [Replication-validation Transaction Fees](ed_vce_replication_validation_transaction_fees.md)\).
The effective protocol-based annual interest rate \(%\) per epoch received by validation-clients is to be a function of:

* the current global inflation rate, derived from the pre-determined disinflationary issuance schedule \(see [Validation-client Economics](.)\),
* the fraction of staked SOLs out of the current total circulating supply,
* the up-time/participation \[% of available slots that a validator had the opportunity to vote on\] of a given validator over the previous epoch.
The first factor is a function of protocol parameters only \(i.e. independent of validator behavior in a given epoch\) and results in a global validation reward schedule designed to incentivize early participation, provide clear monetary stability and provide optimal security in the network.

At any given point in time, a specific validator's interest rate can be determined based on the proportion of circulating supply that is staked by the network and the validator's uptime/activity in the previous epoch. For example, consider a hypothetical instance of the network with an initial circulating token supply of 250MM tokens, with an additional 250MM vesting over 3 years. Additionally, an inflation rate is specified at network launch of 7.5%, with a disinflationary schedule of 20% decrease in inflation rate per year \(the actual rates to be implemented are to be worked out during the testnet experimentation phase of mainnet launch\). With these broad assumptions, the 10-year inflation rate \(adjusted daily for this example\) is shown in **Figure 2**, while the total circulating token supply is illustrated in **Figure 3**. Neglected in this toy model is the inflation suppression due to the portion of each transaction fee that is to be destroyed.
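To make the toy model concrete, the example schedule can be computed directly. This sketch uses the illustrative parameters from the paragraph above and the 1.5% long-term rate from Figure 2's caption \(7.5% initial inflation, 20% disinflation per year\); these are not the rates the network will launch with:

```rust
/// Toy model of the example disinflationary schedule described above.
/// Parameters are illustrative only, not launch values.
fn main() {
    let mut rate: f64 = 7.5; // annual inflation, percent, year 0
    for year in 0..10 {
        println!("year {:2}: {:.2}% annual inflation", year, rate);
        rate = (rate * 0.8).max(1.5); // 20% disinflation, floored at 1.5%
    }
}
```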
 **Figure 2:** In this example schedule, the annual inflation rate \[%\] reduces at around 20% per year, until it reaches the long-term, fixed, 1.5% rate.

 **Figure 3:** The total token supply over a 10-year period, based on an initial 250MM tokens with the disinflationary inflation schedule as shown in **Figure 2**. Over time, the interest rate, at a fixed network staked percentage, will reduce concordant with network inflation. Validation-client interest rates are designed to be higher in the early days of the network to incentivize participation and jumpstart the network economy. As previously mentioned, the inflation rate is expected to stabilize near 1-2%, which also results in a fixed, long-term interest rate delivered to validator-clients. This value does not represent the total interest available to validator-clients, as transaction fees for state-validation and ledger storage replication \(PoReps\) are not accounted for here. Given these example parameters, annualized validator-specific interest rates can be determined based on the global fraction of tokens bonded as stake, as well as their uptime/activity in the previous epoch. For the purpose of this example, we assume 100% uptime for all validators and a split in interest-based rewards between validators and archiver nodes of 80%/20%. Additionally, the fraction of staked circulating supply is assumed to be constant. Based on these assumptions, an annualized validation-client interest rate schedule as a function of the % of circulating token supply that is staked is shown in **Figure 4**.


**Figure 4:** Shown here are example validator interest rates over time, neglecting transaction fees, segmented by the fraction of total circulating supply bonded as stake.

This epoch-specific, protocol-defined interest rate sets an upper limit on the _protocol-generated_ annual interest rate \(not the absolute total interest rate\) that can be delivered to any validator-client per epoch. The distributed interest rate per epoch is then discounted from this value based on the participation of the validator-client during the previous epoch.
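Read literally, the discounting rule above caps each validator at the protocol-defined epoch rate and scales it down by observed participation. A tiny sketch under that reading; the function name and the linear form of the discount are assumptions, not protocol code:

```rust
/// Sketch: discount the epoch's protocol-defined interest-rate cap by a
/// validator's observed participation (fraction of slots voted on).
/// The linear discount shown here is an illustrative assumption.
fn effective_epoch_rate(protocol_rate_cap: f64, participation: f64) -> f64 {
    protocol_rate_cap * participation.clamp(0.0, 1.0)
}
```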
@ -1,32 +0,0 @@
# Validation Stake Delegation

**Subject to change.**

Running a Solana validation-client requires relatively modest upfront hardware capital investment. **Table 2** provides an example hardware configuration to support ~1M tx/s with estimated ‘off-the-shelf’ costs:
| Component | Example | Estimated Cost |
| :--- | :--- | :--- |
| GPU | 2x 2080 Ti | $2500 |
| or | 4x 1080 Ti | $2800 |
| OS/Ledger storage | Samsung 860 Evo 2TB | $370 |
| Accounts storage | 2x Samsung 970 Pro M.2 512GB | $340 |
| RAM | 32 GB | $300 |
| Motherboard | AMD x399 | $400 |
| CPU | AMD Threadripper 2920x | $650 |
| Case |  | $100 |
| Power supply | EVGA 1600W | $300 |
| Network | > 500 Mbps |  |
| Network \(1\) | Google Webpass business, Bay Area, 1 Gbps unlimited | $5500/mo |
| Network \(2\) | Hurricane Electric, Bay Area colo, 1 Gbps | $500/mo |

**Table 2**: example high-end hardware setup for running a Solana client.
Despite the low barrier to entry as a validation-client from a capital investment perspective, as in any developing economy there will be much opportunity and need for trusted validation services, as evidenced by node reliability, UX/UI, APIs and other software accessibility tools. Additionally, although Solana’s validator node startup costs are nominal when compared to similar networks, they may still be somewhat restrictive for some potential participants. In the spirit of developing a truly decentralized, permissionless network, these interested parties still have two options to become involved in the Solana network/economy:

1. Delegation of previously acquired tokens with a reliable validation node to earn a portion of the interest generated.
2. Provide local storage space as a replication-client and receive rewards by submitting Proofs-of-Replication \(see [Replication-client Economics](../ed_replication_client_economics/)\).

   a. This participant has the additional option to directly delegate their earned storage rewards \([Replication-client Reward Auto-delegation](../ed_replication_client_economics/ed_rce_replication_client_reward_auto_delegation.md)\).

Delegation of tokens to validation-clients, via option 1, provides a way for passive Solana token holders to become part of the active Solana economy and earn interest rates proportional to the interest rate generated by the delegated validation-client. Additionally, this feature intends to create a healthy validation-client market, with potential validation-client nodes competing to build reliable, transparent and profitable delegation services.
@ -1,52 +0,0 @@
# Rent

Accounts on Solana may have owner-controlled state \(`Account::data`\) that's separate from the account's balance \(`Account::lamports`\). Since validators on the network need to maintain a working copy of this state in memory, the network charges a time-and-space based fee for this resource consumption, also known as Rent.

## Two-tiered rent regime

Accounts which maintain a minimum balance equivalent to 2 years of rent payments are exempt. Accounts whose balance falls below this threshold are charged rent at a rate specified in genesis, in lamports per kilobyte-year. The network charges rent on a per-epoch basis, in credit for the next epoch \(but in arrears when necessary\), and `Account::rent_epoch` keeps track of the next time rent should be collected from the account.
## Collecting rent

Rent is due at account creation time for one epoch's worth of time, and the new account has `Account::rent_epoch` of `current_epoch + 1`. After that, the bank deducts rent from accounts during normal transaction processing as part of the load phase.

If the account is in the exempt regime, `Account::rent_epoch` is simply pushed to `current_epoch + 1`.

If the account is non-exempt, the difference between the next epoch and `Account::rent_epoch` is used to calculate the amount of rent owed by this account \(via `Rent::due()`\). Any fractional lamports of the calculation are truncated. Rent due is deducted from `Account::lamports` and `Account::rent_epoch` is updated to the next epoch. If the amount of rent due is less than one lamport, no changes are made to the account.

Accounts whose balance is insufficient to satisfy the rent that would be due simply fail to load.
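A minimal sketch of the calculation just described, with assumed names and a simplified rate representation; the real `Rent::due()` lives in the runtime and differs in detail:

```rust
/// Simplified sketch of the rent calculation described above.
/// Names and types are assumptions for exposition, not the runtime's API.
struct Rent {
    /// Genesis-specified rate, in lamports per kilobyte-year.
    lamports_per_kbyte_year: u64,
}

impl Rent {
    /// Rent owed by an account holding `data_len` bytes over `epochs_elapsed`
    /// epochs, given `epochs_per_year`. Fractional lamports are truncated
    /// by the cast, matching the behavior described above.
    fn due(&self, data_len: usize, epochs_elapsed: u64, epochs_per_year: f64) -> u64 {
        let yearly = self.lamports_per_kbyte_year as f64 * data_len as f64 / 1024.0;
        (yearly * epochs_elapsed as f64 / epochs_per_year) as u64
    }
}
```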
A percentage of the rent collected is destroyed. The rest is distributed to validator accounts by stake weight, a la transaction fees, at the end of every slot.

## Read-only accounts

Read-only accounts are not charged rent in the current implementation.
## Design considerations, others considered

Under this design, it is possible to have accounts that linger, never get touched, and never have to pay rent. `Noop` instructions that name these accounts can be used to "garbage collect", but it'd also be possible for accounts that never get touched to migrate out of a validator's working set, thereby reducing memory consumption and obviating the need to charge rent.

### Ad-hoc collection

Collecting rent on an as-needed basis \(i.e. whenever accounts were loaded/accessed\) was considered. The issues with such an approach are:

* accounts loaded as "credit only" for a transaction could very reasonably be expected to have rent due, but would not be writable during any such transaction
* a mechanism to "beat the bushes" \(i.e. go find accounts that need to pay rent\) is desirable, lest accounts that are loaded infrequently get a free ride
### System instruction for collecting rent

Collecting rent via a system instruction was considered, as it would naturally have distributed rent to active and stake-weighted nodes and could have been done incrementally. However:

* it would have adversely affected network throughput
* it would require special-casing by the runtime, as accounts with non-SystemProgram owners may be debited by this instruction
* someone would have to issue the transactions

### Account scans on every epoch

Scanning the entire Bank for accounts that owe rent at the beginning of each epoch was considered. This would have been an expensive operation, and would require that the entire current state of the network be present on every validator at the beginning of each epoch.
@ -1,37 +0,0 @@
# Introduction

## What is Solana?

Solana is an open source project implementing a new, high-performance, permissionless blockchain. Solana is also the name of a company headquartered in San Francisco that maintains the open source project.

## About this Book

This book describes the Solana open source project, a blockchain built from the ground up for scale. The book covers why Solana is useful, how to use it, how it works, and why it will continue to work long after the company Solana closes its doors. The goal of the Solana architecture is to demonstrate there exists a set of software algorithms that, when used in combination to implement a blockchain, removes software as a performance bottleneck, allowing transaction throughput to scale proportionally with network bandwidth. The architecture goes on to satisfy all three desirable properties of a proper blockchain: it is scalable, secure and decentralized.

The architecture describes a theoretical upper bound of 710 thousand transactions per second \(tps\) on a standard gigabit network and 28.4 million tps on 40 gigabit. Furthermore, the architecture supports safe, concurrent execution of programs authored in general-purpose programming languages such as C or Rust.
## Disclaimer

All claims, content, designs, algorithms, estimates, roadmaps, specifications, and performance measurements described in this project are done with the author's best effort. It is up to the reader to check and validate their accuracy and truthfulness. Furthermore, nothing in this project constitutes a solicitation for investment.

## History of the Solana Codebase

In November of 2017, Anatoly Yakovenko published a whitepaper describing Proof of History, a technique for keeping time between computers that do not trust one another. From Anatoly's previous experience designing distributed systems at Qualcomm, Mesosphere and Dropbox, he knew that a reliable clock makes network synchronization very simple. When synchronization is simple, the resulting network can be blazing fast, bound only by network bandwidth.

Anatoly watched as blockchain systems without clocks, such as Bitcoin and Ethereum, struggled to scale beyond 15 transactions per second worldwide, while centralized payment systems such as Visa required peaks of 65,000 tps. Without a clock, it was clear they'd never graduate to being the global payment system or global supercomputer most had dreamed them to be. When Anatoly solved the problem of getting computers that don’t trust each other to agree on time, he knew he had the key to bring 40 years of distributed systems research to the world of blockchain. The resulting cluster wouldn't be just 10 times faster, or a 100 times, or a 1,000 times, but 10,000 times faster, right out of the gate!

Anatoly's implementation began in a private codebase and was implemented in the C programming language. Greg Fitzgerald, who had previously worked with Anatoly at semiconductor giant Qualcomm Incorporated, encouraged him to reimplement the project in the Rust programming language. Greg had worked on the LLVM compiler infrastructure, which underlies both the Clang C/C++ compiler and the Rust compiler. Greg claimed that the language's safety guarantees would improve software productivity and that its lack of a garbage collector would allow programs to perform as well as those written in C. Anatoly gave it a shot and, just two weeks later, had migrated his entire codebase to Rust. Sold. With plans to weave all the world's transactions together on a single, scalable blockchain, Anatoly called the project Loom.

On February 13th of 2018, Greg began prototyping the first open source implementation of Anatoly's whitepaper. The project was published to GitHub under the name Silk in the loomprotocol organization. On February 28th, Greg made his first release, demonstrating that 10 thousand signed transactions could be verified and processed in just over half a second. Shortly after, another former Qualcomm cohort, Stephen Akridge, demonstrated that throughput could be massively improved by offloading signature verification to graphics processors. Anatoly recruited Greg, Stephen and three others to co-found a company, then called Loom.

Around the same time, the Ethereum-based project Loom Network sprang up, and many people were confused about whether they were the same project. The Loom team decided it would rebrand. They chose the name Solana, a nod to a small beach town north of San Diego called Solana Beach, where Anatoly, Greg and Stephen lived and surfed for three years when they worked for Qualcomm. On March 28th, the team created the Solana Labs GitHub organization and renamed Greg's prototype Silk to Solana.

In June of 2018, the team scaled up the technology to run on cloud-based networks and on July 19th, published a 50-node, permissioned, public testnet consistently supporting bursts of 250,000 transactions per second. In a later release in December, called v0.10 Pillbox, the team published a permissioned testnet running 150 nodes on a gigabit network and demonstrated soak tests processing an _average_ of 200 thousand transactions per second with bursts over 500 thousand. The project was also extended to support on-chain programs written in the C programming language, run concurrently in a safe execution environment called BPF.
## What is a Solana Cluster?

A cluster is a set of computers that work together and can be viewed from the outside as a single system. A Solana cluster is a set of independently owned computers working together \(and sometimes against each other\) to verify the output of untrusted, user-submitted programs. A Solana cluster can be utilized any time a user wants to preserve an immutable record of events in time or programmatic interpretations of those events. One use is to track which of the computers did meaningful work to keep the cluster running. Another use might be to track the possession of real-world assets. In each case, the cluster produces a record of events called the ledger. It will be preserved for the lifetime of the cluster. As long as someone somewhere in the world maintains a copy of the ledger, the output of its programs \(which may contain a record of who possesses what\) will forever be reproducible, independent of the organization that launched it.

## What are SOLs?

SOL is the name of Solana's native token, which can be passed to nodes in a Solana cluster in exchange for running an on-chain program or validating its output. The system may perform micropayments of fractional SOLs, which are called _lamports_. They are named in honor of Solana's biggest technical influence, [Leslie Lamport](https://en.wikipedia.org/wiki/Leslie_Lamport). A lamport has a value of 0.000000001 SOL.
@ -1,85 +0,0 @@
# Offline Transaction Signing

Some security models require keeping signing keys, and thus the signing process, separated from transaction creation and network broadcast. Examples include:

* Collecting signatures from geographically disparate signers in a [multi-signature scheme](../api-reference/cli.md#multiple-witnesses)
* Signing transactions using an [airgapped](https://en.wikipedia.org/wiki/Air_gap_(networking)) signing device

This document describes using Solana's CLI to separately sign and submit a transaction.
## Commands Supporting Offline Signing

At present, the following commands support offline signing:

* [`delegate-stake`](../api-reference/cli.md#solana-delegate-stake)
* [`deactivate-stake`](../api-reference/cli.md#solana-deactivate-stake)
* [`pay`](../api-reference/cli.md#solana-pay)

## Signing Transactions Offline

To sign a transaction offline, pass the following arguments on the command line:

1) `--sign-only`, prevents the client from submitting the signed transaction to the network. Instead, the pubkey/signature pairs are printed to stdout.
2) `--blockhash BASE58_HASH`, allows the caller to specify the value used to fill the transaction's `recent_blockhash` field. This serves a number of purposes, namely:
   * Eliminates the need to connect to the network and query a recent blockhash via RPC
   * Enables the signers to coordinate the blockhash in a multiple-signature scheme
### Example: Offline Signing a Payment

Command

```bash
solana@offline$ solana pay --sign-only --blockhash 5Tx8F3jgSHx21CbtjwmdaKPLM5tWmreWAnPrbqHomSJF \
    recipient-keypair.json 1
```

Output

```text

Blockhash: 5Tx8F3jgSHx21CbtjwmdaKPLM5tWmreWAnPrbqHomSJF
Signers (Pubkey=Signature):
  FhtzLVsmcV7S5XqGD79ErgoseCLhZYmEZnz9kQg1Rp7j=4vC38p4bz7XyiXrk6HtaooUqwxTWKocf45cstASGtmrD398biNJnmTcUCVEojE7wVQvgdYbjHJqRFZPpzfCQpmUN

{"blockhash":"5Tx8F3jgSHx21CbtjwmdaKPLM5tWmreWAnPrbqHomSJF","signers":["FhtzLVsmcV7S5XqGD79ErgoseCLhZYmEZnz9kQg1Rp7j=4vC38p4bz7XyiXrk6HtaooUqwxTWKocf45cstASGtmrD398biNJnmTcUCVEojE7wVQvgdYbjHJqRFZPpzfCQpmUN"]}
```
## Submitting Offline Signed Transactions to the Network

To submit a transaction that has been signed offline to the network, pass the following arguments on the command line:

1) `--blockhash BASE58_HASH`, must be the same blockhash as was used to sign
2) `--signer BASE58_PUBKEY=BASE58_SIGNATURE`, one for each offline signer. This includes the pubkey/signature pairs directly in the transaction rather than signing it with any local keypair(s)
### Example: Submitting an Offline Signed Payment

Command

```bash
solana@online$ solana pay --blockhash 5Tx8F3jgSHx21CbtjwmdaKPLM5tWmreWAnPrbqHomSJF \
    --signer FhtzLVsmcV7S5XqGD79ErgoseCLhZYmEZnz9kQg1Rp7j=4vC38p4bz7XyiXrk6HtaooUqwxTWKocf45cstASGtmrD398biNJnmTcUCVEojE7wVQvgdYbjHJqRFZPpzfCQpmUN \
    recipient-keypair.json 1
```

Output

```text
4vC38p4bz7XyiXrk6HtaooUqwxTWKocf45cstASGtmrD398biNJnmTcUCVEojE7wVQvgdYbjHJqRFZPpzfCQpmUN
```
## Buying More Time to Sign

Typically a Solana transaction must be signed and accepted by the network within a number of slots from the blockhash in its `recent_blockhash` field (~2min at the time of this writing). If your signing procedure takes longer than this, a [Durable Transaction Nonce](durable-nonce.md) can give you the extra time you need.
@ -1,22 +0,0 @@
# Paper Wallet

This document describes how to create and use a paper wallet with the Solana CLI tools.

{% hint style="info" %}
We do not intend to advise on how to *securely* create or manage paper wallets. Please research the security concerns carefully.
{% endhint %}

## Overview

Solana provides a key generation tool to derive keys from BIP39-compliant seed phrases. Solana CLI commands for running a validator and staking tokens all support keypair input via seed phrases.

To learn more about the BIP39 standard, visit the Bitcoin BIPs GitHub repository [here](https://github.com/bitcoin/bips/blob/master/bip-0039.mediawiki).

{% page-ref page="installation.md" %}

{% page-ref page="usage.md" %}
@ -1,51 +0,0 @@
# Installation Guide

Follow this guide to set up Solana's key generation tool called `solana-keygen`.

{% hint style="warning" %}
After installation, ensure your version is `0.23.1` or higher by running `solana-keygen -V`
{% endhint %}

## Download

First, download the latest release tarball from GitHub.

1. Set up the download URL

```bash
solana_downloads=https://github.com/solana-labs/solana/releases/latest/download
```

2. Specify the download file based on your machine

**MacOS**
```bash
solana_release=solana-release-x86_64-apple-darwin.tar.bz2
```

**Linux**
```bash
solana_release=solana-release-x86_64-unknown-linux-gnu.tar.bz2
```

3. Download

```bash
curl -L -sSf -o solana-release.tar.bz2 $solana_downloads/$solana_release
```

## Extract

Next, extract the tarball:
```bash
tar xf solana-release.tar.bz2
```

## Add to "PATH"

Now add the tool to your PATH environment variable with the following command:
```bash
export PATH="$(pwd)/solana-release/bin:${PATH}"
```

## Check

Finally, check that `solana-keygen` can be run by running:
```bash
solana-keygen -V
```
@ -1,280 +0,0 @@
# Paper Wallet Usage

Solana commands can be run without ever saving a keypair to disk on a machine. If avoiding writing a private key to disk is a security concern of yours, you've come to the right place.

{% hint style="warning" %}
Even using this secure input method, it's still possible that a private key gets written to disk by unencrypted memory swaps. It is the user's responsibility to protect against this scenario.
{% endhint %}
## Creating a Paper Wallet

Using the `solana-keygen` tool, it is possible to generate new seed phrases as well as derive a keypair from an existing seed phrase and (optional) passphrase. The seed phrase and passphrase can be used together as a paper wallet. As long as you keep your seed phrase and passphrase stored safely, you can use them to access your account.

{% hint style="info" %}
For more information about how seed phrases work, review this [Bitcoin Wiki page](https://en.bitcoin.it/wiki/Seed_phrase).
{% endhint %}
### Seed Phrase Generation

Generating a new keypair can be done using the `solana-keygen new` command. The command will generate a random seed phrase, ask you to enter an optional passphrase, and then will display the derived public key and the generated seed phrase for your paper wallet.

After copying down your seed phrase, you can use the [public key derivation](#public-key-derivation) instructions to verify that you have not made any errors.

```bash
solana-keygen new --no-outfile
```

{% hint style="warning" %}
If the `--no-outfile` flag is **omitted**, the default behavior is to write the keypair to `~/.config/solana/id.json`
{% endhint %}

{% hint style="info" %}
For added security, increase the seed phrase word count using the `--word-count` argument
{% endhint %}

For full usage details run:

```bash
solana-keygen new --help
```
### Public Key Derivation

Public keys can be derived from a seed phrase and a passphrase if you choose to use one. This is useful for using an offline-generated seed phrase to derive a valid public key. The `solana-keygen pubkey` command will walk you through entering your seed phrase and a passphrase if you chose to use one.

```bash
solana-keygen pubkey ASK
```

{% hint style="info" %}
Note that you could potentially use different passphrases for the same seed phrase. Each unique passphrase will yield a different keypair.
{% endhint %}

The `solana-keygen` tool uses the same BIP39 standard English word list to validate seed phrases as it does to generate them. If your seed phrase was generated with another tool that uses a different word list, you can still use `solana-keygen`, but will need to pass the `--skip-seed-phrase-validation` argument and forego this validation.

```bash
solana-keygen pubkey ASK --skip-seed-phrase-validation
```

{% hint style="info" %}
Copy the derived public key to a USB stick for easy usage on networked computers
{% endhint %}

{% hint style="info" %}
A common next step is to [check the balance](#checking-account-balance) of the account associated with a public key
{% endhint %}

For full usage details run:

```bash
solana-keygen pubkey --help
```
## Verifying the Keypair

A keypair can be verified by following a variation on the [offline signing](../offline-signing/README.md) procedure with a dummy transaction.

### Create and Sign a Dummy Transaction

Use offline signing to acquire the signature of a dummy transaction that can be verified in the next step. A 0 Lamport [transfer](../cli/usage.md#solana-transfer) is used to prevent inadvertent loss of funds. Additionally, an improbable _blockhash_ value is specified, as well as using the address of the _system program_ for the `TO` argument, to ensure the transaction would be rejected by the _cluster_ should it be submitted in error.
Command

```text
solana transfer 11111111111111111111111111111111 0 --sign-only \
    --ask-seed-phrase keypair --blockhash 11111111111111111111111111111111
```

Prompt for seed phrase

```text
[keypair] seed phrase:
[keypair] If this seed phrase has an associated passphrase, enter it now. Otherwise, press ENTER to continue:
Recovered pubkey `AjTz9EX6vXB6EboKpFm7SwrbDannb6icjvEE632D3rfi`. Continue? (y/n): y
```

Output

```text
Blockhash: 11111111111111111111111111111111
Signers (Pubkey=Signature):
  AjTz9EX6vXB6EboKpFm7SwrbDannb6icjvEE632D3rfi=3uZndChSmPoYfaCihC993E7EAHKDsuu53Ge6Dk1K6ULwhJkgcgiHNm9J1Geqq2azW6PKxQTFjC8rMm5bGxRcYWA

{"blockhash":"11111111111111111111111111111111","signers":["AjTz9EX6vXB6EboKpFm7SwrbDannb6icjvEE632D3rfi=3uZndChSmPoYfaCihC993E7EAHKDsuu53Ge6Dk1K6ULwhJkgcgiHNm9J1Geqq2azW6PKxQTFjC8rMm5bGxRcYWA"]}
```
### Verify the Signature

Use the _Signers_ output from the [previous step](#create-and-sign-a-dummy-transaction) to reconstruct the transaction, this time specifying the _pubkey_ and _signature_ as in the submission step of [offline signing](../offline-signing/README.md). That is, the `--from` and `--fee-payer` are explicitly set to the _pubkey_ rather than being taken from the keypair (which is not queried this time).

Command

```text
solana transfer 11111111111111111111111111111111 0 --sign-only --from AjTz9EX6vXB6EboKpFm7SwrbDannb6icjvEE632D3rfi \
    --signer AjTz9EX6vXB6EboKpFm7SwrbDannb6icjvEE632D3rfi=3uZndChSmPoYfaCihC993E7EAHKDsuu53Ge6Dk1K6ULwhJkgcgiHNm9J1Geqq2azW6PKxQTFjC8rMm5bGxRcYWA \
    --blockhash 11111111111111111111111111111111 --fee-payer AjTz9EX6vXB6EboKpFm7SwrbDannb6icjvEE632D3rfi
```

Output

```text
Blockhash: 11111111111111111111111111111111
Signers (Pubkey=Signature):
  AjTz9EX6vXB6EboKpFm7SwrbDannb6icjvEE632D3rfi=3uZndChSmPoYfaCihC993E7EAHKDsuu53Ge6Dk1K6ULwhJkgcgiHNm9J1Geqq2azW6PKxQTFjC8rMm5bGxRcYWA

{"blockhash":"11111111111111111111111111111111","signers":["AjTz9EX6vXB6EboKpFm7SwrbDannb6icjvEE632D3rfi=3uZndChSmPoYfaCihC993E7EAHKDsuu53Ge6Dk1K6ULwhJkgcgiHNm9J1Geqq2azW6PKxQTFjC8rMm5bGxRcYWA"]}
```
### An Example of Failure

To simulate an error, the [verification step](#verify-the-signature) is repeated, but with a corrupted _signature_ (the last letter is changed from "A" to "B").

Command

```text
solana transfer 11111111111111111111111111111111 0 --sign-only --from AjTz9EX6vXB6EboKpFm7SwrbDannb6icjvEE632D3rfi \
    --signer AjTz9EX6vXB6EboKpFm7SwrbDannb6icjvEE632D3rfi=3uZndChSmPoYfaCihC993E7EAHKDsuu53Ge6Dk1K6ULwhJkgcgiHNm9J1Geqq2azW6PKxQTFjC8rMm5bGxRcYWB \
    --blockhash 11111111111111111111111111111111 --fee-payer AjTz9EX6vXB6EboKpFm7SwrbDannb6icjvEE632D3rfi
```

Output (Error)

```text
Error: BadParameter("Transaction construction failed, incorrect signature or public key provided")
```
## Checking Account Balance

All that is needed to check an account balance is the public key of an account. To retrieve public keys securely from a paper wallet, follow the [Public Key Derivation](#public-key-derivation) instructions on an [air gapped computer](https://en.wikipedia.org/wiki/Air_gap_\(networking\)). Public keys can then be typed manually or transferred via a USB stick to a networked machine.

Next, configure the `solana` CLI tool to connect to a particular cluster:

```bash
solana config set --url <CLUSTER URL> # (i.e. http://devnet.solana.com:8899)
```

Finally, to check the balance, run the following command:

```bash
solana balance <PUBKEY>
```

In order to check a list of public keys quickly, append public keys to a file, one per line, like so:

`public_keys.txt`
```bash
7hTw3XhprjT2DkVxVixtig9eZwHTZ2rksTSYN7Jh5niZ
9ufAiSyboCZmmEsoStgLYQfnx9KfqP1ZtDK8Wr1j8SJV
# ...
```

And run the following command:
```bash
while read PUBLIC_KEY;
do echo "$PUBLIC_KEY: $(solana balance "$PUBLIC_KEY" | tail -n1)";
done < public_keys.txt
```
## Running a Validator

In order to run a validator, you will need to specify an "identity keypair"
which will be used to fund all of the vote transactions signed by your validator.
Rather than specifying a path with `--identity-keypair <PATH>`, you can use the
`--ask-seed-phrase` option.

```bash
solana-validator --ask-seed-phrase identity-keypair --ledger ...

[identity-keypair] seed phrase: 🔒
[identity-keypair] If this seed phrase has an associated passphrase, enter it now. Otherwise, press ENTER to continue:
```

The `--ask-seed-phrase` option accepts multiple keypairs. If you wish to use this
input method for your voting keypair as well, you can do the following:

```bash
solana-validator --ask-seed-phrase identity-keypair voting-keypair --ledger ...

[identity-keypair] seed phrase: 🔒
[identity-keypair] If this seed phrase has an associated passphrase, enter it now. Otherwise, press ENTER to continue:
[voting-keypair] seed phrase: 🔒
[voting-keypair] If this seed phrase has an associated passphrase, enter it now. Otherwise, press ENTER to continue:
```

Refer to the following page for a comprehensive guide on running a validator:

{% page-ref page="../running-validator/README.md" %}
## Delegating Stake

Solana CLI tooling supports secure keypair input for stake delegation. To do so,
first create a stake account with some SOL. Use the special `ASK` keyword to
trigger a seed phrase input prompt for the stake account and use
`--keypair ASK` to securely input the funding keypair.

```bash
solana create-stake-account ASK 1 --keypair ASK

[stake_account] seed phrase: 🔒
[stake_account] If this seed phrase has an associated passphrase, enter it now. Otherwise, press ENTER to continue:
[keypair] seed phrase: 🔒
[keypair] If this seed phrase has an associated passphrase, enter it now. Otherwise, press ENTER to continue:
```

Then, to delegate that stake to a validator, use `--keypair ASK` to
securely input the funding keypair.

```bash
solana delegate-stake --keypair ASK <STAKE_ACCOUNT_PUBKEY> <VOTE_ACCOUNT_PUBKEY>

[keypair] seed phrase: 🔒
[keypair] If this seed phrase has an associated passphrase, enter it now. Otherwise, press ENTER to continue:
```

Refer to the following page for a comprehensive guide on delegating stake:

{% page-ref page="../running-validator/validator-stake.md" %}

---

{% page-ref page="../api-reference/cli.md" %}
@ -1,4 +0,0 @@

# Accepted Design Proposals

The following architectural proposals have been accepted by the Solana team, but are not yet fully implemented. The proposals may be implemented as described, implemented differently as issues in the designs become evident, or not implemented at all. If implemented, the descriptions will be moved from this section to earlier chapters in a future version of this book.
@ -1,71 +0,0 @@

# Cross-Program Invocation

## Problem

In today's implementation a client can create a transaction that modifies two accounts, each owned by a separate on-chain program:

```text
let message = Message::new(vec![
    token_instruction::pay(&alice_pubkey),
    acme_instruction::launch_missiles(&bob_pubkey),
]);
client.send_message(&[&alice_keypair, &bob_keypair], &message);
```

The current implementation does not, however, allow the `acme` program to conveniently invoke `token` instructions on the client's behalf:

```text
let message = Message::new(vec![
    acme_instruction::pay_and_launch_missiles(&alice_pubkey, &bob_pubkey),
]);
client.send_message(&[&alice_keypair, &bob_keypair], &message);
```

Currently, there is no way to create instruction `pay_and_launch_missiles` that executes `token_instruction::pay` from the `acme` program. The workaround is to extend the `acme` program with the implementation of the `token` program, and create `token` accounts with `ACME_PROGRAM_ID`, which the `acme` program is permitted to modify. With that workaround, `acme` can modify token-like accounts created by the `acme` program, but not token accounts created by the `token` program.

## Proposed Solution

The goal of this design is to modify Solana's runtime such that an on-chain program can invoke an instruction from another program.

Given two on-chain programs `token` and `acme`, each implementing instructions `pay()` and `launch_missiles()` respectively, we would ideally like to implement the `acme` module with a call to a function defined in the `token` module:

```text
use token;

fn launch_missiles(keyed_accounts: &[KeyedAccount]) -> Result<()> {
    ...
}

fn pay_and_launch_missiles(keyed_accounts: &[KeyedAccount]) -> Result<()> {
    token::pay(&keyed_accounts[1..])?;

    launch_missiles(keyed_accounts)?;
}
```

The above code would require that the `token` crate be dynamically linked, so that a custom linker could intercept calls and validate accesses to `keyed_accounts`. That is, even though the client intends to modify both `token` and `acme` accounts, only the `token` program is permitted to modify the `token` account, and only the `acme` program is permitted to modify the `acme` account.

Backing off from that ideal cross-program call, a slightly more verbose solution is to expose token's existing `process_instruction()` entrypoint to the acme program:

```text
use token_instruction;

fn launch_missiles(keyed_accounts: &[KeyedAccount]) -> Result<()> {
    ...
}

fn pay_and_launch_missiles(keyed_accounts: &[KeyedAccount]) -> Result<()> {
    let alice_pubkey = keyed_accounts[1].key;
    let instruction = token_instruction::pay(&alice_pubkey);
    process_instruction(&instruction)?;

    launch_missiles(keyed_accounts)?;
}
```

where `process_instruction()` is built into Solana's runtime and responsible for routing the given instruction to the `token` program via the instruction's `program_id` field. Before invoking `pay()`, the runtime must also ensure that `acme` didn't modify any accounts owned by `token`. It does this by calling `runtime::verify_account_changes()` and then afterward updating all the `pre_*` variables to tentatively commit `acme`'s account modifications. After `pay()` completes, the runtime must again ensure that `token` didn't modify any accounts owned by `acme`. It should call `verify_account_changes()` again, but this time with the `token` program ID. Lastly, after `pay_and_launch_missiles()` completes, the runtime must call `verify_account_changes()` one more time, where it normally would, but using all updated `pre_*` variables. If executing `pay_and_launch_missiles()` up to `pay()` made no invalid account changes, `pay()` made no invalid changes, and executing from `pay()` until `pay_and_launch_missiles()` returns made no invalid changes, then the runtime can transitively assume `pay_and_launch_missiles()` as a whole made no invalid account changes, and therefore commit all account modifications.
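
This sequencing can be sketched as runtime pseudocode. This is only an illustrative sketch: the `PreState` type, the `invoke` helper, and the exact `verify_account_changes()` signature are placeholders, not the proposed implementation.

```text
// Illustrative runtime pseudocode; names and signatures are placeholders.
fn invoke(
    caller_id: &Pubkey,          // `acme`
    instruction: &Instruction,   // routed to `token` via `program_id`
    accounts: &mut [Account],
    pre_state: &mut PreState,    // the `pre_*` variables
) -> Result<()> {
    // `acme` must not have modified accounts it does not own
    verify_account_changes(caller_id, pre_state, accounts)?;
    // tentatively commit `acme`'s account modifications
    pre_state.update(accounts);

    // route the instruction to the `token` program's entrypoint
    process_instruction(instruction, accounts)?;

    // `token` must not have modified accounts it does not own
    verify_account_changes(&instruction.program_id, pre_state, accounts)?;
    pre_state.update(accounts);
    Ok(())
}
// After `pay_and_launch_missiles()` returns, the runtime calls
// `verify_account_changes()` once more with the updated `pre_*` variables.
```
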

### Setting `KeyedAccount.is_signer`

When `process_instruction()` is invoked, the runtime must create a new `KeyedAccounts` parameter using the signatures from the _original_ transaction data. Since the `token` program is immutable and existed on-chain prior to the `acme` program, the runtime can safely treat the transaction signature as a signature of a transaction with a `token` instruction. When the runtime sees the given instruction references `alice_pubkey`, it looks up the key in the transaction to see if that key corresponds to a transaction signature. In this case it does, and so the runtime sets `KeyedAccount.is_signer`, thereby authorizing the `token` program to modify Alice's account.
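
A minimal sketch of that lookup, assuming the runtime has the original transaction at hand (the helper name and field access are illustrative; in a Solana `Message`, the first `num_required_signatures` account keys are the signers):

```text
// Illustrative only: how the runtime might set `is_signer` from the
// original transaction's signatures.
fn mark_signers(keyed_accounts: &mut [KeyedAccount], tx: &Transaction) {
    // The first `num_required_signatures` account keys of the original
    // transaction are the keys that signed it.
    let signer_keys = &tx.message.account_keys[..tx.signatures.len()];
    for keyed_account in keyed_accounts.iter_mut() {
        // e.g. `alice_pubkey` appears among the transaction signers, so the
        // `token` program is authorized to modify Alice's account
        keyed_account.is_signer = signer_keys.contains(keyed_account.key);
    }
}
```
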

@ -1,137 +0,0 @@

# Ledger Replication

Replication behavior yet to be implemented.

## Storage epoch

The storage epoch should be the number of slots which results in around 100GB-1TB of ledger to be generated for archivers to store. Archivers will start storing ledger when a given fork has a high probability of not being rolled back.

## Validator behavior

1. Every NUM\_KEY\_ROTATION\_TICKS the validator validates samples received from
   archivers. It signs the PoH hash at that point and uses the following
   algorithm with the signature as the input (see the sketch after this list):
   * The low 5 bits of the first byte of the signature create an index into
     another starting byte of the signature.
   * The validator then looks at the set of storage proofs where the byte of
     the proof's sha state vector starting from the low byte matches exactly
     with the chosen byte\(s\) of the signature.
   * If the set of proofs is larger than the validator can handle, then it
     increases to matching 2 bytes in the signature.
   * The validator continues to increase the number of matching bytes until a
     workable set is found.
   * It then creates a mask of valid proofs and fake proofs and sends it to
     the leader. This is a storage proof confirmation transaction.
2. After a lockout period of NUM\_SECONDS\_STORAGE\_LOCKOUT seconds, the
   validator then submits a storage proof claim transaction, which causes the
   distribution of the storage reward, if no challenges were seen for the proof,
   to the validators and archivers party to the proofs.
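
A rough sketch of the sampling loop in step 1, assuming `StorageProof` carries the submitted `sha_state` bytes and `max` is the number of proofs the validator can handle (all names are illustrative, not part of the proposal):

```text
// Illustrative sketch of the proof-sampling loop in step 1.
fn select_proofs(sig: &[u8], proofs: &[StorageProof], max: usize) -> Vec<StorageProof> {
    // low 5 bits of the first signature byte index another byte of the signature
    let offset = (sig[0] & 0x1f) as usize;
    let mut num_bytes = 1;
    loop {
        // keep proofs whose low sha_state bytes match the chosen signature bytes
        let selected: Vec<StorageProof> = proofs
            .iter()
            .filter(|p| p.sha_state[..num_bytes] == sig[offset..offset + num_bytes])
            .cloned()
            .collect();
        if selected.len() <= max {
            return selected; // a workable set was found
        }
        num_bytes += 1; // too many proofs: require one more matching byte
    }
}
```
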
## Archiver behavior

1. The archiver then generates another set of offsets for which it submits a fake
   proof with an incorrect sha state. It can be proven to be fake by providing the
   seed for the hash result.
   * A fake proof should consist of an archiver hash of a signature of a PoH
     value. That way when the archiver reveals the fake proof, it can be
     verified on chain.
2. The archiver monitors the ledger; if it sees a fake proof integrated, it
   creates a challenge transaction and submits it to the current leader. The
   transaction proves the validator incorrectly validated a fake storage proof.
   The archiver is rewarded and the validator's staking balance is slashed or
   frozen.

## Storage proof contract logic

Each archiver and validator will have their own storage account. The validator's account would be separate from their gossip id, similar to their vote account. These should be implemented as two programs, one which handles the validator as the keysigner and one for the archiver. In that way when the programs reference other accounts, they can check the program id to ensure it is a validator or archiver account they are referencing.
### SubmitMiningProof

```text
SubmitMiningProof {
    slot: u64,
    sha_state: Hash,
    signature: Signature,
};
keys = [archiver_keypair]
```

Archivers create these after mining their stored ledger data for a certain hash value. The slot is the end slot of the segment of ledger they are storing, and the sha\_state is the result of the archiver using the hash function to sample their encrypted ledger segment. The signature is the signature that was created when they signed a PoH value for the current storage epoch. The list of proofs from the current storage epoch should be saved in the account state, and then transferred to a list of proofs for the previous epoch when the epoch passes. In a given storage epoch a given archiver should only submit proofs for one segment.

The program should have a list of slots which are valid storage mining slots. This list should be maintained by keeping track of slots which are rooted slots on which a significant portion of the network has voted with a high lockout value, maybe 32-votes old. Every SLOTS\_PER\_SEGMENT number of slots would be added to this set. The program should check that the slot is in this set. The set can be maintained by receiving an AdvertiseStorageRecentBlockhash and checking with its bank/Tower BFT state.
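
As a sketch, the set might be maintained roughly as follows; the `is_deeply_rooted` check and `SLOTS_PER_SEGMENT` handling are placeholders for the bank/Tower BFT query described above:

```text
// Illustrative sketch: maintaining the set of valid storage mining slots.
fn on_advertise(valid_slots: &mut HashSet<u64>, advertised: u64, bank: &Bank) {
    // accept only segment boundaries that the bank considers deeply rooted
    // (e.g. ~32 votes old, per the description above)
    if advertised % SLOTS_PER_SEGMENT == 0 && bank.is_deeply_rooted(advertised) {
        valid_slots.insert(advertised);
    }
}
```
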

The program should do a signature verify check on the signature, the public key from the transaction submitter, and the message of the previous storage epoch PoH value.

### ProofValidation

```text
ProofValidation {
    proof_mask: Vec<ProofStatus>,
}
keys = [validator_keypair, archiver_keypair(s) (unsigned)]
```

A validator will submit this transaction to indicate that a set of proofs for a given segment are valid, not valid, or skipped, where skipped means the validator did not look at the proof. The keypairs for the archivers that it looked at should be referenced in the keys so the program logic can go to those accounts and see that the proofs were generated in the previous epoch. The sampling of the storage proofs should be verified, ensuring that the correct proofs are skipped by the validator according to the sampling logic outlined in the validator behavior above.

The included archiver keys will indicate the storage samples which are being referenced; the length of the proof\_mask should be verified against the set of storage proofs in the referenced archiver account\(s\), and should match with the number of proofs submitted in the previous storage epoch in the state of said archiver account.

### ClaimStorageReward

```text
ClaimStorageReward {
}
keys = [validator_keypair or archiver_keypair, validator/archiver_keypairs (unsigned)]
```

Archivers and validators will use this transaction to get paid tokens from a program state where SubmitMiningProof, ProofValidation and ChallengeProofValidations are in a state where proofs have been submitted and validated and there are no ChallengeProofValidations referencing those proofs. For a validator, it should reference the archiver keypairs for which it has validated proofs in the relevant epoch. And for an archiver, it should reference validator keypairs for which it has validated and wants to be rewarded.

### ChallengeProofValidation

```text
ChallengeProofValidation {
    proof_index: u64,
    hash_seed_value: Vec<u8>,
}
keys = [archiver_keypair, validator_keypair]
```

This transaction is for catching lazy validators who are not doing the work to validate proofs. An archiver will submit this transaction when it sees a validator has approved a fake SubmitMiningProof transaction. Since the archiver is a light client not looking at the full chain, it will have to ask a validator or some set of validators for this information, perhaps via RPC call, to obtain all ProofValidations for a certain segment in the previous storage epoch. The program will look in the validator account state, see that a ProofValidation was submitted in the previous storage epoch, hash the hash\_seed\_value, and check that the hash matches the SubmitMiningProof transaction and that the validator marked it as valid. If so, then it will save the challenge to the list of challenges that it has in its state.
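
A sketch of that check; the account-state layout and error names are invented for illustration:

```text
// Illustrative sketch; account-state layout and error names are invented.
fn process_challenge(
    validator_state: &mut ValidatorStorageState,
    proof: &SubmitMiningProof,
    challenge: &ChallengeProofValidation,
) -> Result<()> {
    // the validator must have submitted a ProofValidation covering this
    // proof in the previous storage epoch
    let status = validator_state
        .previous_epoch_proof_mask
        .get(challenge.proof_index as usize)
        .ok_or(Error::NoValidation)?;
    // revealing the seed proves the sha_state was fabricated
    if hash(&challenge.hash_seed_value) == proof.sha_state && *status == ProofStatus::Valid {
        // the validator approved a fake proof: record the challenge
        validator_state.challenges.push(challenge.clone());
        return Ok(());
    }
    Err(Error::InvalidChallenge)
}
```
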

### AdvertiseStorageRecentBlockhash

```text
AdvertiseStorageRecentBlockhash {
    hash: Hash,
    slot: u64,
}
```

Validators and archivers will submit this to indicate that a new storage epoch has passed and that the storage proofs which are current proofs should now be for the previous epoch. Other transactions should check to see that the epoch that they are referencing is accurate according to current chain state.
@ -1,69 +0,0 @@

# Tick Verification

This design describes the criteria and validation of ticks in a slot. It also describes
error handling and slashing conditions encompassing how the system handles
transmissions that do not meet these requirements.

# Slot structure

Each slot must contain an expected `ticks_per_slot` number of ticks. The last
shred in a slot must contain only the entirety of the last tick, and nothing
else. The leader must also mark this shred containing the last tick with the
`LAST_SHRED_IN_SLOT` flag. Between ticks, there must be `hashes_per_tick`
number of hashes.
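
A minimal sketch of this structural check, assuming an `Entry` type that records the number of hashes since the previous entry and whether it is a tick (the names and error variants are illustrative):

```text
// Illustrative sketch of the structural check described above.
fn verify_slot(entries: &[Entry], ticks_per_slot: u64, hashes_per_tick: u64) -> Result<()> {
    let mut num_ticks = 0;
    let mut hashes_since_tick = 0;
    for entry in entries {
        hashes_since_tick += entry.num_hashes;
        if entry.is_tick() {
            // exactly `hashes_per_tick` hashes must separate consecutive ticks
            if hashes_since_tick != hashes_per_tick {
                return Err(Error::BadHashCount); // mark the slot dead
            }
            num_ticks += 1;
            hashes_since_tick = 0;
        }
    }
    if num_ticks != ticks_per_slot {
        return Err(Error::BadTickCount); // mark the slot dead
    }
    Ok(())
}
```
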
# Handling bad transmissions

Malicious transmissions `T` are handled in two ways:

1) If a leader can generate some erroneous transmission `T` and also some
alternate transmission `T'` for the same slot without violating any slashing
rules for duplicate transmissions (for instance if `T'` is a subset of `T`),
then the cluster must handle the possibility of both transmissions being live.

This means we cannot mark the erroneous transmission `T` as dead because
the cluster may have reached consensus on `T'`. These cases necessitate a
slashing proof to punish this bad behavior.

2) Otherwise, we can simply mark the slot as dead and not playable. A slashing
proof may or may not be necessary depending on feasibility.
# Blockstore receiving shreds

When blockstore receives a new shred `s`, there are the following cases
(see the sketch after this list):

1) `s` is marked as `LAST_SHRED_IN_SLOT`: check if there exists a shred
`s'` in blockstore for that slot where `s'.index > s.index`. If so, together `s`
and `s'` constitute a slashing proof.

2) Blockstore has already received a shred `s'` marked as `LAST_SHRED_IN_SLOT`
with index `i`. If `s.index > i`, then together `s` and `s'` constitute a
slashing proof. In this case, blockstore will also not insert `s`.

3) Duplicate shreds for the same index are ignored. Non-duplicate shreds for
the same index are a slashable condition. Details for this case are covered
in the `Leader Duplicate Block Slashing` section.
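
A sketch of the first two checks; the `SlotMeta` bookkeeping fields and helper names are invented for illustration:

```text
// Illustrative sketch; the `SlotMeta` bookkeeping fields are invented.
fn check_shred(meta: &SlotMeta, s: &Shred) -> Option<SlashingProof> {
    if s.last_in_slot() {
        // case 1: some received shred s' already has a higher index than
        // the shred claiming to end the slot
        if let Some(s_prime) = meta.shred_with_index_above(s.index) {
            return Some(SlashingProof::new(s, s_prime));
        }
    } else if let Some(i) = meta.last_shred_index {
        // case 2: a LAST_SHRED_IN_SLOT shred was already received at index i
        if s.index > i {
            // do not insert `s`; it and the earlier last shred are the proof
            return Some(SlashingProof::new(s, meta.shred_at(i)));
        }
    }
    None
}
```
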
# Replaying and validating ticks

1) Replay stage replays entries from blockstore, keeping track of the number of
ticks it has seen per slot, and verifying there are `hashes_per_tick` number of
hashes between ticks. After the tick from this last shred has been played,
replay stage then checks the total number of ticks.

Failure scenario 1: If ever there are two consecutive ticks between which the
number of hashes is `!= hashes_per_tick`, mark this slot as dead.

Failure scenario 2: If the number of ticks != `ticks_per_slot`, mark slot as
dead.

Failure scenario 3: If the number of ticks reaches `ticks_per_slot`, but we still
haven't seen the `LAST_SHRED_IN_SLOT`, mark this slot as dead.

2) When ReplayStage reaches a shred marked as the last shred, it checks if this
last shred is a tick.

Failure scenario: If the signed shred with the `LAST_SHRED_IN_SLOT` flag cannot
be deserialized into a tick (either fails to deserialize or deserializes into
an entry), mark this slot as dead.
@ -1,156 +0,0 @@

# Running an Archiver

This document describes how to set up an archiver in the testnet.

Please note some of the information and instructions described here may change in future releases.

## Overview

Archivers are specialized light clients. They download a part of the ledger \(a.k.a Segment\) and store it. They earn rewards for storing segments.

The testnet features a validator running at devnet.solana.com, which serves as the entrypoint to the cluster for your archiver node.

Additionally there is a blockexplorer available at [http://devnet.solana.com/](http://devnet.solana.com/).

The testnet is configured to reset the ledger daily, or sooner should the hourly automated cluster sanity test fail.

## Machine Requirements

Archivers don't need specialized hardware. Anything with more than 128GB of disk space will be able to participate in the cluster as an archiver node.

Currently the disk space requirements are very low but we expect them to change in the future.

Prebuilt binaries are available for Linux x86\_64 \(Ubuntu 18.04 recommended\), macOS, and Windows.

### Confirm The Testnet Is Reachable

Before starting an archiver node, sanity check that the cluster is accessible to your machine by running some simple commands. If any of the commands fail, please retry 5-10 minutes later to confirm the testnet is not just restarting itself before debugging further.

Fetch the current transaction count over JSON RPC:

```bash
curl -X POST -H 'Content-Type: application/json' -d '{"jsonrpc":"2.0","id":1, "method":"getTransactionCount"}' http://devnet.solana.com:8899
```

Inspect the blockexplorer at [http://devnet.solana.com/](http://devnet.solana.com/) for activity.

View the [metrics dashboard](https://metrics.solana.com:3000/d/testnet-beta/testnet-monitor-beta?var-testnet=testnet) for more detail on cluster activity.

## Archiver Setup

#### Obtaining The Software

#### Bootstrap with `solana-install`

The `solana-install` tool can be used to easily install and upgrade the cluster software.

#### Linux and macOS

```bash
curl -sSf https://raw.githubusercontent.com/solana-labs/solana/v0.18.0/install/solana-install-init.sh | sh -s
```

Alternatively build the `solana-install` program from source and run the following command to obtain the same result:

```bash
solana-install init
```

#### Windows

Download and install **solana-install-init** from [https://github.com/solana-labs/solana/releases/latest](https://github.com/solana-labs/solana/releases/latest)

After a successful install, `solana-install update` may be used to easily update the software to a newer version at any time.

#### Download Prebuilt Binaries

If you would rather not use `solana-install` to manage the install, you can manually download and install the binaries.

#### Linux

Download the binaries by navigating to [https://github.com/solana-labs/solana/releases/latest](https://github.com/solana-labs/solana/releases/latest), download **solana-release-x86\_64-unknown-linux-gnu.tar.bz2**, then extract the archive:

```bash
tar jxf solana-release-x86_64-unknown-linux-gnu.tar.bz2
cd solana-release/
export PATH=$PWD/bin:$PATH
```

#### macOS

Download the binaries by navigating to [https://github.com/solana-labs/solana/releases/latest](https://github.com/solana-labs/solana/releases/latest), download **solana-release-x86\_64-apple-darwin.tar.bz2**, then extract the archive:

```bash
tar jxf solana-release-x86_64-apple-darwin.tar.bz2
cd solana-release/
export PATH=$PWD/bin:$PATH
```

#### Windows

Download the binaries by navigating to [https://github.com/solana-labs/solana/releases/latest](https://github.com/solana-labs/solana/releases/latest), download **solana-release-x86\_64-pc-windows-msvc.tar.bz2**, then extract it into a folder. It is a good idea to add this extracted folder to your Windows PATH.

## Starting The Archiver

Try running the following command to join the gossip network and view all the other nodes in the cluster:

```bash
solana-gossip spy --entrypoint devnet.solana.com:8001
# Press ^C to exit
```

Now configure the keypairs for your archiver by running the following. On Windows, navigate to the solana install location and open a cmd prompt first.

```bash
solana-keygen new -o archiver-keypair.json
solana-keygen new -o storage-keypair.json
```

Use solana-keygen to show the public keys for each of the keypairs; they will be needed in the next step:

* Windows

```bash
# The archiver's identity
solana-keygen pubkey archiver-keypair.json
solana-keygen pubkey storage-keypair.json
```

* Linux and macOS

```bash
export ARCHIVER_IDENTITY=$(solana-keygen pubkey archiver-keypair.json)
export STORAGE_IDENTITY=$(solana-keygen pubkey storage-keypair.json)
```

Then set up the storage accounts for your archiver by running:

```bash
solana --keypair archiver-keypair.json airdrop .0001
solana --keypair archiver-keypair.json create-archiver-storage-account $ARCHIVER_IDENTITY $STORAGE_IDENTITY
```

Note: Every time the testnet restarts, run the steps to set up the archiver accounts again.

To start the archiver:

```bash
solana-archiver --entrypoint devnet.solana.com:8001 --identity-keypair archiver-keypair.json --storage-keypair storage-keypair.json --ledger archiver-ledger
```

## Verify Archiver Setup

From another console, confirm the IP address and **identity pubkey** of your archiver are visible in the gossip network by running:

```bash
solana-gossip spy --entrypoint devnet.solana.com:8001
```

Provide the **storage account pubkey** to the `solana storage-account` command to view the recent mining activity from your archiver:

```bash
solana --keypair storage-keypair.json storage-account $STORAGE_IDENTITY
```
@ -1,32 +0,0 @@

# Running a Validator

This document describes how to participate in the Solana testnet as a validator
node.

Please note some of the information and instructions described here may change
in future releases, and documentation will be updated for mainnet participation.

## Overview

Solana currently maintains several testnets, each featuring a validator that can
serve as the entrypoint to the cluster for your validator.

Current testnet entrypoints:

* Developer testnet, devnet.solana.com

Solana may launch special testnets for validator participation; we will provide
you with a specific entrypoint URL to use.

Prior to mainnet, the testnets may be running different versions of Solana
software, which may feature breaking changes. For information on choosing a
testnet and finding software version info, jump to [Choosing a Testnet](validator-testnet.md).

The testnets are configured to reset the ledger daily, or sooner, should the
hourly automated cluster sanity test fail.

There is a network explorer that shows the status of Solana testnets available
at [http://explorer.solana.com/](https://explorer.solana.com/).

Also, we'd love it if you choose to register your validator node with us at
[https://forms.gle/LfFscZqJELbuUP139](https://forms.gle/LfFscZqJELbuUP139).
@ -1,50 +0,0 @@

# Validator Requirements

## Hardware

* CPU Recommendations
  * We recommend a CPU with as many cores as possible. AMD Threadripper or Intel Server \(Xeon\) CPUs are fine.
  * We recommend AMD Threadripper as you get a larger number of cores for parallelization compared to Intel.
  * Threadripper also has a cost-per-core advantage and a greater number of PCIe lanes compared to the equivalent Intel part. PoH \(Proof of History\) is based on sha256, and Threadripper also supports sha256 hardware instructions.
* SSD size and I/O style \(SATA vs NVMe/M.2\) for a validator
  * Minimum example - Samsung 860 Evo 2TB
  * Mid-range example - Samsung 860 Evo 4TB
  * High-end example - Samsung 860 Evo 4TB
* GPUs
  * While a CPU-only node may be able to keep up with the initial idling network, once transaction throughput increases, GPUs will be necessary
  * What kind of GPU?
    * We recommend Nvidia 2080Ti or 1080Ti series consumer GPUs or Tesla series server GPUs.
    * We do not currently support OpenCL and therefore do not support AMD GPUs. We have a bounty out for someone to port us to OpenCL. Interested? [Check out our GitHub.](https://github.com/solana-labs/solana)
* Power Consumption
  * Approximate power consumption for a validator node running an AMD Threadripper 2950X and 2x 2080Ti GPUs is 800-1000W.

### Preconfigured Setups

Here are our recommendations for low, medium, and high end machine specifications:

| | Low end | Medium end | High end | Notes |
| :--- | :--- | :--- | :--- | :--- |
| CPU | AMD Threadripper 1900x | AMD Threadripper 2920x | AMD Threadripper 2950x | Consider a 10Gb-capable motherboard with as many PCIe lanes and m.2 slots as possible. |
| RAM | 16GB | 32GB | 64GB | |
| OS Drive | Samsung 860 Evo 2TB | Samsung 860 Evo 4TB | Samsung 860 Evo 4TB | Or equivalent SSD |
| Accounts Drive\(s\) | None | Samsung 970 Pro 1TB | 2x Samsung 970 Pro 1TB | |
| GPU | 4x Nvidia 1070 or 2x Nvidia 1080 Ti or 2x Nvidia 2070 | 2x Nvidia 2080 Ti | 4x Nvidia 2080 Ti | Any number of CUDA-capable GPUs is supported on Linux platforms. |

## Software

* We build and run on Ubuntu 18.04. Some users have had trouble when running on Ubuntu 16.04.
* See [Validator Software](validator-software.md) for the current Solana software release.

Ensure that the machine used is not behind a residential NAT to avoid
NAT traversal issues. A cloud-hosted machine works best. **Ensure that IP ports 8000 through 10000 are not blocked for Internet inbound and outbound traffic.**
For more information on port forwarding with regards to residential networks,
see [this document](http://www.mcs.sdsmt.edu/lpyeatt/courses/314/PortForwardingSetup.pdf).

Prebuilt binaries are available for Linux x86\_64 \(Ubuntu 18.04 recommended\).
MacOS or WSL users may build from source.

## GPU Requirements

CUDA is required to make use of the GPU on your system. The provided Solana
release binaries are built on Ubuntu 18.04 with [CUDA Toolkit 10.1 update 1](https://developer.nvidia.com/cuda-toolkit-archive). If your machine is using
a different CUDA version, then you will need to rebuild from source.
@ -1,79 +0,0 @@

# Installing the Validator Software

Install the Solana release
[v0.23.1](https://github.com/solana-labs/solana/releases/tag/v0.23.1) on your
machine by running:

```bash
curl -sSf https://raw.githubusercontent.com/solana-labs/solana/v0.23.1/install/solana-install-init.sh | sh -s - 0.23.1
```

If you are connecting to a different testnet, you can replace `0.23.1` with the
release tag matching the software version of your desired testnet, or replace it
with the named channel `stable`, `beta`, or `edge`.

The following output indicates a successful update:

```text
looking for latest release
downloading v0.23.1 installer
Configuration: /home/solana/.config/solana/install/config.yml
Active release directory: /home/solana/.local/share/solana/install/active_release
* Release version: 0.23.1
* Release URL: https://github.com/solana-labs/solana/releases/download/v0.23.1/solana-release-x86_64-unknown-linux-gnu.tar.bz2
Update successful
```

After a successful install, `solana-install update` may be used to easily update
the cluster software to a newer version at any time.

## Download Prebuilt Binaries

If you would rather not use `solana-install` to manage the install, you can
manually download and install the binaries.

### Linux

Download the binaries by navigating to
[https://github.com/solana-labs/solana/releases/latest](https://github.com/solana-labs/solana/releases/latest),
download **solana-release-x86\_64-unknown-linux-gnu.tar.bz2**, then extract the
archive:

```bash
tar jxf solana-release-x86_64-unknown-linux-gnu.tar.bz2
cd solana-release/
export PATH=$PWD/bin:$PATH
```

### macOS

Download the binaries by navigating to
[https://github.com/solana-labs/solana/releases/latest](https://github.com/solana-labs/solana/releases/latest),
download **solana-release-x86\_64-apple-darwin.tar.bz2**, then extract the
archive:

```bash
tar jxf solana-release-x86_64-apple-darwin.tar.bz2
cd solana-release/
export PATH=$PWD/bin:$PATH
```

## Build From Source

If you are unable to use the prebuilt binaries or prefer to build it yourself
from source, navigate to
[https://github.com/solana-labs/solana/releases/latest](https://github.com/solana-labs/solana/releases/latest),
and download the **Source Code** archive. Extract the code and build the
binaries with:

```bash
./scripts/cargo-install-all.sh .
export PATH=$PWD/bin:$PATH
```

You can then run the following command to obtain the same result as with
prebuilt binaries:

```bash
solana-install init
```
@ -1,39 +0,0 @@

# Choosing a Testnet

Solana maintains several testnets, each featuring a Solana-owned validator
that serves as an entrypoint to the cluster.

Current testnet entrypoints:

* Stable: devnet.solana.com

Application developers should target the Stable testnet. Key differences
between the Stable testnet and what will be mainnet:

* Stable testnet tokens are not real
* Stable testnet includes a token faucet for application testing
* Stable testnet may be subject to ledger resets
* Stable testnet typically runs a newer software version than mainnet
* Stable testnet may be maintained by different validators than mainnet

The Beta testnet is used to showcase and stabilize new features before they
are tagged for release. Application developers are free to target the Beta
testnet, but should expect instability and periodic ledger resets. Regarding
stability, all that can be said is that CI automation was successful.

### Get Testnet Version

You can submit a JSON-RPC request to see the specific software version of the
cluster. Use this to specify [the software version to install](validator-software.md).

```bash
curl -X POST -H 'Content-Type: application/json' -d '{"jsonrpc":"2.0","id":1, "method":"getVersion"}' devnet.solana.com:8899
```

Example result:
`{"jsonrpc":"2.0","result":{"solana-core":"0.21.0"},"id":1}`

## Using a Different Testnet

This guide is written in the context of devnet.solana.com, our most stable
cluster. To participate in another testnet, modify the commands in the following
pages, replacing `devnet.solana.com` with your desired testnet.
@ -1,19 +0,0 @@

# Troubleshooting

There is a **\#validator-support** Discord channel available to reach other
testnet participants, [https://discord.gg/pquxPsq](https://discord.gg/pquxPsq).

## Useful Links & Discussion

* [Network Explorer](http://explorer.solana.com/)
* [Testnet Metrics Dashboard](https://metrics.solana.com:3000/d/testnet-edge/testnet-monitor-edge?refresh=60s&orgId=2)
* Validator chat channels
  * [\#validator-support](https://discord.gg/rZsenD) General support channel for any Validator related queries.
  * [\#tourdesol](https://discord.gg/BdujK2) Discussion and support channel for Tour de SOL participants ([What is Tour de SOL?](https://solana.com/tds/)).
  * [\#tourdesol-announcements](https://discord.gg/Q5TxEC) The single source of truth for critical information relating to Tour de SOL.
  * [\#tourdesol-stage0](https://discord.gg/Xf8tES) Discussion for events within Tour de SOL Stage 0. Stage 0 includes all the dry-runs.
* [Core software repo](https://github.com/solana-labs/solana)
* [Tour de SOL Docs](https://docs.solana.com/tour-de-sol)
* [TdS repo](https://github.com/solana-labs/tour-de-sol)
* [TdS metrics dashboard](https://metrics.solana.com:3000/d/testnet-edge/testnet-monitor-edge?refresh=1m&from=now-15m&to=now&var-testnet=tds&orgId=2&var-datasource=TdS%20Metrics%20%28read-only%29)

Can't find what you're looking for? Send an email to ryan@solana.com or reach out to @rshea\#2622 on Discord.
@ -1,3 +0,0 @@

# TPU

@ -1,4 +0,0 @@

# TVU
@ -1,600 +0,0 @@

"use strict";

// Fix back button cache problem
window.onunload = function () { };

// Global variable, shared between modules
function playpen_text(playpen) {
    let code_block = playpen.querySelector("code");

    if (window.ace && code_block.classList.contains("editable")) {
        let editor = window.ace.edit(code_block);
        return editor.getValue();
    } else {
        return code_block.textContent;
    }
}

(function codeSnippets() {
    // Hide Rust code lines prepended with a specific character
    var hiding_character = "#";

    function fetch_with_timeout(url, options, timeout = 6000) {
        return Promise.race([
            fetch(url, options),
            new Promise((_, reject) => setTimeout(() => reject(new Error('timeout')), timeout))
        ]);
    }

    var playpens = Array.from(document.querySelectorAll(".playpen"));
    if (playpens.length > 0) {
        fetch_with_timeout("https://play.rust-lang.org/meta/crates", {
            headers: {
                'Content-Type': "application/json",
            },
            method: 'POST',
            mode: 'cors',
        })
        .then(response => response.json())
        .then(response => {
            // get list of crates available in the rust playground
            let playground_crates = response.crates.map(item => item["id"]);
            playpens.forEach(block => handle_crate_list_update(block, playground_crates));
        });
    }

    function handle_crate_list_update(playpen_block, playground_crates) {
        // update the play buttons after receiving the response
        update_play_button(playpen_block, playground_crates);

        // and install on change listener to dynamically update ACE editors
        if (window.ace) {
            let code_block = playpen_block.querySelector("code");
            if (code_block.classList.contains("editable")) {
                let editor = window.ace.edit(code_block);
                editor.addEventListener("change", function (e) {
                    update_play_button(playpen_block, playground_crates);
                });
            }
        }
    }

    // updates the visibility of play button based on `no_run` class and
    // used crates vs ones available on http://play.rust-lang.org
    function update_play_button(pre_block, playground_crates) {
        var play_button = pre_block.querySelector(".play-button");

        // skip if code is `no_run`
        if (pre_block.querySelector('code').classList.contains("no_run")) {
            play_button.classList.add("hidden");
            return;
        }

        // get list of `extern crate`'s from snippet
        var txt = playpen_text(pre_block);
        var re = /extern\s+crate\s+([a-zA-Z_0-9]+)\s*;/g;
        var snippet_crates = [];
        var item;
        while (item = re.exec(txt)) {
            snippet_crates.push(item[1]);
        }

        // check if all used crates are available on play.rust-lang.org
        var all_available = snippet_crates.every(function (elem) {
            return playground_crates.indexOf(elem) > -1;
        });

        if (all_available) {
            play_button.classList.remove("hidden");
        } else {
            play_button.classList.add("hidden");
        }
    }

    function run_rust_code(code_block) {
        var result_block = code_block.querySelector(".result");
        if (!result_block) {
            result_block = document.createElement('code');
            result_block.className = 'result hljs language-bash';

            code_block.append(result_block);
        }

        let text = playpen_text(code_block);

        var params = {
            version: "stable",
            optimize: "0",
            code: text
        };

        if (text.indexOf("#![feature") !== -1) {
            params.version = "nightly";
        }

        result_block.innerText = "Running...";

        fetch_with_timeout("https://play.rust-lang.org/evaluate.json", {
            headers: {
                'Content-Type': "application/json",
            },
            method: 'POST',
            mode: 'cors',
            body: JSON.stringify(params)
        })
        .then(response => response.json())
        .then(response => result_block.innerText = response.result)
        .catch(error => result_block.innerText = "Playground Communication: " + error.message);
    }

    // Syntax highlighting Configuration
    hljs.configure({
        tabReplace: '    ', // 4 spaces
        languages: [], // Languages used for auto-detection
    });

    if (window.ace) {
        // language-rust class needs to be removed for editable
        // blocks or highlightjs will capture events
        Array
            .from(document.querySelectorAll('code.editable'))
            .forEach(function (block) { block.classList.remove('language-rust'); });

        Array
            .from(document.querySelectorAll('code:not(.editable)'))
            .forEach(function (block) { hljs.highlightBlock(block); });
    } else {
        Array
            .from(document.querySelectorAll('code'))
            .forEach(function (block) { hljs.highlightBlock(block); });
    }

    // Adding the hljs class gives code blocks the color css
    // even if highlighting doesn't apply
    Array
        .from(document.querySelectorAll('code'))
        .forEach(function (block) { block.classList.add('hljs'); });

    Array.from(document.querySelectorAll("code.language-rust")).forEach(function (block) {

        var code_block = block;
        var pre_block = block.parentNode;
        // hide lines
        var lines = code_block.innerHTML.split("\n");
        var first_non_hidden_line = false;
        var lines_hidden = false;
        var trimmed_line = "";

        for (var n = 0; n < lines.length; n++) {
            trimmed_line = lines[n].trim();
            if (trimmed_line[0] == hiding_character && trimmed_line[1] != hiding_character) {
                if (first_non_hidden_line) {
                    lines[n] = "<span class=\"hidden\">" + "\n" + lines[n].replace(/(\s*)# ?/, "$1") + "</span>";
                }
                else {
                    lines[n] = "<span class=\"hidden\">" + lines[n].replace(/(\s*)# ?/, "$1") + "\n" + "</span>";
                }
                lines_hidden = true;
            }
            else if (first_non_hidden_line) {
                lines[n] = "\n" + lines[n];
            }
            else {
                first_non_hidden_line = true;
            }
            if (trimmed_line[0] == hiding_character && trimmed_line[1] == hiding_character) {
                lines[n] = lines[n].replace("##", "#")
            }
        }
        code_block.innerHTML = lines.join("");

        // If no lines were hidden, return
        if (!lines_hidden) { return; }

        var buttons = document.createElement('div');
        buttons.className = 'buttons';
        buttons.innerHTML = "<button class=\"fa fa-expand\" title=\"Show hidden lines\" aria-label=\"Show hidden lines\"></button>";

        // add expand button
        pre_block.insertBefore(buttons, pre_block.firstChild);

        pre_block.querySelector('.buttons').addEventListener('click', function (e) {
            if (e.target.classList.contains('fa-expand')) {
                var lines = pre_block.querySelectorAll('span.hidden');

                e.target.classList.remove('fa-expand');
                e.target.classList.add('fa-compress');
                e.target.title = 'Hide lines';
                e.target.setAttribute('aria-label', e.target.title);

                Array.from(lines).forEach(function (line) {
                    line.classList.remove('hidden');
                    line.classList.add('unhidden');
                });
            } else if (e.target.classList.contains('fa-compress')) {
                var lines = pre_block.querySelectorAll('span.unhidden');

                e.target.classList.remove('fa-compress');
                e.target.classList.add('fa-expand');
                e.target.title = 'Show hidden lines';
                e.target.setAttribute('aria-label', e.target.title);

                Array.from(lines).forEach(function (line) {
                    line.classList.remove('unhidden');
                    line.classList.add('hidden');
                });
            }
        });
    });

    Array.from(document.querySelectorAll('pre code')).forEach(function (block) {
        var pre_block = block.parentNode;
        if (!pre_block.classList.contains('playpen')) {
            var buttons = pre_block.querySelector(".buttons");
            if (!buttons) {
                buttons = document.createElement('div');
                buttons.className = 'buttons';
                pre_block.insertBefore(buttons, pre_block.firstChild);
            }

            var clipButton = document.createElement('button');
            clipButton.className = 'fa fa-copy clip-button';
            clipButton.title = 'Copy to clipboard';
            clipButton.setAttribute('aria-label', clipButton.title);
            clipButton.innerHTML = '<i class=\"tooltiptext\"></i>';

            buttons.insertBefore(clipButton, buttons.firstChild);
        }
    });

    // Process playpen code blocks
    Array.from(document.querySelectorAll(".playpen")).forEach(function (pre_block) {
        // Add play button
        var buttons = pre_block.querySelector(".buttons");
        if (!buttons) {
            buttons = document.createElement('div');
            buttons.className = 'buttons';
            pre_block.insertBefore(buttons, pre_block.firstChild);
        }

        var runCodeButton = document.createElement('button');
        runCodeButton.className = 'fa fa-play play-button';
        runCodeButton.hidden = true;
        runCodeButton.title = 'Run this code';
        runCodeButton.setAttribute('aria-label', runCodeButton.title);

        var copyCodeClipboardButton = document.createElement('button');
        copyCodeClipboardButton.className = 'fa fa-copy clip-button';
        copyCodeClipboardButton.innerHTML = '<i class="tooltiptext"></i>';
        copyCodeClipboardButton.title = 'Copy to clipboard';
        copyCodeClipboardButton.setAttribute('aria-label', copyCodeClipboardButton.title);

        buttons.insertBefore(runCodeButton, buttons.firstChild);
        buttons.insertBefore(copyCodeClipboardButton, buttons.firstChild);

        runCodeButton.addEventListener('click', function (e) {
            run_rust_code(pre_block);
        });

        let code_block = pre_block.querySelector("code");
        if (window.ace && code_block.classList.contains("editable")) {
            var undoChangesButton = document.createElement('button');
            undoChangesButton.className = 'fa fa-history reset-button';
            undoChangesButton.title = 'Undo changes';
            undoChangesButton.setAttribute('aria-label', undoChangesButton.title);

            buttons.insertBefore(undoChangesButton, buttons.firstChild);

            undoChangesButton.addEventListener('click', function () {
                let editor = window.ace.edit(code_block);
                editor.setValue(editor.originalCode);
                editor.clearSelection();
            });
        }
    });
})();

(function themes() {
    var html = document.querySelector('html');
    var themeToggleButton = document.getElementById('theme-toggle');
    var themePopup = document.getElementById('theme-list');
    var themeColorMetaTag = document.querySelector('meta[name="theme-color"]');
    var stylesheets = {
        ayuHighlight: document.querySelector("[href$='ayu-highlight.css']"),
        tomorrowNight: document.querySelector("[href$='tomorrow-night.css']"),
        highlight: document.querySelector("[href$='highlight.css']"),
    };

    function showThemes() {
        themePopup.style.display = 'block';
        themeToggleButton.setAttribute('aria-expanded', true);
        themePopup.querySelector("button#" + document.body.className).focus();
    }

    function hideThemes() {
        themePopup.style.display = 'none';
        themeToggleButton.setAttribute('aria-expanded', false);
        themeToggleButton.focus();
    }

    function set_theme(theme) {
        let ace_theme;

        if (theme == 'coal' || theme == 'navy') {
            stylesheets.ayuHighlight.disabled = true;
            stylesheets.tomorrowNight.disabled = false;
            stylesheets.highlight.disabled = true;

            ace_theme = "ace/theme/tomorrow_night";
        } else if (theme == 'ayu') {
            stylesheets.ayuHighlight.disabled = false;
            stylesheets.tomorrowNight.disabled = true;
            stylesheets.highlight.disabled = true;

            ace_theme = "ace/theme/tomorrow_night";
        } else {
            stylesheets.ayuHighlight.disabled = true;
            stylesheets.tomorrowNight.disabled = true;
            stylesheets.highlight.disabled = false;

            ace_theme = "ace/theme/dawn";
        }

        setTimeout(function () {
            themeColorMetaTag.content = getComputedStyle(document.body).backgroundColor;
        }, 1);

        if (window.ace && window.editors) {
            window.editors.forEach(function (editor) {
                editor.setTheme(ace_theme);
            });
        }

        var previousTheme;
        try { previousTheme = localStorage.getItem('mdbook-theme'); } catch (e) { }
        if (previousTheme === null || previousTheme === undefined) { previousTheme = 'light'; }

        try { localStorage.setItem('mdbook-theme', theme); } catch (e) { }

        document.body.className = theme;
        html.classList.remove(previousTheme);
        html.classList.add(theme);
    }

    // Set theme
    var theme;
    try { theme = localStorage.getItem('mdbook-theme'); } catch(e) { }
    if (theme === null || theme === undefined) { theme = 'light'; }

    set_theme(theme);

    // themeToggleButton.addEventListener('click', function () {
    //     if (themePopup.style.display === 'block') {
    //         hideThemes();
    //     } else {
    //         showThemes();
    //     }
    // });

    themePopup.addEventListener('click', function (e) {
        var theme = e.target.id || e.target.parentElement.id;
        set_theme(theme);
    });

    themePopup.addEventListener('focusout', function(e) {
        // e.relatedTarget is null in Safari and Firefox on macOS (see workaround below)
        if (!!e.relatedTarget && !themeToggleButton.contains(e.relatedTarget) && !themePopup.contains(e.relatedTarget)) {
            hideThemes();
        }
    });

    // Should not be needed, but it works around an issue on macOS & iOS: https://github.com/rust-lang-nursery/mdBook/issues/628
    document.addEventListener('click', function(e) {
        if (themePopup.style.display === 'block' && !themeToggleButton.contains(e.target) && !themePopup.contains(e.target)) {
            hideThemes();
        }
    });

    document.addEventListener('keydown', function (e) {
        if (e.altKey || e.ctrlKey || e.metaKey || e.shiftKey) { return; }
        if (!themePopup.contains(e.target)) { return; }

        switch (e.key) {
            case 'Escape':
                e.preventDefault();
                hideThemes();
                break;
            case 'ArrowUp':
                e.preventDefault();
                var li = document.activeElement.parentElement;
                if (li && li.previousElementSibling) {
                    li.previousElementSibling.querySelector('button').focus();
                }
                break;
            case 'ArrowDown':
                e.preventDefault();
                var li = document.activeElement.parentElement;
                if (li && li.nextElementSibling) {
                    li.nextElementSibling.querySelector('button').focus();
                }
                break;
            case 'Home':
                e.preventDefault();
                themePopup.querySelector('li:first-child button').focus();
                break;
            case 'End':
                e.preventDefault();
                themePopup.querySelector('li:last-child button').focus();
                break;
        }
    });
})();

(function sidebar() {
    var html = document.querySelector("html");
    var sidebar = document.getElementById("sidebar");
    var sidebarLinks = document.querySelectorAll('#sidebar a');
    var sidebarToggleButton = document.getElementById("sidebar-toggle");
    var firstContact = null;

    function showSidebar() {
        html.classList.remove('sidebar-hidden');
        html.classList.add('sidebar-visible');
        Array.from(sidebarLinks).forEach(function (link) {
            link.setAttribute('tabIndex', 0);
        });
        sidebarToggleButton.setAttribute('aria-expanded', true);
        sidebar.setAttribute('aria-hidden', false);
        try { localStorage.setItem('mdbook-sidebar', 'visible'); } catch (e) { }
    }

    function hideSidebar() {
        html.classList.remove('sidebar-visible');
        html.classList.add('sidebar-hidden');
        Array.from(sidebarLinks).forEach(function (link) {
            link.setAttribute('tabIndex', -1);
        });
        sidebarToggleButton.setAttribute('aria-expanded', false);
        sidebar.setAttribute('aria-hidden', true);
        try { localStorage.setItem('mdbook-sidebar', 'hidden'); } catch (e) { }
    }

    // Toggle sidebar
    sidebarToggleButton.addEventListener('click', function sidebarToggle() {
        if (html.classList.contains("sidebar-hidden")) {
            showSidebar();
        } else if (html.classList.contains("sidebar-visible")) {
            hideSidebar();
        } else {
            if (getComputedStyle(sidebar)['transform'] === 'none') {
                hideSidebar();
            } else {
                showSidebar();
            }
        }
    });

    document.addEventListener('touchstart', function (e) {
        firstContact = {
            x: e.touches[0].clientX,
            time: Date.now()
        };
    }, { passive: true });

    document.addEventListener('touchmove', function (e) {
        if (!firstContact)
            return;

        var curX = e.touches[0].clientX;
        var xDiff = curX - firstContact.x,
            tDiff = Date.now() - firstContact.time;

        if (tDiff < 250 && Math.abs(xDiff) >= 150) {
            if (xDiff >= 0 && firstContact.x < Math.min(document.body.clientWidth * 0.25, 300))
                showSidebar();
            else if (xDiff < 0 && curX < 300)
                hideSidebar();

            firstContact = null;
        }
    }, { passive: true });

    // Scroll sidebar to current active section
    var activeSection = sidebar.querySelector(".active");
    if (activeSection) {
        sidebar.scrollTop = activeSection.offsetTop;
    }
})();

(function chapterNavigation() {
    document.addEventListener('keydown', function (e) {
        if (e.altKey || e.ctrlKey || e.metaKey || e.shiftKey) { return; }
        if (window.search && window.search.hasFocus()) { return; }

        switch (e.key) {
            case 'ArrowRight':
                e.preventDefault();
                var nextButton = document.querySelector('.nav-chapters.next');
                if (nextButton) {
                    window.location.href = nextButton.href;
                }
                break;
            case 'ArrowLeft':
                e.preventDefault();
                var previousButton = document.querySelector('.nav-chapters.previous');
                if (previousButton) {
                    window.location.href = previousButton.href;
                }
                break;
        }
    });
})();

(function clipboard() {
    var clipButtons = document.querySelectorAll('.clip-button');

    function hideTooltip(elem) {
        elem.firstChild.innerText = "";
        elem.className = 'fa fa-copy clip-button';
    }

    function showTooltip(elem, msg) {
        elem.firstChild.innerText = msg;
        elem.className = 'fa fa-copy tooltipped';
    }

    var clipboardSnippets = new Clipboard('.clip-button', {
        text: function (trigger) {
            hideTooltip(trigger);
            let playpen = trigger.closest("pre");
            return playpen_text(playpen);
        }
    });

    Array.from(clipButtons).forEach(function (clipButton) {
        clipButton.addEventListener('mouseout', function (e) {
            hideTooltip(e.currentTarget);
        });
    });

    clipboardSnippets.on('success', function (e) {
        e.clearSelection();
        showTooltip(e.trigger, "Copied!");
    });

    clipboardSnippets.on('error', function (e) {
        showTooltip(e.trigger, "Clipboard error!");
    });
})();

(function scrollToTop () {
    var menuTitle = document.querySelector('.menu-title');

    menuTitle.addEventListener('click', function () {
        document.scrollingElement.scrollTo({ top: 0, behavior: 'smooth' });
    });
})();

(function autoHideMenu() {
    var menu = document.getElementById('menu-bar');

    var previousScrollTop = document.scrollingElement.scrollTop;

    document.addEventListener('scroll', function () {
        if (menu.classList.contains('folded') && document.scrollingElement.scrollTop < previousScrollTop) {
            menu.classList.remove('folded');
        } else if (!menu.classList.contains('folded') && document.scrollingElement.scrollTop > previousScrollTop) {
            menu.classList.add('folded');
        }

        if (!menu.classList.contains('bordered') && document.scrollingElement.scrollTop > 0) {
            menu.classList.add('bordered');
        }

        if (menu.classList.contains('bordered') && document.scrollingElement.scrollTop === 0) {
            menu.classList.remove('bordered');
        }

        previousScrollTop = document.scrollingElement.scrollTop;
    }, { passive: true });
})();
@@ -1,524 +0,0 @@
/* CSS for UI elements (a.k.a. chrome) */

@import 'variables.css';

::-webkit-scrollbar {
    background: var(--bg);
}
::-webkit-scrollbar-thumb {
    background: var(--scrollbar);
}

#searchresults a,
a:visited,
a > .hljs {
    color: #000;
}

#searchresults a:hover {
    text-decoration: underline;
}

.content a {
    color: #000;
}

/* Menu Bar */

#menu-bar {
    position: -webkit-sticky;
    position: sticky;
    top: 0;
    padding: 0 15px;
    padding: 0;
    z-index: 101;
    width: 100%;
    /* margin: auto calc(0px - var(--page-padding)); */
}
#menu-bar > #menu-bar-sticky-container {
    display: flex;
    flex-wrap: wrap;
    background-color: var(--bg);
    border-bottom-color: var(--bg);
    border-bottom-width: 1px;
    border-bottom-style: solid;
}
.js #menu-bar > #menu-bar-sticky-container {
    transition: transform 0.3s;
}
#menu-bar.bordered > #menu-bar-sticky-container {
    border-bottom-color: var(--table-border-color);
}
#menu-bar i, #menu-bar .icon-button {
    position: relative;
    padding: 0 8px;
    z-index: 10;
    line-height: 50px;
    cursor: pointer;
    transition: color 0.5s;
}
@media only screen and (max-width: 420px) {
    #menu-bar i, #menu-bar .icon-button {
        padding: 0 5px;
    }
}

.icon-button {
    border: none;
    background: none;
    padding: 0;
    color: inherit;
}
.icon-button i {
    margin: 0;
}

#print-button {
    margin: 0 15px;
}

html:not(.sidebar-visible) #menu-bar:not(:hover).folded > #menu-bar-sticky-container {
    transform: translateY(-60px);
}

.left-buttons {
    display: flex;
    margin: 0 5px;
}
.no-js .left-buttons {
    display: none;
}

.menu-title {
    display: inline-block;
    font-weight: 200;
    font-size: 20px;
    line-height: 50px;
    text-align: center;
    margin: 0;
    flex: 1;
    white-space: nowrap;
    overflow: hidden;
    text-overflow: ellipsis;
}
.js .menu-title {
    cursor: pointer;
}

.menu-bar,
.menu-bar:visited,
.nav-chapters,
.nav-chapters:visited,
.mobile-nav-chapters,
.mobile-nav-chapters:visited,
.menu-bar .icon-button,
.menu-bar a i {
    color: var(--icons);
}

.menu-bar i:hover,
.menu-bar .icon-button:hover,
.nav-chapters:hover,
.mobile-nav-chapters i:hover {
    color: var(--icons-hover);
}

/* Nav Icons */

.nav-chapters {
    font-size: 2.5em;
    text-align: center;
    text-decoration: none;

    position: fixed;
    top: 50px; /* Height of menu-bar */
    bottom: 0;
    margin: 0;
    max-width: 150px;
    min-width: 90px;

    display: flex;
    justify-content: center;
    align-content: center;
    flex-direction: column;

    transition: color 0.5s;
}

.nav-chapters:hover { text-decoration: none; }

.nav-wrapper {
    margin-top: 50px;
    display: none;
}

.mobile-nav-chapters {
    font-size: 2.5em;
    text-align: center;
    text-decoration: none;
    width: 90px;
    border-radius: 5px;
    background-color: var(--sidebar-bg);
}

.previous {
    float: left;
}

.next {
    float: right;
    right: 15px;
}

@media only screen and (max-width: 1080px) {
    .nav-wide-wrapper { display: none; }
    .nav-wrapper { display: block; }
}

@media only screen and (max-width: 1380px) {
    .sidebar-visible .nav-wide-wrapper { display: none; }
    .sidebar-visible .nav-wrapper { display: block; }
}

/* Inline code */

:not(pre) > .hljs {
    display: inline-block;
    vertical-align: middle;
    padding: 0.1em 0.3em;
    border-radius: 3px;
    color: var(--inline-code-color);
}

a:hover > .hljs {
    text-decoration: underline;
}

pre {
    position: relative;
}
pre > .buttons {
    position: absolute;
    z-index: 100;
    right: 5px;
    top: 5px;

    color: var(--sidebar-fg);
    cursor: pointer;
}
pre > .buttons :hover {
    color: var(--sidebar-active);
}
pre > .buttons i {
    margin-left: 8px;
}
pre > .buttons button {
    color: inherit;
    background: transparent;
    border: none;
    cursor: inherit;
}
pre > .result {
    margin-top: 10px;
}

/* Search */

#searchresults a {
    text-decoration: none;
}

mark {
    border-radius: 2px;
    padding: 0 3px 1px 3px;
    margin: 0 -3px -1px -3px;
    background-color: var(--search-mark-bg);
    transition: background-color 300ms linear;
    cursor: pointer;
}

mark.fade-out {
    background-color: rgba(0,0,0,0) !important;
    cursor: auto;
}

.searchbar-outer {
    margin-left: auto;
    margin-right: auto;
    max-width: var(--content-max-width);
}

#searchbar {
    width: 100%;
    margin: 5px auto 0px auto;
    padding: 10px 16px;
    transition: box-shadow 300ms ease-in-out;
    border: 1px solid var(--searchbar-border-color);
    border-radius: 3px;
    background-color: var(--searchbar-bg);
    color: var(--searchbar-fg);
}
#searchbar:focus,
#searchbar.active {
    box-shadow: 0 0 3px var(--searchbar-shadow-color);
}

.searchresults-header {
    font-weight: normal;
    font-size: 1em;
    padding: 18px 28px 0 28px;
    color: var(--searchresults-header-fg);
}

.searchresults-outer {
    margin-left: auto;
    margin-right: auto;
    max-width: var(--content-max-width);
    border-bottom: 1px dashed var(--searchresults-border-color);
}

ul#searchresults {
    list-style: none;
    padding-left: 28px;
}
ul#searchresults li {
    margin: 10px 0px;
    padding: 2px;
    border-radius: 2px;
}
ul#searchresults li.focus {
    background-color: var(--searchresults-li-bg);
}
ul#searchresults span.teaser {
    display: block;
    clear: both;
    margin: 5px 0 0 20px;
    font-size: 0.8em;
}
ul#searchresults span.teaser em {
    font-weight: bold;
    font-style: normal;
}

/* Sidebar */

.sidebar {
    position: fixed;
    left: 0;
    top: 0;
    bottom: 0;
    width: var(--sidebar-width);
    overflow-y: auto;
    font-size: 0.875em;
    box-sizing: border-box;
    -webkit-overflow-scrolling: touch;
    overscroll-behavior-y: contain;
    background-color: var(--sidebar-bg);
    color: var(--sidebar-fg);
}
.sidebar img {
    display: block;
    max-width: 70%;
    margin: 20px auto;
}
.js .sidebar {
    transition: transform 0.3s; /* Animation: slide away */
}
.sidebar code {
    line-height: 2em;
}
.sidebar-hidden .sidebar {
    transform: translateX(calc(0px - var(--sidebar-width)));
}
.sidebar::-webkit-scrollbar {
    background: var(--sidebar-bg);
}
.sidebar::-webkit-scrollbar-thumb {
    background: var(--scrollbar);
}

.sidebar-visible .page-wrapper {
    transform: translateX(var(--sidebar-width));
}
@media only screen and (min-width: 620px) {
    .sidebar-visible .page-wrapper {
        transform: none;
        margin-left: var(--sidebar-width);
    }
}

.chapter {
    list-style: none outside none;
    padding-left: 0;
    line-height: 2.2em;
    margin-top: 0;
}
.chapter li {
    color: var(--sidebar-non-existant);
}
.chapter li a {
    color: var(--sidebar-fg);
    display: block;
    padding: 0;
    text-decoration: none;
    padding-left: 25px;
    font-size: 13px;
    padding-top: 0.3em;
    padding-bottom: 0.3em;
    font-weight: normal;
}
.chapter li a strong {
    font-weight: normal;
}
.chapter li a:hover {
    color: var(--sidebar-active);
    background: #00A670;
}
.chapter li .active {
    /* Animate color change */
    color: var(--sidebar-active);
    background: #00A670;
}
.content a:hover {
    color: #000;
    background: none;
}

.spacer {
    width: 100%;
    height: 3px;
    margin: 5px 0px;
}
.chapter .spacer {
    background-color: var(--sidebar-spacer);
}

@media (-moz-touch-enabled: 1), (pointer: coarse) {
    /* .chapter li a { padding: 5px 0; } */
    .spacer { margin: 10px 0; }
}

.section {
    list-style: none outside none;
    padding-left: 20px;
    line-height: 1.9em;
}

/* Theme Menu Popup */

.theme-popup {
    position: absolute;
    left: 10px;
    top: 50px;
    z-index: 1000;
    border-radius: 4px;
    font-size: 0.7em;
    color: var(--fg);
    background: var(--theme-popup-bg);
    border: 1px solid var(--theme-popup-border);
    margin: 0;
    padding: 0;
    list-style: none;
    display: none;
}
.theme-popup .default {
    color: var(--icons);
}
.theme-popup .theme {
    width: 100%;
    border: 0;
    margin: 0;
    padding: 2px 10px;
    line-height: 25px;
    white-space: nowrap;
    text-align: left;
    cursor: pointer;
    color: inherit;
    background: inherit;
    font-size: inherit;
}
.theme-popup .theme:hover {
    background-color: var(--theme-hover);
}
.theme-popup .theme:hover:first-child,
.theme-popup .theme:hover:last-child {
    border-top-left-radius: inherit;
    border-top-right-radius: inherit;
}

.content p {
    line-height: 1.6;
    margin-top: 0;
    padding: 0 28px;
}

.content h1 {
    font-size: 25px;
    font-weight: 300;
    padding: 0 28px;
    padding-top: 0.5em;
    padding-bottom: 0.5em;
    margin-bottom: 21px;
    margin-top: 2em;
    border-top: 1px solid #e5e5e5;
    border-bottom: 1px solid #e5e5e5;
    background-color: #fff;
    font-family: Poppins, sans-serif;
}

.content h2 {
    font-family: Poppins, sans-serif;
    font-size: 20px;
    font-weight: 300;
    margin-top: 2em;
    margin-bottom: 0;
    padding: 0 28px;
    padding-top: 1.2em;
    padding-bottom: 1.2em;
}

.content h3, h4, h5 {
    font-size: 15px;
    margin-top: 2.5em;
    margin-bottom: 0.8em;
    padding: 0 28px;
}

.content code {
    background-color: rgba(0,0,0,0.05);
    padding: 3px;
    border-radius: 3px;
    font-family: Consolas, Menlo, Monaco, "Lucida Console", "Liberation Mono", "DejaVu Sans Mono", "Bitstream Vera Sans Mono", "Courier New", monospace, serif;
    font-size: 13px;
    line-height: 1.5;
    color: #333;
}

.language-ini.hljs,
.language-manpage.hljs,
.language-sh.hljs,
.language-bash.hljs {
    background-color: #262B26;
    color: #fff;
    margin: 0;
    padding-top: 2em;
    padding-bottom: 2em;
    padding: 2em 28px;
}

.content table {
    margin-bottom: 1em;
}
.content ul {
    padding: 0 28px;
    padding-left: 43px;
}
.content ul li {
    line-height: 1.6;
    margin-top: 0;
}
.content ul p {
    padding: 0;
    margin: 0;
}
.content pre {
    padding: 0 28px;
}
@@ -1,155 +0,0 @@
/* Base styles and content styles */

@import 'variables.css';

html {
    font-family: Lato, 'Helvetica Neue', 'Arial', sans-serif;
    color: var(--fg);
    background-color: var(--bg);
    text-size-adjust: none;
    -webkit-font-smoothing: antialiased;
    -moz-osx-font-smoothing: grayscale;
}

body {
    margin: 0;
    font-size: 1rem;
    overflow-x: hidden;
    font-family: Lato, 'Helvetica Neue', 'Arial', sans-serif;
    font-size: 16px;
    font-weight: 300;
    -webkit-font-smoothing: antialiased;
    -moz-osx-font-smoothing: grayscale;
}

code {
    font-family: Consolas, Menlo, Monaco, "Lucida Console", "Liberation Mono", "DejaVu Sans Mono", "Bitstream Vera Sans Mono", "Courier New", monospace, serif;
    font-size: 13px; /* please adjust the ace font size accordingly in editor.js */
    line-height: 1.5;
}

.left { float: left; }
.right { float: right; }
.hidden { display: none; }
.play-button.hidden { display: none; }

h1, h2, h3 { margin-top: 2.5em; }
h4, h5 { margin-top: 2em; }

.header + .header h3,
.header + .header h4,
.header + .header h5 {
    margin-top: 1em;
}

a.header:target h1:before,
a.header:target h2:before,
a.header:target h3:before,
a.header:target h4:before {
    display: inline-block;
    content: "»";
    margin-left: -30px;
    width: 30px;
}

.page {
    outline: 0;
    /* padding: 0 var(--page-padding); */
}
.page-wrapper {
    box-sizing: border-box;
}
.js .page-wrapper {
    transition: margin-left 0.3s ease, transform 0.3s ease; /* Animation: slide away */
}

.content {
    overflow-y: auto;

    padding-bottom: 50px;
}
.content main {
    margin-left: auto;
    margin-right: auto;
    max-width: var(--content-max-width);
}
.content a:hover { text-decoration: underline; }
.content img { max-width: 100%; }
.content .header:link,
.content .header:visited {
    color: var(--fg);
}
.content .header:link,
.content .header:visited:hover {
    text-decoration: none;
}

table {
    margin: 0 auto;
    border-collapse: collapse;
}
table td {
    padding: 3px 20px;
    border: 1px var(--table-border-color) solid;
}
table thead {
    background: var(--table-header-bg);
}
table thead td {
    font-weight: 700;
    border: none;
}
table thead tr {
    border: 1px var(--table-header-bg) solid;
}
/* Alternate background colors for rows */
table tbody tr:nth-child(2n) {
    background: var(--table-alternate-bg);
}

blockquote {
    margin: 20px 0;
    padding: 0 20px;
    color: var(--fg);
    background-color: var(--quote-bg);
    border-top: .1em solid var(--quote-border);
    border-bottom: .1em solid var(--quote-border);
}

:not(.footnote-definition) + .footnote-definition,
.footnote-definition + :not(.footnote-definition) {
    margin-top: 2em;
}
.footnote-definition {
    font-size: 0.9em;
    margin: 0.5em 0;
}
.footnote-definition p {
    display: inline;
}

.tooltiptext {
    position: absolute;
    visibility: hidden;
    color: #fff;
    background-color: #333;
    transform: translateX(-50%); /* Center by moving tooltip 50% of its width left */
    left: -8px; /* Half of the width of the icon */
    top: -35px;
    font-size: 0.8em;
    text-align: center;
    border-radius: 6px;
    padding: 5px 8px;
    margin: 5px;
    z-index: 1000;
}
.tooltipped .tooltiptext {
    visibility: visible;
}
*:focus,
*:active,
*:hover {
    outline: none;
}
@@ -1,54 +0,0 @@

#sidebar,
#menu-bar,
.nav-chapters,
.mobile-nav-chapters {
    display: none;
}

#page-wrapper.page-wrapper {
    transform: none;
    margin-left: 0px;
    overflow-y: initial;
}

#content {
    max-width: none;
    margin: 0;
    padding: 0;
}

.page {
    overflow-y: initial;
}

code {
    background-color: #666666;
    border-radius: 5px;

    /* Force background to be printed in Chrome */
    -webkit-print-color-adjust: exact;
}

pre > .buttons {
    z-index: 2;
}

a, a:visited, a:active, a:hover {
    color: #4183c4;
    text-decoration: none;
}

h1, h2, h3, h4, h5, h6 {
    page-break-inside: avoid;
    page-break-after: avoid;
}

pre, code {
    page-break-inside: avoid;
    white-space: pre-wrap;
}

.fa {
    display: none !important;
}
@@ -1,210 +0,0 @@

/* Globals */

:root {
    --sidebar-width: 300px;
    /* --page-padding: 15px; */
    --content-max-width: 100%;
}

/* Themes */

.ayu {
    --bg: hsl(210, 25%, 8%);
    --fg: #c5c5c5;

    --sidebar-bg: #14191f;
    --sidebar-fg: #c8c9db;
    --sidebar-non-existant: #5c6773;
    --sidebar-active: #ffb454;
    --sidebar-spacer: #2d334f;

    --scrollbar: var(--sidebar-fg);

    --icons: #737480;
    --icons-hover: #b7b9cc;

    --links: #0096cf;

    --inline-code-color: #ffb454;

    --theme-popup-bg: #14191f;
    --theme-popup-border: #5c6773;
    --theme-hover: #191f26;

    --quote-bg: hsl(226, 15%, 17%);
    --quote-border: hsl(226, 15%, 22%);

    --table-border-color: hsl(210, 25%, 13%);
    --table-header-bg: hsl(210, 25%, 28%);
    --table-alternate-bg: hsl(210, 25%, 11%);

    --searchbar-border-color: #848484;
    --searchbar-bg: #424242;
    --searchbar-fg: #fff;
    --searchbar-shadow-color: #d4c89f;
    --searchresults-header-fg: #666;
    --searchresults-border-color: #888;
    --searchresults-li-bg: #252932;
    --search-mark-bg: #e3b171;
}

.coal {
    --bg: hsl(200, 7%, 8%);
    --fg: #98a3ad;

    --sidebar-bg: #292c2f;
    --sidebar-fg: #fff;
    --sidebar-non-existant: #505254;
    --sidebar-active: #fff;
    --sidebar-spacer: #393939;

    --scrollbar: var(--sidebar-fg);

    --icons: #43484d;
    --icons-hover: #b3c0cc;

    --links: #2b79a2;

    --inline-code-color: #c5c8c6;

    --theme-popup-bg: #141617;
    --theme-popup-border: #43484d;
    --theme-hover: #1f2124;

    --quote-bg: hsl(234, 21%, 18%);
    --quote-border: hsl(234, 21%, 23%);

    --table-border-color: hsl(200, 7%, 13%);
    --table-header-bg: hsl(200, 7%, 28%);
    --table-alternate-bg: hsl(200, 7%, 11%);

    --searchbar-border-color: #aaa;
    --searchbar-bg: #b7b7b7;
    --searchbar-fg: #000;
    --searchbar-shadow-color: #aaa;
    --searchresults-header-fg: #666;
    --searchresults-border-color: #98a3ad;
    --searchresults-li-bg: #2b2b2f;
    --search-mark-bg: #355c7d;
}

.light {
    --bg: #f7f7f7;
    --fg: #333333;

    --sidebar-bg: #050505;
    --sidebar-fg: #fff;
    --sidebar-non-existant: #aaaaaa;
    --sidebar-active: #fff;
    --sidebar-spacer: #f4f4f4;

    --scrollbar: #cccccc;

    --icons: #cccccc;
    --icons-hover: #333333;

    --links: #000;

    --inline-code-color: #6e6b5e;

    --theme-popup-bg: #fafafa;
    --theme-popup-border: #cccccc;
    --theme-hover: #e6e6e6;

    --quote-bg: hsl(197, 37%, 96%);
    --quote-border: hsl(197, 37%, 91%);

    --table-border-color: hsl(0, 0%, 95%);
    --table-header-bg: hsl(0, 0%, 80%);
    --table-alternate-bg: hsl(0, 0%, 97%);

    --searchbar-border-color: #aaa;
    --searchbar-bg: #fafafa;
    --searchbar-fg: #000;
    --searchbar-shadow-color: #aaa;
    --searchresults-header-fg: #666;
    --searchresults-border-color: #888;
    --searchresults-li-bg: #e4f2fe;
    --search-mark-bg: #a2cff5;
}

.navy {
    --bg: hsl(226, 23%, 11%);
    --fg: #bcbdd0;

    --sidebar-bg: #282d3f;
    --sidebar-fg: #c8c9db;
    --sidebar-non-existant: #505274;
    --sidebar-active: #2b79a2;
    --sidebar-spacer: #2d334f;

    --scrollbar: var(--sidebar-fg);

    --icons: #737480;
    --icons-hover: #b7b9cc;

    --links: #2b79a2;

    --inline-code-color: #c5c8c6;

    --theme-popup-bg: #161923;
    --theme-popup-border: #737480;
    --theme-hover: #282e40;

    --quote-bg: hsl(226, 15%, 17%);
    --quote-border: hsl(226, 15%, 22%);

    --table-border-color: hsl(226, 23%, 16%);
    --table-header-bg: hsl(226, 23%, 31%);
    --table-alternate-bg: hsl(226, 23%, 14%);

    --searchbar-border-color: #aaa;
    --searchbar-bg: #aeaec6;
    --searchbar-fg: #000;
    --searchbar-shadow-color: #aaa;
    --searchresults-header-fg: #5f5f71;
    --searchresults-border-color: #5c5c68;
    --searchresults-li-bg: #242430;
    --search-mark-bg: #a2cff5;
}

.rust {
    --bg: hsl(60, 9%, 87%);
    --fg: #262625;

    --sidebar-bg: #3b2e2a;
    --sidebar-fg: #c8c9db;
    --sidebar-non-existant: #505254;
    --sidebar-active: #e69f67;
    --sidebar-spacer: #45373a;

    --scrollbar: var(--sidebar-fg);

    --icons: #737480;
    --icons-hover: #262625;

    --links: #2b79a2;

    --inline-code-color: #6e6b5e;

    --theme-popup-bg: #e1e1db;
    --theme-popup-border: #b38f6b;
    --theme-hover: #99908a;

    --quote-bg: hsl(60, 5%, 75%);
    --quote-border: hsl(60, 5%, 70%);

    --table-border-color: hsl(60, 9%, 82%);
    --table-header-bg: #b3a497;
    --table-alternate-bg: hsl(60, 9%, 84%);

    --searchbar-border-color: #aaa;
    --searchbar-bg: #fafafa;
    --searchbar-fg: #000;
    --searchbar-shadow-color: #aaa;
    --searchresults-header-fg: #666;
    --searchresults-border-color: #888;
    --searchresults-li-bg: #dec2a2;
    --search-mark-bg: #e69f67;
}
Binary file not shown (deleted image, 5.5 KiB).
@@ -1,69 +0,0 @@
/* Base16 Atelier Dune Light - Theme */
/* by Bram de Haan (http://atelierbram.github.io/syntax-highlighting/atelier-schemes/dune) */
/* Original Base16 color scheme by Chris Kempson (https://github.com/chriskempson/base16) */

/* Atelier-Dune Comment */
.hljs-comment,
.hljs-quote {
    color: #AAA;
}

/* Atelier-Dune Red */
.hljs-variable,
.hljs-template-variable,
.hljs-attribute,
.hljs-tag,
.hljs-name,
.hljs-regexp,
.hljs-link,
.hljs-selector-id,
.hljs-selector-class {
    color: #f92672;
}

/* Atelier-Dune Orange */
.hljs-number,
.hljs-meta,
.hljs-built_in,
.hljs-builtin-name,
.hljs-literal,
.hljs-type,
.hljs-params {
    color: #f6aa11;
}

/* Atelier-Dune Green */
.hljs-string,
.hljs-symbol,
.hljs-bullet {
    color: #60ac39;
}

/* Atelier-Dune Blue */
.hljs-title,
.hljs-section {
    color: #6684e1;
}

/* Atelier-Dune Purple */
.hljs-keyword,
.hljs-selector-tag {
    color: #b854d4;
}

.hljs {
    display: block;
    overflow-x: auto;
    background: #f1f1f1;
    color: #6e6b5e;
    padding: 0.5em;
}

.hljs-emphasis {
    font-style: italic;
}

.hljs-strong {
    font-weight: bold;
}
File diff suppressed because one or more lines are too long
@@ -1,231 +0,0 @@
<!DOCTYPE HTML>
<html lang="{{ language }}" class="sidebar-visible no-js">
    <head>
        <!-- Book generated using mdBook -->
        <meta charset="UTF-8">
        <title>{{ title }}</title>
        <meta content="text/html; charset=utf-8" http-equiv="Content-Type">
        <meta name="description" content="{{ description }}">
        <meta name="viewport" content="width=device-width, initial-scale=1">
        <meta name="theme-color" content="#ffffff" />

        <link rel="shortcut icon" href="{{ path_to_root }}{{ favicon }}">
        <link rel="stylesheet" href="{{ path_to_root }}css/variables.css">
        <link rel="stylesheet" href="{{ path_to_root }}css/general.css">
        <link rel="stylesheet" href="{{ path_to_root }}css/chrome.css">
        <link rel="stylesheet" href="{{ path_to_root }}css/print.css" media="print">

        <!-- Fonts -->
        <link rel="stylesheet" href="{{ path_to_root }}FontAwesome/css/font-awesome.css">
        <link href="https://fonts.googleapis.com/css?family=Lato:300,400|Poppins:300,400" rel="stylesheet">

        <!-- Highlight.js Stylesheets -->
        <link rel="stylesheet" href="{{ path_to_root }}highlight.css">
        <link rel="stylesheet" href="{{ path_to_root }}tomorrow-night.css">
        <link rel="stylesheet" href="{{ path_to_root }}ayu-highlight.css">

        <!-- Custom theme stylesheets -->
        {{#each additional_css}}
        <link rel="stylesheet" href="{{ ../path_to_root }}{{ this }}">
        {{/each}}

        {{#if mathjax_support}}
        <!-- MathJax -->
        <script async type="text/javascript" src="https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.1/MathJax.js?config=TeX-AMS-MML_HTMLorMML"></script>
        {{/if}}
    </head>
    <body class="light">
        <!-- Provide site root to javascript -->
        <script type="text/javascript">var path_to_root = "{{ path_to_root }}";</script>

        <!-- Work around some values being stored in localStorage wrapped in quotes -->
        <script type="text/javascript">
            try {
                var theme = localStorage.getItem('mdbook-theme');
                var sidebar = localStorage.getItem('mdbook-sidebar');

                if (theme.startsWith('"') && theme.endsWith('"')) {
                    localStorage.setItem('mdbook-theme', theme.slice(1, theme.length - 1));
                }

                if (sidebar.startsWith('"') && sidebar.endsWith('"')) {
                    localStorage.setItem('mdbook-sidebar', sidebar.slice(1, sidebar.length - 1));
                }
            } catch (e) { }
        </script>

        <!-- Set the theme before any content is loaded, prevents flash -->
        <script type="text/javascript">
            var theme;
            try { theme = localStorage.getItem('mdbook-theme'); } catch(e) { }
            if (theme === null || theme === undefined) { theme = 'light'; }
            document.body.className = theme;
            document.querySelector('html').className = theme + ' js';
        </script>

        <!-- Hide / unhide sidebar before it is displayed -->
        <script type="text/javascript">
            var html = document.querySelector('html');
            var sidebar = 'hidden';
            if (document.body.clientWidth >= 1080) {
                try { sidebar = localStorage.getItem('mdbook-sidebar'); } catch(e) { }
                sidebar = sidebar || 'visible';
            }
            html.classList.remove('sidebar-visible');
            html.classList.add("sidebar-" + sidebar);
        </script>

        <nav id="sidebar" class="sidebar" aria-label="Table of contents">
            <img
                src="data:image/svg+xml;base64,PHN2ZyB4bWxucz0iaHR0cDovL3d3dy53My5vcmcvMjAwMC9zdmciIHdpZHRoPSI3NTQiIGhlaWdodD0iMTUxIiB2aWV3Qm94PSIwIDAgNzU0IDE1MSI+PGcgZmlsbD0iI0ZGRiIgZmlsbC1ydWxlPSJldmVub2RkIj48cGF0aCBkPSJNMjY4IDMxaDE2LjQxOXY3NC4yNTVIMzQ5TDMzNS41NjYgMTIwSDI2OHpNNjQuMDAyIDg0LjExM0gyOC4xMjV2LS4wMWMtLjIzNC4wMDctLjQ2OC4wMS0uNzAzLjAxLTE0LjM2OCAwLTI2LjAxNi0xMS44OS0yNi4wMTYtMjYuNTU3QzEuNDA2IDQyLjg5IDEzLjA1NCAzMSAyNy40MjIgMzFjLjIzNSAwIC40Ny4wMDMuNzAzLjAxVjMxSDkwTDcxLjcxOSA0OC4yMjZIMjcuNDA1Yy01LjAzOSAwLTkuMTI0IDQuMTc3LTkuMTI0IDkuMzMgMCA1LjE1NCA0LjA4NSA5LjMzMSA5LjEyNCA5LjMzMWgzOC42ODl2LjA4NkM3OS40NzUgNjguMDcgOTAgNzkuNTAyIDkwIDkzLjQ0MyA5MCAxMDguMTEgNzguMzUyIDEyMCA2My45ODQgMTIwYy0uNzEgMC0xLjQxMy0uMDI5LTIuMTA5LS4wODZWMTIwSDBsMTkuNjg4LTE3LjIyNmg0NC4zMTRjNS4wMzggMCA5LjEyMy00LjE3NyA5LjEyMy05LjMzIDAtNS4xNTQtNC4wODUtOS4zMzEtOS4xMjMtOS4zMzF6TTE1MC40NjkgMzEuMDE1VjMxaDU5LjA2MnYuMDE1YzguMzcyLjM2NiAxNS4wOTYgNy4yMyAxNS40NTQgMTUuNzc1SDIyNXY1NS45ODRoLS4wMTVjLjAxLjIzOC4wMTUuNDc3LjAxNS43MTggMCA4Ljg3Ny02Ljg2MyAxNi4xMTctMTUuNDY5IDE2LjQ5M1YxMjBIMTUwLjQ3di0uMDE1Yy04LjYwNi0uMzc2LTE1LjQ2OS03LjYxNi0xNS40NjktMTYuNDkzIDAtLjI0LjAwNS0uNDguMDE1LS43MThIMTM1VjQ2Ljc5aC4wMTVjLjM1OC04LjU0NiA3LjA4Mi0xNS40MSAxNS40NTQtMTUuNzc1ek0xNjEuNTQzIDQ2LjhjLTUuMjMzLjIzLTkuNDM1IDQuNTQ3LTkuNjU5IDkuOTIzaC0uMDA5djM1LjIxNmguMDFjLS4wMDcuMTUtLjAxLjMtLjAxLjQ1MSAwIDUuNTg0IDQuMjkgMTAuMTM4IDkuNjY4IDEwLjM3NXYuMDFoMzYuOTE0di0uMDFjNS4zNzgtLjIzNyA5LjY2OC00Ljc5MSA5LjY2OC0xMC4zNzUgMC0uMTUxLS4wMDMtLjMwMi0uMDEtLjQ1MWguMDFWNTYuNzIzaC0uMDFjLS4yMjMtNS4zNzYtNC40MjUtOS42OTMtOS42NTgtOS45MjN2LS4wMWgtMzYuOTE0di4wMXpNNDA5Ljg3NSA1NS40MDNWNzIuNjNoNTYuMjVWNTUuNDAzYzAtNS41NS00LjQwNy0xMC4wNDgtOS44NDQtMTAuMDQ4SDQxOS43MmMtNS40MzcgMC05Ljg0NCA0LjQ5OS05Ljg0NCAxMC4wNDh6TTQ2Ni4xMjUgMTIwVjg1LjU0OGgtNTYuMjVWMTIwSDM5M1Y0OC4yMjZoLjAxNWExNy4xNCAxNy4xNCAwIDAgMS0uMDE1LS43MThDMzkzIDM4LjM5MSA0MDAuMjQgMzEgNDA5LjE3MiAzMWMuMjM1IDAgLjQ3LjAwNS43MDMuMDE1VjMxaDU3LjY1NnYuMDE1YzguNjA2LjM3NiAxNS40NjkgNy42MTYgMTUuNDY5IDE2LjQ5MyAwIC4yNC0uMDA1LjQ4LS4wMTUuNzE4SDQ4M1YxMjBoLTE2Ljg3NXpNNjc3Ljg3NSA1NS40MDNWNzIuNjNoNTYuMjVWNTUuNDAzYzAtNS41NS00LjQwNy0xMC4wNDgtOS44NDQtMTAuMDQ4SDY4Ny43MmMtNS40MzcgMC05Ljg0NCA0LjQ5OS05Ljg0NCAxMC4wNDh6TTczNC4xMjUgMTIwVjg1LjU0OGgtNTYuMjVWMTIwSDY2MVY0OC4yMjZoLjAxNWExNy4xNCAxNy4xNCAwIDAgMS0uMDE1LS43MThDNjYxIDM4LjM5MSA2NjguMjQgMzEgNjc3LjE3MiAzMWMuMjM1IDAgLjQ3LjAwNS43MDMuMDE1VjMxaDU3LjY1NnYuMDE1YzguNjA2LjM3NiAxNS40NjkgNy42MTYgMTUuNDY5IDE2LjQ5MyAwIC4yNC0uMDA1LjQ4LS4wMTUuNzE4SDc1MVYxMjBoLTE2Ljg3NXpNNjAxLjEyNSA5OC40NTdWMzFINjE4djg5aC0xNi44NzV2LS4xNDlsLTU2LjI1LTY1Ljg4M1YxMjBINTI4VjMxaDE2Ljg3NXoiLz48L2c+PC9zdmc+" width="150" height="30" alt="Solana">
            {{#toc}}{{/toc}}
        </nav>

        <div id="page-wrapper" class="page-wrapper">

            <div class="page">
                {{> header}}
                <div id="menu-bar" class="menu-bar">
                    <div id="menu-bar-sticky-container">
                        <div class="left-buttons">
                            <button id="sidebar-toggle" class="icon-button" type="button" title="Toggle Table of Contents" aria-label="Toggle Table of Contents" aria-controls="sidebar">
                                <i class="fa fa-bars"></i>
                            </button>
                            {{!-- <button id="theme-toggle" class="icon-button" type="button" title="Change theme" aria-label="Change theme" aria-haspopup="true" aria-expanded="false" aria-controls="theme-list">
                                <i class="fa fa-paint-brush"></i>
                            </button> --}}

                            <ul id="theme-list" class="theme-popup" aria-label="Themes" role="menu">
                                <li role="none"><button role="menuitem" class="theme" id="light">Light <span class="default">(default)</span></button></li>
                                <li role="none"><button role="menuitem" class="theme" id="rust">Rust</button></li>
                                <li role="none"><button role="menuitem" class="theme" id="coal">Coal</button></li>
                                <li role="none"><button role="menuitem" class="theme" id="navy">Navy</button></li>
                                <li role="none"><button role="menuitem" class="theme" id="ayu">Ayu</button></li>
                            </ul>
                            {{#if search_enabled}}
                            <button id="search-toggle" class="icon-button" type="button" title="Search. (Shortkey: s)" aria-label="Toggle Searchbar" aria-expanded="false" aria-keyshortcuts="S" aria-controls="searchbar">
                                <i class="fa fa-search"></i>
                            </button>
                            {{/if}}
                        </div>

                        <h1 class="menu-title">{{ book_title }}</h1>

                        <div class="right-buttons">
                            <a href="{{ path_to_root }}print.html" title="Print this book" aria-label="Print this book">
                                <i id="print-button" class="fa fa-print"></i>
                            </a>
                        </div>
                    </div>
                </div>

                {{#if search_enabled}}
                <div id="search-wrapper" class="hidden">
                    <form id="searchbar-outer" class="searchbar-outer">
                        <input type="search" name="search" id="searchbar" placeholder="Search this book ..." aria-controls="searchresults-outer" aria-describedby="searchresults-header">
                    </form>
                    <div id="searchresults-outer" class="searchresults-outer hidden">
                        <div id="searchresults-header" class="searchresults-header"></div>
                        <ul id="searchresults">
                        </ul>
                    </div>
                </div>
                {{/if}}

                <!-- Apply ARIA attributes after the sidebar and the sidebar toggle button are added to the DOM -->
                <script type="text/javascript">
                    document.getElementById('sidebar-toggle').setAttribute('aria-expanded', sidebar === 'visible');
                    document.getElementById('sidebar').setAttribute('aria-hidden', sidebar !== 'visible');
                    Array.from(document.querySelectorAll('#sidebar a')).forEach(function(link) {
                        link.setAttribute('tabIndex', sidebar === 'visible' ? 0 : -1);
                    });
                </script>

                <div id="content" class="content">
                    <main>
                        {{{ content }}}
                    </main>
                </div>
            </div>

        </div>

        {{#if livereload}}
        <!-- Livereload script (if served using the cli tool) -->
        <script type="text/javascript">
            var socket = new WebSocket("{{{livereload}}}");
            socket.onmessage = function (event) {
                if (event.data === "reload") {
                    socket.close();
                    location.reload(true); // force reload from server (not from cache)
                }
            };

            window.onbeforeunload = function() {
                socket.close();
            }
        </script>
        {{/if}}

        {{#if google_analytics}}
        <!-- Google Analytics Tag -->
        <script type="text/javascript">
            var localAddrs = ["localhost", "127.0.0.1", ""];

            // make sure we don't activate google analytics if the developer is
            // inspecting the book locally...
            if (localAddrs.indexOf(document.location.hostname) === -1) {
                (function(i,s,o,g,r,a,m){i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){
                (i[r].q=i[r].q||[]).push(arguments)},i[r].l=1*new Date();a=s.createElement(o),
                m=s.getElementsByTagName(o)[0];a.async=1;a.src=g;m.parentNode.insertBefore(a,m)
                })(window,document,'script','https://www.google-analytics.com/analytics.js','ga');

                ga('create', '{{google_analytics}}', 'auto');
                ga('send', 'pageview');
            }
        </script>
        {{/if}}

        {{#if playpen_js}}
        <script src="{{ path_to_root }}ace.js" type="text/javascript" charset="utf-8"></script>
        <script src="{{ path_to_root }}editor.js" type="text/javascript" charset="utf-8"></script>
        <script src="{{ path_to_root }}mode-rust.js" type="text/javascript" charset="utf-8"></script>
        <script src="{{ path_to_root }}theme-dawn.js" type="text/javascript" charset="utf-8"></script>
        <script src="{{ path_to_root }}theme-tomorrow_night.js" type="text/javascript" charset="utf-8"></script>
        {{/if}}

        {{#if search_js}}
        <script src="{{ path_to_root }}elasticlunr.min.js" type="text/javascript" charset="utf-8"></script>
        <script src="{{ path_to_root }}mark.min.js" type="text/javascript" charset="utf-8"></script>
        <script src="{{ path_to_root }}searcher.js" type="text/javascript" charset="utf-8"></script>
        {{/if}}

        <script src="{{ path_to_root }}clipboard.min.js" type="text/javascript" charset="utf-8"></script>
        <script src="{{ path_to_root }}highlight.js" type="text/javascript" charset="utf-8"></script>
        <script src="{{ path_to_root }}book.js" type="text/javascript" charset="utf-8"></script>

        <!-- Custom JS scripts -->
        {{#each additional_js}}
        <script type="text/javascript" src="{{ ../path_to_root }}{{this}}"></script>
        {{/each}}

        {{#if is_print}}
        {{#if mathjax_support}}
        <script type="text/javascript">
        window.addEventListener('load', function() {
            MathJax.Hub.Register.StartupHook('End', function() {
                window.setTimeout(window.print, 100);
            });
        });
        </script>
        {{else}}
        <script type="text/javascript">
        window.addEventListener('load', function() {
            window.setTimeout(window.print, 100);
        });
        </script>
        {{/if}}
        {{/if}}

    </body>
</html>
@@ -1,24 +0,0 @@
[package]
name = "solana-chacha-cuda"
version = "1.0.0"
description = "Solana Chacha Cuda APIs"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
edition = "2018"

[dependencies]
log = "0.4.8"
solana-archiver-utils = { path = "../archiver-utils", version = "1.0.0" }
solana-chacha = { path = "../chacha", version = "1.0.0" }
solana-ledger = { path = "../ledger", version = "1.0.0" }
solana-logger = { path = "../logger", version = "1.0.0" }
solana-perf = { path = "../perf", version = "1.0.0" }
solana-sdk = { path = "../sdk", version = "1.0.0" }

[dev-dependencies]
hex-literal = "0.2.1"

[lib]
name = "solana_chacha_cuda"
@@ -1,280 +0,0 @@
// Module used by validators to approve storage mining proofs in parallel using the GPU

use solana_chacha::chacha::{CHACHA_BLOCK_SIZE, CHACHA_KEY_SIZE};
use solana_ledger::blockstore::Blockstore;
use solana_perf::perf_libs;
use solana_sdk::hash::Hash;
use std::io;
use std::mem::size_of;
use std::sync::Arc;

// Encrypt a file with multiple starting IV states, determined by ivecs.len()
//
// Then sample each block at the offsets provided by samples argument with sha256
// and return the vec of sha states
pub fn chacha_cbc_encrypt_file_many_keys(
    blockstore: &Arc<Blockstore>,
    segment: u64,
    slots_per_segment: u64,
    ivecs: &mut [u8],
    samples: &[u64],
) -> io::Result<Vec<Hash>> {
    let api = perf_libs::api().expect("no perf libs");
    if ivecs.len() % CHACHA_BLOCK_SIZE != 0 {
        return Err(io::Error::new(
            io::ErrorKind::Other,
            format!(
                "bad IV length({}) not divisible by {} ",
                ivecs.len(),
                CHACHA_BLOCK_SIZE,
            ),
        ));
    }

    const BUFFER_SIZE: usize = 8 * 1024;
    let mut buffer = [0; BUFFER_SIZE];
    let num_keys = ivecs.len() / CHACHA_BLOCK_SIZE;
    let mut sha_states = vec![0; num_keys * size_of::<Hash>()];
    let mut int_sha_states = vec![0; num_keys * 112];
    let keys: Vec<u8> = vec![0; num_keys * CHACHA_KEY_SIZE]; // keys not used ATM, uniqueness comes from IV
    let mut current_slot = segment * slots_per_segment;
    let mut start_index = 0;
    let start_slot = current_slot;
    let mut total_size = 0;
    let mut time: f32 = 0.0;
    unsafe {
        (api.chacha_init_sha_state)(int_sha_states.as_mut_ptr(), num_keys as u32);
    }
    loop {
        match blockstore.get_data_shreds(current_slot, start_index, std::u64::MAX, &mut buffer) {
            Ok((last_index, mut size)) => {
                debug!(
                    "chacha_cuda: encrypting segment: {} num_shreds: {} data_len: {}",
                    segment,
                    last_index.saturating_sub(start_index),
                    size
                );

                if size == 0 {
                    if current_slot.saturating_sub(start_slot) < slots_per_segment {
                        current_slot += 1;
                        start_index = 0;
                        continue;
                    } else {
                        break;
                    }
                }

                if size < BUFFER_SIZE {
                    // round to the nearest key_size boundary
                    size = (size + CHACHA_KEY_SIZE - 1) & !(CHACHA_KEY_SIZE - 1);
                }

                unsafe {
                    (api.chacha_cbc_encrypt_many_sample)(
                        buffer[..size].as_ptr(),
                        int_sha_states.as_mut_ptr(),
                        size,
                        keys.as_ptr(),
                        ivecs.as_mut_ptr(),
                        num_keys as u32,
                        samples.as_ptr(),
                        samples.len() as u32,
                        total_size,
                        &mut time,
                    );
                }

                total_size += size as u64;
                start_index = last_index + 1;
            }
            Err(e) => {
                info!("Error encrypting file: {:?}", e);
                break;
            }
        }
    }
    unsafe {
        (api.chacha_end_sha_state)(
            int_sha_states.as_ptr(),
            sha_states.as_mut_ptr(),
            num_keys as u32,
        );
    }
    let mut res = Vec::new();
    for x in 0..num_keys {
        let start = x * size_of::<Hash>();
        let end = start + size_of::<Hash>();
        res.push(Hash::new(&sha_states[start..end]));
    }
    Ok(res)
}
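
// A minimal usage sketch (illustrative only; `blockstore` and
// `slots_per_segment` are assumed to come from the caller, as in the tests
// below). Each CHACHA_BLOCK_SIZE slice of `ivecs` is an independent IV
// state, so the result contains one Hash per such slice:
//
//     let samples = [0u64];
//     let mut ivecs = vec![0u8; 2 * CHACHA_BLOCK_SIZE]; // two IV states
//     let hashes = chacha_cbc_encrypt_file_many_keys(
//         &blockstore, 0, slots_per_segment, &mut ivecs, &samples,
//     )?;
//     assert_eq!(hashes.len(), 2); // one sha state per IV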

#[cfg(test)]
mod tests {
    use super::*;
    use solana_archiver_utils::sample_file;
    use solana_chacha::chacha::chacha_cbc_encrypt_ledger;
    use solana_ledger::entry::create_ticks;
    use solana_ledger::get_tmp_ledger_path;
    use solana_sdk::clock::DEFAULT_SLOTS_PER_SEGMENT;
    use solana_sdk::signature::Keypair;
    use std::fs::{remove_dir_all, remove_file};
    use std::path::Path;

    #[test]
    fn test_encrypt_file_many_keys_single() {
        solana_logger::setup();
        if perf_libs::api().is_none() {
            info!("perf-libs unavailable, skipped");
            return;
        }

        let slots_per_segment = 32;
        let entries = create_ticks(slots_per_segment, 0, Hash::default());
        let ledger_path = get_tmp_ledger_path!();
        let ticks_per_slot = 16;
        let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());

        blockstore
            .write_entries(
                0,
                0,
                0,
                ticks_per_slot,
                Some(0),
                true,
                &Arc::new(Keypair::new()),
                entries,
                0,
            )
            .unwrap();

        let out_path = Path::new("test_chacha_encrypt_file_many_keys_single_output.txt.enc");

        let samples = [0];
        let mut ivecs = hex!(
            "abcd1234abcd1234abcd1234abcd1234 abcd1234abcd1234abcd1234abcd1234
             abcd1234abcd1234abcd1234abcd1234 abcd1234abcd1234abcd1234abcd1234"
        );

        let mut cpu_iv = ivecs.clone();
        chacha_cbc_encrypt_ledger(
            &blockstore,
            0,
            slots_per_segment as u64,
            out_path,
            &mut cpu_iv,
        )
        .unwrap();

        let ref_hash = sample_file(&out_path, &samples).unwrap();

        let hashes = chacha_cbc_encrypt_file_many_keys(
            &blockstore,
            0,
            slots_per_segment as u64,
            &mut ivecs,
            &samples,
        )
        .unwrap();

        assert_eq!(hashes[0], ref_hash);

        let _ignored = remove_dir_all(&ledger_path);
        let _ignored = remove_file(out_path);
    }

    #[test]
    fn test_encrypt_file_many_keys_multiple_keys() {
        solana_logger::setup();
        if perf_libs::api().is_none() {
            info!("perf-libs unavailable, skipped");
            return;
        }

        let ledger_path = get_tmp_ledger_path!();
        let ticks_per_slot = 90;
        let entries = create_ticks(2 * ticks_per_slot, 0, Hash::default());
        let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
        blockstore
            .write_entries(
                0,
                0,
                0,
                ticks_per_slot,
                Some(0),
                true,
                &Arc::new(Keypair::new()),
                entries,
                0,
            )
            .unwrap();

        let out_path = Path::new("test_chacha_encrypt_file_many_keys_multiple_output.txt.enc");

        let samples = [0, 1, 3, 4, 5, 150];
        let mut ivecs = Vec::new();
        let mut ref_hashes: Vec<Hash> = vec![];
        for i in 0..2 {
            let mut ivec = hex!(
                "abc123abc123abc123abc123abc123abc123abababababababababababababab
                 abc123abc123abc123abc123abc123abc123abababababababababababababab"
            );
            ivec[0] = i;
            ivecs.extend(ivec.clone().iter());
            chacha_cbc_encrypt_ledger(
                &blockstore.clone(),
                0,
                DEFAULT_SLOTS_PER_SEGMENT,
                out_path,
                &mut ivec,
            )
            .unwrap();

            ref_hashes.push(sample_file(&out_path, &samples).unwrap());
            info!(
                "ivec: {:?} hash: {:?} ivecs: {:?}",
                ivec.to_vec(),
                ref_hashes.last(),
                ivecs
            );
        }

        let hashes = chacha_cbc_encrypt_file_many_keys(
            &blockstore,
            0,
            DEFAULT_SLOTS_PER_SEGMENT,
            &mut ivecs,
            &samples,
        )
        .unwrap();

        assert_eq!(hashes, ref_hashes);

        let _ignored = remove_dir_all(&ledger_path);
        let _ignored = remove_file(out_path);
    }

    #[test]
    fn test_encrypt_file_many_keys_bad_key_length() {
        solana_logger::setup();
        if perf_libs::api().is_none() {
            info!("perf-libs unavailable, skipped");
            return;
        }

        let mut keys = hex!("abc123");
        let ledger_path = get_tmp_ledger_path!();
        let samples = [0];
        let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
        assert!(chacha_cbc_encrypt_file_many_keys(
            &blockstore,
            0,
            DEFAULT_SLOTS_PER_SEGMENT,
            &mut keys,
            &samples,
        )
        .is_err());
    }
}
@@ -1,8 +0,0 @@
#[macro_use]
extern crate log;

#[cfg(test)]
#[macro_use]
extern crate hex_literal;

pub mod chacha_cuda;
@@ -1,12 +0,0 @@
[package]
name = "solana-chacha-sys"
version = "1.0.0"
description = "Solana chacha-sys"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
homepage = "https://solana.com/"
license = "Apache-2.0"
edition = "2018"

[build-dependencies]
cc = "1.0.49"
@@ -1,8 +0,0 @@
extern crate cc;

fn main() {
    cc::Build::new()
        .file("cpu-crypt/chacha20_core.c")
        .file("cpu-crypt/chacha_cbc.c")
        .compile("libcpu-crypt");
}
chacha-sys/cpu-crypt/.gitignore (vendored)
@@ -1 +0,0 @@
release/
@@ -1,25 +0,0 @@
V:=debug

LIB:=cpu-crypt

CFLAGS_common:=-Wall -Werror -pedantic -fPIC
CFLAGS_release:=-march=native -O3 $(CFLAGS_common)
CFLAGS_debug:=-g $(CFLAGS_common)
CFLAGS:=$(CFLAGS_$V)

all: $V/lib$(LIB).a

$V/chacha20_core.o: chacha20_core.c chacha.h
	@mkdir -p $(@D)
	$(CC) $(CFLAGS) -c $< -o $@

$V/chacha_cbc.o: chacha_cbc.c chacha.h
	@mkdir -p $(@D)
	$(CC) $(CFLAGS) -c $< -o $@

$V/lib$(LIB).a: $V/chacha20_core.o $V/chacha_cbc.o
	$(AR) rcs $@ $^

.PHONY:clean
clean:
	rm -rf $V
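
# Build note (inferred from the rules above, not part of the original file):
# plain `make` produces debug/libcpu-crypt.a with -g, while `make V=release`
# produces release/libcpu-crypt.a with -march=native -O3; the release/ entry
# in the .gitignore above covers that output directory.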
@@ -1,35 +0,0 @@
#ifndef HEADER_CHACHA_H
# define HEADER_CHACHA_H

# include <string.h>
# include <inttypes.h>
# include <stddef.h>

# ifdef __cplusplus
extern "C" {
# endif

typedef unsigned int u32;

#define CHACHA_KEY_SIZE 32
#define CHACHA_NONCE_SIZE 12
#define CHACHA_BLOCK_SIZE 64
#define CHACHA_ROUNDS 500
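
/* CHACHA_ROUNDS is the round count used by the CBC block transform in
 * chacha_cbc.c; the CTR helper in chacha20_core.c always runs the
 * standard 20 rounds. */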

void chacha20_encrypt(const u32 input[16],
                      unsigned char output[64],
                      int num_rounds);

void chacha20_encrypt_ctr(const uint8_t *in, uint8_t *out, size_t in_len,
                          const uint8_t key[CHACHA_KEY_SIZE], const uint8_t nonce[CHACHA_NONCE_SIZE],
                          uint32_t counter);

void chacha20_cbc128_encrypt(const unsigned char* in, unsigned char* out,
                             uint32_t len, const uint8_t* key,
                             unsigned char* ivec);

# ifdef __cplusplus
}
# endif

#endif
@@ -1,102 +0,0 @@
#include "chacha.h"

#define ROTL32(v, n) (((v) << (n)) | ((v) >> (32 - (n))))

#define ROTATE(v, c) ROTL32((v), (c))

#define XOR(v, w) ((v) ^ (w))

#define PLUS(x, y) ((x) + (y))

#define U32TO8_LITTLE(p, v) \
    { (p)[0] = ((v)      ) & 0xff; (p)[1] = ((v) >>  8) & 0xff; \
      (p)[2] = ((v) >> 16) & 0xff; (p)[3] = ((v) >> 24) & 0xff; }

#define U8TO32_LITTLE(p) \
    (((u32)((p)[0])      ) | ((u32)((p)[1]) <<  8) | \
     ((u32)((p)[2]) << 16) | ((u32)((p)[3]) << 24))

#define QUARTERROUND(a,b,c,d) \
    x[a] = PLUS(x[a],x[b]); x[d] = ROTATE(XOR(x[d],x[a]),16); \
    x[c] = PLUS(x[c],x[d]); x[b] = ROTATE(XOR(x[b],x[c]),12); \
    x[a] = PLUS(x[a],x[b]); x[d] = ROTATE(XOR(x[d],x[a]), 8); \
    x[c] = PLUS(x[c],x[d]); x[b] = ROTATE(XOR(x[b],x[c]), 7);
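
/* The quarter round is ChaCha's add-rotate-xor primitive: it mixes four
 * words of the 4x4 u32 state in place. chacha20_encrypt below applies it
 * to the four columns and then the four diagonals, two rounds per loop
 * iteration. */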

// sigma contains the ChaCha constants, which happen to be an ASCII string.
static const uint8_t sigma[16] = { 'e', 'x', 'p', 'a', 'n', 'd', ' ', '3',
                                   '2', '-', 'b', 'y', 't', 'e', ' ', 'k' };

void chacha20_encrypt(const u32 input[16],
                      unsigned char output[64],
                      int num_rounds)
{
    u32 x[16];
    int i;
    memcpy(x, input, sizeof(u32) * 16);
    for (i = num_rounds; i > 0; i -= 2) {
        QUARTERROUND( 0, 4, 8,12)
        QUARTERROUND( 1, 5, 9,13)
        QUARTERROUND( 2, 6,10,14)
        QUARTERROUND( 3, 7,11,15)
        QUARTERROUND( 0, 5,10,15)
        QUARTERROUND( 1, 6,11,12)
        QUARTERROUND( 2, 7, 8,13)
        QUARTERROUND( 3, 4, 9,14)
    }
    for (i = 0; i < 16; ++i) {
        x[i] = PLUS(x[i], input[i]);
    }
    for (i = 0; i < 16; ++i) {
        U32TO8_LITTLE(output + 4 * i, x[i]);
    }
}

void chacha20_encrypt_ctr(const uint8_t *in, uint8_t *out, size_t in_len,
                          const uint8_t key[CHACHA_KEY_SIZE],
                          const uint8_t nonce[CHACHA_NONCE_SIZE],
                          uint32_t counter)
{
    uint32_t input[16];
    uint8_t buf[64];
    size_t todo, i;

    input[0] = U8TO32_LITTLE(sigma + 0);
    input[1] = U8TO32_LITTLE(sigma + 4);
    input[2] = U8TO32_LITTLE(sigma + 8);
    input[3] = U8TO32_LITTLE(sigma + 12);

    input[4] = U8TO32_LITTLE(key + 0);
    input[5] = U8TO32_LITTLE(key + 4);
    input[6] = U8TO32_LITTLE(key + 8);
    input[7] = U8TO32_LITTLE(key + 12);

    input[8] = U8TO32_LITTLE(key + 16);
    input[9] = U8TO32_LITTLE(key + 20);
    input[10] = U8TO32_LITTLE(key + 24);
    input[11] = U8TO32_LITTLE(key + 28);

    input[12] = counter;
    input[13] = U8TO32_LITTLE(nonce + 0);
    input[14] = U8TO32_LITTLE(nonce + 4);
    input[15] = U8TO32_LITTLE(nonce + 8);

    while (in_len > 0) {
        todo = sizeof(buf);
        if (in_len < todo) {
            todo = in_len;
        }

        chacha20_encrypt(input, buf, 20);
        for (i = 0; i < todo; i++) {
            out[i] = in[i] ^ buf[i];
        }

        out += todo;
        in += todo;
        in_len -= todo;

        input[12]++;
    }
}
@@ -1,72 +0,0 @@
#include "chacha.h"

#if !defined(STRICT_ALIGNMENT) && !defined(PEDANTIC)
# define STRICT_ALIGNMENT 0
#endif

void chacha20_cbc128_encrypt(const unsigned char* in, unsigned char* out,
                             uint32_t len, const uint8_t* key,
                             unsigned char* ivec)
{
    size_t n;
    unsigned char *iv = ivec;
    (void)key;

    if (len == 0) {
        return;
    }

#if !defined(OPENSSL_SMALL_FOOTPRINT)
    if (STRICT_ALIGNMENT &&
        ((size_t)in | (size_t)out | (size_t)ivec) % sizeof(size_t) != 0) {
        while (len >= CHACHA_BLOCK_SIZE) {
            for (n = 0; n < CHACHA_BLOCK_SIZE; ++n) {
                out[n] = in[n] ^ iv[n];
                //printf("%x ", out[n]);
            }
            chacha20_encrypt((const u32*)out, out, CHACHA_ROUNDS);
            iv = out;
            len -= CHACHA_BLOCK_SIZE;
            in += CHACHA_BLOCK_SIZE;
            out += CHACHA_BLOCK_SIZE;
        }
    } else {
        while (len >= CHACHA_BLOCK_SIZE) {
            for (n = 0; n < CHACHA_BLOCK_SIZE; n += sizeof(size_t)) {
                *(size_t *)(out + n) =
                    *(size_t *)(in + n) ^ *(size_t *)(iv + n);
                //printf("%zu ", *(size_t *)(iv + n));
            }
            chacha20_encrypt((const u32*)out, out, CHACHA_ROUNDS);
            iv = out;
            len -= CHACHA_BLOCK_SIZE;
            in += CHACHA_BLOCK_SIZE;
            out += CHACHA_BLOCK_SIZE;
        }
    }
#endif
    while (len) {
        for (n = 0; n < CHACHA_BLOCK_SIZE && n < len; ++n) {
            out[n] = in[n] ^ iv[n];
        }
        for (; n < CHACHA_BLOCK_SIZE; ++n) {
            out[n] = iv[n];
        }
        chacha20_encrypt((const u32*)out, out, CHACHA_ROUNDS);
        iv = out;
        if (len <= CHACHA_BLOCK_SIZE) {
            break;
        }
        len -= CHACHA_BLOCK_SIZE;
        in += CHACHA_BLOCK_SIZE;
        out += CHACHA_BLOCK_SIZE;
    }
    memcpy(ivec, iv, CHACHA_BLOCK_SIZE);
}
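
/* This is CBC built directly over the ChaCha block function rather than a
 * stream-cipher construction: each 64-byte block is XORed with the previous
 * ciphertext block (the IV for the first) and then passed through
 * chacha20_encrypt with CHACHA_ROUNDS rounds. The final ciphertext block is
 * copied back into ivec so a subsequent call can continue the chain. */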

void chacha20_cbc_encrypt(const uint8_t *in, uint8_t *out, size_t in_len,
                          const uint8_t key[CHACHA_KEY_SIZE], uint8_t* ivec)
{
    chacha20_cbc128_encrypt(in, out, in_len, key, ivec);
}
Some files were not shown because too many files have changed in this diff.