Compare commits
1344 Commits
[1344 abbreviated commit SHAs, c9cbc39ec9 … ca10cf081f; the Author and Date columns of the commit table are empty]
(deleted AppVeyor CI config)
@@ -1,42 +0,0 @@
-version: '{build}'
-
-branches:
-  only:
-    - master
-    - /^v[0-9.]+\.[0-9.]+/
-
-cache:
-  - '%USERPROFILE%\.cargo'
-  - '%APPVEYOR_BUILD_FOLDER%\target'
-
-clone_folder: d:\projects\solana
-
-build_script:
-  - bash ci/publish-tarball.sh
-
-notifications:
-  - provider: Slack
-    incoming_webhook:
-      secure: GJsBey+F5apAtUm86MHVJ68Uqa6WN1SImcuIc4TsTZrDhA8K1QWUNw9FFQPybUWDyOcS5dly3kubnUqlGt9ux6Ad2efsfRIQYWv0tOVXKeY=
-    channel: ci-status
-    on_build_success: false
-    on_build_failure: true
-    on_build_status_changed: true
-
-deploy:
-  - provider: S3
-    access_key_id:
-      secure: fTbJl6JpFebR40J7cOWZ2mXBa3kIvEiXgzxAj6L3N7A=
-    secret_access_key:
-      secure: vItsBXb2rEFLvkWtVn/Rcxu5a5+2EwC+b7GsA0waJy9hXh6XuBAD0lnHd9re3g/4
-    bucket: release.solana.com
-    region: us-west-1
-    set_public: true
-
-  - provider: GitHub
-    auth_token:
-      secure: 81fEmPZ0cV1wLtNuUrcmtgxKF6ROQF1+/ft5m+fHX21z6PoeCbaNo8cTyLioWBj7
-    draft: false
-    prerelease: false
-    on:
-      appveyor_repo_tag: true
.buildkite/env/secrets.ejson (vendored, 5 changes)
@@ -7,9 +7,6 @@
   "GITHUB_TOKEN": "EJ[1:yGpTmjdbyjW2kjgIHkFoJv7Ue7EbUvUbqHyw6anGgWg=:Vq2dkGTOzfEpRht0BAGHFp/hDogMvXJe:tFXHg1epVt2mq9hkuc5sRHe+KAnVREi/p8S+IZu67XRyzdiA/nGak1k860FXYuuzuaE0QWekaEc=]",
   "INFLUX_DATABASE": "EJ[1:yGpTmjdbyjW2kjgIHkFoJv7Ue7EbUvUbqHyw6anGgWg=:5KI9WBkXx3R/W4m256mU5MJOE7N8aAT9:Cb8QFELZ9I60t5zhJ9h55Kcs]",
   "INFLUX_PASSWORD": "EJ[1:yGpTmjdbyjW2kjgIHkFoJv7Ue7EbUvUbqHyw6anGgWg=:hQRMpLCrav+OYkNphkeM4hagdVoZv5Iw:AUO76rr6+gF1OLJA8ZLSG8wHKXgYCPNk6gRCV8rBhZBJ4KwDaxpvOhMl7bxxXG6jol7v4aRa/Lk=]",
-  "INFLUX_USERNAME": "EJ[1:yGpTmjdbyjW2kjgIHkFoJv7Ue7EbUvUbqHyw6anGgWg=:R7BNmQjfeqoGDAFTJu9bYTGHol2NgnYN:Q2tOT/EBcFvhFk+DKLKmVU7tLCpVC3Ui]",
-  "SOLANA_INSTALL_UPDATE_MANIFEST_KEYPAIR_x86_64_unknown_linux_gnu": "EJ[1:yGpTmjdbyjW2kjgIHkFoJv7Ue7EbUvUbqHyw6anGgWg=:Egc2dMrHDU0NcZ71LwGv/V66shUhwYUE:04VoIb8CKy7KYhQ5W4cEW9SDKZltxWBL5Hob106lMBbUOD/yUvKYcG3Ep8JfTMwO3K8zowW5HpU/IdGoilX0XWLiJJ6t+p05WWK0TA16nOEtwrEG+UK8wm3sN+xCO20i4jDhpNpgg3FYFHT5rKTHW8+zaBTNUX/SFxkN67Lm+92IM28CXYE43SU1WV6H99hGFFVpTK5JVM3JuYU1ex/dHRE+xCzTr4MYUB/F+nGoNFW8HUDV/y0e1jxT9to3x0SmnytEEuk+5RUzFuEt9cKNFeNml3fOCi4qL+sfj/Y5pjH9xDiUxsvH/8NL35jbLP244aFHgWcp]",
-  "SOLANA_INSTALL_UPDATE_MANIFEST_KEYPAIR_x86_64_apple_darwin": "EJ[1:yGpTmjdbyjW2kjgIHkFoJv7Ue7EbUvUbqHyw6anGgWg=:NeOxSoWCvXB9AL4H6OK26l/7bmsKd/oz:Ijfoxtvk2CHlN1ZXHup3Gg/914kbbAkEGWJfvozA8UIe+aUzUObMyTrKkVOeNAH8Q8YH9tNzk7RRnrTcpnzeCCBLlWcVEeruMxHox3mPRzmSeDLxtbzCl9VePlRO3T7jg90K5hW+ZAkd5J/WJNzpAcmr93ts/of3MbvGHSujId/efCTzJEcP6JInnBb8Vrj7TlgKbzUlnqpq1+NjYPSXN3maKa9pKeo2JWxZlGBMoy6QWUUY5GbYEylw9smwh1LJcHZjlaZNMuOl4gNKtaSr38IXQkAXaRUJDPAmPras00YObKzXU8RkTrP4EoP/jx5LPR7f]",
-  "SOLANA_INSTALL_UPDATE_MANIFEST_KEYPAIR_x86_64_pc_windows_msvc": "EJ[1:yGpTmjdbyjW2kjgIHkFoJv7Ue7EbUvUbqHyw6anGgWg=:7t+56twjW+jR7fpFNNeRFLPd7E4lbmyN:JuviDpkQrfVcNUGRGsa2e/UhvH6tTYyk1s4cHHE5xZH1NByL7Kpqx36VG/+o1AUGEeSQdsBnKgzYdMoFYbO8o50DoRPc86QIEVXCupD6J9avxLFtQgOWgJp+/mCdUVXlqXiFs/vQgS/L4psrcKdF6WHd77BeUr6ll8DjH+9m5FC9Rcai2pXno6VbPpunHQ0oUdYzhFR64+LiRacBaefQ9igZ+nSEWDLqbaZSyfm9viWkijoVFTq8gAgdXXEh7g0QdxVE5T6bPristJhT6jWBhWunPUCDNFFErWIsbRGctepl4pbCWqh2hNTw9btSgVfeY6uGCOsdy9E=]"
+  "INFLUX_USERNAME": "EJ[1:yGpTmjdbyjW2kjgIHkFoJv7Ue7EbUvUbqHyw6anGgWg=:R7BNmQjfeqoGDAFTJu9bYTGHol2NgnYN:Q2tOT/EBcFvhFk+DKLKmVU7tLCpVC3Ui]"
   }
 }
(GitBook documentation config)
@@ -1,5 +1,18 @@
-root: ./book/src
+root: ./docs/src
 
 structure:
   readme: introduction.md
   summary: SUMMARY.md
+
+redirects:
+  wallet: ./wallet-guide/README.md
+  wallet/app-wallets: ./wallet-guide/apps.md
+  wallet/app-wallets/trust-wallet: ./wallet-guide/trust-wallet.md
+  wallet/app-wallets/ledger-live: ./wallet-guide/ledger-live.md
+  wallet/cli-wallets: ./wallet-guide/cli.md
+  wallet/cli-wallets/paper-wallet: ./paper-wallet/README.md
+  wallet/cli-wallets/paper-wallet/paper-wallet-usage: ./paper-wallet/paper-wallet-usage.md
+  wallet/cli-wallets/remote-wallet: ./hardware-wallets/README.md
+  wallet/cli-wallets/remote-wallet/ledger: ./hardware-wallets/ledger.md
+  wallet/cli-wallets/file-system-wallet: ./file-system-wallet/README.md
+  wallet/support: ./wallet-guide/support.md
.gitignore (vendored, 7 changes)
@@ -1,6 +1,7 @@
-/book/html/
-/book/src/tests.ok
-/book/src/.gitbook/assets/*.svg
+/docs/html/
+/docs/src/tests.ok
+/docs/src/cli/usage.md
+/docs/src/.gitbook/assets/*.svg
 /farf/
 /solana-release/
 /solana-release.tar.bz2
.mergify.yml (24 changes)
@@ -19,27 +19,27 @@ pull_request_rules:
       label:
         add:
           - automerge
-  - name: v0.21 backport
+  - name: v1.0 backport
     conditions:
-      - base=master
-      - label=v0.21
+      - label=v1.0
     actions:
       backport:
+        ignore_conflicts: true
         branches:
-          - v0.21
+          - v1.0
-  - name: v0.22 backport
+  - name: v1.1 backport
     conditions:
-      - base=master
-      - label=v0.22
+      - label=v1.1
     actions:
       backport:
+        ignore_conflicts: true
         branches:
-          - v0.22
+          - v1.1
-  - name: v0.23 backport
+  - name: v1.2 backport
     conditions:
-      - base=master
-      - label=v0.23
+      - label=v1.2
     actions:
       backport:
+        ignore_conflicts: true
         branches:
-          - v0.23
+          - v1.2
(Travis CI config)
@@ -1,5 +1,6 @@
 os:
   - osx
+  - windows
 
 language: rust
 rust:
(contributing guide)
@@ -224,21 +224,20 @@ Inventing new terms is allowed, but should only be done when the term is widely
 used and understood. Avoid introducing new 3-letter terms, which can be
 confused with 3-letter acronyms.
 
-[Terms currently in use](book/src/terminology.md)
+[Terms currently in use](docs/src/terminology.md)
 
 ## Design Proposals
 
-Solana's architecture is described by a book generated from markdown files in
-the `book/src/` directory, maintained by an *editor* (currently @garious). To
-add a design proposal, you'll need to at least propose a change the content
-under the [Accepted Design
-Proposals](https://docs.solana.com/book/v/master/proposals) chapter. Here's
-the full process:
+Solana's architecture is described by docs generated from markdown files in
+the `docs/src/` directory, maintained by an *editor* (currently @garious). To
+add a design proposal, you'll need to include it in the
+[Accepted Design Proposals](https://docs.solana.com/proposals)
+section of the Solana docs. Here's the full process:
 
 1. Propose a design by creating a PR that adds a markdown document to the
-   directory `book/src/` and references it from the [table of
-   contents](book/src/SUMMARY.md). Add any relevant *maintainers* to the PR
+   `docs/src/proposals` directory and references it from the [table of
+   contents](docs/src/SUMMARY.md). Add any relevant *maintainers* to the PR
    review.
 2. The PR being merged indicates your proposed change was accepted and that the
    maintainers support your plan of attack.
Cargo.lock (generated, 6942 changes): file diff suppressed because it is too large.
Cargo.toml (19 changes)
@@ -3,10 +3,13 @@ members = [
     "bench-exchange",
     "bench-streamer",
     "bench-tps",
+    "accounts-bench",
     "banking-bench",
-    "chacha-sys",
+    "cli-config",
     "client",
     "core",
+    "dos",
+    "download-utils",
     "faucet",
     "perf",
     "validator",
@@ -21,9 +24,12 @@ members = [
     "logger",
     "log-analyzer",
     "merkle-tree",
+    "stake-o-matic",
+    "streamer",
     "measure",
     "metrics",
     "net-shaper",
+    "notifier",
     "programs/bpf_loader",
     "programs/budget",
     "programs/btc_spv",
@@ -34,18 +40,21 @@ members = [
     "programs/noop",
     "programs/ownable",
     "programs/stake",
-    "programs/storage",
     "programs/vest",
     "programs/vote",
-    "archiver",
+    "remote-wallet",
+    "ramp-tps",
     "runtime",
     "sdk",
-    "sdk-c",
     "scripts",
+    "stake-accounts",
+    "stake-monitor",
     "sys-tuner",
+    "tokens",
+    "transaction-status",
     "upload-perf",
     "net-utils",
-    "fixed-buf",
+    "version",
     "vote-signer",
     "cli",
     "rayon-threadlimit",
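The workspace list above gains new member crates such as `accounts-bench`, `dos`, `stake-monitor`, and `tokens` while dropping `chacha-sys`, `programs/storage`, `archiver`, `sdk-c`, and `fixed-buf`. A quick way to sanity-check a membership edit like this, sketched below using only standard cargo flags (nothing here is prescribed by the diff itself), is to build one new member by its package name and then type-check the whole workspace:

```bash
# Build a single new member by its [package] name (not its directory name).
$ cargo build -p solana-accounts-bench

# Type-check every crate listed in members = [...]; a typo'd or
# missing member path fails immediately here.
$ cargo check --workspace
```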
README.md (187 changes)
@@ -1,76 +1,17 @@
+<p align="center">
+  <a href="https://solana.com">
+    <img alt="Solana" src="https://i.imgur.com/OMnvVEz.png" width="250" />
+  </a>
+</p>
+
 [](https://crates.io/crates/solana-core)
 [](https://docs.rs/solana-core)
 [](https://buildkite.com/solana-labs/solana/builds?branch=master)
 [](https://codecov.io/gh/solana-labs/solana)
 
-Blockchain Rebuilt for Scale
-===
+# Building
 
-Solana™ is a new blockchain architecture built from the ground up for scale. The architecture supports
-up to 710 thousand transactions per second on a gigabit network.
-
-Disclaimer
-===
-
-All claims, content, designs, algorithms, estimates, roadmaps, specifications, and performance measurements described in this project are done with the author's best effort. It is up to the reader to check and validate their accuracy and truthfulness. Furthermore nothing in this project constitutes a solicitation for investment.
-
-Introduction
-===
-
-It's possible for a centralized database to process 710,000 transactions per second on a standard gigabit network if the transactions are, on average, no more than 176 bytes. A centralized database can also replicate itself and maintain high availability without significantly compromising that transaction rate using the distributed system technique known as Optimistic Concurrency Control [\[H.T.Kung, J.T.Robinson (1981)\]](http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.65.4735). At Solana, we're demonstrating that these same theoretical limits apply just as well to blockchain on an adversarial network. The key ingredient? Finding a way to share time when nodes can't trust one-another. Once nodes can trust time, suddenly ~40 years of distributed systems research becomes applicable to blockchain!
-
-> Perhaps the most striking difference between algorithms obtained by our method and ones based upon timeout is that using timeout produces a traditional distributed algorithm in which the processes operate asynchronously, while our method produces a globally synchronous one in which every process does the same thing at (approximately) the same time. Our method seems to contradict the whole purpose of distributed processing, which is to permit different processes to operate independently and perform different functions. However, if a distributed system is really a single system, then the processes must be synchronized in some way. Conceptually, the easiest way to synchronize processes is to get them all to do the same thing at the same time. Therefore, our method is used to implement a kernel that performs the necessary synchronization--for example, making sure that two different processes do not try to modify a file at the same time. Processes might spend only a small fraction of their time executing the synchronizing kernel; the rest of the time, they can operate independently--e.g., accessing different files. This is an approach we have advocated even when fault-tolerance is not required. The method's basic simplicity makes it easier to understand the precise properties of a system, which is crucial if one is to know just how fault-tolerant the system is. [\[L.Lamport (1984)\]](http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.71.1078)
-
-Furthermore, and much to our surprise, it can be implemented using a mechanism that has existed in Bitcoin since day one. The Bitcoin feature is called nLocktime and it can be used to postdate transactions using block height instead of a timestamp. As a Bitcoin client, you'd use block height instead of a timestamp if you don't trust the network. Block height turns out to be an instance of what's being called a Verifiable Delay Function in cryptography circles. It's a cryptographically secure way to say time has passed. In Solana, we use a far more granular verifiable delay function, a SHA 256 hash chain, to checkpoint the ledger and coordinate consensus. With it, we implement Optimistic Concurrency Control and are now well en route towards that theoretical limit of 710,000 transactions per second.
-
-Architecture
-===
-
-Before you jump into the code, review the online book [Solana: Blockchain Rebuilt for Scale](https://docs.solana.com/book/).
-
-(The _latest_ development version of the online book is also [available here](https://docs.solana.com/book/v/master/).)
-
-Release Binaries
-===
-Official release binaries are available at [Github Releases](https://github.com/solana-labs/solana/releases).
-
-Additionally we provide pre-release binaries for the latest code on the edge and
-beta channels. Note that these pre-release binaries may be less stable than an
-official release.
-
-### Edge channel
-#### Linux (x86_64-unknown-linux-gnu)
-* [solana.tar.bz2](http://release.solana.com/edge/solana-release-x86_64-unknown-linux-gnu.tar.bz2)
-* [solana-install-init](http://release.solana.com/edge/solana-install-init-x86_64-unknown-linux-gnu) as a stand-alone executable
-#### mac OS (x86_64-apple-darwin)
-* [solana.tar.bz2](http://release.solana.com/edge/solana-release-x86_64-apple-darwin.tar.bz2)
-* [solana-install-init](http://release.solana.com/edge/solana-install-init-x86_64-apple-darwin) as a stand-alone executable
-#### Windows (x86_64-pc-windows-msvc)
-* [solana.tar.bz2](http://release.solana.com/edge/solana-release-x86_64-pc-windows-msvc.tar.bz2)
-* [solana-install-init.exe](http://release.solana.com/edge/solana-install-init-x86_64-pc-windows-msvc.exe) as a stand-alone executable
-#### All platforms
-* [solana-metrics.tar.bz2](http://release.solana.com.s3.amazonaws.com/edge/solana-metrics.tar.bz2)
-
-### Beta channel
-#### Linux (x86_64-unknown-linux-gnu)
-* [solana.tar.bz2](http://release.solana.com/beta/solana-release-x86_64-unknown-linux-gnu.tar.bz2)
-* [solana-install-init](http://release.solana.com/beta/solana-install-init-x86_64-unknown-linux-gnu) as a stand-alone executable
-#### mac OS (x86_64-apple-darwin)
-* [solana.tar.bz2](http://release.solana.com/beta/solana-release-x86_64-apple-darwin.tar.bz2)
-* [solana-install-init](http://release.solana.com/beta/solana-install-init-x86_64-apple-darwin) as a stand-alone executable
-#### Windows (x86_64-pc-windows-msvc)
-* [solana.tar.bz2](http://release.solana.com/beta/solana-release-x86_64-pc-windows-msvc.tar.bz2)
-* [solana-install-init.exe](http://release.solana.com/beta/solana-install-init-x86_64-pc-windows-msvc.exe) as a stand-alone executable
-#### All platforms
-* [solana-metrics.tar.bz2](http://release.solana.com.s3.amazonaws.com/beta/solana-metrics.tar.bz2)
-
-Developing
-===
-
-Building
----
-
-Install rustc, cargo and rustfmt:
-
+## **1. Install rustc, cargo and rustfmt.**
+
 ```bash
 $ curl https://sh.rustup.rs -sSf | sh
@@ -87,118 +28,43 @@ $ rustup update
 On Linux systems you may need to install libssl-dev, pkg-config, zlib1g-dev, etc. On Ubuntu:
 
 ```bash
-$ sudo apt-get install libssl-dev pkg-config zlib1g-dev llvm clang
+$ sudo apt-get update
+$ sudo apt-get install libssl-dev libudev-dev pkg-config zlib1g-dev llvm clang
 ```
 
-Download the source code:
+## **2. Download the source code.**
 
 ```bash
 $ git clone https://github.com/solana-labs/solana.git
 $ cd solana
 ```
 
-Build
+## **3. Build.**
 
 ```bash
 $ cargo build
 ```
 
-Then to run a minimal local cluster
+## **4. Run a minimal local cluster.**
 ```bash
 $ ./run.sh
 ```
 
-Testing
----
+# Testing
 
-Run the test suite:
+**Run the test suite:**
 
 ```bash
 $ cargo test
 ```
 
-Local Testnet
----
+### Starting a local testnet
+Start your own testnet locally, instructions are in the [online docs](https://docs.solana.com/bench-tps).
 
-Start your own testnet locally, instructions are in the book [Solana: Blockchain Rebuild for Scale: Getting Started](https://docs.solana.com/book/getting-started).
+### Accessing the remote testnet
+* `testnet` - public stable testnet accessible via devnet.solana.com. Runs 24/7
 
-Remote Testnets
----
-
-We maintain several testnets:
-
-* `testnet` - public stable testnet accessible via testnet.solana.com. Runs 24/7
-* `testnet-beta` - public beta channel testnet accessible via beta.testnet.solana.com. Runs 24/7
-* `testnet-edge` - public edge channel testnet accessible via edge.testnet.solana.com. Runs 24/7
-
-## Deploy process
-
-They are deployed with the `ci/testnet-manager.sh` script through a list of [scheduled
-buildkite jobs](https://buildkite.com/solana-labs/testnet-management/settings/schedules).
-Each testnet can be manually manipulated from buildkite as well.
-
-## How do I reset the testnet?
-Manually trigger the [testnet-management](https://buildkite.com/solana-labs/testnet-management) pipeline
-and when prompted select the desired testnet
-
-## How can I scale the tx generation rate?
-
-Increase the TX rate by increasing the number of cores on the client machine which is running
-`bench-tps` or run multiple clients. Decrease by lowering cores or using the rayon env
-variable `RAYON_NUM_THREADS=<xx>`
-
-## How can I test a change on the testnet?
-
-Currently, a merged PR is the only way to test a change on the testnet. But you
-can run your own testnet using the scripts in the `net/` directory.
-
-## Adjusting the number of clients or validators on the testnet
-Edit `ci/testnet-manager.sh`
-
-
-## Metrics Server Maintenance
-Sometimes the dashboard becomes unresponsive. This happens due to glitch in the metrics server.
-The current solution is to reset the metrics server. Use the following steps.
-
-1. The server is hosted in a GCP VM instance. Check if the VM instance is down by trying to SSH
-   into it from the GCP console. The name of the VM is ```metrics-solana-com```.
-2. If the VM is inaccessible, reset it from the GCP console.
-3. Once VM is up (or, was already up), the metrics services can be restarted from build automation.
-    1. Navigate to https://buildkite.com/solana-labs/metrics-dot-solana-dot-com in your web browser
-    2. Click on ```New Build```
-    3. This will show a pop up dialog. Click on ```options``` drop down.
-    4. Type in ```FORCE_START=true``` in ```Environment Variables``` text box.
-    5. Click ```Create Build```
-    6. This will restart the metrics services, and the dashboards should be accessible afterwards.
-
-## Debugging Testnet
-Testnet may exhibit different symptoms of failures. Primary statistics to check are
-1. Rise in Confirmation Time
-2. Nodes are not voting
-3. Panics, and OOM notifications
-
-Check the following if there are any signs of failure.
-1. Did testnet deployment fail?
-    1. View buildkite logs for the last deployment: https://buildkite.com/solana-labs/testnet-management
-    2. Use the relevant branch
-    3. If the deployment failed, look at the build logs. The build artifacts for each remote node is uploaded.
-       It's a good first step to triage from these logs.
-2. You may have to log into remote node if the deployment succeeded, but something failed during runtime.
-    1. Get the private key for the testnet deployment from ```metrics-solana-com``` GCP instance.
-    2. SSH into ```metrics-solana-com``` using GCP console and do the following.
-    ```bash
-    sudo bash
-    cd ~buildkite-agent/.ssh
-    ls
-    ```
-    3. Copy the relevant private key to your local machine
-    4. Find the public IP address of the AWS instance for the remote node using AWS console
-    5. ```ssh -i <private key file> ubuntu@<ip address of remote node>```
-    6. The logs are in ```~solana\solana``` folder
-
-
-Benchmarking
----
+# Benchmarking
 
 First install the nightly build of rustc. `cargo bench` requires use of the
 unstable features only available in the nightly build.
@@ -213,13 +79,11 @@ Run the benchmarks:
 $ cargo +nightly bench
 ```
 
-Release Process
----
+# Release Process
 The release process for this project is described [here](RELEASE.md).
 
-Code coverage
----
-
+# Code coverage
 To generate code coverage statistics:
@@ -228,7 +92,6 @@ $ scripts/coverage.sh
 $ open target/cov/lcov-local/index.html
 ```
 
-
 Why coverage? While most see coverage as a code quality metric, we see it primarily as a developer
 productivity metric. When a developer makes a change to the codebase, presumably it's a *solution* to
 some problem. Our unit-test suite is how we encode the set of *problems* the codebase solves. Running
@@ -240,3 +103,7 @@ problem is solved by this code?" On the other hand, if a test does fail and you
 better way to solve the same problem, a Pull Request with your solution would most certainly be
 welcome! Likewise, if rewriting a test can better communicate what code it's protecting, please
 send us that patch!
+
+# Disclaimer
+
+All claims, content, designs, algorithms, estimates, roadmaps, specifications, and performance measurements described in this project are done with the author's best effort. It is up to the reader to check and validate their accuracy and truthfulness. Furthermore nothing in this project constitutes a solicitation for investment.
RELEASE.md (27 changes)
@@ -138,30 +138,11 @@ There are three release channels that map to branches as follows:
 ### Update documentation
 TODO: Documentation update procedure is WIP as we move to gitbook
 
-Document the new recommended version by updating `book/src/running-archiver.md` and `book/src/validator-testnet.md` on the release (beta) branch to point at the `solana-install` for the upcoming release version.
+Document the new recommended version by updating `docs/src/running-archiver.md` and `docs/src/validator-testnet.md` on the release (beta) branch to point at the `solana-install` for the upcoming release version.
 
-#### Publish updated Book
-We maintain three copies of the "book" as official documentation:
-
-1) "Book" is the documentation for the latest official release. This should get manually updated whenever a new release is made. It is published here:
-https://solana-labs.github.io/book/
-
-2) "Book-edge" tracks the tip of the master branch and updates automatically.
-https://solana-labs.github.io/book-edge/
-
-3) "Book-beta" tracks the tip of the beta branch and updates automatically.
-https://solana-labs.github.io/book-beta/
-
-To manually trigger an update of the "Book", create a new job of the manual-update-book pipeline.
-Set the tag of the latest release as the PUBLISH_BOOK_TAG environment variable.
-```bash
-PUBLISH_BOOK_TAG=v0.16.6
-```
-https://buildkite.com/solana-labs/manual-update-book
-
-### Update software on testnet.solana.com
-
-The testnet running on testnet.solana.com is set to use a fixed release tag
+### Update software on devnet.solana.com
+
+The testnet running on devnet.solana.com is set to use a fixed release tag
 which is set in the Buildkite testnet-management pipeline.
 This tag needs to be updated and the testnet restarted after a new release
 tag is created.
@@ -201,4 +182,4 @@ TESTNET_OP=create-and-start
 ### Alert the community
 
 Notify Discord users on #validator-support that a new release for
-testnet.solana.com is available
+devnet.solana.com is available
accounts-bench/Cargo.toml (new file, 22 lines)
@@ -0,0 +1,22 @@
+[package]
+authors = ["Solana Maintainers <maintainers@solana.com>"]
+edition = "2018"
+name = "solana-accounts-bench"
+version = "1.2.0"
+repository = "https://github.com/solana-labs/solana"
+license = "Apache-2.0"
+homepage = "https://solana.com/"
+
+[dependencies]
+log = "0.4.6"
+rayon = "1.3.0"
+solana-logger = { path = "../logger", version = "1.2.0" }
+solana-runtime = { path = "../runtime", version = "1.2.0" }
+solana-measure = { path = "../measure", version = "1.2.0" }
+solana-sdk = { path = "../sdk", version = "1.2.0" }
+rand = "0.7.0"
+clap = "2.33.1"
+crossbeam-channel = "0.4"
+
+[package.metadata.docs.rs]
+targets = ["x86_64-unknown-linux-gnu"]
accounts-bench/src/main.rs (new file, 105 lines)
@@ -0,0 +1,105 @@
+use clap::{value_t, App, Arg};
+use rayon::prelude::*;
+use solana_measure::measure::Measure;
+use solana_runtime::{
+    accounts::{create_test_accounts, update_accounts, Accounts},
+    accounts_index::Ancestors,
+};
+use solana_sdk::pubkey::Pubkey;
+use std::fs;
+use std::path::PathBuf;
+
+fn main() {
+    solana_logger::setup();
+
+    let matches = App::new("crate")
+        .about("about")
+        .version("version")
+        .arg(
+            Arg::with_name("num_slots")
+                .long("num_slots")
+                .takes_value(true)
+                .value_name("SLOTS")
+                .help("Number of slots to store to."),
+        )
+        .arg(
+            Arg::with_name("num_accounts")
+                .long("num_accounts")
+                .takes_value(true)
+                .value_name("NUM_ACCOUNTS")
+                .help("Total number of accounts"),
+        )
+        .arg(
+            Arg::with_name("iterations")
+                .long("iterations")
+                .takes_value(true)
+                .value_name("ITERATIONS")
+                .help("Number of bench iterations"),
+        )
+        .arg(
+            Arg::with_name("clean")
+                .long("clean")
+                .takes_value(false)
+                .help("Run clean"),
+        )
+        .get_matches();
+
+    let num_slots = value_t!(matches, "num_slots", usize).unwrap_or(4);
+    let num_accounts = value_t!(matches, "num_accounts", usize).unwrap_or(10_000);
+    let iterations = value_t!(matches, "iterations", usize).unwrap_or(20);
+    let clean = matches.is_present("clean");
+    println!("clean: {:?}", clean);
+
+    let path = PathBuf::from("farf/accounts-bench");
+    if fs::remove_dir_all(path.clone()).is_err() {
+        println!("Warning: Couldn't remove {:?}", path);
+    }
+    let accounts = Accounts::new(vec![path]);
+    println!("Creating {} accounts", num_accounts);
+    let mut create_time = Measure::start("create accounts");
+    let pubkeys: Vec<_> = (0..num_slots)
+        .into_par_iter()
+        .map(|slot| {
+            let mut pubkeys: Vec<Pubkey> = vec![];
+            create_test_accounts(
+                &accounts,
+                &mut pubkeys,
+                num_accounts / num_slots,
+                slot as u64,
+            );
+            pubkeys
+        })
+        .collect();
+    let pubkeys: Vec<_> = pubkeys.into_iter().flatten().collect();
+    create_time.stop();
+    println!(
+        "created {} accounts in {} slots {}",
+        (num_accounts / num_slots) * num_slots,
+        num_slots,
+        create_time
+    );
+    let mut ancestors: Ancestors = vec![(0, 0)].into_iter().collect();
+    for i in 1..num_slots {
+        ancestors.insert(i as u64, i - 1);
+        accounts.add_root(i as u64);
+    }
+    for x in 0..iterations {
+        if clean {
+            let mut time = Measure::start("clean");
+            accounts.accounts_db.clean_accounts();
+            time.stop();
+            println!("{}", time);
+            for slot in 0..num_slots {
+                update_accounts(&accounts, &pubkeys, ((x + 1) * num_slots + slot) as u64);
+                accounts.add_root((x * num_slots + slot) as u64);
+            }
+        } else {
+            let mut pubkeys: Vec<Pubkey> = vec![];
+            let mut time = Measure::start("hash");
+            let hash = accounts.accounts_db.update_accounts_hash(0, &ancestors);
+            time.stop();
+            println!("hash: {} {}", hash, time);
+            create_test_accounts(&accounts, &mut pubkeys, 1, 0);
+        }
+    }
+}
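The whole benchmark reports through solana-measure's start()/stop()/Display flow. As a rough std-only sketch of that same flow (SimpleMeasure below is an illustrative stand-in, assuming only that the real Measure wraps an Instant and pretty-prints its elapsed time; it is not the solana-measure type):

use std::fmt;
use std::time::Instant;

// Illustrative stand-in; only the start()/stop()/Display pattern is
// taken from the benchmark above.
struct SimpleMeasure {
    name: &'static str,
    started: Instant,
    duration_us: u128,
}

impl SimpleMeasure {
    fn start(name: &'static str) -> Self {
        SimpleMeasure { name, started: Instant::now(), duration_us: 0 }
    }

    fn stop(&mut self) {
        self.duration_us = self.started.elapsed().as_micros();
    }
}

impl fmt::Display for SimpleMeasure {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{} took {}us", self.name, self.duration_us)
    }
}

fn main() {
    let mut create_time = SimpleMeasure::start("create accounts");
    let _work: Vec<u64> = (0..10_000).collect(); // stand-in workload
    create_time.stop();
    println!("{}", create_time);
}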
@@ -1,19 +0,0 @@
-[package]
-authors = ["Solana Maintainers <maintainers@solana.com>"]
-edition = "2018"
-name = "solana-archiver"
-version = "0.22.0"
-repository = "https://github.com/solana-labs/solana"
-license = "Apache-2.0"
-homepage = "https://solana.com/"
-
-[dependencies]
-clap = "2.33.0"
-console = "0.9.1"
-solana-clap-utils = { path = "../clap-utils", version = "0.22.0" }
-solana-core = { path = "../core", version = "0.22.0" }
-solana-logger = { path = "../logger", version = "0.22.0" }
-solana-metrics = { path = "../metrics", version = "0.22.0" }
-solana-net-utils = { path = "../net-utils", version = "0.22.0" }
-solana-sdk = { path = "../sdk", version = "0.22.0" }
-
@@ -1,147 +0,0 @@
-use clap::{crate_description, crate_name, App, Arg};
-use console::style;
-use solana_clap_utils::{
-    input_validators::is_keypair,
-    keypair::{
-        self, keypair_input, KeypairWithSource, ASK_SEED_PHRASE_ARG,
-        SKIP_SEED_PHRASE_VALIDATION_ARG,
-    },
-};
-use solana_core::{
-    archiver::Archiver,
-    cluster_info::{Node, VALIDATOR_PORT_RANGE},
-    contact_info::ContactInfo,
-};
-use solana_sdk::{commitment_config::CommitmentConfig, signature::KeypairUtil};
-use std::{net::SocketAddr, path::PathBuf, process::exit, sync::Arc};
-
-fn main() {
-    solana_logger::setup();
-
-    let matches = App::new(crate_name!())
-        .about(crate_description!())
-        .version(solana_clap_utils::version!())
-        .arg(
-            Arg::with_name("identity_keypair")
-                .short("i")
-                .long("identity-keypair")
-                .value_name("PATH")
-                .takes_value(true)
-                .validator(is_keypair)
-                .help("File containing an identity (keypair)"),
-        )
-        .arg(
-            Arg::with_name("entrypoint")
-                .short("n")
-                .long("entrypoint")
-                .value_name("HOST:PORT")
-                .takes_value(true)
-                .required(true)
-                .validator(solana_net_utils::is_host_port)
-                .help("Rendezvous with the cluster at this entry point"),
-        )
-        .arg(
-            Arg::with_name("ledger")
-                .short("l")
-                .long("ledger")
-                .value_name("DIR")
-                .takes_value(true)
-                .required(true)
-                .help("use DIR as persistent ledger location"),
-        )
-        .arg(
-            Arg::with_name("storage_keypair")
-                .short("s")
-                .long("storage-keypair")
-                .value_name("PATH")
-                .takes_value(true)
-                .validator(is_keypair)
-                .help("File containing the storage account keypair"),
-        )
-        .arg(
-            Arg::with_name(ASK_SEED_PHRASE_ARG.name)
-                .long(ASK_SEED_PHRASE_ARG.long)
-                .value_name("KEYPAIR NAME")
-                .multiple(true)
-                .takes_value(true)
-                .possible_values(&["identity-keypair", "storage-keypair"])
-                .help(ASK_SEED_PHRASE_ARG.help),
-        )
-        .arg(
-            Arg::with_name(SKIP_SEED_PHRASE_VALIDATION_ARG.name)
-                .long(SKIP_SEED_PHRASE_VALIDATION_ARG.long)
-                .requires(ASK_SEED_PHRASE_ARG.name)
-                .help(SKIP_SEED_PHRASE_VALIDATION_ARG.help),
-        )
-        .get_matches();
-
-    let ledger_path = PathBuf::from(matches.value_of("ledger").unwrap());
-
-    let identity_keypair = keypair_input(&matches, "identity_keypair")
-        .unwrap_or_else(|err| {
-            eprintln!("Identity keypair input failed: {}", err);
-            exit(1);
-        })
-        .keypair;
-    let KeypairWithSource {
-        keypair: storage_keypair,
-        source: storage_keypair_source,
-    } = keypair_input(&matches, "storage_keypair").unwrap_or_else(|err| {
-        eprintln!("Storage keypair input failed: {}", err);
-        exit(1);
-    });
-    if storage_keypair_source == keypair::Source::Generated {
-        clap::Error::with_description(
-            "The `storage-keypair` argument was not found",
-            clap::ErrorKind::ArgumentNotFound,
-        )
-        .exit();
-    }
-
-    let entrypoint_addr = matches
-        .value_of("entrypoint")
-        .map(|entrypoint| {
-            solana_net_utils::parse_host_port(entrypoint)
-                .expect("failed to parse entrypoint address")
-        })
-        .unwrap();
-
-    let gossip_addr = {
-        let ip = solana_net_utils::get_public_ip_addr(&entrypoint_addr).unwrap();
-        let mut addr = SocketAddr::new(ip, 0);
-        addr.set_ip(solana_net_utils::get_public_ip_addr(&entrypoint_addr).unwrap());
-        addr
-    };
-    let node = Node::new_archiver_with_external_ip(
-        &identity_keypair.pubkey(),
-        &gossip_addr,
-        VALIDATOR_PORT_RANGE,
-    );
-
-    println!(
-        "{} version {} (branch={}, commit={})",
-        style(crate_name!()).bold(),
-        solana_clap_utils::version!(),
-        option_env!("CI_BRANCH").unwrap_or("unknown"),
-        option_env!("CI_COMMIT").unwrap_or("unknown")
-    );
-    solana_metrics::set_host_id(identity_keypair.pubkey().to_string());
-    println!(
-        "replicating the data with identity_keypair={:?} gossip_addr={:?}",
-        identity_keypair.pubkey(),
-        gossip_addr
-    );
-
-    let entrypoint_info = ContactInfo::new_gossip_entry_point(&entrypoint_addr);
-    let archiver = Archiver::new(
-        &ledger_path,
-        node,
-        entrypoint_info,
-        Arc::new(identity_keypair),
-        Arc::new(storage_keypair),
-        CommitmentConfig::recent(),
-    )
-    .unwrap();
-
-    archiver.join();
-}
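Throughout the removed binary, fallible input is handled by reporting to stderr and exiting with a failure code rather than panicking. A small sketch of that unwrap_or_else/exit pattern, assuming any Result-returning input function (read_config below is hypothetical, standing in for keypair_input):

use std::process::exit;

// Hypothetical fallible input, standing in for keypair_input() above.
fn read_config(path: &str) -> Result<String, std::io::Error> {
    std::fs::read_to_string(path)
}

fn main() {
    // Same shape as the deleted binary's handling: print the error and
    // exit(1) instead of panicking, so callers get a clean failure code.
    let config = read_config("archiver.toml").unwrap_or_else(|err| {
        eprintln!("Config input failed: {}", err);
        exit(1);
    });
    println!("{}", config);
}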
@@ -2,19 +2,27 @@
 authors = ["Solana Maintainers <maintainers@solana.com>"]
 edition = "2018"
 name = "solana-banking-bench"
-version = "0.22.0"
+version = "1.2.0"
 repository = "https://github.com/solana-labs/solana"
 license = "Apache-2.0"
 homepage = "https://solana.com/"

 [dependencies]
+clap = "2.33.1"
+crossbeam-channel = "0.4"
 log = "0.4.6"
-rayon = "1.2.0"
-solana-core = { path = "../core", version = "0.22.0" }
-solana-ledger = { path = "../ledger", version = "0.22.0" }
-solana-logger = { path = "../logger", version = "0.22.0" }
-solana-runtime = { path = "../runtime", version = "0.22.0" }
-solana-measure = { path = "../measure", version = "0.22.0" }
-solana-sdk = { path = "../sdk", version = "0.22.0" }
-rand = "0.6.5"
-crossbeam-channel = "0.3"
+rand = "0.7.0"
+rayon = "1.3.0"
+solana-core = { path = "../core", version = "1.2.0" }
+solana-clap-utils = { path = "../clap-utils", version = "1.2.0" }
+solana-streamer = { path = "../streamer", version = "1.2.0" }
+solana-perf = { path = "../perf", version = "1.2.0" }
+solana-ledger = { path = "../ledger", version = "1.2.0" }
+solana-logger = { path = "../logger", version = "1.2.0" }
+solana-runtime = { path = "../runtime", version = "1.2.0" }
+solana-measure = { path = "../measure", version = "1.2.0" }
+solana-sdk = { path = "../sdk", version = "1.2.0" }
+solana-version = { path = "../version", version = "1.2.0" }
+
+[package.metadata.docs.rs]
+targets = ["x86_64-unknown-linux-gnu"]
@@ -1,30 +1,38 @@
+use clap::{crate_description, crate_name, value_t, App, Arg};
 use crossbeam_channel::unbounded;
 use log::*;
 use rand::{thread_rng, Rng};
 use rayon::prelude::*;
-use solana_core::banking_stage::{create_test_recorder, BankingStage};
-use solana_core::cluster_info::ClusterInfo;
-use solana_core::cluster_info::Node;
-use solana_core::genesis_utils::{create_genesis_config, GenesisConfigInfo};
-use solana_core::packet::to_packets_chunked;
-use solana_core::poh_recorder::PohRecorder;
-use solana_core::poh_recorder::WorkingBankEntry;
-use solana_ledger::bank_forks::BankForks;
-use solana_ledger::{blocktree::Blocktree, get_tmp_ledger_path};
+use solana_core::{
+    banking_stage::{create_test_recorder, BankingStage},
+    cluster_info::ClusterInfo,
+    cluster_info::Node,
+    poh_recorder::PohRecorder,
+    poh_recorder::WorkingBankEntry,
+};
+use solana_ledger::{
+    bank_forks::BankForks,
+    blockstore::Blockstore,
+    genesis_utils::{create_genesis_config, GenesisConfigInfo},
+    get_tmp_ledger_path,
+};
 use solana_measure::measure::Measure;
+use solana_perf::packet::to_packets_chunked;
 use solana_runtime::bank::Bank;
-use solana_sdk::hash::Hash;
-use solana_sdk::pubkey::Pubkey;
-use solana_sdk::signature::Keypair;
-use solana_sdk::signature::Signature;
-use solana_sdk::system_transaction;
-use solana_sdk::timing::{duration_as_us, timestamp};
-use solana_sdk::transaction::Transaction;
-use std::sync::atomic::Ordering;
-use std::sync::mpsc::Receiver;
-use std::sync::{Arc, Mutex, RwLock};
-use std::thread::sleep;
-use std::time::{Duration, Instant};
+use solana_sdk::{
+    hash::Hash,
+    pubkey::Pubkey,
+    signature::Keypair,
+    signature::Signature,
+    system_transaction,
+    timing::{duration_as_us, timestamp},
+    transaction::Transaction,
+};
+use std::{
+    sync::{atomic::Ordering, mpsc::Receiver, Arc, Mutex},
+    thread::sleep,
+    time::{Duration, Instant},
+};

 fn check_txs(
     receiver: &Arc<Receiver<WorkingBankEntry>>,
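The use-declaration rewrite above collapses one-declaration-per-item imports into nested trees, one per crate root, without changing what is imported. A sketch of the two equivalent forms on std paths:

// Flat form, one `use` per item (the old style):
use std::sync::Arc;
use std::sync::Mutex;

// Nested form, grouped under a shared prefix (the new style):
use std::time::{Duration, Instant};

fn main() {
    let counter = Arc::new(Mutex::new(0u64));
    let start = Instant::now();
    *counter.lock().unwrap() += 1;
    assert!(start.elapsed() < Duration::from_secs(1));
    println!("counter = {}", counter.lock().unwrap());
}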
@@ -57,15 +65,22 @@ fn check_txs(
     no_bank
 }

-fn make_accounts_txs(txes: usize, mint_keypair: &Keypair, hash: Hash) -> Vec<Transaction> {
+fn make_accounts_txs(
+    total_num_transactions: usize,
+    hash: Hash,
+    same_payer: bool,
+) -> Vec<Transaction> {
     let to_pubkey = Pubkey::new_rand();
-    let dummy = system_transaction::transfer(mint_keypair, &to_pubkey, 1, hash);
-    (0..txes)
+    let payer_key = Keypair::new();
+    let dummy = system_transaction::transfer(&payer_key, &to_pubkey, 1, hash);
+    (0..total_num_transactions)
         .into_par_iter()
         .map(|_| {
             let mut new = dummy.clone();
             let sig: Vec<u8> = (0..64).map(|_| thread_rng().gen()).collect();
+            if !same_payer {
                 new.message.account_keys[0] = Pubkey::new_rand();
+            }
             new.message.account_keys[1] = Pubkey::new_rand();
             new.signatures = vec![Signature::new(&sig[0..64])];
             new
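make_accounts_txs now clones one template transfer and randomizes per-copy fields, keeping the payer fixed only when same_payer is set. A self-contained sketch of that clone-and-randomize shape, with plain byte arrays standing in for Pubkey and Signature (FakeTx is illustrative, not a Solana type):

use rand::{thread_rng, Rng};

#[derive(Clone)]
struct FakeTx {
    payer: [u8; 32],
    sig: [u8; 64],
}

fn make_txs(total: usize, same_payer: bool) -> Vec<FakeTx> {
    let template = FakeTx { payer: [0; 32], sig: [0; 64] };
    (0..total)
        .map(|_| {
            let mut tx = template.clone();
            // Every copy gets a fresh random signature, as above.
            thread_rng().fill(&mut tx.sig[..]);
            // Unless same_payer is set, each copy also gets its own payer,
            // so the bank sees distinct fee-payer accounts under load.
            if !same_payer {
                thread_rng().fill(&mut tx.payer[..]);
            }
            tx
        })
        .collect()
}

fn main() {
    let txs = make_txs(4, false);
    assert_eq!(txs.len(), 4);
}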
@@ -89,13 +104,61 @@ fn bytes_as_usize(bytes: &[u8]) -> usize {
     bytes[0] as usize | (bytes[1] as usize) << 8
 }

+#[allow(clippy::cognitive_complexity)]
 fn main() {
     solana_logger::setup();
-    let num_threads = BankingStage::num_threads() as usize;
+
+    let matches = App::new(crate_name!())
+        .about(crate_description!())
+        .version(solana_version::version!())
+        .arg(
+            Arg::with_name("num_chunks")
+                .long("num-chunks")
+                .takes_value(true)
+                .value_name("SIZE")
+                .help("Number of transaction chunks."),
+        )
+        .arg(
+            Arg::with_name("packets_per_chunk")
+                .long("packets-per-chunk")
+                .takes_value(true)
+                .value_name("SIZE")
+                .help("Packets per chunk"),
+        )
+        .arg(
+            Arg::with_name("skip_sanity")
+                .long("skip-sanity")
+                .takes_value(false)
+                .help("Skip transaction sanity execution"),
+        )
+        .arg(
+            Arg::with_name("same_payer")
+                .long("same-payer")
+                .takes_value(false)
+                .help("Use the same payer for transfers"),
+        )
+        .arg(
+            Arg::with_name("iterations")
+                .long("iterations")
+                .takes_value(true)
+                .help("Number of iterations"),
+        )
+        .arg(
+            Arg::with_name("num_threads")
+                .long("num-threads")
+                .takes_value(true)
+                .help("Number of iterations"),
+        )
+        .get_matches();
+
+    let num_threads =
+        value_t!(matches, "num_threads", usize).unwrap_or(BankingStage::num_threads() as usize);
     // a multiple of packet chunk duplicates to avoid races
-    const CHUNKS: usize = 8 * 2;
-    const PACKETS_PER_BATCH: usize = 192;
-    let txes = PACKETS_PER_BATCH * num_threads * CHUNKS;
+    let num_chunks = value_t!(matches, "num_chunks", usize).unwrap_or(16);
+    let packets_per_chunk = value_t!(matches, "packets_per_chunk", usize).unwrap_or(192);
+    let iterations = value_t!(matches, "iterations", usize).unwrap_or(1000);
+
+    let total_num_transactions = num_chunks * num_threads * packets_per_chunk;
     let mint_total = 1_000_000_000_000;
     let GenesisConfigInfo {
         genesis_config,
@@ -109,43 +172,53 @@ fn main() {
     let mut bank_forks = BankForks::new(0, bank0);
     let mut bank = bank_forks.working_bank();

-    info!("threads: {} txs: {}", num_threads, txes);
+    info!("threads: {} txs: {}", num_threads, total_num_transactions);

-    let mut transactions = make_accounts_txs(txes, &mint_keypair, genesis_config.hash());
+    let same_payer = matches.is_present("same_payer");
+    let mut transactions =
+        make_accounts_txs(total_num_transactions, genesis_config.hash(), same_payer);

     // fund all the accounts
     transactions.iter().for_each(|tx| {
-        let fund = system_transaction::transfer(
+        let mut fund = system_transaction::transfer(
             &mint_keypair,
             &tx.message.account_keys[0],
-            mint_total / txes as u64,
+            mint_total / total_num_transactions as u64,
             genesis_config.hash(),
         );
+        // Ignore any pesky duplicate signature errors in the case we are using single-payer
+        let sig: Vec<u8> = (0..64).map(|_| thread_rng().gen()).collect();
+        fund.signatures = vec![Signature::new(&sig[0..64])];
         let x = bank.process_transaction(&fund);
         x.unwrap();
     });

+    let skip_sanity = matches.is_present("skip_sanity");
+    if !skip_sanity {
         //sanity check, make sure all the transactions can execute sequentially
         transactions.iter().for_each(|tx| {
             let res = bank.process_transaction(&tx);
-            assert!(res.is_ok(), "sanity test transactions");
+            assert!(res.is_ok(), "sanity test transactions error: {:?}", res);
         });
         bank.clear_signatures();
         //sanity check, make sure all the transactions can execute in parallel
         let res = bank.process_transactions(&transactions);
         for r in res {
-            assert!(r.is_ok(), "sanity parallel execution");
+            assert!(r.is_ok(), "sanity parallel execution error: {:?}", r);
         }
         bank.clear_signatures();
-    let mut verified: Vec<_> = to_packets_chunked(&transactions.clone(), PACKETS_PER_BATCH);
+    }
+
+    let mut verified: Vec<_> = to_packets_chunked(&transactions.clone(), packets_per_chunk);
     let ledger_path = get_tmp_ledger_path!();
     {
-        let blocktree = Arc::new(
-            Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger"),
+        let blockstore = Arc::new(
+            Blockstore::open(&ledger_path).expect("Expected to be able to open database ledger"),
         );
         let (exit, poh_recorder, poh_service, signal_receiver) =
-            create_test_recorder(&bank, &blocktree, None);
+            create_test_recorder(&bank, &blockstore, None);
         let cluster_info = ClusterInfo::new_with_invalid_keypair(Node::new_localhost().info);
-        let cluster_info = Arc::new(RwLock::new(cluster_info));
+        let cluster_info = Arc::new(cluster_info);
         let banking_stage = BankingStage::new(
             &cluster_info,
             &poh_recorder,
@@ -155,25 +228,26 @@ fn main() {
         );
         poh_recorder.lock().unwrap().set_bank(&bank);

-        let chunk_len = verified.len() / CHUNKS;
+        let chunk_len = verified.len() / num_chunks;
         let mut start = 0;

         // This is so that the signal_receiver does not go out of scope after the closure.
         // If it is dropped before poh_service, then poh_service will error when
         // calling send() on the channel.
         let signal_receiver = Arc::new(signal_receiver);
-        let mut total = 0;
-        let mut tx_total = 0;
+        let mut total_us = 0;
+        let mut tx_total_us = 0;
+        let base_tx_count = bank.transaction_count();
         let mut txs_processed = 0;
         let mut root = 1;
         let collector = Pubkey::new_rand();
-        const ITERS: usize = 1_000;
         let config = Config {
-            packets_per_batch: PACKETS_PER_BATCH,
+            packets_per_batch: packets_per_chunk,
             chunk_len,
             num_threads,
         };
-        for _ in 0..ITERS {
+        let mut total_sent = 0;
+        for _ in 0..iterations {
             let now = Instant::now();
             let mut sent = 0;

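The send loop below advances `start` one chunk_len at a time over `verified` and wraps with a modulus, so packet chunks are reused round-robin across iterations. A tiny sketch of that cycling on a plain vector:

fn main() {
    let verified: Vec<u32> = (0..8).collect();
    let num_chunks = 4;
    let chunk_len = verified.len() / num_chunks; // 2 items per chunk

    let mut start = 0;
    for _ in 0..6 {
        let chunk = &verified[start..start + chunk_len];
        println!("sending {:?}", chunk);
        start += chunk_len;
        start %= verified.len(); // wrap back around to the first chunk
    }
}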
@@ -214,7 +288,11 @@ fn main() {
                 sleep(Duration::from_millis(5));
             }
         }
-        if check_txs(&signal_receiver, txes / CHUNKS, &poh_recorder) {
+        if check_txs(
+            &signal_receiver,
+            total_num_transactions / num_chunks,
+            &poh_recorder,
+        ) {
             debug!(
                 "resetting bank {} tx count: {} txs_proc: {}",
                 bank.slot(),
@@ -223,7 +301,7 @@ fn main() {
             );
             assert!(txs_processed < bank.transaction_count());
             txs_processed = bank.transaction_count();
-            tx_total += duration_as_us(&now.elapsed());
+            tx_total_us += duration_as_us(&now.elapsed());

             let mut poh_time = Measure::start("poh_time");
             poh_recorder.lock().unwrap().reset(
@@ -245,7 +323,7 @@ fn main() {
             poh_recorder.lock().unwrap().set_bank(&bank);
             assert!(poh_recorder.lock().unwrap().bank().is_some());
             if bank.slot() > 32 {
-                bank_forks.set_root(root, &None);
+                bank_forks.set_root(root, &None, None);
                 root += 1;
             }
             debug!(
@@ -255,20 +333,21 @@ fn main() {
                 poh_time.as_us(),
             );
         } else {
-            tx_total += duration_as_us(&now.elapsed());
+            tx_total_us += duration_as_us(&now.elapsed());
         }

        // This signature clear may not actually clear the signatures
        // in this chunk, but since we rotate between CHUNKS then
        // we should clear them by the time we come around again to re-use that chunk.
        bank.clear_signatures();
-        total += duration_as_us(&now.elapsed());
+        total_us += duration_as_us(&now.elapsed());
        debug!(
            "time: {} us checked: {} sent: {}",
            duration_as_us(&now.elapsed()),
-            txes / CHUNKS,
+            total_num_transactions / num_chunks,
            sent,
        );
+        total_sent += sent;

        if bank.slot() > 0 && bank.slot() % 16 == 0 {
            for tx in transactions.iter_mut() {
@@ -276,19 +355,25 @@ fn main() {
                let sig: Vec<u8> = (0..64).map(|_| thread_rng().gen()).collect();
                tx.signatures[0] = Signature::new(&sig[0..64]);
            }
-            verified = to_packets_chunked(&transactions.clone(), PACKETS_PER_BATCH);
+            verified = to_packets_chunked(&transactions.clone(), packets_per_chunk);
        }

        start += chunk_len;
        start %= verified.len();
    }
+    let txs_processed = bank_forks.working_bank().transaction_count();
+    debug!("processed: {} base: {}", txs_processed, base_tx_count);
    eprintln!(
-        "{{'name': 'banking_bench_total', 'median': '{}'}}",
-        total / ITERS as u64,
+        "{{'name': 'banking_bench_total', 'median': '{:.2}'}}",
+        (1000.0 * 1000.0 * total_sent as f64) / (total_us as f64),
    );
    eprintln!(
-        "{{'name': 'banking_bench_tx_total', 'median': '{}'}}",
-        tx_total / ITERS as u64,
+        "{{'name': 'banking_bench_tx_total', 'median': '{:.2}'}}",
+        (1000.0 * 1000.0 * total_sent as f64) / (tx_total_us as f64),
+    );
+    eprintln!(
+        "{{'name': 'banking_bench_success_tx_total', 'median': '{:.2}'}}",
+        (1000.0 * 1000.0 * (txs_processed - base_tx_count) as f64) / (total_us as f64),
    );

    drop(verified_sender);
@@ -300,5 +385,5 @@ fn main() {
         sleep(Duration::from_secs(1));
         debug!("waited for poh_service");
     }
-    let _unused = Blocktree::destroy(&ledger_path);
+    let _unused = Blockstore::destroy(&ledger_path);
 }
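The reported 'median' values change meaning here: instead of mean elapsed microseconds per iteration, the bench now prints a throughput, total transactions over total elapsed microseconds scaled to seconds. A worked example of that formula:

fn tps(total_sent: u64, total_us: u64) -> f64 {
    // transactions per second = transactions / (microseconds / 1_000_000)
    (1000.0 * 1000.0 * total_sent as f64) / (total_us as f64)
}

fn main() {
    // 24_576 transactions sent over 2.5 seconds comes out to 9830.4 TPS.
    let v = tps(24_576, 2_500_000);
    assert!((v - 9830.4).abs() < 1e-6);
    println!("{{'name': 'banking_bench_total', 'median': '{:.2}'}}", v);
}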
@@ -2,40 +2,37 @@
 authors = ["Solana Maintainers <maintainers@solana.com>"]
 edition = "2018"
 name = "solana-bench-exchange"
-version = "0.22.0"
+version = "1.2.0"
 repository = "https://github.com/solana-labs/solana"
 license = "Apache-2.0"
 homepage = "https://solana.com/"
 publish = false

 [dependencies]
-bincode = "1.2.1"
-bs58 = "0.3.0"
-clap = "2.32.0"
-env_logger = "0.7.1"
-itertools = "0.8.2"
+clap = "2.33.1"
+itertools = "0.9.0"
 log = "0.4.8"
 num-derive = "0.3"
 num-traits = "0.2"
-rand = "0.6.5"
-rayon = "1.2.0"
-serde = "1.0.104"
-serde_derive = "1.0.103"
-serde_json = "1.0.44"
-serde_yaml = "0.8.11"
-solana-clap-utils = { path = "../clap-utils", version = "0.22.0" }
-solana-core = { path = "../core", version = "0.22.0" }
-solana-genesis = { path = "../genesis", version = "0.22.0" }
-solana-client = { path = "../client", version = "0.22.0" }
-solana-faucet = { path = "../faucet", version = "0.22.0" }
-solana-exchange-program = { path = "../programs/exchange", version = "0.22.0" }
-solana-logger = { path = "../logger", version = "0.22.0" }
-solana-metrics = { path = "../metrics", version = "0.22.0" }
-solana-net-utils = { path = "../net-utils", version = "0.22.0" }
-solana-runtime = { path = "../runtime", version = "0.22.0" }
-solana-sdk = { path = "../sdk", version = "0.22.0" }
-untrusted = "0.7.0"
-ws = "0.9.1"
+rand = "0.7.0"
+rayon = "1.3.0"
+serde_json = "1.0.53"
+serde_yaml = "0.8.12"
+solana-clap-utils = { path = "../clap-utils", version = "1.2.0" }
+solana-core = { path = "../core", version = "1.2.0" }
+solana-genesis = { path = "../genesis", version = "1.2.0" }
+solana-client = { path = "../client", version = "1.2.0" }
+solana-faucet = { path = "../faucet", version = "1.2.0" }
+solana-exchange-program = { path = "../programs/exchange", version = "1.2.0" }
+solana-logger = { path = "../logger", version = "1.2.0" }
+solana-metrics = { path = "../metrics", version = "1.2.0" }
+solana-net-utils = { path = "../net-utils", version = "1.2.0" }
+solana-runtime = { path = "../runtime", version = "1.2.0" }
+solana-sdk = { path = "../sdk", version = "1.2.0" }
+solana-version = { path = "../version", version = "1.2.0" }

 [dev-dependencies]
-solana-local-cluster = { path = "../local-cluster", version = "0.22.0" }
+solana-local-cluster = { path = "../local-cluster", version = "1.2.0" }
+
+[package.metadata.docs.rs]
+targets = ["x86_64-unknown-linux-gnu"]
@@ -15,7 +15,7 @@ use solana_sdk::{
     client::{Client, SyncClient},
     commitment_config::CommitmentConfig,
     pubkey::Pubkey,
-    signature::{Keypair, KeypairUtil},
+    signature::{Keypair, Signer},
     timing::{duration_as_ms, duration_as_s},
     transaction::Transaction,
     {system_instruction, system_program},
@@ -459,7 +459,7 @@ fn swapper<T>(
     let owner = &signer.pubkey();
     Transaction::new_signed_instructions(
         &[s],
-        vec![exchange_instruction::swap_request(
+        &[exchange_instruction::swap_request(
             owner,
             &swap.0.pubkey,
             &swap.1.pubkey,
@@ -590,7 +590,7 @@ fn trader<T>(
     let space = mem::size_of::<ExchangeState>() as u64;
     Transaction::new_signed_instructions(
         &[owner.as_ref(), trade],
-        vec![
+        &[
             system_instruction::create_account(
                 owner_pubkey,
                 trade_pubkey,
@@ -701,7 +701,7 @@ fn verify_funding_transfer<T: SyncClient + ?Sized>(
     false
 }

-pub fn fund_keys(client: &dyn Client, source: &Keypair, dests: &[Arc<Keypair>], lamports: u64) {
+pub fn fund_keys<T: Client>(client: &T, source: &Keypair, dests: &[Arc<Keypair>], lamports: u64) {
     let total = lamports * (dests.len() as u64 + 1);
     let mut funded: Vec<(&Keypair, u64)> = vec![(source, total)];
     let mut notfunded: Vec<&Arc<Keypair>> = dests.iter().collect();
@@ -749,7 +749,7 @@ pub fn fund_keys(client: &dyn Client, source: &Keypair, dests: &[Arc<Keypair>],
             .map(|(k, m)| {
                 (
                     k.clone(),
-                    Transaction::new_unsigned_instructions(system_instruction::transfer_many(
+                    Transaction::new_unsigned_instructions(&system_instruction::transfer_many(
                         &k.pubkey(),
                         &m,
                     )),
@@ -760,9 +760,10 @@ pub fn fund_keys(client: &dyn Client, source: &Keypair, dests: &[Arc<Keypair>],
         let mut retries = 0;
         let amount = chunk[0].1[0].1;
         while !to_fund_txs.is_empty() {
-            let receivers = to_fund_txs
+            let receivers: usize = to_fund_txs
                 .iter()
-                .fold(0, |len, (_, tx)| len + tx.message().instructions.len());
+                .map(|(_, tx)| tx.message().instructions.len())
+                .sum();

             debug!(
                 " {} to {} in {} txs",
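The receiver-counting fold above becomes a map over per-transaction instruction counts followed by sum, with an explicit `usize` annotation telling `sum()` what to produce. Both forms are equivalent, as this sketch checks:

fn main() {
    // Stand-in for per-transaction instruction lists.
    let instructions_per_tx = vec![vec![1, 2], vec![3], vec![4, 5, 6]];

    // Old shape: fold the lengths by hand.
    let receivers_fold = instructions_per_tx
        .iter()
        .fold(0, |len, ixs| len + ixs.len());

    // New shape: map to lengths, then sum; the `usize` annotation fixes
    // the output type that `sum()` collects into.
    let receivers_sum: usize = instructions_per_tx.iter().map(|ixs| ixs.len()).sum();

    assert_eq!(receivers_fold, receivers_sum);
    println!("{} receivers", receivers_sum);
}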
@@ -824,7 +825,11 @@ pub fn fund_keys(client: &dyn Client, source: &Keypair, dests: &[Arc<Keypair>],
         }
     }

-pub fn create_token_accounts(client: &dyn Client, signers: &[Arc<Keypair>], accounts: &[Keypair]) {
+pub fn create_token_accounts<T: Client>(
+    client: &T,
+    signers: &[Arc<Keypair>],
+    accounts: &[Keypair],
+) {
     let mut notfunded: Vec<(&Arc<Keypair>, &Keypair)> = signers.iter().zip(accounts).collect();

     while !notfunded.is_empty() {
@@ -845,14 +850,15 @@ pub fn create_token_accounts(client: &dyn Client, signers: &[Arc<Keypair>], accounts: &[Keypair]) {
                     exchange_instruction::account_request(owner_pubkey, &new_keypair.pubkey());
                 (
                     (from_keypair, new_keypair),
-                    Transaction::new_unsigned_instructions(vec![create_ix, request_ix]),
+                    Transaction::new_unsigned_instructions(&[create_ix, request_ix]),
                 )
             })
             .collect();

-        let accounts = to_create_txs
+        let accounts: usize = to_create_txs
             .iter()
-            .fold(0, |len, (_, tx)| len + tx.message().instructions.len() / 2);
+            .map(|(_, tx)| tx.message().instructions.len() / 2)
+            .sum();

         debug!(
             " Creating {} accounts in {} txs",
@@ -968,7 +974,12 @@ fn generate_keypairs(num: u64) -> Vec<Keypair> {
     rnd.gen_n_keypairs(num)
 }

-pub fn airdrop_lamports(client: &dyn Client, faucet_addr: &SocketAddr, id: &Keypair, amount: u64) {
+pub fn airdrop_lamports<T: Client>(
+    client: &T,
+    faucet_addr: &SocketAddr,
+    id: &Keypair,
+    amount: u64,
+) {
     let balance = client.get_balance_with_commitment(&id.pubkey(), CommitmentConfig::recent());
     let balance = balance.unwrap_or(0);
     if balance >= amount {
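fund_keys, create_token_accounts, and airdrop_lamports all move from `&dyn Client` trait objects to a generic `<T: Client>` parameter, trading dynamic dispatch for calls monomorphized at compile time. A minimal sketch of the two signatures side by side (the Client trait here is a stub for illustration, not the solana-sdk trait):

trait Client {
    fn balance(&self) -> u64;
}

struct MockClient;

impl Client for MockClient {
    fn balance(&self) -> u64 {
        42
    }
}

// Old shape: dynamic dispatch through a trait object.
fn balance_dyn(client: &dyn Client) -> u64 {
    client.balance()
}

// New shape: a generic parameter, resolved statically per call site.
fn balance_generic<T: Client>(client: &T) -> u64 {
    client.balance()
}

fn main() {
    let client = MockClient;
    assert_eq!(balance_dyn(&client), balance_generic(&client));
}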
@@ -1,7 +1,7 @@
 use clap::{crate_description, crate_name, value_t, App, Arg, ArgMatches};
 use solana_core::gen_keys::GenKeys;
 use solana_faucet::faucet::FAUCET_PORT;
-use solana_sdk::signature::{read_keypair_file, Keypair, KeypairUtil};
+use solana_sdk::signature::{read_keypair_file, Keypair};
 use std::net::SocketAddr;
 use std::process::exit;
 use std::time::Duration;
@@ -5,13 +5,13 @@ pub mod order_book;
 use crate::bench::{airdrop_lamports, create_client_accounts_file, do_bench_exchange, Config};
 use log::*;
 use solana_core::gossip_service::{discover_cluster, get_multi_client};
-use solana_sdk::signature::KeypairUtil;
+use solana_sdk::signature::Signer;

 fn main() {
     solana_logger::setup();
     solana_metrics::set_panic_hook("bench-exchange");

-    let matches = cli::build_args(solana_clap_utils::version!()).get_matches();
+    let matches = cli::build_args(solana_version::version!()).get_matches();
     let cli_config = cli::extract_args(&matches);

     let cli::Config {
@@ -54,8 +54,7 @@ fn main() {
         );
     } else {
         info!("Connecting to the cluster");
-        let (nodes, _archivers) =
-            discover_cluster(&entrypoint_addr, num_nodes).unwrap_or_else(|_| {
+        let nodes = discover_cluster(&entrypoint_addr, num_nodes).unwrap_or_else(|_| {
             panic!("Failed to discover nodes");
         });

@@ -10,12 +10,13 @@ use solana_local_cluster::local_cluster::{ClusterConfig, LocalCluster};
 use solana_runtime::bank::Bank;
 use solana_runtime::bank_client::BankClient;
 use solana_sdk::genesis_config::create_genesis_config;
-use solana_sdk::signature::{Keypair, KeypairUtil};
+use solana_sdk::signature::{Keypair, Signer};
 use std::process::exit;
 use std::sync::mpsc::channel;
 use std::time::Duration;

 #[test]
+#[ignore]
 fn test_exchange_local_cluster() {
     solana_logger::setup();

@@ -58,7 +59,7 @@ fn test_exchange_local_cluster() {
     let faucet_addr = addr_receiver.recv_timeout(Duration::from_secs(2)).unwrap();

     info!("Connecting to the cluster");
-    let (nodes, _) =
+    let nodes =
         discover_cluster(&cluster.entry_point_info.gossip, NUM_NODES).unwrap_or_else(|err| {
             error!("Failed to discover {} nodes: {:?}", NUM_NODES, err);
             exit(1);
@@ -85,7 +86,7 @@ fn test_exchange_bank_client() {
     solana_logger::setup();
     let (genesis_config, identity) = create_genesis_config(100_000_000_000_000);
     let mut bank = Bank::new(&genesis_config);
-    bank.add_instruction_processor(id(), process_instruction);
+    bank.add_builtin_program("exchange_program", id(), process_instruction);
     let clients = vec![BankClient::new(bank)];

     let mut config = Config::default();
@@ -2,14 +2,18 @@
 authors = ["Solana Maintainers <maintainers@solana.com>"]
 edition = "2018"
 name = "solana-bench-streamer"
-version = "0.22.0"
+version = "1.2.0"
 repository = "https://github.com/solana-labs/solana"
 license = "Apache-2.0"
 homepage = "https://solana.com/"

 [dependencies]
-clap = "2.33.0"
-solana-clap-utils = { path = "../clap-utils", version = "0.22.0" }
-solana-core = { path = "../core", version = "0.22.0" }
-solana-logger = { path = "../logger", version = "0.22.0" }
-solana-net-utils = { path = "../net-utils", version = "0.22.0" }
+clap = "2.33.1"
+solana-clap-utils = { path = "../clap-utils", version = "1.2.0" }
+solana-streamer = { path = "../streamer", version = "1.2.0" }
+solana-logger = { path = "../logger", version = "1.2.0" }
+solana-net-utils = { path = "../net-utils", version = "1.2.0" }
+solana-version = { path = "../version", version = "1.2.0" }
+
+[package.metadata.docs.rs]
+targets = ["x86_64-unknown-linux-gnu"]
@@ -1,14 +1,13 @@
 use clap::{crate_description, crate_name, App, Arg};
-use solana_core::packet::{Packet, Packets, PacketsRecycler, PACKET_DATA_SIZE};
-use solana_core::result::Result;
-use solana_core::streamer::{receiver, PacketReceiver};
+use solana_streamer::packet::{Packet, Packets, PacketsRecycler, PACKET_DATA_SIZE};
+use solana_streamer::streamer::{receiver, PacketReceiver};
 use std::cmp::max;
 use std::net::{IpAddr, Ipv4Addr, SocketAddr, UdpSocket};
 use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
 use std::sync::mpsc::channel;
 use std::sync::Arc;
 use std::thread::sleep;
-use std::thread::{spawn, JoinHandle};
+use std::thread::{spawn, JoinHandle, Result};
 use std::time::Duration;
 use std::time::SystemTime;
@@ -53,7 +52,7 @@ fn main() -> Result<()> {

     let matches = App::new(crate_name!())
         .about(crate_description!())
-        .version(solana_clap_utils::version!())
+        .version(solana_version::version!())
         .arg(
             Arg::with_name("num-recv-sockets")
                 .long("num-recv-sockets")
@@ -68,7 +67,8 @@ fn main() -> Result<()> {
     }

     let mut port = 0;
-    let mut addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), 0);
+    let ip_addr = IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0));
+    let mut addr = SocketAddr::new(ip_addr, 0);

     let exit = Arc::new(AtomicBool::new(false));

@@ -76,7 +76,7 @@ fn main() -> Result<()> {
     let mut read_threads = Vec::new();
     let recycler = PacketsRecycler::default();
     for _ in 0..num_sockets {
-        let read = solana_net_utils::bind_to(port, false).unwrap();
+        let read = solana_net_utils::bind_to(ip_addr, port, false).unwrap();
         read.set_read_timeout(Some(Duration::new(1, 0))).unwrap();

         addr = read.local_addr().unwrap();
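bind_to now takes the IP explicitly instead of implying one from the port alone. With std sockets the analogous change is building the SocketAddr from an (ip, port) pair; a sketch (port 0 lets the OS choose a free port):

use std::net::{IpAddr, Ipv4Addr, SocketAddr, UdpSocket};
use std::time::Duration;

fn main() -> std::io::Result<()> {
    // The IP is an explicit input rather than implied elsewhere.
    let ip_addr = IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0));
    let read = UdpSocket::bind(SocketAddr::new(ip_addr, 0))?; // port 0: OS picks one
    read.set_read_timeout(Some(Duration::new(1, 0)))?;

    let addr = read.local_addr()?;
    println!("listening on {}", addr);
    Ok(())
}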
@@ -2,38 +2,40 @@
 authors = ["Solana Maintainers <maintainers@solana.com>"]
 edition = "2018"
 name = "solana-bench-tps"
-version = "0.22.0"
+version = "1.2.0"
 repository = "https://github.com/solana-labs/solana"
 license = "Apache-2.0"
 homepage = "https://solana.com/"

 [dependencies]
 bincode = "1.2.1"
-clap = "2.33.0"
+clap = "2.33.1"
 log = "0.4.8"
-rayon = "1.2.0"
-serde = "1.0.104"
-serde_derive = "1.0.103"
-serde_json = "1.0.44"
-serde_yaml = "0.8.11"
-solana-clap-utils = { path = "../clap-utils", version = "0.22.0" }
-solana-core = { path = "../core", version = "0.22.0" }
-solana-genesis = { path = "../genesis", version = "0.22.0" }
-solana-client = { path = "../client", version = "0.22.0" }
-solana-faucet = { path = "../faucet", version = "0.22.0" }
-solana-librapay = { path = "../programs/librapay", version = "0.22.0", optional = true }
-solana-logger = { path = "../logger", version = "0.22.0" }
-solana-metrics = { path = "../metrics", version = "0.22.0" }
-solana-measure = { path = "../measure", version = "0.22.0" }
-solana-net-utils = { path = "../net-utils", version = "0.22.0" }
-solana-runtime = { path = "../runtime", version = "0.22.0" }
-solana-sdk = { path = "../sdk", version = "0.22.0" }
-solana-move-loader-program = { path = "../programs/move_loader", version = "0.22.0", optional = true }
+rayon = "1.3.0"
+serde_json = "1.0.53"
+serde_yaml = "0.8.12"
+solana-clap-utils = { path = "../clap-utils", version = "1.2.0" }
+solana-core = { path = "../core", version = "1.2.0" }
+solana-genesis = { path = "../genesis", version = "1.2.0" }
+solana-client = { path = "../client", version = "1.2.0" }
+solana-faucet = { path = "../faucet", version = "1.2.0" }
+solana-librapay = { path = "../programs/librapay", version = "1.2.0", optional = true }
+solana-logger = { path = "../logger", version = "1.2.0" }
+solana-metrics = { path = "../metrics", version = "1.2.0" }
+solana-measure = { path = "../measure", version = "1.2.0" }
+solana-net-utils = { path = "../net-utils", version = "1.2.0" }
+solana-runtime = { path = "../runtime", version = "1.2.0" }
+solana-sdk = { path = "../sdk", version = "1.2.0" }
+solana-move-loader-program = { path = "../programs/move_loader", version = "1.2.0", optional = true }
+solana-version = { path = "../version", version = "1.2.0" }

 [dev-dependencies]
-serial_test = "0.3.2"
-serial_test_derive = "0.3.1"
-solana-local-cluster = { path = "../local-cluster", version = "0.22.0" }
+serial_test = "0.4.0"
+serial_test_derive = "0.4.0"
+solana-local-cluster = { path = "../local-cluster", version = "1.2.0" }

 [features]
 move = ["solana-librapay", "solana-move-loader-program"]

+[package.metadata.docs.rs]
+targets = ["x86_64-unknown-linux-gnu"]
@@ -7,7 +7,7 @@ use solana_faucet::faucet::request_airdrop_transaction;
 #[cfg(feature = "move")]
 use solana_librapay::{create_genesis, upload_mint_script, upload_payment_script};
 use solana_measure::measure::Measure;
-use solana_metrics::{self, datapoint_debug};
+use solana_metrics::{self, datapoint_info};
 use solana_sdk::{
     client::Client,
     clock::{DEFAULT_TICKS_PER_SECOND, DEFAULT_TICKS_PER_SLOT, MAX_PROCESSING_AGE},
@@ -15,21 +15,20 @@ use solana_sdk::{
     fee_calculator::FeeCalculator,
     hash::Hash,
     pubkey::Pubkey,
-    signature::{Keypair, KeypairUtil},
+    signature::{Keypair, Signer},
     system_instruction, system_transaction,
     timing::{duration_as_ms, duration_as_s, duration_as_us, timestamp},
     transaction::Transaction,
 };
 use std::{
-    cmp,
-    collections::VecDeque,
+    collections::{HashSet, VecDeque},
     net::SocketAddr,
     process::exit,
     sync::{
         atomic::{AtomicBool, AtomicIsize, AtomicUsize, Ordering},
-        Arc, RwLock,
+        Arc, Mutex, RwLock,
     },
-    thread::{sleep, Builder},
+    thread::{sleep, Builder, JoinHandle},
     time::{Duration, Instant},
 };
@ -65,11 +64,148 @@ fn get_recent_blockhash<T: Client>(client: &T) -> (Hash, FeeCalculator) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
fn wait_for_target_slots_per_epoch<T>(target_slots_per_epoch: u64, client: &Arc<T>)
|
||||||
|
where
|
||||||
|
T: 'static + Client + Send + Sync,
|
||||||
|
{
|
||||||
|
if target_slots_per_epoch != 0 {
|
||||||
|
info!(
|
||||||
|
"Waiting until epochs are {} slots long..",
|
||||||
|
target_slots_per_epoch
|
||||||
|
);
|
||||||
|
loop {
|
||||||
|
if let Ok(epoch_info) = client.get_epoch_info() {
|
||||||
|
if epoch_info.slots_in_epoch >= target_slots_per_epoch {
|
||||||
|
info!("Done epoch_info: {:?}", epoch_info);
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
info!(
|
||||||
|
"Waiting for epoch: {} now: {}",
|
||||||
|
target_slots_per_epoch, epoch_info.slots_in_epoch
|
||||||
|
);
|
||||||
|
}
|
||||||
|
sleep(Duration::from_secs(3));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn create_sampler_thread<T>(
|
||||||
|
client: &Arc<T>,
|
||||||
|
exit_signal: &Arc<AtomicBool>,
|
||||||
|
sample_period: u64,
|
||||||
|
maxes: &Arc<RwLock<Vec<(String, SampleStats)>>>,
|
||||||
|
) -> JoinHandle<()>
|
||||||
|
where
|
||||||
|
T: 'static + Client + Send + Sync,
|
||||||
|
{
|
||||||
|
info!("Sampling TPS every {} second...", sample_period);
|
||||||
|
let exit_signal = exit_signal.clone();
|
||||||
|
let maxes = maxes.clone();
|
||||||
|
let client = client.clone();
|
||||||
|
Builder::new()
|
||||||
|
.name("solana-client-sample".to_string())
|
||||||
|
.spawn(move || {
|
||||||
|
sample_txs(&exit_signal, &maxes, sample_period, &client);
|
||||||
|
})
|
||||||
|
.unwrap()
|
||||||
|
}
|
||||||
|
|
||||||
|
fn generate_chunked_transfers(
|
||||||
|
recent_blockhash: Arc<RwLock<Hash>>,
|
||||||
|
shared_txs: &SharedTransactions,
|
||||||
|
shared_tx_active_thread_count: Arc<AtomicIsize>,
|
||||||
|
source_keypair_chunks: Vec<Vec<&Keypair>>,
|
||||||
|
dest_keypair_chunks: &mut Vec<VecDeque<&Keypair>>,
|
||||||
|
threads: usize,
|
||||||
|
duration: Duration,
|
||||||
|
sustained: bool,
|
||||||
|
libra_args: Option<LibraKeys>,
|
||||||
|
) {
|
||||||
|
// generate and send transactions for the specified duration
|
||||||
|
let start = Instant::now();
|
||||||
|
let keypair_chunks = source_keypair_chunks.len();
|
||||||
|
let mut reclaim_lamports_back_to_source_account = false;
|
||||||
|
let mut chunk_index = 0;
|
||||||
|
while start.elapsed() < duration {
|
||||||
|
generate_txs(
|
||||||
|
shared_txs,
|
||||||
|
&recent_blockhash,
|
||||||
|
&source_keypair_chunks[chunk_index],
|
||||||
|
&dest_keypair_chunks[chunk_index],
|
||||||
|
threads,
|
||||||
|
reclaim_lamports_back_to_source_account,
|
||||||
|
&libra_args,
|
||||||
|
);
|
||||||
|
|
||||||
|
// In sustained mode, overlap the transfers with generation. This has higher average
|
||||||
|
// performance but lower peak performance in tested environments.
|
||||||
|
if sustained {
|
||||||
|
// Ensure that we don't generate more transactions than we can handle.
|
||||||
|
while shared_txs.read().unwrap().len() > 2 * threads {
|
||||||
|
sleep(Duration::from_millis(1));
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
while !shared_txs.read().unwrap().is_empty()
|
||||||
|
|| shared_tx_active_thread_count.load(Ordering::Relaxed) > 0
|
||||||
|
{
|
||||||
|
sleep(Duration::from_millis(1));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Rotate destination keypairs so that the next round of transactions will have different
|
||||||
|
// transaction signatures even when blockhash is reused.
|
||||||
|
dest_keypair_chunks[chunk_index].rotate_left(1);
|
||||||
|
|
||||||
|
// Move on to next chunk
|
||||||
|
chunk_index = (chunk_index + 1) % keypair_chunks;
|
||||||
|
|
||||||
|
// Switch directions after transfering for each "chunk"
|
||||||
|
if chunk_index == 0 {
|
||||||
|
reclaim_lamports_back_to_source_account = !reclaim_lamports_back_to_source_account;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn create_sender_threads<T>(
|
||||||
|
client: &Arc<T>,
|
||||||
|
shared_txs: &SharedTransactions,
|
||||||
|
thread_batch_sleep_ms: usize,
|
||||||
|
total_tx_sent_count: &Arc<AtomicUsize>,
|
||||||
|
threads: usize,
|
||||||
|
exit_signal: &Arc<AtomicBool>,
|
||||||
|
shared_tx_active_thread_count: &Arc<AtomicIsize>,
|
||||||
|
) -> Vec<JoinHandle<()>>
|
||||||
|
where
|
||||||
|
T: 'static + Client + Send + Sync,
|
||||||
|
{
|
||||||
|
(0..threads)
|
||||||
|
.map(|_| {
|
||||||
|
let exit_signal = exit_signal.clone();
|
||||||
|
let shared_txs = shared_txs.clone();
|
||||||
|
let shared_tx_active_thread_count = shared_tx_active_thread_count.clone();
|
||||||
|
let total_tx_sent_count = total_tx_sent_count.clone();
|
||||||
|
let client = client.clone();
|
||||||
|
Builder::new()
|
||||||
|
.name("solana-client-sender".to_string())
|
||||||
|
.spawn(move || {
|
||||||
|
do_tx_transfers(
|
||||||
|
&exit_signal,
|
||||||
|
&shared_txs,
|
||||||
|
&shared_tx_active_thread_count,
|
||||||
|
&total_tx_sent_count,
|
||||||
|
thread_batch_sleep_ms,
|
||||||
|
&client,
|
||||||
|
);
|
||||||
|
})
|
||||||
|
.unwrap()
|
||||||
|
})
|
||||||
|
.collect()
|
||||||
|
}
|
||||||
|
|
||||||
pub fn do_bench_tps<T>(
|
pub fn do_bench_tps<T>(
|
||||||
clients: Vec<T>,
|
client: Arc<T>,
|
||||||
config: Config,
|
config: Config,
|
||||||
gen_keypairs: Vec<Keypair>,
|
gen_keypairs: Vec<Keypair>,
|
||||||
keypair0_balance: u64,
|
|
||||||
libra_args: Option<LibraKeys>,
|
libra_args: Option<LibraKeys>,
|
||||||
) -> u64
|
) -> u64
|
||||||
where
|
where
|
||||||
@ -82,13 +218,10 @@ where
|
|||||||
duration,
|
duration,
|
||||||
tx_count,
|
tx_count,
|
||||||
sustained,
|
sustained,
|
||||||
num_lamports_per_account,
|
target_slots_per_epoch,
|
||||||
..
|
..
|
||||||
} = config;
|
} = config;
|
||||||
|
|
||||||
let clients: Vec<_> = clients.into_iter().map(Arc::new).collect();
|
|
||||||
let client = &clients[0];
|
|
||||||
|
|
||||||
let mut source_keypair_chunks: Vec<Vec<&Keypair>> = Vec::new();
|
let mut source_keypair_chunks: Vec<Vec<&Keypair>> = Vec::new();
|
||||||
let mut dest_keypair_chunks: Vec<VecDeque<&Keypair>> = Vec::new();
|
let mut dest_keypair_chunks: Vec<VecDeque<&Keypair>> = Vec::new();
|
||||||
assert!(gen_keypairs.len() >= 2 * tx_count);
|
assert!(gen_keypairs.len() >= 2 * tx_count);
|
||||||
@ -114,21 +247,7 @@ where
|
|||||||
// collect the max transaction rate and total tx count seen
|
// collect the max transaction rate and total tx count seen
|
||||||
let maxes = Arc::new(RwLock::new(Vec::new()));
|
let maxes = Arc::new(RwLock::new(Vec::new()));
|
||||||
let sample_period = 1; // in seconds
|
let sample_period = 1; // in seconds
|
||||||
info!("Sampling TPS every {} second...", sample_period);
|
let sample_thread = create_sampler_thread(&client, &exit_signal, sample_period, &maxes);
|
||||||
let v_threads: Vec<_> = clients
|
|
||||||
.iter()
|
|
||||||
.map(|client| {
|
|
||||||
let exit_signal = exit_signal.clone();
|
|
||||||
let maxes = maxes.clone();
|
|
||||||
let client = client.clone();
|
|
||||||
Builder::new()
|
|
||||||
.name("solana-client-sample".to_string())
|
|
||||||
.spawn(move || {
|
|
||||||
sample_txs(&exit_signal, &maxes, sample_period, &client);
|
|
||||||
})
|
|
||||||
.unwrap()
|
|
||||||
})
|
|
||||||
.collect();
|
|
||||||
|
|
||||||
let shared_txs: SharedTransactions = Arc::new(RwLock::new(VecDeque::new()));
|
let shared_txs: SharedTransactions = Arc::new(RwLock::new(VecDeque::new()));
|
||||||
|
|
||||||
@@ -149,78 +268,39 @@ where
             .unwrap()
     };

-    let s_threads: Vec<_> = (0..threads)
-        .map(|_| {
-            let exit_signal = exit_signal.clone();
-            let shared_txs = shared_txs.clone();
-            let shared_tx_active_thread_count = shared_tx_active_thread_count.clone();
-            let total_tx_sent_count = total_tx_sent_count.clone();
-            let client = client.clone();
-            Builder::new()
-                .name("solana-client-sender".to_string())
-                .spawn(move || {
-                    do_tx_transfers(
-                        &exit_signal,
-                        &shared_txs,
-                        &shared_tx_active_thread_count,
-                        &total_tx_sent_count,
-                        thread_batch_sleep_ms,
-                        &client,
-                    );
-                })
-                .unwrap()
-        })
-        .collect();
-
-    // generate and send transactions for the specified duration
-    let start = Instant::now();
-    let keypair_chunks = source_keypair_chunks.len() as u64;
-    let mut reclaim_lamports_back_to_source_account = false;
-    let mut i = keypair0_balance;
-    while start.elapsed() < duration {
-        let chunk_index = (i % keypair_chunks) as usize;
-        generate_txs(
-            &shared_txs,
-            &recent_blockhash,
-            &source_keypair_chunks[chunk_index],
-            &dest_keypair_chunks[chunk_index],
-            threads,
-            reclaim_lamports_back_to_source_account,
-            &libra_args,
-        );
-
-        // In sustained mode, overlap the transfers with generation. This has higher average
-        // performance but lower peak performance in tested environments.
-        if sustained {
-            // Ensure that we don't generate more transactions than we can handle.
-            while shared_txs.read().unwrap().len() > 2 * threads {
-                sleep(Duration::from_millis(1));
-            }
-        } else {
-            while shared_tx_active_thread_count.load(Ordering::Relaxed) > 0 {
-                sleep(Duration::from_millis(1));
-            }
-        }
-
-        // Rotate destination keypairs so that the next round of transactions will have different
-        // transaction signatures even when blockhash is reused.
-        dest_keypair_chunks[chunk_index].rotate_left(1);
-
-        i += 1;
-        if should_switch_directions(num_lamports_per_account, keypair_chunks, i) {
-            reclaim_lamports_back_to_source_account = !reclaim_lamports_back_to_source_account;
-        }
-    }
+    let s_threads = create_sender_threads(
+        &client,
+        &shared_txs,
+        thread_batch_sleep_ms,
+        &total_tx_sent_count,
+        threads,
+        &exit_signal,
+        &shared_tx_active_thread_count,
+    );
+
+    wait_for_target_slots_per_epoch(target_slots_per_epoch, &client);
+
+    let start = Instant::now();
+
+    generate_chunked_transfers(
+        recent_blockhash,
+        &shared_txs,
+        shared_tx_active_thread_count,
+        source_keypair_chunks,
+        &mut dest_keypair_chunks,
+        threads,
+        duration,
+        sustained,
+        libra_args,
+    );

     // Stop the sampling threads so it will collect the stats
     exit_signal.store(true, Ordering::Relaxed);

-    info!("Waiting for validator threads...");
-    for t in v_threads {
-        if let Err(err) = t.join() {
-            info!("  join() failed with: {:?}", err);
-        }
-    }
+    info!("Waiting for sampler threads...");
+    if let Err(err) = sample_thread.join() {
+        info!("  join() failed with: {:?}", err);
+    }

     // join the tx send threads
     info!("Waiting for transmit threads...");
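The sustained-mode back-pressure loop deleted above moves into `generate_chunked_transfers`. Its rule is simple: stop generating new batches whenever the shared queue already holds more than twice as many entries as there are sender threads. A minimal sketch of that rule (the queue element type is illustrative):

```rust
use std::collections::VecDeque;
use std::sync::{Arc, RwLock};
use std::thread::sleep;
use std::time::Duration;

// Block the generator while the senders are more than 2x `threads` behind.
fn wait_for_room(queue: &Arc<RwLock<VecDeque<Vec<u8>>>>, threads: usize) {
    while queue.read().unwrap().len() > 2 * threads {
        sleep(Duration::from_millis(1));
    }
}
```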
@@ -251,7 +331,7 @@ where

 fn metrics_submit_lamport_balance(lamport_balance: u64) {
     info!("Token balance: {}", lamport_balance);
-    datapoint_debug!(
+    datapoint_info!(
         "bench-tps-lamport_balance",
         ("balance", lamport_balance, i64)
     );
@@ -382,7 +462,7 @@ fn generate_txs(
         duration_as_ms(&duration),
         blockhash,
     );
-    datapoint_debug!(
+    datapoint_info!(
         "bench-tps-generate_txs",
         ("duration", duration_as_us(&duration), i64)
     );
@@ -488,7 +568,7 @@ fn do_tx_transfers<T: Client>(
             duration_as_ms(&transfer_start.elapsed()),
             tx_len as f32 / duration_as_s(&transfer_start.elapsed()),
         );
-        datapoint_debug!(
+        datapoint_info!(
             "bench-tps-do_tx_transfers",
             ("duration", duration_as_us(&transfer_start.elapsed()), i64),
             ("count", tx_len, i64)
@@ -500,86 +580,80 @@ fn do_tx_transfers<T: Client>(
         }
     }
 }

-fn verify_funding_transfer<T: Client>(client: &T, tx: &Transaction, amount: u64) -> bool {
+fn verify_funding_transfer<T: Client>(client: &Arc<T>, tx: &Transaction, amount: u64) -> bool {
     for a in &tx.message().account_keys[1..] {
-        if client
-            .get_balance_with_commitment(a, CommitmentConfig::recent())
-            .unwrap_or(0)
-            >= amount
-        {
-            return true;
-        }
+        match client.get_balance_with_commitment(a, CommitmentConfig::recent()) {
+            Ok(balance) => return balance >= amount,
+            Err(err) => error!("failed to get balance {:?}", err),
+        }
     }
     false
 }
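The rewritten `verify_funding_transfer` stops folding RPC errors into a zero balance (`unwrap_or(0)`) and instead matches on the result, logging failures. A small sketch of the distinction, with a generic balance source standing in for the client:

```rust
// "Balance is too low" and "the balance query itself failed" are different
// conditions; only the first should count against verification, while the
// second should be surfaced in the logs.
fn balance_at_least(get_balance: impl Fn() -> Result<u64, String>, amount: u64) -> bool {
    match get_balance() {
        Ok(balance) => balance >= amount,
        Err(err) => {
            eprintln!("failed to get balance {:?}", err);
            false
        }
    }
}

fn main() {
    assert!(balance_at_least(|| Ok(10), 5));
    assert!(!balance_at_least(|| Err("rpc timeout".to_string()), 5));
}
```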

-/// fund the dests keys by spending all of the source keys into MAX_SPENDS_PER_TX
-/// on every iteration. This allows us to replay the transfers because the source is either empty,
-/// or full
-pub fn fund_keys<T: Client>(
-    client: &T,
-    source: &Keypair,
-    dests: &[Keypair],
-    total: u64,
-    max_fee: u64,
-    mut extra: u64,
-) {
-    let mut funded: Vec<(&Keypair, u64)> = vec![(source, total)];
-    let mut notfunded: Vec<&Keypair> = dests.iter().collect();
-    let lamports_per_account = (total - (extra * max_fee)) / (notfunded.len() as u64 + 1);
-
-    info!(
-        "funding keys {} with lamports: {:?} total: {}",
-        dests.len(),
-        client.get_balance(&source.pubkey()),
-        total
-    );
-    while !notfunded.is_empty() {
-        let mut new_funded: Vec<(&Keypair, u64)> = vec![];
-        let mut to_fund = vec![];
-        info!("creating from... {}", funded.len());
-        let mut build_to_fund = Measure::start("build_to_fund");
-        for f in &mut funded {
-            let max_units = cmp::min(notfunded.len() as u64, MAX_SPENDS_PER_TX);
-            if max_units == 0 {
-                break;
-            }
-            let start = notfunded.len() - max_units as usize;
-            let fees = if extra > 0 { max_fee } else { 0 };
-            let per_unit = (f.1 - lamports_per_account - fees) / max_units;
-            let moves: Vec<_> = notfunded[start..]
-                .iter()
-                .map(|k| (k.pubkey(), per_unit))
-                .collect();
-            notfunded[start..]
-                .iter()
-                .for_each(|k| new_funded.push((k, per_unit)));
-            notfunded.truncate(start);
-            if !moves.is_empty() {
-                to_fund.push((f.0, moves));
-            }
-            extra -= 1;
-        }
-        build_to_fund.stop();
-        debug!("build to_fund vec: {}us", build_to_fund.as_us());
-
-        // try to transfer a "few" at a time with recent blockhash
-        // assume 4MB network buffers, and 512 byte packets
-        const FUND_CHUNK_LEN: usize = 4 * 1024 * 1024 / 512;
-
-        to_fund.chunks(FUND_CHUNK_LEN).for_each(|chunk| {
-            let mut tries = 0;
-            let mut make_txs = Measure::start("make_txs");
-            // this set of transactions just initializes us for bookkeeping
-            #[allow(clippy::clone_double_ref)] // sigh
-            let mut to_fund_txs: Vec<_> = chunk
-                .par_iter()
-                .map(|(k, m)| {
-                    let tx = Transaction::new_unsigned_instructions(
-                        system_instruction::transfer_many(&k.pubkey(), &m),
-                    );
-                    (k.clone(), tx)
-                })
-                .collect();
-            make_txs.stop();
+trait FundingTransactions<'a> {
+    fn fund<T: 'static + Client + Send + Sync>(
+        &mut self,
+        client: &Arc<T>,
+        to_fund: &[(&'a Keypair, Vec<(Pubkey, u64)>)],
+        to_lamports: u64,
+    );
+    fn make(&mut self, to_fund: &[(&'a Keypair, Vec<(Pubkey, u64)>)]);
+    fn sign(&mut self, blockhash: Hash);
+    fn send<T: Client>(&self, client: &Arc<T>);
+    fn verify<T: 'static + Client + Send + Sync>(&mut self, client: &Arc<T>, to_lamports: u64);
+}
+
+impl<'a> FundingTransactions<'a> for Vec<(&'a Keypair, Transaction)> {
+    fn fund<T: 'static + Client + Send + Sync>(
+        &mut self,
+        client: &Arc<T>,
+        to_fund: &[(&'a Keypair, Vec<(Pubkey, u64)>)],
+        to_lamports: u64,
+    ) {
+        self.make(to_fund);
+
+        let mut tries = 0;
+        while !self.is_empty() {
+            info!(
+                "{} {} each to {} accounts in {} txs",
+                if tries == 0 {
+                    "transferring"
+                } else {
+                    " retrying"
+                },
+                to_lamports,
+                self.len() * MAX_SPENDS_PER_TX as usize,
+                self.len(),
+            );
+
+            let (blockhash, _fee_calculator) = get_recent_blockhash(client.as_ref());
+
+            // re-sign retained to_fund_txes with updated blockhash
+            self.sign(blockhash);
+            self.send(&client);
+
+            // Sleep a few slots to allow transactions to process
+            sleep(Duration::from_secs(1));
+
+            self.verify(&client, to_lamports);
+
+            // retry anything that seems to have dropped through cracks
+            // again since these txs are all or nothing, they're fine to
+            // retry
+            tries += 1;
+        }
+        info!("transferred");
+    }
+
+    fn make(&mut self, to_fund: &[(&'a Keypair, Vec<(Pubkey, u64)>)]) {
+        let mut make_txs = Measure::start("make_txs");
+        let to_fund_txs: Vec<(&Keypair, Transaction)> = to_fund
+            .par_iter()
+            .map(|(k, t)| {
+                let tx = Transaction::new_unsigned_instructions(
+                    &system_instruction::transfer_many(&k.pubkey(), &t),
+                );
+                (*k, tx)
+            })
+            .collect();
+        make_txs.stop();
@@ -588,89 +662,141 @@ pub fn fund_keys<T: Client>(
             to_fund_txs.len(),
             make_txs.as_us()
         );
-
-        let amount = chunk[0].1[0].1;
-
-        while !to_fund_txs.is_empty() {
-            let receivers = to_fund_txs
-                .iter()
-                .fold(0, |len, (_, tx)| len + tx.message().instructions.len());
-
-            info!(
-                "{} {} to {} in {} txs",
-                if tries == 0 {
-                    "transferring"
-                } else {
-                    " retrying"
-                },
-                amount,
-                receivers,
-                to_fund_txs.len(),
-            );
-
-            let (blockhash, _fee_calculator) = get_recent_blockhash(client);
-
-            // re-sign retained to_fund_txes with updated blockhash
-            let mut sign_txs = Measure::start("sign_txs");
-            to_fund_txs.par_iter_mut().for_each(|(k, tx)| {
-                tx.sign(&[*k], blockhash);
-            });
-            sign_txs.stop();
-            debug!("sign {} txs: {}us", to_fund_txs.len(), sign_txs.as_us());
-
-            let mut send_txs = Measure::start("send_txs");
-            to_fund_txs.iter().for_each(|(_, tx)| {
-                client.async_send_transaction(tx.clone()).expect("transfer");
-            });
-            send_txs.stop();
-            debug!("send {} txs: {}us", to_fund_txs.len(), send_txs.as_us());
-
-            let mut verify_txs = Measure::start("verify_txs");
-            let mut starting_txs = to_fund_txs.len();
-            let mut verified_txs = 0;
-            let mut failed_verify = 0;
-            // Only loop multiple times for small (quick) transaction batches
-            for _ in 0..(if starting_txs < 1000 { 3 } else { 1 }) {
-                let mut timer = Instant::now();
-                to_fund_txs.retain(|(_, tx)| {
-                    if timer.elapsed() >= Duration::from_secs(5) {
-                        if failed_verify > 0 {
-                            debug!("total txs failed verify: {}", failed_verify);
-                        }
-                        info!(
-                            "Verifying transfers... {} remaining",
-                            starting_txs - verified_txs
-                        );
-                        timer = Instant::now();
-                    }
-                    let verified = verify_funding_transfer(client, &tx, amount);
-                    if verified {
-                        verified_txs += 1;
-                    } else {
-                        failed_verify += 1;
-                    }
-                    !verified
-                });
-                if to_fund_txs.is_empty() {
-                    break;
-                }
-                debug!("Looping verifications");
-                info!("Verifying transfers... {} remaining", to_fund_txs.len());
-                sleep(Duration::from_millis(100));
-            }
-            starting_txs -= to_fund_txs.len();
-            verify_txs.stop();
-            debug!("verified {} txs: {}us", starting_txs, verify_txs.as_us());
-
-            // retry anything that seems to have dropped through cracks
-            // again since these txs are all or nothing, they're fine to
-            // retry
-            tries += 1;
-        }
-        info!("transferred");
+        self.extend(to_fund_txs);
+    }
+
+    fn sign(&mut self, blockhash: Hash) {
+        let mut sign_txs = Measure::start("sign_txs");
+        self.par_iter_mut().for_each(|(k, tx)| {
+            tx.sign(&[*k], blockhash);
+        });
+        sign_txs.stop();
+        debug!("sign {} txs: {}us", self.len(), sign_txs.as_us());
+    }
+
+    fn send<T: Client>(&self, client: &Arc<T>) {
+        let mut send_txs = Measure::start("send_txs");
+        self.iter().for_each(|(_, tx)| {
+            client.async_send_transaction(tx.clone()).expect("transfer");
+        });
+        send_txs.stop();
+        debug!("send {} txs: {}us", self.len(), send_txs.as_us());
+    }
+
+    fn verify<T: 'static + Client + Send + Sync>(&mut self, client: &Arc<T>, to_lamports: u64) {
+        let starting_txs = self.len();
+        let verified_txs = Arc::new(AtomicUsize::new(0));
+        let too_many_failures = Arc::new(AtomicBool::new(false));
+        let loops = if starting_txs < 1000 { 3 } else { 1 };
+        // Only loop multiple times for small (quick) transaction batches
+        let time = Arc::new(Mutex::new(Instant::now()));
+        for _ in 0..loops {
+            let time = time.clone();
+            let failed_verify = Arc::new(AtomicUsize::new(0));
+            let client = client.clone();
+            let verified_txs = &verified_txs;
+            let failed_verify = &failed_verify;
+            let too_many_failures = &too_many_failures;
+            let verified_set: HashSet<Pubkey> = self
+                .par_iter()
+                .filter_map(move |(k, tx)| {
+                    if too_many_failures.load(Ordering::Relaxed) {
+                        return None;
+                    }
+
+                    let verified = if verify_funding_transfer(&client, &tx, to_lamports) {
+                        verified_txs.fetch_add(1, Ordering::Relaxed);
+                        Some(k.pubkey())
+                    } else {
+                        failed_verify.fetch_add(1, Ordering::Relaxed);
+                        None
+                    };
+
+                    let verified_txs = verified_txs.load(Ordering::Relaxed);
+                    let failed_verify = failed_verify.load(Ordering::Relaxed);
+                    let remaining_count = starting_txs.saturating_sub(verified_txs + failed_verify);
+                    if failed_verify > 100 && failed_verify > verified_txs {
+                        too_many_failures.store(true, Ordering::Relaxed);
+                        warn!(
+                            "Too many failed transfers... {} remaining, {} verified, {} failures",
+                            remaining_count, verified_txs, failed_verify
+                        );
+                    }
+                    if remaining_count > 0 {
+                        let mut time_l = time.lock().unwrap();
+                        if time_l.elapsed().as_secs() > 2 {
+                            info!(
+                                "Verifying transfers... {} remaining, {} verified, {} failures",
+                                remaining_count, verified_txs, failed_verify
+                            );
+                            *time_l = Instant::now();
+                        }
+                    }
+
+                    verified
+                })
+                .collect();
+
+            self.retain(|(k, _)| !verified_set.contains(&k.pubkey()));
+            if self.is_empty() {
+                break;
+            }
+            info!("Looping verifications");
+
+            let verified_txs = verified_txs.load(Ordering::Relaxed);
+            let failed_verify = failed_verify.load(Ordering::Relaxed);
+            let remaining_count = starting_txs.saturating_sub(verified_txs + failed_verify);
+            info!(
+                "Verifying transfers... {} remaining, {} verified, {} failures",
+                remaining_count, verified_txs, failed_verify
+            );
+            sleep(Duration::from_millis(100));
+        }
+    }
+}
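The `FundingTransactions` trait above is implemented directly on `Vec<(&Keypair, Transaction)>`, the extension-trait pattern: the batch itself carries its make/sign/send/verify pipeline. A minimal sketch of the same pattern on an unrelated type (names here are illustrative):

```rust
// Extension trait: bolt methods onto an existing std type.
trait Describe {
    fn describe(&self) -> String;
}

impl Describe for Vec<u64> {
    fn describe(&self) -> String {
        format!("{} entries, sum {}", self.len(), self.iter().sum::<u64>())
    }
}

fn main() {
    let batch = vec![1u64, 2, 3];
    assert_eq!(batch.describe(), "3 entries, sum 6");
}
```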
+
+/// fund the dests keys by spending all of the source keys into MAX_SPENDS_PER_TX
+/// on every iteration. This allows us to replay the transfers because the source is either empty,
+/// or full
+pub fn fund_keys<T: 'static + Client + Send + Sync>(
+    client: Arc<T>,
+    source: &Keypair,
+    dests: &[Keypair],
+    total: u64,
+    max_fee: u64,
+    lamports_per_account: u64,
+) {
+    let mut funded: Vec<&Keypair> = vec![source];
+    let mut funded_funds = total;
+    let mut not_funded: Vec<&Keypair> = dests.iter().collect();
+    while !not_funded.is_empty() {
+        // Build to fund list and prepare funding sources for next iteration
+        let mut new_funded: Vec<&Keypair> = vec![];
+        let mut to_fund: Vec<(&Keypair, Vec<(Pubkey, u64)>)> = vec![];
+        let to_lamports = (funded_funds - lamports_per_account - max_fee) / MAX_SPENDS_PER_TX;
+        for f in funded {
+            let start = not_funded.len() - MAX_SPENDS_PER_TX as usize;
+            let dests: Vec<_> = not_funded.drain(start..).collect();
+            let spends: Vec<_> = dests.iter().map(|k| (k.pubkey(), to_lamports)).collect();
+            to_fund.push((f, spends));
+            new_funded.extend(dests.into_iter());
+        }
+
+        // try to transfer a "few" at a time with recent blockhash
+        // assume 4MB network buffers, and 512 byte packets
+        const FUND_CHUNK_LEN: usize = 4 * 1024 * 1024 / 512;
+
+        to_fund.chunks(FUND_CHUNK_LEN).for_each(|chunk| {
+            Vec::<(&Keypair, Transaction)>::with_capacity(chunk.len()).fund(
+                &client,
+                chunk,
+                to_lamports,
+            );
         });
-        info!("funded: {} left: {}", new_funded.len(), notfunded.len());
+        info!("funded: {} left: {}", new_funded.len(), not_funded.len());
         funded = new_funded;
+        funded_funds = to_lamports;
     }
 }

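The rewritten `fund_keys` fans funds out as a tree: every funded key pays up to `MAX_SPENDS_PER_TX` new keys per round, so the funded set grows geometrically and the number of rounds is logarithmic in the destination count. A back-of-the-envelope sketch (the fan-out value 4 below is only an assumption for illustration; `MAX_SPENDS_PER_TX` is defined elsewhere in this file):

```rust
// With one funded source and a fan-out of `fanout` per round,
// round r can have funded fanout^r keys in total.
fn rounds_needed(fanout: u64, dests: u64) -> u32 {
    let mut funded = 1u64;
    let mut rounds = 0;
    while funded < dests {
        funded = funded.saturating_mul(fanout);
        rounds += 1;
    }
    rounds
}

fn main() {
    assert_eq!(rounds_needed(4, 1), 0);
    assert_eq!(rounds_needed(4, 64), 3); // 4^3 = 64
    assert_eq!(rounds_needed(4, 1_000_000), 10); // 4^10 = 1,048,576
}
```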
@@ -678,14 +804,14 @@ pub fn airdrop_lamports<T: Client>(
     client: &T,
     faucet_addr: &SocketAddr,
     id: &Keypair,
-    tx_count: u64,
+    desired_balance: u64,
 ) -> Result<()> {
     let starting_balance = client.get_balance(&id.pubkey()).unwrap_or(0);
     metrics_submit_lamport_balance(starting_balance);
     info!("starting balance {}", starting_balance);

-    if starting_balance < tx_count {
-        let airdrop_amount = tx_count - starting_balance;
+    if starting_balance < desired_balance {
+        let airdrop_amount = desired_balance - starting_balance;
         info!(
             "Airdropping {:?} lamports from {} for {}",
             airdrop_amount,
@@ -810,17 +936,6 @@ fn compute_and_report_stats(
     );
 }

-// First transfer 2/3 of the lamports to the dest accounts
-// then ping-pong 1/3 of the lamports back to the other account
-// this leaves 1/3 lamport buffer in each account
-fn should_switch_directions(num_lamports_per_account: u64, keypair_chunks: u64, i: u64) -> bool {
-    if i < keypair_chunks * (2 * num_lamports_per_account) / 3 {
-        return false;
-    }
-
-    i % (keypair_chunks * num_lamports_per_account / 3) == 0
-}
-
 pub fn generate_keypairs(seed_keypair: &Keypair, count: u64) -> (Vec<Keypair>, u64) {
     let mut seed = [0u8; 32];
     seed.copy_from_slice(&seed_keypair.to_bytes()[..32]);
@@ -908,7 +1023,7 @@ fn fund_move_keys<T: Client>(
         .collect();
     let tx = Transaction::new_signed_instructions(
         &[funding_key],
-        system_instruction::transfer_many(&funding_key.pubkey(), &pubkey_amounts),
+        &system_instruction::transfer_many(&funding_key.pubkey(), &pubkey_amounts),
         blockhash,
     );
     client.send_message(&[funding_key], tx.message).unwrap();
@@ -1004,23 +1119,25 @@ fn fund_move_keys<T: Client>(
     info!("done funding keys, took {} ms", funding_time.as_ms());
 }

-pub fn generate_and_fund_keypairs<T: Client>(
-    client: &T,
+pub fn generate_and_fund_keypairs<T: 'static + Client + Send + Sync>(
+    client: Arc<T>,
     faucet_addr: Option<SocketAddr>,
     funding_key: &Keypair,
     keypair_count: usize,
     lamports_per_account: u64,
     use_move: bool,
-) -> Result<(Vec<Keypair>, Option<LibraKeys>, u64)> {
+) -> Result<(Vec<Keypair>, Option<LibraKeys>)> {
     info!("Creating {} keypairs...", keypair_count);
     let (mut keypairs, extra) = generate_keypairs(funding_key, keypair_count as u64);
     info!("Get lamports...");

-    // Sample the first keypair, see if it has lamports, if so then resume.
-    // This logic is to prevent lamport loss on repeated solana-bench-tps executions
-    let last_keypair_balance = client
-        .get_balance(&keypairs[keypair_count - 1].pubkey())
-        .unwrap_or(0);
+    // Sample the first keypair, to prevent lamport loss on repeated solana-bench-tps executions
+    let first_key = keypairs[0].pubkey();
+    let first_keypair_balance = client.get_balance(&first_key).unwrap_or(0);
+
+    // Sample the last keypair, to check if funding was already completed
+    let last_key = keypairs[keypair_count - 1].pubkey();
+    let last_keypair_balance = client.get_balance(&last_key).unwrap_or(0);

     #[cfg(feature = "move")]
     let mut move_keypairs_ret = None;
@@ -1028,31 +1145,38 @@ pub fn generate_and_fund_keypairs<T: Client>(
     #[cfg(not(feature = "move"))]
     let move_keypairs_ret = None;

-    if lamports_per_account > last_keypair_balance {
-        let (_blockhash, fee_calculator) = get_recent_blockhash(client);
-        let account_desired_balance =
-            lamports_per_account - last_keypair_balance + fee_calculator.max_lamports_per_signature;
-        let extra_fees = extra * fee_calculator.max_lamports_per_signature;
-        let mut total = account_desired_balance * (1 + keypairs.len() as u64) + extra_fees;
+    // Repeated runs will eat up keypair balances from transaction fees. In order to quickly
+    // start another bench-tps run without re-funding all of the keypairs, check if the
+    // keypairs still have at least 80% of the expected funds. That should be enough to
+    // pay for the transaction fees in a new run.
+    let enough_lamports = 8 * lamports_per_account / 10;
+    if first_keypair_balance < enough_lamports || last_keypair_balance < enough_lamports {
+        let fee_rate_governor = client.get_fee_rate_governor().unwrap();
+        let max_fee = fee_rate_governor.max_lamports_per_signature;
+        let extra_fees = extra * max_fee;
+        let total_keypairs = keypairs.len() as u64 + 1; // Add one for funding keypair
+        let mut total = lamports_per_account * total_keypairs + extra_fees;
         if use_move {
             total *= 3;
         }

-        info!("Previous key balance: {} max_fee: {} lamports_per_account: {} extra: {} desired_balance: {} total: {}",
-              last_keypair_balance, fee_calculator.max_lamports_per_signature, lamports_per_account, extra,
-              account_desired_balance, total
-        );
+        let funding_key_balance = client.get_balance(&funding_key.pubkey()).unwrap_or(0);
+        info!(
+            "Funding keypair balance: {} max_fee: {} lamports_per_account: {} extra: {} total: {}",
+            funding_key_balance, max_fee, lamports_per_account, extra, total
+        );

         if client.get_balance(&funding_key.pubkey()).unwrap_or(0) < total {
-            airdrop_lamports(client, &faucet_addr.unwrap(), funding_key, total)?;
+            airdrop_lamports(client.as_ref(), &faucet_addr.unwrap(), funding_key, total)?;
         }

         #[cfg(feature = "move")]
         {
             if use_move {
-                let libra_genesis_keypair = create_genesis(&funding_key, client, 10_000_000);
-                let libra_mint_program_id = upload_mint_script(&funding_key, client);
-                let libra_pay_program_id = upload_payment_script(&funding_key, client);
+                let libra_genesis_keypair =
+                    create_genesis(&funding_key, client.as_ref(), 10_000_000);
+                let libra_mint_program_id = upload_mint_script(&funding_key, client.as_ref());
+                let libra_pay_program_id = upload_payment_script(&funding_key, client.as_ref());

                 // Generate another set of keypairs for move accounts.
                 // Still fund the solana ones which will be used for fees.
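The new resume check above only re-funds when the sampled keypairs have dropped below 80% of the target balance, so repeated bench runs do not pay the full funding cost every time. A small sketch of the heuristic (note the integer math: `8 * lamports / 10` rounds down):

```rust
// Mirror of the 80% threshold used above, factored out for illustration.
fn needs_refund(first: u64, last: u64, lamports_per_account: u64) -> bool {
    let enough_lamports = 8 * lamports_per_account / 10;
    first < enough_lamports || last < enough_lamports
}

fn main() {
    assert!(!needs_refund(20, 20, 20)); // floor(8*20/10) = 16, both above it
    assert!(needs_refund(15, 20, 20));  // 15 < 16 triggers a refund
}
```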
@@ -1060,7 +1184,7 @@ pub fn generate_and_fund_keypairs<T: Client>(
                 let mut rnd = GenKeys::new(seed);
                 let move_keypairs = rnd.gen_n_keypairs(keypair_count as u64);
                 fund_move_keys(
-                    client,
+                    client.as_ref(),
                     funding_key,
                     &move_keypairs,
                     total / 3,
@@ -1085,15 +1209,15 @@ pub fn generate_and_fund_keypairs<T: Client>(
             funding_key,
             &keypairs,
             total,
-            fee_calculator.max_lamports_per_signature,
-            extra,
+            max_fee,
+            lamports_per_account,
         );
     }

     // 'generate_keypairs' generates extra keys to be able to have size-aligned funding batches for fund_keys.
     keypairs.truncate(keypair_count);

-    Ok((keypairs, move_keypairs_ret, last_keypair_balance))
+    Ok((keypairs, move_keypairs_ret))
 }

 #[cfg(test)]
@@ -1102,33 +1226,14 @@ mod tests {
     use solana_runtime::bank::Bank;
     use solana_runtime::bank_client::BankClient;
     use solana_sdk::client::SyncClient;
-    use solana_sdk::fee_calculator::FeeCalculator;
+    use solana_sdk::fee_calculator::FeeRateGovernor;
     use solana_sdk::genesis_config::create_genesis_config;

-    #[test]
-    fn test_switch_directions() {
-        assert_eq!(should_switch_directions(30, 1, 0), false);
-        assert_eq!(should_switch_directions(30, 1, 1), false);
-        assert_eq!(should_switch_directions(30, 1, 20), true);
-        assert_eq!(should_switch_directions(30, 1, 21), false);
-        assert_eq!(should_switch_directions(30, 1, 30), true);
-        assert_eq!(should_switch_directions(30, 1, 90), true);
-        assert_eq!(should_switch_directions(30, 1, 91), false);
-
-        assert_eq!(should_switch_directions(30, 2, 0), false);
-        assert_eq!(should_switch_directions(30, 2, 1), false);
-        assert_eq!(should_switch_directions(30, 2, 20), false);
-        assert_eq!(should_switch_directions(30, 2, 40), true);
-        assert_eq!(should_switch_directions(30, 2, 90), false);
-        assert_eq!(should_switch_directions(30, 2, 100), true);
-        assert_eq!(should_switch_directions(30, 2, 101), false);
-    }
-
     #[test]
     fn test_bench_tps_bank_client() {
         let (genesis_config, id) = create_genesis_config(10_000);
         let bank = Bank::new(&genesis_config);
-        let clients = vec![BankClient::new(bank)];
+        let client = Arc::new(BankClient::new(bank));

         let mut config = Config::default();
         config.id = id;
@@ -1136,23 +1241,24 @@ mod tests {
         config.duration = Duration::from_secs(5);

         let keypair_count = config.tx_count * config.keypair_multiplier;
-        let (keypairs, _move_keypairs, _keypair_balance) =
-            generate_and_fund_keypairs(&clients[0], None, &config.id, keypair_count, 20, false)
+        let (keypairs, _move_keypairs) =
+            generate_and_fund_keypairs(client.clone(), None, &config.id, keypair_count, 20, false)
                 .unwrap();

-        do_bench_tps(clients, config, keypairs, 0, None);
+        do_bench_tps(client, config, keypairs, None);
     }

     #[test]
     fn test_bench_tps_fund_keys() {
         let (genesis_config, id) = create_genesis_config(10_000);
         let bank = Bank::new(&genesis_config);
-        let client = BankClient::new(bank);
+        let client = Arc::new(BankClient::new(bank));
         let keypair_count = 20;
         let lamports = 20;

-        let (keypairs, _move_keypairs, _keypair_balance) =
-            generate_and_fund_keypairs(&client, None, &id, keypair_count, lamports, false).unwrap();
+        let (keypairs, _move_keypairs) =
+            generate_and_fund_keypairs(client.clone(), None, &id, keypair_count, lamports, false)
+                .unwrap();

         for kp in &keypairs {
             assert_eq!(
@@ -1167,26 +1273,19 @@ mod tests {
     #[test]
     fn test_bench_tps_fund_keys_with_fees() {
         let (mut genesis_config, id) = create_genesis_config(10_000);
-        let fee_calculator = FeeCalculator::new(11, 0);
-        genesis_config.fee_calculator = fee_calculator;
+        let fee_rate_governor = FeeRateGovernor::new(11, 0);
+        genesis_config.fee_rate_governor = fee_rate_governor;
         let bank = Bank::new(&genesis_config);
-        let client = BankClient::new(bank);
+        let client = Arc::new(BankClient::new(bank));
         let keypair_count = 20;
         let lamports = 20;

-        let (keypairs, _move_keypairs, _keypair_balance) =
-            generate_and_fund_keypairs(&client, None, &id, keypair_count, lamports, false).unwrap();
+        let (keypairs, _move_keypairs) =
+            generate_and_fund_keypairs(client.clone(), None, &id, keypair_count, lamports, false)
+                .unwrap();

-        let max_fee = client
-            .get_recent_blockhash_with_commitment(CommitmentConfig::recent())
-            .unwrap()
-            .1
-            .max_lamports_per_signature;
         for kp in &keypairs {
-            assert_eq!(
-                client.get_balance(&kp.pubkey()).unwrap(),
-                lamports + max_fee
-            );
+            assert_eq!(client.get_balance(&kp.pubkey()).unwrap(), lamports);
         }
     }
 }

@@ -1,10 +1,10 @@
 use clap::{crate_description, crate_name, App, Arg, ArgMatches};
 use solana_faucet::faucet::FAUCET_PORT;
-use solana_sdk::fee_calculator::FeeCalculator;
-use solana_sdk::signature::{read_keypair_file, Keypair, KeypairUtil};
+use solana_sdk::fee_calculator::FeeRateGovernor;
+use solana_sdk::signature::{read_keypair_file, Keypair};
 use std::{net::SocketAddr, process::exit, time::Duration};

-const NUM_LAMPORTS_PER_ACCOUNT_DEFAULT: u64 = solana_sdk::native_token::SOL_LAMPORTS;
+const NUM_LAMPORTS_PER_ACCOUNT_DEFAULT: u64 = solana_sdk::native_token::LAMPORTS_PER_SOL;

 /// Holds the configuration for a single run of the benchmark
 pub struct Config {
@@ -25,6 +25,7 @@ pub struct Config {
     pub multi_client: bool,
     pub use_move: bool,
     pub num_lamports_per_account: u64,
+    pub target_slots_per_epoch: u64,
 }

 impl Default for Config {
@@ -43,10 +44,11 @@ impl Default for Config {
             client_ids_and_stake_file: String::new(),
             write_to_client_file: false,
             read_from_client_file: false,
-            target_lamports_per_signature: FeeCalculator::default().target_lamports_per_signature,
+            target_lamports_per_signature: FeeRateGovernor::default().target_lamports_per_signature,
             multi_client: true,
             use_move: false,
             num_lamports_per_account: NUM_LAMPORTS_PER_ACCOUNT_DEFAULT,
+            target_slots_per_epoch: 0,
         }
     }
 }
@@ -172,6 +174,15 @@ pub fn build_args<'a, 'b>(version: &'b str) -> App<'a, 'b> {
                     "Number of lamports per account.",
                 ),
         )
+        .arg(
+            Arg::with_name("target_slots_per_epoch")
+                .long("target-slots-per-epoch")
+                .value_name("SLOTS")
+                .takes_value(true)
+                .help(
+                    "Wait until epochs are this many slots long.",
+                ),
+        )
 }

 /// Parses a clap `ArgMatches` structure into a `Config`
@@ -259,5 +270,12 @@ pub fn extract_args<'a>(matches: &ArgMatches<'a>) -> Config {
         args.num_lamports_per_account = v.to_string().parse().expect("can't parse lamports");
     }

+    if let Some(t) = matches.value_of("target_slots_per_epoch") {
+        args.target_slots_per_epoch = t
+            .to_string()
+            .parse()
+            .expect("can't parse target slots per epoch");
+    }
+
     args
 }
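The two hunks above wire the new `--target-slots-per-epoch` flag through clap and into `Config`. A minimal, self-contained sketch of the same clap 2.x wiring (the binary name is illustrative; the argument names mirror the patch):

```rust
use clap::{App, Arg};

fn main() {
    let matches = App::new("bench-tps-example")
        .arg(
            Arg::with_name("target_slots_per_epoch")
                .long("target-slots-per-epoch")
                .value_name("SLOTS")
                .takes_value(true)
                .help("Wait until epochs are this many slots long."),
        )
        .get_matches();

    // Absent flag falls back to 0, matching the Config default above.
    let target_slots_per_epoch: u64 = matches
        .value_of("target_slots_per_epoch")
        .unwrap_or("0")
        .parse()
        .expect("can't parse target slots per epoch");
    println!("target_slots_per_epoch = {}", target_slots_per_epoch);
}
```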

@@ -3,19 +3,19 @@ use solana_bench_tps::bench::{do_bench_tps, generate_and_fund_keypairs, generate
 use solana_bench_tps::cli;
 use solana_core::gossip_service::{discover_cluster, get_client, get_multi_client};
 use solana_genesis::Base64Account;
-use solana_sdk::fee_calculator::FeeCalculator;
-use solana_sdk::signature::{Keypair, KeypairUtil};
+use solana_sdk::fee_calculator::FeeRateGovernor;
+use solana_sdk::signature::{Keypair, Signer};
 use solana_sdk::system_program;
-use std::{collections::HashMap, fs::File, io::prelude::*, path::Path, process::exit};
+use std::{collections::HashMap, fs::File, io::prelude::*, path::Path, process::exit, sync::Arc};

 /// Number of signatures for all transactions in ~1 week at ~100K TPS
 pub const NUM_SIGNATURES_FOR_TXS: u64 = 100_000 * 60 * 60 * 24 * 7;

 fn main() {
-    solana_logger::setup_with_filter("solana=info");
+    solana_logger::setup_with_default("solana=info");
     solana_metrics::set_panic_hook("bench-tps");

-    let matches = cli::build_args(solana_clap_utils::version!()).get_matches();
+    let matches = cli::build_args(solana_version::version!()).get_matches();
     let cli_config = cli::extract_args(&matches);

     let cli::Config {
@@ -41,7 +41,7 @@ fn main() {
         let (keypairs, _) = generate_keypairs(&id, keypair_count as u64);
         let num_accounts = keypairs.len() as u64;
         let max_fee =
-            FeeCalculator::new(*target_lamports_per_signature, 0).max_lamports_per_signature;
+            FeeRateGovernor::new(*target_lamports_per_signature, 0).max_lamports_per_signature;
         let num_lamports_per_account = (num_accounts - 1 + NUM_SIGNATURES_FOR_TXS * max_fee)
             / num_accounts
             + num_lamports_per_account;
@@ -67,8 +67,7 @@ fn main() {
     }

     info!("Connecting to the cluster");
-    let (nodes, _archivers) =
-        discover_cluster(&entrypoint_addr, *num_nodes).unwrap_or_else(|err| {
+    let nodes = discover_cluster(&entrypoint_addr, *num_nodes).unwrap_or_else(|err| {
         eprintln!("Failed to discover {} nodes: {:?}", num_nodes, err);
         exit(1);
     });
@@ -82,12 +81,12 @@ fn main() {
             );
             exit(1);
         }
-        client
+        Arc::new(client)
     } else {
-        get_client(&nodes)
+        Arc::new(get_client(&nodes))
     };

-    let (keypairs, move_keypairs, keypair_balance) = if *read_from_client_file && !use_move {
+    let (keypairs, move_keypairs) = if *read_from_client_file && !use_move {
         let path = Path::new(&client_ids_and_stake_file);
         let file = File::open(path).unwrap();

@@ -117,10 +116,10 @@ fn main() {
         // This prevents the amount of storage needed for bench-tps accounts from creeping up
         // across multiple runs.
         keypairs.sort_by(|x, y| x.pubkey().to_string().cmp(&y.pubkey().to_string()));
-        (keypairs, None, last_balance)
+        (keypairs, None)
     } else {
         generate_and_fund_keypairs(
-            &client,
+            client.clone(),
             Some(*faucet_addr),
             &id,
             keypair_count,
@@ -133,11 +132,5 @@ fn main() {
         })
     };

-    do_bench_tps(
-        vec![client],
-        cli_config,
-        keypairs,
-        keypair_balance,
-        move_keypairs,
-    );
+    do_bench_tps(client, cli_config, keypairs, move_keypairs);
 }

@@ -8,8 +8,8 @@ use solana_faucet::faucet::run_local_faucet;
 use solana_local_cluster::local_cluster::{ClusterConfig, LocalCluster};
 #[cfg(feature = "move")]
 use solana_sdk::move_loader::solana_move_loader_program;
-use solana_sdk::signature::{Keypair, KeypairUtil};
-use std::sync::mpsc::channel;
+use solana_sdk::signature::{Keypair, Signer};
+use std::sync::{mpsc::channel, Arc};
 use std::time::Duration;

 fn test_bench_tps_local_cluster(config: Config) {
@@ -36,10 +36,10 @@ fn test_bench_tps_local_cluster(config: Config) {
         100_000_000,
     );

-    let client = create_client(
+    let client = Arc::new(create_client(
         (cluster.entry_point_info.rpc, cluster.entry_point_info.tpu),
         VALIDATOR_PORT_RANGE,
-    );
+    ));

     let (addr_sender, addr_receiver) = channel();
     run_local_faucet(faucet_keypair, addr_sender, None);
@@ -48,8 +48,8 @@ fn test_bench_tps_local_cluster(config: Config) {
     let lamports_per_account = 100;

     let keypair_count = config.tx_count * config.keypair_multiplier;
-    let (keypairs, move_keypairs, _keypair_balance) = generate_and_fund_keypairs(
-        &client,
+    let (keypairs, move_keypairs) = generate_and_fund_keypairs(
+        client.clone(),
         Some(faucet_addr),
         &config.id,
         keypair_count,
@@ -58,7 +58,7 @@ fn test_bench_tps_local_cluster(config: Config) {
     )
     .unwrap();

-    let _total = do_bench_tps(vec![client], config, keypairs, 0, move_keypairs);
+    let _total = do_bench_tps(client, config, keypairs, move_keypairs);

     #[cfg(not(debug_assertions))]
     assert!(_total > 100);

@@ -1,26 +0,0 @@
-Building the Solana book
----
-
-Install the book's dependnecies, build, and test the book:
-
-```bash
-$ ./build.sh
-```
-
-Run any Rust tests in the markdown:
-
-```bash
-$ make test
-```
-
-Render markdown as HTML:
-
-```bash
-$ make build
-```
-
-Render and view the book:
-
-```bash
-$ make open
-```
@@ -1,18 +0,0 @@
-+------------+
-| Bank-Merkle|
-+------------+
-     ^     ^
-    /       \
-+-----------------+ +-------------+
-| Bank-Diff-Merkle| | Block-Merkle|
-+-----------------+ +-------------+
-     ^     ^
-    /       \
-+------+ +--------------------------+
-| Hash | | Previous Bank-Diff-Merkle|
-+------+ +--------------------------+
-     ^     ^
-    /       \
-+---------------+ +---------------+
-| Hash(Account1)| | Hash(Account2)|
-+---------------+ +---------------+
@@ -1,22 +0,0 @@
-.--------.
-| Leader |
-`--------`
-    ^
-    |
-.------------------------------------|--------------------.
-| TVU                                |                     |
-|                                    |                     |
-|                .-------.  .------------.  .----+---.  .---------. |
-.------------.   | Shred |  | Retransmit |  | Replay |  | Storage | |
-| Upstream   +-->| Fetch +-->| Stage     +-->| Stage +-->| Stage  | |
-| Validators |   | Stage |  |           |   |        |  |         | |
-`------------`   `-------`  `----+------`   `----+---`  `---------` |
-|                    ^           |                |                 |
-|                    |           |                |                 |
-`--------|-----------|-----------|----------------|----------------`
-         |           |           |
-         |           V           v
-       .+-----------.          .------.
-       | Gossip     |          | Bank |
-       | Service    |          `------`
-       `------------`
@@ -1,6 +0,0 @@
-#!/usr/bin/env bash
-set -e
-
-cd "$(dirname "$0")"
-
-make -j"$(nproc)" test
@@ -1,89 +0,0 @@
-# Table of contents
-
-* [Introduction](introduction.md)
-* [Terminology](terminology.md)
-* [Getting Started](getting-started/README.md)
-  * [Testnet Participation](getting-started/testnet-participation.md)
-  * [Example Client: Web Wallet](getting-started/webwallet.md)
-* [Programming Model](programs/README.md)
-  * [Example: Tic-Tac-Toe](programs/tictactoe.md)
-  * [Drones](programs/drones.md)
-* [A Solana Cluster](cluster/README.md)
-  * [Synchronization](cluster/synchronization.md)
-  * [Leader Rotation](cluster/leader-rotation.md)
-  * [Fork Generation](cluster/fork-generation.md)
-  * [Managing Forks](cluster/managing-forks.md)
-  * [Turbine Block Propagation](cluster/turbine-block-propagation.md)
-  * [Ledger Replication](cluster/ledger-replication.md)
-  * [Secure Vote Signing](cluster/vote-signing.md)
-  * [Stake Delegation and Rewards](cluster/stake-delegation-and-rewards.md)
-  * [Performance Metrics](cluster/performance-metrics.md)
-* [Anatomy of a Validator](validator/README.md)
-  * [TPU](validator/tpu.md)
-  * [TVU](validator/tvu/README.md)
-    * [Blocktree](validator/tvu/blocktree.md)
-  * [Gossip Service](validator/gossip.md)
-  * [The Runtime](validator/runtime.md)
-* [Anatomy of a Transaction](transaction.md)
-* [Running a Validator](running-validator/README.md)
-  * [Validator Requirements](running-validator/validator-reqs.md)
-  * [Choosing a Testnet](running-validator/validator-testnet.md)
-  * [Installing the Validator Software](running-validator/validator-software.md)
-  * [Starting a Validator](running-validator/validator-start.md)
-  * [Staking](running-validator/validator-stake.md)
-  * [Monitoring a Validator](running-validator/validator-monitor.md)
-  * [Publishing Validator Info](running-validator/validator-info.md)
-  * [Troubleshooting](running-validator/validator-troubleshoot.md)
-* [Running an Archiver](running-archiver.md)
-* [Paper Wallet](paper-wallet/README.md)
-  * [Installation](paper-wallet/installation.md)
-  * [Paper Wallet Usage](paper-wallet/usage.md)
-* [Offline Signing](offline-signing/README.md)
-* [API Reference](api-reference/README.md)
-  * [Transaction](api-reference/transaction-api.md)
-  * [Instruction](api-reference/instruction-api.md)
-  * [Blockstreamer](api-reference/blockstreamer.md)
-  * [JSON RPC API](api-reference/jsonrpc-api.md)
-  * [JavaScript API](api-reference/javascript-api.md)
-  * [solana CLI](api-reference/cli.md)
-* [Accepted Design Proposals](proposals/README.md)
-  * [Ledger Replication](proposals/ledger-replication-to-implement.md)
-  * [Secure Vote Signing](proposals/vote-signing-to-implement.md)
-  * [Cluster Test Framework](proposals/cluster-test-framework.md)
-  * [Validator](proposals/validator-proposal.md)
-  * [Simple Payment and State Verification](proposals/simple-payment-and-state-verification.md)
-  * [Cross-Program Invocation](proposals/cross-program-invocation.md)
-  * [Inter-chain Transaction Verification](proposals/interchain-transaction-verification.md)
-  * [Snapshot Verification](proposals/snapshot-verification.md)
-  * [Bankless Leader](proposals/bankless-leader.md)
-  * [Slashing](proposals/slashing.md)
-* [Implemented Design Proposals](implemented-proposals/README.md)
-  * [Blocktree](implemented-proposals/blocktree.md)
-  * [Cluster Software Installation and Updates](implemented-proposals/installer.md)
-  * [Cluster Economics](implemented-proposals/ed_overview/README.md)
-    * [Validation-client Economics](implemented-proposals/ed_overview/ed_validation_client_economics/README.md)
-      * [State-validation Protocol-based Rewards](implemented-proposals/ed_overview/ed_validation_client_economics/ed_vce_state_validation_protocol_based_rewards.md)
-      * [State-validation Transaction Fees](implemented-proposals/ed_overview/ed_validation_client_economics/ed_vce_state_validation_transaction_fees.md)
-      * [Replication-validation Transaction Fees](implemented-proposals/ed_overview/ed_validation_client_economics/ed_vce_replication_validation_transaction_fees.md)
-      * [Validation Stake Delegation](implemented-proposals/ed_overview/ed_validation_client_economics/ed_vce_validation_stake_delegation.md)
-    * [Replication-client Economics](implemented-proposals/ed_overview/ed_replication_client_economics/README.md)
-      * [Storage-replication Rewards](implemented-proposals/ed_overview/ed_replication_client_economics/ed_rce_storage_replication_rewards.md)
-      * [Replication-client Reward Auto-delegation](implemented-proposals/ed_overview/ed_replication_client_economics/ed_rce_replication_client_reward_auto_delegation.md)
-    * [Economic Sustainability](implemented-proposals/ed_overview/ed_economic_sustainability.md)
-    * [Attack Vectors](implemented-proposals/ed_overview/ed_attack_vectors.md)
-    * [Economic Design MVP](implemented-proposals/ed_overview/ed_mvp.md)
-    * [References](implemented-proposals/ed_overview/ed_references.md)
-  * [Deterministic Transaction Fees](implemented-proposals/transaction-fees.md)
-  * [Tower BFT](implemented-proposals/tower-bft.md)
-  * [Leader-to-Leader Transition](implemented-proposals/leader-leader-transition.md)
-  * [Leader-to-Validator Transition](implemented-proposals/leader-validator-transition.md)
-  * [Persistent Account Storage](implemented-proposals/persistent-account-storage.md)
-  * [Reliable Vote Transmission](implemented-proposals/reliable-vote-transmission.md)
-  * [Repair Service](implemented-proposals/repair-service.md)
-  * [Testing Programs](implemented-proposals/testing-programs.md)
-  * [Credit-only Accounts](implemented-proposals/readonly-accounts.md)
-  * [Embedding the Move Langauge](implemented-proposals/embedding-move.md)
-  * [Staking Rewards](implemented-proposals/staking-rewards.md)
-  * [Rent](implemented-proposals/rent.md)
-  * [Durable Transaction Nonces](implemented-proposals/durable-tx-nonces.md)
-  * [Validator Timestamp Oracle](implemented-proposals/validator-timestamp-oracle.md)
@@ -1,4 +0,0 @@
-# API Reference
-
-The following sections contain API references material you may find useful when developing applications utilizing a Solana cluster.
-
@@ -1,28 +0,0 @@
-# Blockstreamer
-
-Solana supports a node type called an _blockstreamer_. This validator variation is intended for applications that need to observe the data plane without participating in transaction validation or ledger replication.
-
-A blockstreamer runs without a vote signer, and can optionally stream ledger entries out to a Unix domain socket as they are processed. The JSON-RPC service still functions as on any other node.
-
-To run a blockstreamer, include the argument `no-signer` and \(optional\) `blockstream` socket location:
-
-```bash
-$ ./multinode-demo/validator-x.sh --no-signer --blockstream <SOCKET>
-```
-
-The stream will output a series of JSON objects:
-
-* An Entry event JSON object is sent when each ledger entry is processed, with the following fields:
-  * `dt`, the system datetime, as RFC3339-formatted string
-  * `t`, the event type, always "entry"
-  * `s`, the slot height, as unsigned 64-bit integer
-  * `h`, the tick height, as unsigned 64-bit integer
-  * `entry`, the entry, as JSON object
-* A Block event JSON object is sent when a block is complete, with the following fields:
-  * `dt`, the system datetime, as RFC3339-formatted string
-  * `t`, the event type, always "block"
-  * `s`, the slot height, as unsigned 64-bit integer
-  * `h`, the tick height, as unsigned 64-bit integer
-  * `l`, the slot leader id, as base-58 encoded string
-  * `hash`, the [blockhash](terminology.md#blockhash), as base-58 encoded string
-
File diff suppressed because it is too large
# Instruction

For the purposes of building a [Transaction](../transaction.md), a more verbose instruction format is used:

* **Instruction:**
  * **program\_id:** The pubkey of the on-chain program that executes the instruction
  * **accounts:** An ordered list of accounts that should be passed to the program processing the instruction, including metadata detailing if an account is a signer of the transaction and if it is a credit-only account.
  * **data:** A byte array that is passed to the program executing the instruction

A more compact form is actually included in a `Transaction`:

* **CompiledInstruction:**
  * **program\_id\_index:** The index of the `program_id` in the `account_keys` list
  * **accounts:** An ordered list of indices into `account_keys` specifying the accounts that should be passed to the program processing the instruction.
  * **data:** A byte array that is passed to the program executing the instruction
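For concreteness, here is a sketch of how a `CompiledInstruction` surfaces in JSON-RPC output \(compare the getConfirmedBlock example later in this document\); the field values are illustrative only:

```bash
// A CompiledInstruction as rendered in JSON-RPC output (values illustrative)
{
  "program_id_index": 4,      // index of the program's pubkey in account_keys
  "accounts": [1, 2, 3],      // indices into account_keys, in the order the program expects
  "data": [2, 0, 0, 0, 173, 1, 0, 0]   // opaque bytes interpreted by the program
}
```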
# JSON RPC API

Solana nodes accept HTTP requests using the [JSON-RPC 2.0](https://www.jsonrpc.org/specification) specification.

To interact with a Solana node inside a JavaScript application, use the [solana-web3.js](https://github.com/solana-labs/solana-web3.js) library, which gives a convenient interface for the RPC methods.

## RPC HTTP Endpoint

**Default port:** 8899 e.g. [http://localhost:8899](http://localhost:8899), [http://192.168.1.88:8899](http://192.168.1.88:8899)

## RPC PubSub WebSocket Endpoint

**Default port:** 8900 e.g. ws://localhost:8900, ws://192.168.1.88:8900

## Methods
* [confirmTransaction](jsonrpc-api.md#confirmtransaction)
* [getAccountInfo](jsonrpc-api.md#getaccountinfo)
* [getBalance](jsonrpc-api.md#getbalance)
* [getBlockCommitment](jsonrpc-api.md#getblockcommitment)
* [getBlockTime](jsonrpc-api.md#getblocktime)
* [getClusterNodes](jsonrpc-api.md#getclusternodes)
* [getConfirmedBlock](jsonrpc-api.md#getconfirmedblock)
* [getConfirmedBlocks](jsonrpc-api.md#getconfirmedblocks)
* [getEpochInfo](jsonrpc-api.md#getepochinfo)
* [getEpochSchedule](jsonrpc-api.md#getepochschedule)
* [getGenesisHash](jsonrpc-api.md#getgenesishash)
* [getLeaderSchedule](jsonrpc-api.md#getleaderschedule)
* [getMinimumBalanceForRentExemption](jsonrpc-api.md#getminimumbalanceforrentexemption)
* [getNumBlocksSinceSignatureConfirmation](jsonrpc-api.md#getnumblockssincesignatureconfirmation)
* [getProgramAccounts](jsonrpc-api.md#getprogramaccounts)
* [getRecentBlockhash](jsonrpc-api.md#getrecentblockhash)
* [getSignatureStatus](jsonrpc-api.md#getsignaturestatus)
* [getSlot](jsonrpc-api.md#getslot)
* [getSlotLeader](jsonrpc-api.md#getslotleader)
* [getSlotsPerSegment](jsonrpc-api.md#getslotspersegment)
* [getStorageTurn](jsonrpc-api.md#getstorageturn)
* [getStorageTurnRate](jsonrpc-api.md#getstorageturnrate)
* [getTransactionCount](jsonrpc-api.md#gettransactioncount)
* [getTotalSupply](jsonrpc-api.md#gettotalsupply)
* [getVersion](jsonrpc-api.md#getversion)
* [getVoteAccounts](jsonrpc-api.md#getvoteaccounts)
* [requestAirdrop](jsonrpc-api.md#requestairdrop)
* [sendTransaction](jsonrpc-api.md#sendtransaction)
* [startSubscriptionChannel](jsonrpc-api.md#startsubscriptionchannel)
* [Subscription Websocket](jsonrpc-api.md#subscription-websocket)
  * [accountSubscribe](jsonrpc-api.md#accountsubscribe)
  * [accountUnsubscribe](jsonrpc-api.md#accountunsubscribe)
  * [programSubscribe](jsonrpc-api.md#programsubscribe)
  * [programUnsubscribe](jsonrpc-api.md#programunsubscribe)
  * [signatureSubscribe](jsonrpc-api.md#signaturesubscribe)
  * [signatureUnsubscribe](jsonrpc-api.md#signatureunsubscribe)
## Request Formatting

To make a JSON-RPC request, send an HTTP POST request with a `Content-Type: application/json` header. The JSON request data should contain 4 fields:

* `jsonrpc`, set to `"2.0"`
* `id`, a unique client-generated identifying integer
* `method`, a string containing the method to be invoked
* `params`, a JSON array of ordered parameter values

Example using curl:

```bash
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0", "id":1, "method":"getBalance", "params":["83astBRguLMdt2h5U1Tpdq5tjFoJ6noeGwaY3mDLVcri"]}' 192.168.1.88:8899
```

The response output will be a JSON object with the following fields:

* `jsonrpc`, matching the request specification
* `id`, matching the request identifier
* `result`, requested data or success confirmation

Requests can be sent in batches by sending an array of JSON-RPC request objects as the data for a single POST, as sketched below.
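As a sketch \(the second request and both results are illustrative\), a batch of two requests in one POST:

```bash
// Request (batch): a JSON array of request objects in a single POST
curl -X POST -H "Content-Type: application/json" -d '[{"jsonrpc":"2.0","id":1,"method":"getBalance","params":["83astBRguLMdt2h5U1Tpdq5tjFoJ6noeGwaY3mDLVcri"]},{"jsonrpc":"2.0","id":2,"method":"getTransactionCount"}]' 192.168.1.88:8899

// Result: an array of response objects, matched to requests by id
[{"jsonrpc":"2.0","result":{"context":{"slot":1},"value":0},"id":1},{"jsonrpc":"2.0","result":268,"id":2}]
```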
## Definitions

* Hash: A SHA-256 hash of a chunk of data.
* Pubkey: The public key of an Ed25519 key-pair.
* Signature: An Ed25519 signature of a chunk of data.
* Transaction: A Solana instruction signed by a client key-pair.
## Configuring State Commitment

Solana nodes choose which bank state to query based on a commitment requirement set by the client. Clients may specify either:

* `{"commitment":"max"}` - the node will query the most recent bank having reached `MAX_LOCKOUT_HISTORY` confirmations
* `{"commitment":"recent"}` - the node will query its most recent bank state

The commitment parameter should be included as the last element in the `params` array:

```bash
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0", "id":1, "method":"getBalance", "params":["83astBRguLMdt2h5U1Tpdq5tjFoJ6noeGwaY3mDLVcri",{"commitment":"max"}]}' 192.168.1.88:8899
```

#### Default:

If commitment configuration is not provided, the node will default to `"commitment":"max"`.

Only methods that query bank state accept the commitment parameter. They are indicated in the API Reference below.

#### RpcResponse Structure

Many methods that take a commitment parameter return an RpcResponse JSON object comprised of two parts:

* `context` : An RpcResponseContext JSON structure including a `slot` field at which the operation was evaluated.
* `value` : The value returned by the operation itself.
## JSON RPC API Reference

### confirmTransaction

Returns a transaction receipt

#### Parameters:

* `string` - Signature of Transaction to confirm, as base-58 encoded string
* `object` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment)

#### Results:

* `RpcResponse<boolean>` - RpcResponse JSON object with `value` field set to Transaction status, boolean true if Transaction is confirmed

#### Example:

```bash
// Request
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0", "id":1, "method":"confirmTransaction", "params":["5VERv8NMvzbJMEkV8xnrLkEaWRtSz9CosKDYjCJjBRnbJLgp8uirBgmQpjKhoR4tjF3ZpRzrFmBV6UjKdiSZkQUW"]}' http://localhost:8899

// Result
{"jsonrpc":"2.0","result":{"context":{"slot":1},"value":true},"id":1}
```
### getAccountInfo

Returns all information associated with the account of provided Pubkey

#### Parameters:

* `string` - Pubkey of account to query, as base-58 encoded string
* `object` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment)

#### Results:

The result value will be an RpcResponse JSON object containing an AccountInfo JSON object.

* `RpcResponse<AccountInfo>`, RpcResponse JSON object with `value` field set to AccountInfo, a JSON object containing:
  * `lamports`, number of lamports assigned to this account, as a u64
  * `owner`, array of 32 bytes representing the program this account has been assigned to
  * `data`, array of bytes representing any data associated with the account
  * `executable`, boolean indicating if the account contains a program \(and is strictly read-only\)

#### Example:

```bash
// Request
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0", "id":1, "method":"getAccountInfo", "params":["2gVkYWexTHR5Hb2aLeQN3tnngvWzisFKXDUPrgMHpdST"]}' http://localhost:8899

// Result
{"jsonrpc":"2.0","result":{"context":{"slot":1},"value":{"executable":false,"owner":[1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"lamports":1,"data":[3,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,50,48,53,48,45,48,49,45,48,49,84,48,48,58,48,48,58,48,48,90,252,10,7,28,246,140,88,177,98,82,10,227,89,81,18,30,194,101,199,16,11,73,133,20,246,62,114,39,20,113,189,32,50,0,0,0,0,0,0,0,247,15,36,102,167,83,225,42,133,127,82,34,36,224,207,130,109,230,224,188,163,33,213,13,5,117,211,251,65,159,197,51,0,0,0,0,0,0]}},"id":1}
```
### getBalance

Returns the balance of the account of provided Pubkey

#### Parameters:

* `string` - Pubkey of account to query, as base-58 encoded string
* `object` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment)

#### Results:

* `RpcResponse<u64>` - RpcResponse JSON object with `value` field set to quantity

#### Example:

```bash
// Request
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0", "id":1, "method":"getBalance", "params":["83astBRguLMdt2h5U1Tpdq5tjFoJ6noeGwaY3mDLVcri"]}' http://localhost:8899

// Result
{"jsonrpc":"2.0","result":{"context":{"slot":1},"value":0},"id":1}
```
### getBlockCommitment

Returns commitment for particular block

#### Parameters:

* `u64` - block, identified by Slot

#### Results:

The result field will be an array with two elements:

* Commitment
  * `null` - Unknown block
  * `object` - BlockCommitment
    * `array` - commitment, array of u64 integers logging the amount of cluster stake in lamports that has voted on the block at each depth from 0 to `MAX_LOCKOUT_HISTORY`
* `integer` - total active stake, in lamports, of the current epoch

#### Example:

```bash
// Request
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getBlockCommitment","params":[5]}' http://localhost:8899

// Result
{"jsonrpc":"2.0","result":[{"commitment":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,10,32]},42],"id":1}
```
### getBlockTime

Returns the estimated production time of a block. Validators report their UTC time to the ledger on a regular interval. A block's time is calculated as an offset from the median value of the most recent validator time report.

#### Parameters:

* `u64` - block, identified by Slot

#### Results:

* `null` - block has not yet been produced
* `i64` - estimated production time, as Unix timestamp (seconds since the Unix epoch)

#### Example:

```bash
// Request
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getBlockTime","params":[5]}' http://localhost:8899

// Result
{"jsonrpc":"2.0","result":1574721591,"id":1}
```
### getClusterNodes

Returns information about all the nodes participating in the cluster

#### Parameters:

None

#### Results:

The result field will be an array of JSON objects, each with the following sub fields:

* `pubkey` - Node public key, as base-58 encoded string
* `gossip` - Gossip network address for the node
* `tpu` - TPU network address for the node
* `rpc` - JSON RPC network address for the node, or `null` if the JSON RPC service is not enabled

#### Example:

```bash
// Request
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0", "id":1, "method":"getClusterNodes"}' http://localhost:8899

// Result
{"jsonrpc":"2.0","result":[{"gossip":"10.239.6.48:8001","pubkey":"9QzsJf7LPLj8GkXbYT3LFDKqsj2hHG7TA3xinJHu8epQ","rpc":"10.239.6.48:8899","tpu":"10.239.6.48:8856"}],"id":1}
```
### getConfirmedBlock

Returns identity and transaction information about a confirmed block in the ledger

#### Parameters:

* `integer` - slot, as u64 integer

#### Results:

The result field will be an object with the following fields:

* `blockhash` - the blockhash of this block
* `previousBlockhash` - the blockhash of this block's parent
* `parentSlot` - the slot index of this block's parent
* `transactions` - an array of tuples containing:
  * [Transaction](transaction-api.md) object, in JSON format
  * Transaction status object, containing:
    * `status` - Transaction status:
      * `"Ok": null` - Transaction was successful
      * `"Err": <ERR>` - Transaction failed with TransactionError [TransactionError definitions](https://github.com/solana-labs/solana/blob/master/sdk/src/transaction.rs#L14)
    * `fee` - fee this transaction was charged, as u64 integer
    * `preBalances` - array of u64 account balances from before the transaction was processed
    * `postBalances` - array of u64 account balances after the transaction was processed

#### Example:

```bash
// Request
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc": "2.0","id":1,"method":"getConfirmedBlock","params":[430]}' localhost:8899

// Result
{"jsonrpc":"2.0","result":{"blockhash":[165,245,120,183,32,205,89,222,249,114,229,49,250,231,149,122,156,232,181,83,238,194,157,153,7,213,180,54,177,6,25,101],"parentSlot":429,"previousBlockhash":[21,108,181,90,139,241,212,203,45,78,232,29,161,31,159,188,110,82,81,11,250,74,47,140,188,28,23,96,251,164,208,166],"transactions":[[{"message":{"accountKeys":[[5],[219,181,202,40,52,148,34,136,186,59,137,160,250,225,234,17,244,160,88,116,24,176,30,227,68,11,199,38,141,68,131,228],[233,48,179,56,91,40,254,206,53,48,196,176,119,248,158,109,121,77,11,69,108,160,128,27,228,122,146,249,53,184,68,87],[6,167,213,23,25,47,10,175,198,242,101,227,251,119,204,122,218,130,197,41,208,190,59,19,110,45,0,85,32,0,0,0],[6,167,213,23,24,199,116,201,40,86,99,152,105,29,94,182,139,94,184,163,155,75,109,92,115,85,91,33,0,0,0,0],[7,97,72,29,53,116,116,187,124,77,118,36,235,211,189,179,216,53,94,115,209,16,67,252,13,163,83,128,0,0,0,0]],"header":{"numReadonlySignedAccounts":0,"numReadonlyUnsignedAccounts":3,"numRequiredSignatures":2},"instructions":[[1],{"accounts":[[3],1,2,3],"data":[[52],2,0,0,0,1,0,0,0,0,0,0,0,173,1,0,0,0,0,0,0,86,55,9,248,142,238,135,114,103,83,247,124,67,68,163,233,55,41,59,129,64,50,110,221,234,234,27,213,205,193,219,50],"program_id_index":4}],"recentBlockhash":[21,108,181,90,139,241,212,203,45,78,232,29,161,31,159,188,110,82,81,11,250,74,47,140,188,28,23,96,251,164,208,166]},"signatures":[[2],[119,9,95,108,35,95,7,1,69,101,65,45,5,204,61,114,172,88,123,238,32,201,135,229,57,50,13,21,106,216,129,183,238,43,37,101,148,81,56,232,88,136,80,65,46,189,39,106,94,13,238,54,186,48,118,186,0,62,121,122,172,171,66,5],[78,40,77,250,10,93,6,157,48,173,100,40,251,9,7,218,7,184,43,169,76,240,254,34,235,48,41,175,119,126,75,107,106,248,45,161,119,48,174,213,57,69,111,225,245,60,148,73,124,82,53,6,203,126,120,180,111,169,89,64,29,23,237,13]]},{"fee":100000,"status":{"Ok":null},"preBalances":[499998337500,15298080,1,1,1],"postBalances":[499998237500,15298080,1,1,1]}]]},"id":1}
```
### getConfirmedBlocks

Returns a list of confirmed blocks

#### Parameters:

* `integer` - start_slot, as u64 integer
* `integer` - (optional) end_slot, as u64 integer

#### Results:

The result field will be an array of u64 integers listing confirmed blocks between start_slot and either end_slot, if provided, or the latest confirmed block, inclusive.

#### Example:

```bash
// Request
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc": "2.0","id":1,"method":"getConfirmedBlocks","params":[5, 10]}' localhost:8899

// Result
{"jsonrpc":"2.0","result":[5,6,7,8,9,10],"id":1}
```
### getEpochInfo

Returns information about the current epoch

#### Parameters:

* `object` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment)

#### Results:

The result field will be an object with the following fields:

* `epoch`, the current epoch
* `slotIndex`, the current slot relative to the start of the current epoch
* `slotsInEpoch`, the number of slots in this epoch

#### Example:

```bash
// Request
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getEpochInfo"}' http://localhost:8899

// Result
{"jsonrpc":"2.0","result":{"epoch":3,"slotIndex":126,"slotsInEpoch":256},"id":1}
```
### getEpochSchedule

Returns epoch schedule information from this cluster's genesis config

#### Parameters:

None

#### Results:

The result field will be an object with the following fields:

* `slots_per_epoch`, the maximum number of slots in each epoch
* `leader_schedule_slot_offset`, the number of slots before the beginning of an epoch to calculate a leader schedule for that epoch
* `warmup`, whether epochs start short and grow
* `first_normal_epoch`, first normal-length epoch, log2(slots_per_epoch) - log2(MINIMUM_SLOTS_PER_EPOCH)
* `first_normal_slot`, MINIMUM_SLOTS_PER_EPOCH * (2.pow(first_normal_epoch) - 1)

#### Example:

```bash
// Request
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getEpochSchedule"}' http://localhost:8899

// Result
{"jsonrpc":"2.0","result":{"first_normal_epoch":8,"first_normal_slot":8160,"leader_schedule_slot_offset":8192,"slots_per_epoch":8192,"warmup":true},"id":1}
```
### getGenesisHash

Returns the genesis hash

#### Parameters:

None

#### Results:

* `string` - a Hash as base-58 encoded string

#### Example:

```bash
// Request
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getGenesisHash"}' http://localhost:8899

// Result
{"jsonrpc":"2.0","result":"GH7ome3EiwEr7tu9JuTh2dpYWBJK3z69Xm1ZE3MEE6JC","id":1}
```
### getLeaderSchedule

Returns the leader schedule for an epoch

#### Parameters:

* `slot` - (optional) Fetch the leader schedule for the epoch that corresponds to the provided slot. If unspecified, the leader schedule for the current epoch is fetched
* `object` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment)

#### Results:

The result field will be a dictionary of leader public keys \(as base-58 encoded strings\) and their corresponding leader slot indices as values \(indices are relative to the first slot in the requested epoch\)

#### Example:

```bash
// Request
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getLeaderSchedule"}' http://localhost:8899

// Result
{"jsonrpc":"2.0","result":{"4Qkev8aNZcqFNSRhQzwyLMFSsi94jHqE8WNVTJzTP99F":[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63]},"id":1}
```
### getMinimumBalanceForRentExemption

Returns minimum balance required to make account rent exempt.

#### Parameters:

* `u64` - account data length
* `object` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment)

#### Results:

* `u64` - minimum lamports required in account

#### Example:

```bash
// Request
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0", "id":1, "method":"getMinimumBalanceForRentExemption", "params":[50]}' http://localhost:8899

// Result
{"jsonrpc":"2.0","result":500,"id":1}
```
### getNumBlocksSinceSignatureConfirmation

Returns the current number of blocks since the signature was confirmed.

#### Parameters:

* `string` - Signature of Transaction to confirm, as base-58 encoded string
* `object` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment)

#### Results:

* `u64` - count

#### Example:

```bash
// Request
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0", "id":1, "method":"getNumBlocksSinceSignatureConfirmation", "params":["5VERv8NMvzbJMEkV8xnrLkEaWRtSz9CosKDYjCJjBRnbJLgp8uirBgmQpjKhoR4tjF3ZpRzrFmBV6UjKdiSZkQUW"]}' http://localhost:8899

// Result
{"jsonrpc":"2.0","result":8,"id":1}
```
### getProgramAccounts

Returns all accounts owned by the provided program Pubkey

#### Parameters:

* `string` - Pubkey of program, as base-58 encoded string
* `object` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment)

#### Results:

The result field will be an array of arrays. Each sub array will contain:

* `string` - the account Pubkey as base-58 encoded string and a JSON object, with the following sub fields:
  * `lamports`, number of lamports assigned to this account, as a u64
  * `owner`, array of 32 bytes representing the program this account has been assigned to
  * `data`, array of bytes representing any data associated with the account
  * `executable`, boolean indicating if the account contains a program \(and is strictly read-only\)

#### Example:

```bash
// Request
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0", "id":1, "method":"getProgramAccounts", "params":["8nQwAgzN2yyUzrukXsCa3JELBYqDQrqJ3UyHiWazWxHR"]}' http://localhost:8899

// Result
{"jsonrpc":"2.0","result":[["BqGKYtAKu69ZdWEBtZHh4xgJY1BYa2YBiBReQE3pe383",{"executable":false,"owner":[50,28,250,90,221,24,94,136,147,165,253,136,1,62,196,215,225,34,222,212,99,84,202,223,245,13,149,99,149,231,91,96],"lamports":1,"data":[]}],["4Nd1mBQtrMJVYVfKf2PJy9NZUZdTAsp7D4xWLs4gDB4T",{"executable":false,"owner":[50,28,250,90,221,24,94,136,147,165,253,136,1,62,196,215,225,34,222,212,99,84,202,223,245,13,149,99,149,231,91,96],"lamports":10,"data":[]}]],"id":1}
```
### getRecentBlockhash

Returns a recent block hash from the ledger, and a fee schedule that can be used to compute the cost of submitting a transaction using it.

#### Parameters:

* `object` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment)

#### Results:

An RpcResponse containing an array consisting of a string blockhash and FeeCalculator JSON object.

* `RpcResponse<array>` - RpcResponse JSON object with `value` field set to an array including:
  * `string` - a Hash as base-58 encoded string
  * `FeeCalculator object` - the fee schedule for this block hash

#### Example:

```bash
// Request
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getRecentBlockhash"}' http://localhost:8899

// Result
{"jsonrpc":"2.0","result":{"context":{"slot":1},"value":["GH7ome3EiwEr7tu9JuTh2dpYWBJK3z69Xm1ZE3MEE6JC",{"lamportsPerSignature": 0}]},"id":1}
```
### getSignatureStatus

Returns the status of a given signature. This method is similar to [confirmTransaction](jsonrpc-api.md#confirmtransaction) but provides more resolution for error events.

#### Parameters:

* `string` - Signature of Transaction to confirm, as base-58 encoded string
* `object` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment)

#### Results:

* `null` - Unknown transaction
* `object` - Transaction status:
  * `"Ok": null` - Transaction was successful
  * `"Err": <ERR>` - Transaction failed with TransactionError [TransactionError definitions](https://github.com/solana-labs/solana/blob/master/sdk/src/transaction.rs#L14)

#### Example:

```bash
// Request
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0", "id":1, "method":"getSignatureStatus", "params":["5VERv8NMvzbJMEkV8xnrLkEaWRtSz9CosKDYjCJjBRnbJLgp8uirBgmQpjKhoR4tjF3ZpRzrFmBV6UjKdiSZkQUW"]}' http://localhost:8899

// Result
{"jsonrpc":"2.0","result":"SignatureNotFound","id":1}
```
### getSlot

Returns the current slot the node is processing

#### Parameters:

* `object` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment)

#### Results:

* `u64` - Current slot

#### Example:

```bash
// Request
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getSlot"}' http://localhost:8899

// Result
{"jsonrpc":"2.0","result":"1234","id":1}
```
### getSlotLeader

Returns the current slot leader

#### Parameters:

* `object` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment)

#### Results:

* `string` - Node Id as base-58 encoded string

#### Example:

```bash
// Request
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getSlotLeader"}' http://localhost:8899

// Result
{"jsonrpc":"2.0","result":"ENvAW7JScgYq6o4zKZwewtkzzJgDzuJAFxYasvmEQdpS","id":1}
```
### getSlotsPerSegment

Returns the current storage segment size in terms of slots

#### Parameters:

* `object` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment)

#### Results:

* `u64` - Number of slots in a storage segment

#### Example:

```bash
// Request
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getSlotsPerSegment"}' http://localhost:8899

// Result
{"jsonrpc":"2.0","result":"1024","id":1}
```
### getStorageTurn

Returns the current storage turn's blockhash and slot

#### Parameters:

None

#### Results:

An array consisting of:

* `string` - a Hash as base-58 encoded string indicating the blockhash of the turn slot
* `u64` - the current storage turn slot

#### Example:

```bash
// Request
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getStorageTurn"}' http://localhost:8899

// Result
{"jsonrpc":"2.0","result":["GH7ome3EiwEr7tu9JuTh2dpYWBJK3z69Xm1ZE3MEE6JC", "2048"],"id":1}
```
### getStorageTurnRate

Returns the current storage turn rate in terms of slots per turn

#### Parameters:

None

#### Results:

* `u64` - Number of slots in storage turn

#### Example:

```bash
// Request
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getStorageTurnRate"}' http://localhost:8899

// Result
{"jsonrpc":"2.0","result":"1024","id":1}
```
### getTransactionCount

Returns the current Transaction count from the ledger

#### Parameters:

* `object` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment)

#### Results:

* `u64` - count

#### Example:

```bash
// Request
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getTransactionCount"}' http://localhost:8899

// Result
{"jsonrpc":"2.0","result":268,"id":1}
```
### getTotalSupply

Returns the current total supply in Lamports

#### Parameters:

* `object` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment)

#### Results:

* `u64` - Total supply

#### Example:

```bash
// Request
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getTotalSupply"}' http://localhost:8899

// Result
{"jsonrpc":"2.0","result":10126,"id":1}
```
### getVersion

Returns the current Solana version running on the node

#### Parameters:

None

#### Results:

The result field will be a JSON object with the following sub fields:

* `solana-core`, software version of solana-core

#### Example:

```bash
// Request
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getVersion"}' http://localhost:8899

// Result
{"jsonrpc":"2.0","result":{"solana-core": "0.17.2"},"id":1}
```
### getVoteAccounts

Returns the account info and associated stake for all the voting accounts in the current bank.

#### Parameters:

* `object` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment)

#### Results:

The result field will be a JSON object of `current` and `delinquent` accounts, each containing an array of JSON objects with the following sub fields:

* `votePubkey` - Vote account public key, as base-58 encoded string
* `nodePubkey` - Node public key, as base-58 encoded string
* `activatedStake` - the stake, in lamports, delegated to this vote account and active in this epoch
* `epochVoteAccount` - bool, whether the vote account is staked for this epoch
* `commission` - percentage \(0-100\) of rewards payout owed to the vote account
* `lastVote` - Most recent slot voted on by this vote account

#### Example:

```bash
// Request
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getVoteAccounts"}' http://localhost:8899

// Result
{"jsonrpc":"2.0","result":{"current":[{"commission":0,"epochVoteAccount":true,"nodePubkey":"B97CCUW3AEZFGy6uUg6zUdnNYvnVq5VG8PUtb2HayTDD","lastVote":147,"activatedStake":42,"votePubkey":"3ZT31jkAGhUaw8jsy4bTknwBMP8i4Eueh52By4zXcsVw"}],"delinquent":[{"commission":127,"epochVoteAccount":false,"nodePubkey":"6ZPxeQaDo4bkZLRsdNrCzchNQr5LN9QMc9sipXv9Kw8f","lastVote":0,"activatedStake":0,"votePubkey":"CmgCk4aMS7KW1SHX3s9K5tBJ6Yng2LBaC8MFov4wx9sm"}]},"id":1}
```
### requestAirdrop

Requests an airdrop of lamports to a Pubkey

#### Parameters:

* `string` - Pubkey of account to receive lamports, as base-58 encoded string
* `integer` - lamports, as a u64
* `object` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment) (used for retrieving blockhash and verifying airdrop success)

#### Results:

* `string` - Transaction Signature of airdrop, as base-58 encoded string

#### Example:

```bash
// Request
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"requestAirdrop", "params":["83astBRguLMdt2h5U1Tpdq5tjFoJ6noeGwaY3mDLVcri", 50]}' http://localhost:8899

// Result
{"jsonrpc":"2.0","result":"5VERv8NMvzbJMEkV8xnrLkEaWRtSz9CosKDYjCJjBRnbJLgp8uirBgmQpjKhoR4tjF3ZpRzrFmBV6UjKdiSZkQUW","id":1}
```
### sendTransaction

Submits a signed transaction to the cluster for processing

#### Parameters:

* `array` - array of octets containing a fully-signed Transaction

#### Results:

* `string` - Transaction Signature, as base-58 encoded string

#### Example:

```bash
// Request
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"sendTransaction", "params":[[61, 98, 55, 49, 15, 187, 41, 215, 176, 49, 234, 229, 228, 77, 129, 221, 239, 88, 145, 227, 81, 158, 223, 123, 14, 229, 235, 247, 191, 115, 199, 71, 121, 17, 32, 67, 63, 209, 239, 160, 161, 2, 94, 105, 48, 159, 235, 235, 93, 98, 172, 97, 63, 197, 160, 164, 192, 20, 92, 111, 57, 145, 251, 6, 40, 240, 124, 194, 149, 155, 16, 138, 31, 113, 119, 101, 212, 128, 103, 78, 191, 80, 182, 234, 216, 21, 121, 243, 35, 100, 122, 68, 47, 57, 13, 39, 0, 0, 0, 0, 50, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 50, 0, 0, 0, 0, 0, 0, 0, 40, 240, 124, 194, 149, 155, 16, 138, 31, 113, 119, 101, 212, 128, 103, 78, 191, 80, 182, 234, 216, 21, 121, 243, 35, 100, 122, 68, 47, 57, 11, 12, 106, 49, 74, 226, 201, 16, 161, 192, 28, 84, 124, 97, 190, 201, 171, 186, 6, 18, 70, 142, 89, 185, 176, 154, 115, 61, 26, 163, 77, 1, 88, 98, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}' http://localhost:8899

// Result
{"jsonrpc":"2.0","result":"2EBVM6cB8vAAD93Ktr6Vd8p67XPbQzCJX47MpReuiCXJAtcjaxpvWpcg9Ege1Nr5Tk3a2GFrByT7WPBjdsTycY9b","id":1}
```
### Subscription Websocket

After connecting to the RPC PubSub websocket at `ws://<ADDRESS>/`:

* Submit subscription requests to the websocket using the methods below
* Multiple subscriptions may be active at once
* All subscriptions take an optional `confirmations` parameter, which defines how many confirmed blocks the node should wait before sending a notification. The greater the number, the more likely the notification is to represent consensus across the cluster, and the less likely it is to be affected by forking or rollbacks. If unspecified, the default value is 0; the node will send a notification as soon as it witnesses the event. The maximum `confirmations` wait length is the cluster's `MAX_LOCKOUT_HISTORY`, which represents the economic finality of the chain.
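As a quick way to experiment, the session below opens the socket with the `wscat` tool and submits a request by hand; `wscat` is an assumption here \(any websocket client works\), not something shipped with Solana:

```bash
# Connect to the PubSub endpoint (default port 8900), then type a request.
# Lines prefixed > are sent, < are received.
wscat -c ws://localhost:8900
> {"jsonrpc":"2.0", "id":1, "method":"accountSubscribe", "params":["CM78CPUeXjn8o3yroDHxUtKsZZgoy4GPkPPXfouKNH12", 15]}
< {"jsonrpc": "2.0","result": 0,"id": 1}
```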
### accountSubscribe

Subscribe to an account to receive notifications when the lamports or data for a given account public key changes

#### Parameters:

* `string` - account Pubkey, as base-58 encoded string
* `integer` - optional, number of confirmed blocks to wait before notification. Default: 0, Max: `MAX_LOCKOUT_HISTORY` \(greater integers rounded down\)

#### Results:

* `integer` - Subscription id \(needed to unsubscribe\)

#### Example:

```bash
// Request
{"jsonrpc":"2.0", "id":1, "method":"accountSubscribe", "params":["CM78CPUeXjn8o3yroDHxUtKsZZgoy4GPkPPXfouKNH12"]}

{"jsonrpc":"2.0", "id":1, "method":"accountSubscribe", "params":["CM78CPUeXjn8o3yroDHxUtKsZZgoy4GPkPPXfouKNH12", 15]}

// Result
{"jsonrpc": "2.0","result": 0,"id": 1}
```

#### Notification Format:

```bash
{"jsonrpc": "2.0","method": "accountNotification", "params": {"result": {"executable":false,"owner":[1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"lamports":1,"data":[3,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,50,48,53,48,45,48,49,45,48,49,84,48,48,58,48,48,58,48,48,90,252,10,7,28,246,140,88,177,98,82,10,227,89,81,18,30,194,101,199,16,11,73,133,20,246,62,114,39,20,113,189,32,50,0,0,0,0,0,0,0,247,15,36,102,167,83,225,42,133,127,82,34,36,224,207,130,109,230,224,188,163,33,213,13,5,117,211,251,65,159,197,51,0,0,0,0,0,0]},"subscription":0}}
```
### accountUnsubscribe

Unsubscribe from account change notifications

#### Parameters:

* `integer` - id of account Subscription to cancel

#### Results:

* `bool` - unsubscribe success message

#### Example:

```bash
// Request
{"jsonrpc":"2.0", "id":1, "method":"accountUnsubscribe", "params":[0]}

// Result
{"jsonrpc": "2.0","result": true,"id": 1}
```
### programSubscribe

Subscribe to a program to receive notifications when the lamports or data for a given account owned by the program changes

#### Parameters:

* `string` - program\_id Pubkey, as base-58 encoded string
* `integer` - optional, number of confirmed blocks to wait before notification. Default: 0, Max: `MAX_LOCKOUT_HISTORY` \(greater integers rounded down\)

#### Results:

* `integer` - Subscription id \(needed to unsubscribe\)

#### Example:

```bash
// Request
{"jsonrpc":"2.0", "id":1, "method":"programSubscribe", "params":["9gZbPtbtHrs6hEWgd6MbVY9VPFtS5Z8xKtnYwA2NynHV"]}

{"jsonrpc":"2.0", "id":1, "method":"programSubscribe", "params":["9gZbPtbtHrs6hEWgd6MbVY9VPFtS5Z8xKtnYwA2NynHV", 15]}

// Result
{"jsonrpc": "2.0","result": 0,"id": 1}
```

#### Notification Format:

* `string` - account Pubkey, as base-58 encoded string
* `object` - account info JSON object \(see [getAccountInfo](jsonrpc-api.md#getaccountinfo) for field details\)

```bash
{"jsonrpc":"2.0","method":"programNotification","params":{"result":["8Rshv2oMkPu5E4opXTRyuyBeZBqQ4S477VG26wUTFxUM",{"executable":false,"lamports":1,"owner":[129,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"data":[1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,50,48,49,56,45,49,50,45,50,52,84,50,51,58,53,57,58,48,48,90,235,233,39,152,15,44,117,176,41,89,100,86,45,61,2,44,251,46,212,37,35,118,163,189,247,84,27,235,178,62,55,89,0,0,0,0,50,0,0,0,0,0,0,0,235,233,39,152,15,44,117,176,41,89,100,86,45,61,2,44,251,46,212,37,35,118,163,189,247,84,27,235,178,62,45,4,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]}],"subscription":0}}
```
### programUnsubscribe

Unsubscribe from program-owned account change notifications

#### Parameters:

* `integer` - id of account Subscription to cancel

#### Results:

* `bool` - unsubscribe success message

#### Example:

```bash
// Request
{"jsonrpc":"2.0", "id":1, "method":"programUnsubscribe", "params":[0]}

// Result
{"jsonrpc": "2.0","result": true,"id": 1}
```
### signatureSubscribe

Subscribe to a transaction signature to receive notification when the transaction is confirmed. On `signatureNotification`, the subscription is automatically cancelled.

#### Parameters:

* `string` - Transaction Signature, as base-58 encoded string
* `integer` - optional, number of confirmed blocks to wait before notification. Default: 0, Max: `MAX_LOCKOUT_HISTORY` \(greater integers rounded down\)

#### Results:

* `integer` - subscription id \(needed to unsubscribe\)

#### Example:

```bash
// Request
{"jsonrpc":"2.0", "id":1, "method":"signatureSubscribe", "params":["2EBVM6cB8vAAD93Ktr6Vd8p67XPbQzCJX47MpReuiCXJAtcjaxpvWpcg9Ege1Nr5Tk3a2GFrByT7WPBjdsTycY9b"]}

{"jsonrpc":"2.0", "id":1, "method":"signatureSubscribe", "params":["2EBVM6cB8vAAD93Ktr6Vd8p67XPbQzCJX47MpReuiCXJAtcjaxpvWpcg9Ege1Nr5Tk3a2GFrByT7WPBjdsTycY9b", 15]}

// Result
{"jsonrpc": "2.0","result": 0,"id": 1}
```

#### Notification Format:

```bash
{"jsonrpc": "2.0","method": "signatureNotification", "params": {"result": "Confirmed","subscription":0}}
```
### signatureUnsubscribe

Unsubscribe from signature confirmation notification

#### Parameters:

* `integer` - subscription id to cancel

#### Results:

* `bool` - unsubscribe success message

#### Example:

```bash
// Request
{"jsonrpc":"2.0", "id":1, "method":"signatureUnsubscribe", "params":[0]}

// Result
{"jsonrpc": "2.0","result": true,"id": 1}
```
# Transaction

## Components of a `Transaction`

* **Transaction:**
  * **message:** Defines the transaction
    * **header:** Details the account types of and signatures required by the transaction
      * **num\_required\_signatures:** The total number of signatures required to make the transaction valid.
      * **num\_credit\_only\_signed\_accounts:** The last `num_readonly_signed_accounts` signatures refer to signing credit-only accounts. Credit-only accounts can be used concurrently by multiple parallel transactions, but their balance may only be increased, and their account data is read-only.
      * **num\_credit\_only\_unsigned\_accounts:** The last `num_readonly_unsigned_accounts` public keys in `account_keys` refer to non-signing credit-only accounts
    * **account\_keys:** List of public keys used by the transaction, both in its instructions and for signatures. The first `num_required_signatures` public keys must sign the transaction.
    * **recent\_blockhash:** The ID of a recent ledger entry. Validators will reject transactions with a `recent_blockhash` that is too old.
    * **instructions:** A list of [instructions](https://github.com/solana-labs/solana/tree/aacead62c0eb052068172eba6b53fc85874d6d54/book/src/instruction.md) that are run sequentially and committed in one atomic transaction if all succeed.
  * **signatures:** A list of signatures applied to the transaction. The list is always of length `num_required_signatures`, and the signature at index `i` corresponds to the public key at index `i` in `account_keys`. The list is initialized with empty signatures \(i.e. zeros\), and populated as signatures are added.
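For orientation, here is a sketch of how this structure surfaces in JSON-RPC output \(compare the getConfirmedBlock example earlier in this document\); every value below is an illustrative placeholder, not real data:

```bash
// An abridged Transaction as rendered in JSON-RPC output (values illustrative)
{
  "message": {
    "header": {"numRequiredSignatures": 2, "numReadonlySignedAccounts": 0, "numReadonlyUnsignedAccounts": 3},
    "accountKeys": ["<pubkey 0 (signer)>", "<pubkey 1 (signer)>", "<pubkey 2>", "<program pubkey>"],
    "recentBlockhash": "<recent blockhash>",
    "instructions": [{"program_id_index": 3, "accounts": [0, 1, 2], "data": "<bytes>"}]
  },
  "signatures": ["<signature for pubkey 0>", "<signature for pubkey 1>"]
}
```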
## Transaction Signing

A `Transaction` is signed by using an ed25519 keypair to sign the serialization of the `message`. The resulting signature is placed at the index of `signatures` matching the index of the keypair's public key in `account_keys`.
## Transaction Serialization

`Transaction`s \(and their `message`s\) are serialized and deserialized using the [bincode](https://crates.io/crates/bincode) crate with a non-standard vector serialization that uses only one byte for the length if it can be encoded in 7 bits, 2 bytes if it fits in 14 bits, or 3 bytes if it requires 15 or 16 bits. The vector serialization is defined by Solana's [short-vec](https://github.com/solana-labs/solana/blob/master/sdk/src/short_vec.rs).
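As a worked sketch of that length encoding \(assuming the usual 7-bits-per-byte scheme with a continuation bit in the high bit of each byte; consult the short-vec source linked above for the normative definition\):

```bash
# Short-vec length encoding, 7 bits per byte, high bit set means "more bytes follow":
#   length 5     -> 1 byte:  0x05            (fits in 7 bits)
#   length 200   -> 2 bytes: 0xc8 0x01       (fits in 14 bits: 0x48 | 0x80, then 200 >> 7 = 1)
#   length 20000 -> 3 bytes: 0xa0 0x9c 0x01  (needs 15 bits)
```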
# Ledger Replication

At full capacity on a 1 Gbps network, Solana will generate 4 petabytes of data per year. To prevent the network from centralizing around validators that have to store the full data set, this protocol proposes a way for mining nodes to provide storage capacity for pieces of the data.

The basic idea of Proof of Replication is to encrypt a dataset with a public symmetric key using CBC encryption, then hash the encrypted dataset. The main problem with the naive approach is that a dishonest storage node can stream the encryption and delete the data as it's hashed. The simple solution is to periodically regenerate the hash based on a signed PoH value. This ensures that all the data is present during the generation of the proof, and it also requires validators to have the entirety of the encrypted data present for verification of every proof of every identity. So the space required to validate is `number_of_proofs * data_size`.

## Optimization with PoH

Our improvement on this approach is to randomly sample the encrypted segments faster than it takes to encrypt, and record the hash of those samples into the PoH ledger. Thus the segments stay in the exact same order for every PoRep and verification can stream the data and verify all the proofs in a single batch. This way we can verify multiple proofs concurrently, each one on its own CUDA core. The total space required for verification is `1_ledger_segment + 2_cbc_blocks * number_of_identities` with core count equal to `number_of_identities`. We use a 64-byte chacha CBC block size.
## Network

Validators for PoRep are the same validators that are verifying transactions. If an archiver can prove that a validator verified a fake PoRep, then the validator will not receive a reward for that storage epoch.

Archivers are specialized _light clients_. They download a part of the ledger \(a.k.a. a Segment\) and store it, and provide PoReps of storing the ledger. For each verified PoRep, archivers earn a reward of sol from the mining pool.
## Constraints

We have the following constraints:

* Verification requires generating the CBC blocks. That requires space of 2 blocks per identity, and 1 CUDA core per identity for the same dataset. So proofs should be batched: as many identities at once as possible, with the proofs for those identities verified concurrently for the same dataset.
* Validators will randomly sample the set of storage proofs to the set that they can handle, and only the creators of those chosen proofs will be rewarded. The validator can run a benchmark whenever its hardware configuration changes to determine what rate it can validate storage proofs.
## Validation and Replication Protocol

### Constants

1. SLOTS\_PER\_SEGMENT: Number of slots in a segment of ledger data. The unit of storage for an archiver.
2. NUM\_KEY\_ROTATION\_SEGMENTS: Number of segments after which archivers regenerate their encryption keys and select a new dataset to store.
3. NUM\_STORAGE\_PROOFS: Number of storage proofs required for a storage proof claim to be successfully rewarded.
4. RATIO\_OF\_FAKE\_PROOFS: Ratio of fake proofs to real proofs that a storage mining proof claim has to contain to be valid for a reward.
5. NUM\_STORAGE\_SAMPLES: Number of samples required for a storage mining proof.
6. NUM\_CHACHA\_ROUNDS: Number of encryption rounds performed to generate encrypted state.
7. NUM\_SLOTS\_PER\_TURN: Number of slots that define a single storage epoch or a "turn" of the PoRep game.
### Validator behavior

1. Validators join the network and begin looking for archiver accounts at each storage epoch/turn boundary.
2. Every turn, validators sign the PoH value at the boundary and use that signature to randomly pick proofs to verify from each storage account found in the turn boundary. This signed value is also submitted to the validator's storage account and will be used by archivers at a later stage to cross-verify.
3. Every `NUM_SLOTS_PER_TURN` slots the validator advertises the PoH value. This value is also served to archivers via RPC interfaces.
4. For a given turn N, all validations get locked out until turn N+3 \(a gap of 2 turns/epochs\). At that point all validations during that turn are available for reward collection.
5. Any incorrect validations will be marked during the turn in between.
### Archiver behavior
|
|
||||||
|
|
||||||
1. Since an archiver is somewhat of a light client and not downloading all the
|
|
||||||
|
|
||||||
ledger data, they have to rely on other validators and archivers for information.
|
|
||||||
|
|
||||||
Any given validator may or may not be malicious and give incorrect information, although
|
|
||||||
|
|
||||||
there are not any obvious attack vectors that this could accomplish besides having the
|
|
||||||
|
|
||||||
archiver do extra wasted work. For many of the operations there are a number of options
|
|
||||||
|
|
||||||
depending on how paranoid an archiver is:
|
|
||||||
|
|
||||||
* \(a\) archiver can ask a validator
|
|
||||||
* \(b\) archiver can ask multiple validators
|
|
||||||
* \(c\) archiver can ask other archivers
|
|
||||||
* \(d\) archiver can subscribe to the full transaction stream and generate
|
|
||||||
|
|
||||||
the information itself \(assuming the slot is recent enough\)
|
|
||||||
|
|
||||||
* \(e\) archiver can subscribe to an abbreviated transaction stream to
|
|
||||||
|
|
||||||
generate the information itself \(assuming the slot is recent enough\)
|
|
||||||
|
|
||||||
2. An archiver obtains the PoH hash corresponding to the last turn with its slot.

3. The archiver signs the PoH hash with its keypair. That signature is the seed used to pick the segment to replicate and also the encryption key. The archiver mods the signature with the slot to get which segment to replicate \(see the sketch following this list\).

4. The archiver retrieves the ledger by asking peer validators and archivers. See 6.5.

5. The archiver then encrypts that segment with the key, using the ChaCha algorithm in CBC mode with `NUM_CHACHA_ROUNDS` of encryption.

6. The archiver initializes a ChaCha RNG with a signed recent PoH value as the seed.

7. The archiver generates `NUM_STORAGE_SAMPLES` samples in the range of the entry size and samples the encrypted segment with SHA-256 for 32 bytes at each offset value. Sampling the state should be faster than generating the encrypted segment.

8. The archiver sends a PoRep proof transaction which contains its SHA state at the end of the sampling operation, its seed and the samples it used to the current leader, and it is put onto the ledger.

9. During a given turn the archiver should submit many proofs for the same segment, and based on the `RATIO_OF_FAKE_PROOFS` some of those proofs must be fake.

10. As the PoRep game enters the next turn, the archiver must submit a transaction with the mask of which proofs were fake during the last turn. This transaction will define the rewards for both archivers and validators.

11. Finally for a turn N, as the PoRep game enters turn N + 3, the archiver's proofs for turn N will be counted towards their rewards.
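
The segment-selection and sampling steps \(3, 6 and 7 above\) can be summarized in code. The following is only a minimal sketch: `pick_segment` and `sample_segment` are invented helpers, `NUM_STORAGE_SAMPLES` is given an example value, the `sha2` crate supplies SHA-256, and a xorshift generator stands in for the ChaCha RNG named above; none of this is the storage program's actual API.

```rust
use sha2::{Digest, Sha256};

const NUM_STORAGE_SAMPLES: usize = 4; // example value only

/// Step 3: the signature over the turn's PoH hash doubles as the
/// segment-selection seed and the encryption key; the segment index is the
/// signature reduced modulo the slot.
fn pick_segment(signature: &[u8; 64], slot: u64) -> u64 {
    let x = u64::from_le_bytes(signature[..8].try_into().unwrap());
    x % slot.max(1)
}

/// Steps 6-7: seed an RNG from the signed PoH value, draw sample offsets into
/// the encrypted segment, and fold 32 bytes at each offset into one running
/// SHA-256 state. Assumes the segment is at least 32 bytes long.
fn sample_segment(encrypted: &[u8], seed: u64) -> [u8; 32] {
    let mut state = seed.max(1);
    let mut next = move || {
        state ^= state << 13;
        state ^= state >> 7;
        state ^= state << 17;
        state
    };
    let mut hasher = Sha256::new();
    for _ in 0..NUM_STORAGE_SAMPLES {
        let offset = (next() as usize) % (encrypted.len() - 31);
        hasher.update(&encrypted[offset..offset + 32]);
    }
    hasher.finalize().into()
}
```
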
### The PoRep Game

The Proof of Replication game has 4 primary stages. For each "turn" multiple PoRep games can be in progress but each in a different stage.

The 4 stages of the PoRep Game are as follows:

1. Proof submission stage
* Archivers: submit as many proofs as possible during this stage
* Validators: No-op
2. Proof verification stage
* Archivers: No-op
* Validators: Select archivers and verify their proofs from the previous turn
3. Proof challenge stage
* Archivers: Submit the proof mask with justifications \(for fake proofs submitted 2 turns ago\)
* Validators: No-op
4. Reward collection stage
* Archivers: Collect rewards for 3 turns ago
* Validators: Collect rewards for 3 turns ago

For each turn of the PoRep game, both Validators and Archivers evaluate each stage. The stages are run as separate transactions on the storage program.
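
Because the stages advance one per turn, the stage a given game occupies is a pure function of how many turns have elapsed since its proofs were submitted. A minimal sketch; the enum and function here are illustrative, not the storage program's actual types:

```rust
/// The four stages of a single PoRep game, in turn order.
#[derive(Debug, PartialEq)]
enum PorepStage {
    ProofSubmission,
    ProofVerification,
    ProofChallenge,
    RewardCollection,
}

/// A game whose proofs were submitted during `start_turn` occupies a stage
/// determined solely by how many turns have elapsed since then.
fn stage(start_turn: u64, current_turn: u64) -> Option<PorepStage> {
    match current_turn.checked_sub(start_turn)? {
        0 => Some(PorepStage::ProofSubmission),
        1 => Some(PorepStage::ProofVerification),
        2 => Some(PorepStage::ProofChallenge),
        3 => Some(PorepStage::RewardCollection),
        _ => None, // this game is finished; newer games are in earlier stages
    }
}
```
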
### Finding who has a given block of ledger

1. Validators monitor the turns in the PoRep game and look at the rooted bank at turn boundaries for any proofs.

2. Validators maintain a map of ledger segments and corresponding archiver public keys. The map is updated when a validator processes an archiver's proofs for a segment. The validator provides an RPC interface to access this map. Using this API, clients can map a segment to an archiver's network address \(correlating it via the cluster\_info table\), and can then send repair requests to the archiver to retrieve segments. A sketch of this map follows the list.

3. Validators would need to invalidate this list every N turns.
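
A minimal sketch of the map described in step 2, under assumed names; `SegmentIndex`, `ArchiverPubkey` and `SegmentMap` are invented for illustration and are not the validator's real data structures:

```rust
use std::collections::HashMap;

type SegmentIndex = u64;
type ArchiverPubkey = [u8; 32];

/// Segment -> archivers known to be storing it. Entries are added as proofs
/// are processed and the whole map is invalidated every N turns (step 3).
#[derive(Default)]
struct SegmentMap {
    storers: HashMap<SegmentIndex, Vec<ArchiverPubkey>>,
}

impl SegmentMap {
    /// Called when a validator processes an archiver's proof for a segment.
    fn record_proof(&mut self, segment: SegmentIndex, archiver: ArchiverPubkey) {
        self.storers.entry(segment).or_default().push(archiver);
    }

    /// What the RPC interface would serve: who claims to hold this segment.
    fn holders(&self, segment: SegmentIndex) -> &[ArchiverPubkey] {
        self.storers.get(&segment).map(Vec::as_slice).unwrap_or(&[])
    }
}
```
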
## Sybil attacks
For any random seed, we force everyone to use a signature that is derived from a PoH hash at the turn boundary. Everyone uses the same count, so the same PoH hash is signed by every participant. The signatures are then each cryptographically tied to the keypair, which prevents a leader from grinding on the resulting value for more than 1 identity.

Since there are many more client identities than encryption identities, we need to split the reward for multiple clients, and prevent Sybil attacks from generating many clients to acquire the same block of data. To remain BFT we want to avoid a single human entity from storing all the replications of a single chunk of the ledger.

Our solution to this is to force the clients to continue using the same identity. If the first round is used to acquire the same block for many client identities, the second round for the same client identities will force a redistribution of the signatures, and therefore of the PoRep identities and blocks. Thus to get a reward, archivers need to store the first block for free, and the network can reward long-lived client identities more than new ones.

## Validator attacks

* If a validator approves fake proofs, an archiver can easily expose it by showing the initial state for the hash.

* If a validator marks real proofs as fake, no on-chain computation can be done to distinguish who is correct. Rewards would have to rely on the results from multiple validators to catch bad actors and keep archivers from being denied rewards.

* A validator could try to steal mining proof results for itself. However, the proofs are derived from a signature from an archiver; since the validator does not know the private key used to generate the encryption key, it cannot be the generator of the proof.

## Reward incentives

Fake proofs are easy to generate but difficult to verify. For this reason, PoRep proof transactions generated by archivers may require a higher fee than a normal transaction to represent the computational cost required by validators.

Some percentage of fake proofs is also necessary to receive a reward from storage mining.

## Notes

* We can reduce the costs of verification of PoRep by using PoH, and actually make it feasible to verify a large number of proofs for a global dataset.

* We can eliminate grinding by forcing everyone to sign the same PoH hash and use the signatures as the seed.

* The game between validators and archivers is over random blocks and random encryption identities and random data samples. The goal of randomization is to prevent colluding groups from having overlap on data or validation.

* Archiver clients fish for lazy validators by submitting fake proofs that they can prove are fake.

* To defend against Sybil client identities that try to store the same block, we force the clients to store for multiple rounds before receiving a reward.

* Validators should also get rewarded for validating submitted storage proofs, as an incentive for storing the ledger. They can only validate proofs if they are storing that slice of the ledger.

@ -1,18 +0,0 @@
## Storage Rent Economics

Each transaction that is submitted to the Solana ledger imposes costs. Transaction fees paid by the submitter, and collected by a validator, in theory, account for the acute, transactional, costs of validating and adding that data to the ledger. At the same time, our compensation design for archivers (see [Replication-client Economics](ed_replication_client_economics.md)), in theory, accounts for the long term storage of the historical ledger. Unaccounted for in this process is the mid-term storage of active ledger state, necessarily maintained by the rotating validator set. This type of storage imposes costs not only on validators but also on the broader network: as active state grows, so does data transmission and validation overhead. To account for these costs, we describe here our preliminary design and implementation of storage rent.

Storage rent can be paid via one of two methods:

Method 1: Set it and forget it

With this approach, accounts with two years' worth of rent deposits secured are exempt from network rent charges. By maintaining this minimum balance, the broader network benefits from reduced liquidity and the account holder can trust that their `Account::data` will be retained for continual access/usage.

Method 2: Pay per byte

If an account has less than two years' worth of deposited rent the network charges rent on a per-epoch basis, in credit for the next epoch (but in arrears when necessary). This rent is deducted at a rate specified in genesis, in lamports per kilobyte-year.
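
As a concrete illustration of the pay-per-byte method, here is a minimal sketch of the rent arithmetic implied above. The function names and rounding are assumptions, and `lamports_per_kb_year` stands in for whatever rate genesis actually specifies:

```rust
/// Illustrative only; the runtime's actual rounding and epoch accounting may differ.
fn rent_due_per_epoch(account_bytes: u64, lamports_per_kb_year: u64, epochs_per_year: u64) -> u64 {
    let kb = (account_bytes + 1023) / 1024; // round size up to whole kilobytes
    kb * lamports_per_kb_year / epochs_per_year
}

/// The "set it and forget it" threshold: a deposit covering two years of rent.
fn rent_exempt_minimum(account_bytes: u64, lamports_per_kb_year: u64) -> u64 {
    2 * ((account_bytes + 1023) / 1024) * lamports_per_kb_year
}
```
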
For information on the technical implementation details of this design, see the [Rent](rent.md) section.
@ -1,7 +0,0 @@
# Testnet Participation

Participate in our testnet:

* [Running a Validator](../running-validator/)
* [Running an Archiver](../running-archiver.md)
@ -1,123 +0,0 @@
# Durable Transaction Nonces

## Problem

To prevent replay, Solana transactions contain a nonce field populated with a "recent" blockhash value. A transaction containing a blockhash that is too old (~2min as of this writing) is rejected by the network as invalid. Unfortunately certain use cases, such as custodial services, require more time to produce a signature for the transaction. A mechanism is needed to enable these potentially offline network participants.

## Requirements

1) The transaction's signature needs to cover the nonce value
2) The nonce must not be reusable, even in the case of signing key disclosure

## A Contract-based Solution

Here we describe a contract-based solution to the problem, whereby a client can "stash" a nonce value for future use in a transaction's `recent_blockhash` field. This approach is akin to the Compare and Swap atomic instruction, implemented by some CPU ISAs.

When making use of a durable nonce, the client must first query its value from account data. A transaction is now constructed in the normal way, but with the following additional requirements:

1) The durable nonce value is used in the `recent_blockhash` field
2) A `Nonce` instruction is issued (first?)
3) The appropriate transaction flag is set, signaling that the usual hash age check should be skipped and the previous requirements enforced. This may be unnecessary, see [Runtime Support](#runtime-support) below

### Contract Mechanics

TODO: svgbob this into a flowchart

```text
Start
Create Account
  state = Uninitialized
NonceInstruction
  if state == Uninitialized
    if account.balance < rent_exempt
      error InsufficientFunds
    state = Initialized
  elif state != Initialized
    error BadState
  if sysvar.recent_blockhashes.is_empty()
    error EmptyRecentBlockhashes
  if sysvar.recent_blockhashes.contains(stored_nonce)
    error NotReady
  stored_hash = sysvar.recent_blockhashes[0]
  success
WithdrawInstruction(to, lamports)
  if state == Uninitialized
    if !signers.contains(owner)
      error MissingRequiredSignatures
  elif state == Initialized
    if sysvar.recent_blockhashes.contains(stored_nonce)
      error NotReady
  if lamports != account.balance && lamports + rent_exempt > account.balance
    error InsufficientFunds
  account.balance -= lamports
  to.balance += lamports
  success
```

A client wishing to use this feature starts by creating a nonce account and depositing sufficient lamports as to make it rent-exempt. The resultant account will be in the `Uninitialized` state with no stored hash, and thus unusable.

The `Nonce` instruction is used to request that a new nonce be stored for the calling account. The first `Nonce` instruction run on a newly created account will drive the account's state to `Initialized`. As such, a `Nonce` instruction MUST be issued before the account can be used.

To discard a `NonceAccount`, the client should issue a `Withdraw` instruction which withdraws all lamports, leaving a zero balance and making the account eligible for deletion.

`Nonce` and `Withdraw` instructions each will only succeed if the stored blockhash is no longer resident in sysvar.recent_blockhashes.

### Runtime Support

The contract alone is not sufficient for implementing this feature. To enforce an extant `recent_blockhash` on the transaction and prevent fee theft via failed transaction replay, runtime modifications are necessary.

Any transaction failing the usual `check_hash_age` validation will be tested for a Durable Transaction Nonce. The specifics of this test are undecided; some options:

1) Require that the `Nonce` instruction be the first in the transaction
* + No ABI changes
* + Fast and simple
* - Sets a precedent that may lead to incompatible instruction combinations
2) Blind search for a `Nonce` instruction over all instructions in the transaction
* + No ABI changes
* - Potentially slow
3) [2], but guarded by a transaction flag
* - ABI changes
* - Wire size increase
* + We'll probably end up with some sort of flags eventually anyway

Current prototyping will use [1]. If it is determined that a Durable Transaction Nonce is in use, the runtime will take the following actions to validate the transaction:

1) The `NonceAccount` specified in the `Nonce` instruction is loaded.
2) The `NonceState` is deserialized from the `NonceAccount`'s data field and confirmed to be in the `Initialized` state.
3) The nonce value stored in the `NonceAccount` is tested to match against the one specified in the transaction's `recent_blockhash` field.

If all three of the above checks succeed, the transaction is allowed to continue validation.
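
A minimal sketch of that fallback test; `NonceState` and the function below are stand-ins for the runtime's real types, with the account assumed to be already loaded and deserialized per steps 1 and 2:

```rust
type Hash = [u8; 32];

/// Stand-in for the deserialized NonceAccount state (step 2).
enum NonceState {
    Uninitialized,
    Initialized(Hash),
}

/// Step 3: the stored nonce must match the transaction's recent_blockhash;
/// anything other than an Initialized account fails the fallback test.
fn verify_durable_nonce(state: &NonceState, tx_recent_blockhash: &Hash) -> bool {
    match state {
        NonceState::Initialized(stored) => stored == tx_recent_blockhash,
        NonceState::Uninitialized => false,
    }
}
```
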
### Open Questions
* Should this feature be restricted in the number of uses per transaction?
@ -1,16 +0,0 @@
# Cluster Economics

**Subject to change.**

Solana’s crypto-economic system is designed to promote a healthy, long term self-sustaining economy with participant incentives aligned to the security and decentralization of the network. The main participants in this economy are validation-clients and replication-clients. Their contributions to the network, state validation and data storage respectively, and their requisite incentive mechanisms are discussed below.

The main channels of participant remittances are referred to as protocol-based rewards and transaction fees. Protocol-based rewards are issuances from a global, protocol-defined, inflation rate. These rewards will constitute the bulk of the total reward delivered to replication and validation clients, with the remainder sourced from transaction fees. In the early days of the network, it is likely that protocol-based rewards, deployed based on a predefined issuance schedule, will drive the majority of participant incentives to participate in the network.

These protocol-based rewards, to be distributed to participating validation and replication clients, are to be a result of a global supply inflation rate, calculated per Solana epoch and distributed amongst the active validator set. As discussed further below, the per annum inflation rate is based on a pre-determined disinflationary schedule. This provides the network with monetary supply predictability which supports long term economic stability and security.

Transaction fees are market-based participant-to-participant transfers, attached to network interactions as a necessary motivation and compensation for the inclusion and execution of a proposed transaction \(be it a state execution or proof-of-replication verification\). A mechanism for long-term economic stability and forking protection through partial burning of each transaction fee is also discussed below.

A high-level schematic of Solana’s crypto-economic design is shown below in **Figure 1**. The specifics of validation-client economics are described in sections: [Validation-client Economics](ed_validation_client_economics/), [State-validation Protocol-based Rewards](ed_validation_client_economics/ed_vce_state_validation_protocol_based_rewards.md), [State-validation Transaction Fees](ed_validation_client_economics/ed_vce_state_validation_transaction_fees.md) and [Replication-validation Transaction Fees](ed_validation_client_economics/ed_vce_replication_validation_transaction_fees.md). Also, the chapter titled [Validation Stake Delegation](ed_validation_client_economics/ed_vce_validation_stake_delegation.md) closes with a discussion of validator delegation opportunities and marketplace. Additionally, in [Storage Rent Economics](https://github.com/solana-labs/solana/tree/aacead62c0eb052068172eba6b53fc85874d6d54/book/src/ed_storage_rent_economics.md), we describe an implementation of storage rent to account for the externality costs of maintaining the active state of the ledger. The [Replication-client Economics](ed_replication_client_economics/) chapter will review the Solana network design for global ledger storage/redundancy and archiver-client economics \([Storage-replication rewards](ed_replication_client_economics/ed_rce_storage_replication_rewards.md)\) along with an archiver-to-validator delegation mechanism designed to aid participant on-boarding into the Solana economy, discussed in [Replication-client Reward Auto-delegation](ed_replication_client_economics/ed_rce_replication_client_reward_auto_delegation.md). An outline of features for an MVP economic design is discussed in the [Economic Design MVP](ed_mvp.md) section. Finally, in chapter [Attack Vectors](ed_attack_vectors.md), various attack vectors will be described and potential vulnerabilities explored and parameterized.

**Figure 1**: Schematic overview of Solana economic incentive design.
@ -1,14 +0,0 @@
# Attack Vectors

**Subject to change.**

## Colluding validation and replication clients

A colluding validation-client may take the strategy of marking PoReps from non-colluding archiver nodes as invalid in an attempt to maximize the rewards for the colluding archiver nodes. In this case, it isn’t feasible for the offended-against archiver nodes to petition the network for resolution as this would result in a network-wide vote on each offending PoRep and create too much overhead for the network to progress adequately. Also, this mitigation attempt would still be vulnerable to a >= 51% staked colluder.

Alternatively, transaction fees from submitted PoReps are pooled and distributed across validation-clients in proportion to the number of valid PoReps discounted by the number of invalid PoReps as voted by each validator-client. Thus invalid votes are directly dis-incentivized through this reward channel. Invalid votes that are revealed by archiver nodes as fishing PoReps will not be discounted from the payout PoRep count.

Another collusion attack involves a validator-client who may take the strategy of ignoring invalid PoReps from colluding archivers and voting them as valid. In this case, colluding archiver-clients would not have to store the data while still receiving rewards for validated PoReps. Additionally, colluding validator nodes would also receive rewards for validating these PoReps. To mitigate this attack, validators must randomly sample PoReps corresponding to the ledger block they are validating, and because of this, there will be multiple validators that will receive the colluding archiver’s invalid submissions. These non-colluding validators will be incentivized to mark these PoReps as invalid as they have no way to determine whether the proposed invalid PoRep is actually a fishing PoRep, for which a confirmation vote would result in the validator’s stake being slashed.

In this case, the proportion of time a colluding pair will be successful has an upper limit determined by the % of stake of the network claimed by the colluding validator. This also sets bounds to the value of such an attack. For example, if a colluding validator controls 10% of the total validator stake, transaction fees will be lost \(likely sent to the mining pool\) by the colluding archiver 90% of the time, and so the attack vector is only profitable if the per-PoRep reward is at least 90% higher than the average PoRep transaction fee. While, probabilistically, some colluding archiver-client PoReps will find their way to colluding validation-clients, the network can also monitor rates of paired \(validator + archiver\) discrepancies in voting patterns and censor identified colluders in these cases.
@ -1,14 +0,0 @@
# Economic Sustainability

**Subject to change.**

Long term economic sustainability is one of the guiding principles of Solana’s economic design. While it is impossible to predict how decentralized economies will develop over time, especially economies with flexible decentralized governances, we can arrange economic components such that, under certain conditions, a sustainable economy may take shape in the long term. In the case of Solana’s network, these components take the form of token issuance \(via inflation\) and token burning.

The dominant remittances from the Solana mining pool are validator and archiver rewards. The disinflationary mechanism is a flat, protocol-specified and adjusted, percentage of each transaction fee.

The Archiver rewards are to be delivered to archivers as a portion of the network inflation after successful PoRep validation. The per-PoRep reward amount is determined as a function of the total network storage redundancy at the time of the PoRep validation and the network goal redundancy. This function is likely to take the form of a discount from a base reward to be delivered when the network has achieved and maintained its goal redundancy. An example of such a reward function is shown in **Figure 3**.

**Figure 3**: Example PoRep reward design as a function of global network storage redundancy.

In the example shown in Figure 3, multiple per-PoRep base rewards are explored \(as a % of Tx Fee\) to be delivered when the global ledger replication redundancy meets 10X. When the global ledger replication redundancy is less than 10X, the base reward is discounted as a function of the square of the ratio of the actual ledger replication redundancy to the goal redundancy \(i.e. 10X\).
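
Read literally, that discount rule implies a reward function of roughly the following shape, where $R_{base}$ is the chosen base reward, $r$ the actual replication redundancy and $r_{goal} = 10\times$. This is a sketch of the stated relationship, not a protocol-specified formula:

$$
R(r) = \begin{cases} R_{base} \cdot \left(\dfrac{r}{r_{goal}}\right)^{2}, & r < r_{goal} \\ R_{base}, & r \geq r_{goal} \end{cases}
$$
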
@ -1,16 +0,0 @@
# Economic Design MVP

**Subject to change.**

The preceding sections, outlined in the [Economic Design Overview](./), describe a long-term vision of a sustainable Solana economy. Of course, we don't expect the final implementation to perfectly match what has been described above. We intend to fully engage with network stakeholders throughout the implementation phases \(i.e. pre-testnet, testnet, mainnet\) to ensure the system supports, and is representative of, the various network participants' interests. The first step toward this goal, however, is outlining some desired MVP economic features to be available for early pre-testnet and testnet participants. Below is a rough sketch outlining basic economic functionality from which a more complete and functional system can be developed.

## MVP Economic Features

* Faucet to deliver testnet SOLs to validators for staking and application development.
* Mechanism by which validators are rewarded via network inflation.
* Ability to delegate tokens to validator nodes.
* Validator set commission fees on interest from delegated tokens.
* Archivers to receive fixed, arbitrary reward for submitting validated PoReps. Reward size mechanism \(i.e. PoRep reward as a function of total ledger redundancy\) to come later.
* Pooling of archiver PoRep transaction fees and weighted distribution to validators based on PoRep verification \(see [Replication-validation Transaction Fees](ed_validation_client_economics/ed_vce_replication_validation_transaction_fees.md)\). It will be useful to test this protection against attacks on testnet.
* Nice-to-have: auto-delegation of archiver rewards to validator.
@ -1,6 +0,0 @@
# Replication-client Economics

**Subject to change.**

Replication-clients should be rewarded for providing the network with storage space. Incentivization of the set of archivers provides data security through redundancy of the historical ledger. Replication nodes are rewarded in proportion to the amount of ledger data storage provided, as proved by successfully submitting Proofs-of-Replication to the cluster. These rewards are captured by generating and entering Proofs of Replication \(PoReps\) into the PoH stream which can be validated by Validation nodes as described above in the [Replication-validation Transaction Fees](../ed_validation_client_economics/ed_vce_replication_validation_transaction_fees.md) chapter.
@ -1,8 +0,0 @@
# Replication-client Reward Auto-delegation

**Subject to change.**

The ability for Solana network participants to earn rewards by providing storage service is a unique on-boarding path that requires little hardware overhead and minimal upfront capital. It offers an avenue for individuals with extra storage space on their home laptops or PCs to contribute to the security of the network and become integrated into the Solana economy.

To enhance this on-boarding ramp and facilitate further participation and investment in the Solana economy, replication-clients have the opportunity to auto-delegate their rewards to validation-clients of their choice. Much like the automatic reinvestment of stock dividends, in this scenario, an archiver-client can earn Solana tokens by providing some storage capacity to the network \(i.e. via submitting valid PoReps\), have the protocol-based rewards automatically assigned as delegation to a staked validator node of the archiver's choice, and earn interest, less a fee, from the validation-client's network participation.
@ -1,8 +0,0 @@
# Storage-replication Rewards

**Subject to change.**

Archiver-clients download, encrypt and submit PoReps for ledger block sections. PoReps submitted to the PoH stream, and subsequently validated, function as evidence that the submitting archiver client is indeed storing the assigned ledger block sections on local hard drive space as a service to the network. Therefore, archiver clients should earn protocol rewards proportional to the amount of storage, and the number of successfully validated PoReps, that they are verifiably providing to the network.

Additionally, archiver clients have the opportunity to capture a portion of slashed bounties \[TBD\] of dishonest validator clients. This can be accomplished by an archiver client submitting a verifiably false PoRep for which a dishonest validator client receives and signs as a valid PoRep. This reward incentive is to prevent lazy validators and minimize validator-archiver collusion attacks; more on this below.
@ -1,8 +0,0 @@
# Validation-client Economics

**Subject to change.**

Validator-clients are eligible to receive protocol-based \(i.e. inflation-based\) rewards issued via stake-based annual interest rates \(calculated per epoch\) by providing compute \(CPU+GPU\) resources to validate and vote on a given PoH state. These protocol-based rewards are determined through an algorithmic disinflationary schedule as a function of total amount of circulating tokens. The network is expected to launch with an annual inflation rate around 15%, set to decrease by 15% per year until a long-term stable rate of 1-2% is reached. These issuances are to be split and distributed to participating validators and archivers, with around 90% of the issued tokens allocated for validator rewards. Because the network will be distributing a fixed amount of inflation rewards across the stake-weighted validator set, any individual validator's interest rate will be a function of the amount of staked SOL in relation to the circulating SOL.

Additionally, validator clients may earn revenue through fees via state-validation transactions and Proof-of-Replication \(PoRep\) transactions. For clarity, we separately describe the design and motivation of these revenue distributions for validation-clients below: state-validation protocol-based rewards, state-validation transaction fees and rent, and PoRep-validation transaction fees.
@ -1,12 +0,0 @@
# Replication-validation Transaction Fees

**Subject to change.**

As previously mentioned, validator-clients will also be responsible for validating PoReps submitted into the PoH stream by archiver-clients. In this case, validators are providing compute \(CPU/GPU\) and light storage resources to confirm that these replication proofs could only be generated by a client that is storing the referenced PoH ledger block.

While replication-clients are incentivized and rewarded through a protocol-based rewards schedule \(see [Replication-client Economics](../ed_replication_client_economics/)\), validator-clients will be incentivized to include and validate PoReps in PoH through collection of transaction fees associated with the submitted PoReps and distribution of protocol rewards proportional to the validated PoReps. As will be described in detail in Section 3.1, replication-client rewards are protocol-based and designed to reward based on a global data redundancy factor. I.e. the protocol will incentivize replication-client participation through rewards based on a target ledger redundancy \(e.g. 10x data redundancy\).

The validation of PoReps by validation-clients is computationally more expensive than state-validation \(detail in the [Economic Sustainability](../ed_economic_sustainability.md) chapter\), thus the transaction fees are expected to be proportionally higher.

There are various attack vectors available for colluding validation and replication clients, also described in detail below in [Economic Sustainability](https://github.com/solana-labs/solana/tree/aacead62c0eb052068172eba6b53fc85874d6d54/book/src/ed_economic_sustainability/README.md). To protect against various collusion attack vectors, for a given epoch, validator rewards are distributed across participating validation-clients in proportion to the number of validated PoReps in the epoch less the number of PoReps that mismatch the archivers' challenges. The PoRep challenge game is described in [Ledger Replication](https://github.com/solana-labs/solana/blob/master/book/src/ledger-replication.md#the-porep-game). This design rewards validators proportional to the number of PoReps they process and validate, while providing negative pressure for validation-clients to submit lazy or malicious invalid votes on submitted PoReps \(note that it is computationally prohibitive to determine whether a validator-client has marked a valid PoRep as invalid\).
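
That distribution rule can be written compactly. The following is only a restatement of the sentence above, not a formula taken from the protocol: with $v_i$ the number of PoReps validator $i$ validated in the epoch and $m_i$ the number of its votes that mismatched an archiver's challenge,

$$
\text{reward}_i = \text{pool} \cdot \frac{v_i - m_i}{\sum_j \left(v_j - m_j\right)}
$$
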
@ -1,30 +0,0 @@
# State-validation Protocol-based Rewards

**Subject to change.**

Validator-clients have two functional roles in the Solana network:

* Validate \(vote\) the current global state of that PoH along with any Proofs-of-Replication \(see [Replication Client Economics](../ed_replication_client_economics/)\) that they are eligible to validate.
* Be elected as ‘leader’ on a stake-weighted round-robin schedule during which time they are responsible for collecting outstanding transactions and Proofs-of-Replication and incorporating them into the PoH, thus updating the global state of the network and providing chain continuity.

Validator-client rewards for these services are to be distributed at the end of each Solana epoch. As previously discussed, compensation for validator-clients is provided via a protocol-based annual inflation rate dispersed in proportion to the stake-weight of each validator \(see below\) along with leader-claimed transaction fees available during each leader rotation. I.e. during the time a given validator-client is elected as leader, it has the opportunity to keep a portion of each transaction fee, less a protocol-specified amount that is destroyed \(see [Validation-client State Transaction Fees](ed_vce_state_validation_transaction_fees.md)\). PoRep transaction fees are also collected by the leader client, and validator PoRep rewards are distributed in proportion to the number of validated PoReps less the number of PoReps that mismatch an archiver's challenge \(see [Replication-client Transaction Fees](ed_vce_replication_validation_transaction_fees.md)\).

The effective protocol-based annual interest rate \(%\) per epoch received by validation-clients is to be a function of:

* the current global inflation rate, derived from the pre-determined dis-inflationary issuance schedule \(see [Validation-client Economics](https://github.com/solana-labs/solana/tree/aacead62c0eb052068172eba6b53fc85874d6d54/book/src/ed_validartion_client_economics.md)\),
* the fraction of staked SOLs out of the current total circulating supply,
* the up-time/participation \[% of available slots that validator had opportunity to vote on\] of a given validator over the previous epoch.

The first factor is a function of protocol parameters only \(i.e. independent of validator behavior in a given epoch\) and results in a global validation reward schedule designed to incentivize early participation, provide clear monetary stability and provide optimal security in the network.

At any given point in time, a specific validator's interest rate can be determined based on the proportion of circulating supply that is staked by the network and the validator's uptime/activity in the previous epoch. For example, consider a hypothetical instance of the network with an initial circulating token supply of 250MM tokens with an additional 250MM vesting over 3 years. Additionally, an inflation rate of 7.5% is specified at network launch, with a disinflationary schedule of 20% decrease in inflation rate per year \(the actual rates to be implemented are to be worked out during the testnet experimentation phase of mainnet launch\). With these broad assumptions, the 10-year inflation rate \(adjusted daily for this example\) is shown in **Figure 2**, while the total circulating token supply is illustrated in **Figure 3**. Neglected in this toy model is the inflation suppression due to the portion of each transaction fee that is to be destroyed.
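
That toy schedule is easy to reproduce. A minimal sketch under the stated assumptions \(7.5% initial rate, 20% annual decay\), at yearly rather than daily granularity and ignoring the additional vesting tokens:

```rust
/// Toy model only: 7.5% initial annual inflation, decaying 20% per year.
fn inflation_rate(year: u32) -> f64 {
    0.075 * 0.8f64.powi(year as i32)
}

fn main() {
    let mut supply = 250_000_000.0; // example initial circulating supply
    for year in 0..10 {
        supply *= 1.0 + inflation_rate(year);
        println!(
            "year {:>2}: rate {:.3}%, supply {:.0}",
            year,
            100.0 * inflation_rate(year),
            supply
        );
    }
}
```
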



**Figure 2:** In this example schedule, the annual inflation rate \[%\] reduces at around 20% per year, until it reaches the long-term, fixed, 1.5% rate.



**Figure 3:** The total token supply over a 10-year period, based on an initial 250MM tokens with the disinflationary inflation schedule as shown in **Figure 2**.

Over time, the interest rate, at a fixed network staked percentage, will reduce concordant with network inflation. Validation-client interest rates are designed to be higher in the early days of the network to incentivize participation and jumpstart the network economy. As previously mentioned, the inflation rate is expected to stabilize near 1-2%, which also results in a fixed, long-term, interest rate to be provided to validator-clients. This value does not represent the total interest available to validator-clients, as transaction fees for state-validation and ledger storage replication \(PoReps\) are not accounted for here. Given these example parameters, annualized validator-specific interest rates can be determined based on the global fraction of tokens bonded as stake, as well as their uptime/activity in the previous epoch. For the purpose of this example, we assume 100% uptime for all validators and a split in interest-based rewards between validators and archiver nodes of 80%/20%. Additionally, the fraction of staked circulating supply is assumed to be constant. Based on these assumptions, an annualized validation-client interest rate schedule, as a function of the % of circulating token supply that is staked, is shown in **Figure 4**.



**Figure 4:** Shown here are example validator interest rates over time, neglecting transaction fees, segmented by fraction of total circulating supply bonded as stake.

This epoch-specific protocol-defined interest rate sets an upper limit on the _protocol-generated_ annual interest rate \(not the absolute total interest rate\) that can be delivered to any validator-client per epoch. The distributed interest rate per epoch is then discounted from this value based on the participation of the validator-client during the previous epoch.
@ -1,49 +0,0 @@
# Repair Service

## Repair Service

The RepairService is in charge of retrieving missing shreds that failed to be delivered by primary communication protocols like Avalanche. It is in charge of managing the protocols described in the `Repair Protocols` section below.

## Challenges

1\) Validators can fail to receive particular shreds due to network failures

2\) Consider a scenario where blocktree contains the set of slots {1, 3, 5}. Then Blocktree receives shreds for some slot 7, where for each of the shreds b, b.parent == 6, so then the parent-child relation 6 -> 7 is stored in blocktree. However, there is no way to chain these slots to any of the existing banks in Blocktree, and thus the `Shred Repair` protocol will not repair these slots. If these slots happen to be part of the main chain, this will halt replay progress on this node.

3\) Validators that find themselves behind the cluster by an entire epoch struggle/fail to catch up because they do not have a leader schedule for future epochs. If nodes were to blindly accept repair shreds in these future epochs, this exposes nodes to spam.

## Repair Protocols

The repair protocol makes best attempts to progress the forking structure of Blocktree.

The different protocol strategies to address the above challenges:

1. Shred Repair \(Addresses Challenge \#1\): This is the most basic repair protocol, with the purpose of detecting and filling "holes" in the ledger. Blocktree tracks the latest root slot. RepairService will then periodically iterate every fork in blocktree starting from the root slot, sending repair requests to validators for any missing shreds. It will send at most some `N` repair requests per iteration.

Note: Validators will only accept shreds within the current verifiable epoch \(epoch the validator has a leader schedule for\).

2. Preemptive Slot Repair \(Addresses Challenge \#2\): The goal of this protocol is to discover the chaining relationship of "orphan" slots that do not currently chain to any known fork.

* Blocktree will track the set of "orphan" slots in a separate column family.
* RepairService will periodically make `RequestOrphan` requests for each of the orphans in blocktree.

`RequestOrphan(orphan)` request: `orphan` is the orphan slot that the requestor wants to know the parents of.

`RequestOrphan(orphan)` response: the highest shreds for each of the first `N` parents of the requested `orphan`.

On receiving the responses `p`, where `p` is some shred in a parent slot, validators will:

* Insert an empty `SlotMeta` in blocktree for `p.slot` if it doesn't already exist.
* If `p.slot` does exist, update the parent of `p` based on `parents`.

Note that once these empty slots are added to blocktree, the `Shred Repair` protocol should attempt to fill those slots.

Note: Validators will only accept responses containing shreds within the current verifiable epoch \(epoch the validator has a leader schedule for\).

3. Repairmen \(Addresses Challenge \#3\): This part of the repair protocol is the primary mechanism by which new nodes joining the cluster catch up after loading a snapshot. This protocol works in a "forward" fashion, so validators can verify every shred that they receive against a known leader schedule.

Each validator advertises in gossip:

* Current root
* The set of all completed slots in the confirmed epochs \(an epoch that was calculated based on a bank <= current root\) past the current root

Observers of this gossip message with higher epochs \(repairmen\) send shreds to catch the lagging node up with the rest of the cluster. The repairmen are responsible for sending the slots within the epochs that are confirmed by the advertised `root` in gossip. The repairmen divide the responsibility of sending each of the missing slots in these epochs based on a random seed \(simple shred.index iteration by N, seeded with the repairman's node\_pubkey\). Ideally, each repairman in an N node cluster \(N nodes whose epochs are higher than that of the repairee\) sends 1/N of the missing shreds. Both data and coding shreds for missing slots are sent. Repairmen do not send shreds again to the same validator until they see the message in gossip updated, at which point they perform another iteration of this protocol. A sketch of this partitioning appears at the end of this section.

Gossip messages are updated every time a validator receives a complete slot within the epoch. Completed slots are detected by blocktree and sent over a channel to RepairService. It is important to note that we know that by the time a slot X is complete, the epoch schedule must exist for the epoch that contains slot X, because WindowService will reject shreds for unconfirmed epochs. When a newly completed slot is detected, we also update the current root if it has changed since the last update. The root is made available to RepairService through Blocktree, which holds the latest root.
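
A minimal sketch of the repairman shred partitioning; deriving the offset directly from the repairman's pubkey is an assumption here, and the real seeding details may differ:

```rust
type Pubkey = [u8; 32];

/// Derive this repairman's starting offset from its own pubkey, then take
/// every Nth shred index. Distinct offsets let an N-node group cover all
/// indices; colliding offsets duplicate work, hence "ideally" 1/N each.
fn my_shred_indices(my_pubkey: &Pubkey, n: u64, num_shreds: u64) -> Vec<u64> {
    let offset = u64::from_le_bytes(my_pubkey[..8].try_into().unwrap()) % n;
    (offset..num_shreds).step_by(n as usize).collect()
}
```
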
@ -1,37 +0,0 @@
# Introduction

## What is Solana?

Solana is an open source project implementing a new, high-performance, permissionless blockchain. Solana is also the name of a company headquartered in San Francisco that maintains the open source project.

## About this Book

This book describes the Solana open source project, a blockchain built from the ground up for scale. The book covers why Solana is useful, how to use it, how it works, and why it will continue to work long after the company Solana closes its doors. The goal of the Solana architecture is to demonstrate there exists a set of software algorithms that when used in combination to implement a blockchain, removes software as a performance bottleneck, allowing transaction throughput to scale proportionally with network bandwidth. The architecture goes on to satisfy all three desirable properties of a proper blockchain: it is scalable, secure and decentralized.

The architecture describes a theoretical upper bound of 710 thousand transactions per second \(tps\) on a standard gigabit network and 28.4 million tps on 40 gigabit. Furthermore, the architecture supports safe, concurrent execution of programs authored in general purpose programming languages such as C or Rust.

## Disclaimer

All claims, content, designs, algorithms, estimates, roadmaps, specifications, and performance measurements described in this project are done with the author's best effort. It is up to the reader to check and validate their accuracy and truthfulness. Furthermore, nothing in this project constitutes a solicitation for investment.

## History of the Solana Codebase

In November of 2017, Anatoly Yakovenko published a whitepaper describing Proof of History, a technique for keeping time between computers that do not trust one another. From Anatoly's previous experience designing distributed systems at Qualcomm, Mesosphere and Dropbox, he knew that a reliable clock makes network synchronization very simple. When synchronization is simple the resulting network can be blazing fast, bound only by network bandwidth.

Anatoly watched as blockchain systems without clocks, such as Bitcoin and Ethereum, struggled to scale beyond 15 transactions per second worldwide when centralized payment systems such as Visa required peaks of 65,000 tps. Without a clock, it was clear they'd never graduate to being the global payment system or global supercomputer most had dreamed them to be. When Anatoly solved the problem of getting computers that don’t trust each other to agree on time, he knew he had the key to bring 40 years of distributed systems research to the world of blockchain. The resulting cluster wouldn't be just 10 times faster, or 100 times, or 1,000 times, but 10,000 times faster, right out of the gate!

Anatoly's implementation began in a private codebase and was implemented in the C programming language. Greg Fitzgerald, who had previously worked with Anatoly at semiconductor giant Qualcomm Incorporated, encouraged him to reimplement the project in the Rust programming language. Greg had worked on the LLVM compiler infrastructure, which underlies both the Clang C/C++ compiler as well as the Rust compiler. Greg claimed that the language's safety guarantees would improve software productivity and that its lack of a garbage collector would allow programs to perform as well as those written in C. Anatoly gave it a shot and just two weeks later, had migrated his entire codebase to Rust. Sold. With plans to weave all the world's transactions together on a single, scalable blockchain, Anatoly called the project Loom.

On February 13th of 2018, Greg began prototyping the first open source implementation of Anatoly's whitepaper. The project was published to GitHub under the name Silk in the loomprotocol organization. On February 28th, Greg made his first release, demonstrating 10 thousand signed transactions could be verified and processed in just over half a second. Shortly after, another former Qualcomm cohort, Stephen Akridge, demonstrated throughput could be massively improved by offloading signature verification to graphics processors. Anatoly recruited Greg, Stephen and three others to co-found a company, then called Loom.

Around the same time, Ethereum-based project Loom Network sprung up and many people were confused about whether they were the same project. The Loom team decided it would rebrand. They chose the name Solana, a nod to a small beach town north of San Diego called Solana Beach, where Anatoly, Greg and Stephen lived and surfed for three years when they worked for Qualcomm. On March 28th, the team created the Solana Labs GitHub organization and renamed Greg's prototype Silk to Solana.

In June of 2018, the team scaled up the technology to run on cloud-based networks and on July 19th, published a 50-node, permissioned, public testnet consistently supporting bursts of 250,000 transactions per second. In a later release in December, called v0.10 Pillbox, the team published a permissioned testnet running 150 nodes on a gigabit network and demonstrated soak tests processing an _average_ of 200 thousand transactions per second with bursts over 500 thousand. The project was also extended to support on-chain programs written in the C programming language and run concurrently in a safe execution environment called BPF.

## What is a Solana Cluster?

A cluster is a set of computers that work together and can be viewed from the outside as a single system. A Solana cluster is a set of independently owned computers working together \(and sometimes against each other\) to verify the output of untrusted, user-submitted programs. A Solana cluster can be utilized any time a user wants to preserve an immutable record of events in time or programmatic interpretations of those events. One use is to track which of the computers did meaningful work to keep the cluster running. Another use might be to track the possession of real-world assets. In each case, the cluster produces a record of events called the ledger. It will be preserved for the lifetime of the cluster. As long as someone somewhere in the world maintains a copy of the ledger, the output of its programs \(which may contain a record of who possesses what\) will forever be reproducible, independent of the organization that launched it.

## What are SOLs?

A SOL is the name of Solana's native token, which can be passed to nodes in a Solana cluster in exchange for running an on-chain program or validating its output. The system may perform micropayments of fractional SOLs, which are called _lamports_. They are named in honor of Solana's biggest technical influence, [Leslie Lamport](https://en.wikipedia.org/wiki/Leslie_Lamport). A lamport has a value of 0.000000001 SOL.
@ -1,77 +0,0 @@
# Offline Transaction Signing

Some security models require keeping signing keys, and thus the signing process, separated from transaction creation and network broadcast. Examples include:

* Collecting signatures from geographically disparate signers in a [multi-signature scheme](../api-reference/cli.md#multiple-witnesses)
* Signing transactions using an [airgapped](https://en.wikipedia.org/wiki/Air_gap_(networking)) signing device

This document describes using Solana's CLI to separately sign and submit a transaction.

## Commands Supporting Offline Signing

At present, the following commands support offline signing:

* [`delegate-stake`](../api-reference/cli.md#solana-delegate-stake)
* [`deactivate-stake`](../api-reference/cli.md#solana-deactivate-stake)
* [`pay`](../api-reference/cli.md#solana-pay)

## Signing Transactions Offline

To sign a transaction offline, pass the following arguments on the command line:

1) `--sign-only`, prevents the client from submitting the signed transaction to the network. Instead, the pubkey/signature pairs are printed to stdout.
2) `--blockhash BASE58_HASH`, allows the caller to specify the value used to fill the transaction's `recent_blockhash` field. This serves a number of purposes, namely:
* Eliminates the need to connect to the network and query a recent blockhash via RPC
* Enables the signers to coordinate the blockhash in a multiple-signature scheme

### Example: Offline Signing a Payment

Command

```bash
solana@offline$ solana pay --sign-only --blockhash 5Tx8F3jgSHx21CbtjwmdaKPLM5tWmreWAnPrbqHomSJF \
    recipient-keypair.json 1 SOL
```

Output

```text

Blockhash: 5Tx8F3jgSHx21CbtjwmdaKPLM5tWmreWAnPrbqHomSJF
Signers (Pubkey=Signature):
FhtzLVsmcV7S5XqGD79ErgoseCLhZYmEZnz9kQg1Rp7j=4vC38p4bz7XyiXrk6HtaooUqwxTWKocf45cstASGtmrD398biNJnmTcUCVEojE7wVQvgdYbjHJqRFZPpzfCQpmUN

{"blockhash":"5Tx8F3jgSHx21CbtjwmdaKPLM5tWmreWAnPrbqHomSJF","signers":["FhtzLVsmcV7S5XqGD79ErgoseCLhZYmEZnz9kQg1Rp7j=4vC38p4bz7XyiXrk6HtaooUqwxTWKocf45cstASGtmrD398biNJnmTcUCVEojE7wVQvgdYbjHJqRFZPpzfCQpmUN"]}
```

## Submitting Offline Signed Transactions to the Network

To submit a transaction that has been signed offline to the network, pass the following arguments on the command line:

1) `--blockhash BASE58_HASH`, must be the same blockhash as was used to sign
2) `--signer BASE58_PUBKEY=BASE58_SIGNATURE`, one for each offline signer. This includes the pubkey/signature pairs directly in the transaction rather than signing it with any local keypair(s)

### Example: Submitting an Offline Signed Payment

Command

```bash
solana@online$ solana pay --blockhash 5Tx8F3jgSHx21CbtjwmdaKPLM5tWmreWAnPrbqHomSJF \
    --signer FhtzLVsmcV7S5XqGD79ErgoseCLhZYmEZnz9kQg1Rp7j=4vC38p4bz7XyiXrk6HtaooUqwxTWKocf45cstASGtmrD398biNJnmTcUCVEojE7wVQvgdYbjHJqRFZPpzfCQpmUN \
    recipient-keypair.json 1 SOL
```

Output

```text
4vC38p4bz7XyiXrk6HtaooUqwxTWKocf45cstASGtmrD398biNJnmTcUCVEojE7wVQvgdYbjHJqRFZPpzfCQpmUN
```
@ -1,51 +0,0 @@
# Installation Guide

Follow this guide to set up Solana's key generation tool called `solana-keygen`.

{% hint style="warn" %}
After installation, ensure your version is `0.21.1` or higher by running `solana-keygen -V`
{% endhint %}

## Download

First, download the latest release tarball from GitHub.

1. Set up the download URL

```bash
solana_downloads=https://github.com/solana-labs/solana/releases/latest/download
```

2. Specify the download file based on your machine

**MacOS**
```bash
solana_release=solana-release-x86_64-apple-darwin.tar.bz2
```

**Linux**
```bash
solana_release=solana-release-x86_64-unknown-linux-gnu.tar.bz2
```

3. Download

```bash
curl -L -sSf -o solana-release.tar.bz2 $solana_downloads/$solana_release
```

## Extract

Next, extract the tarball:

```bash
tar xf solana-release.tar.bz2
```

## Add to "PATH"

Now add the tool to your PATH environment variable with the following command:

```bash
export PATH="$(pwd)/solana-release/bin:${PATH}"
```

## Check

Finally, check that `solana-keygen` can be run:

```bash
solana-keygen -V
```
@ -1,71 +0,0 @@
# Cross-Program Invocation

## Problem

In today's implementation a client can create a transaction that modifies two accounts, each owned by a separate on-chain program:

```text
let message = Message::new(vec![
    token_instruction::pay(&alice_pubkey),
    acme_instruction::launch_missiles(&bob_pubkey),
]);
client.send_message(&[&alice_keypair, &bob_keypair], &message);
```

The current implementation does not, however, allow the `acme` program to conveniently invoke `token` instructions on the client's behalf:

```text
let message = Message::new(vec![
    acme_instruction::pay_and_launch_missiles(&alice_pubkey, &bob_pubkey),
]);
client.send_message(&[&alice_keypair, &bob_keypair], &message);
```

Currently, there is no way to create an instruction `pay_and_launch_missiles` that executes `token_instruction::pay` from the `acme` program. The workaround is to extend the `acme` program with the implementation of the `token` program, and create `token` accounts with `ACME_PROGRAM_ID`, which the `acme` program is permitted to modify. With that workaround, `acme` can modify token-like accounts created by the `acme` program, but not token accounts created by the `token` program.

## Proposed Solution

The goal of this design is to modify Solana's runtime such that an on-chain program can invoke an instruction from another program.

Given two on-chain programs `token` and `acme`, each implementing instructions `pay()` and `launch_missiles()` respectively, we would ideally like to implement the `acme` module with a call to a function defined in the `token` module:

```text
use token;

fn launch_missiles(keyed_accounts: &[KeyedAccount]) -> Result<()> {
    ...
}

fn pay_and_launch_missiles(keyed_accounts: &[KeyedAccount]) -> Result<()> {
    token::pay(&keyed_accounts[1..])?;

    launch_missiles(keyed_accounts)
}
```

The above code would require that the `token` crate be dynamically linked, so that a custom linker could intercept calls and validate accesses to `keyed_accounts`. That is, even though the client intends to modify both `token` and `acme` accounts, only the `token` program is permitted to modify the `token` account, and only the `acme` program is permitted to modify the `acme` account.

Backing off from that ideal cross-program call, a slightly more verbose solution is to expose token's existing `process_instruction()` entrypoint to the acme program:

```text
use token_instruction;

fn launch_missiles(keyed_accounts: &[KeyedAccount]) -> Result<()> {
    ...
}

fn pay_and_launch_missiles(keyed_accounts: &[KeyedAccount]) -> Result<()> {
    let alice_pubkey = keyed_accounts[1].key;
    let instruction = token_instruction::pay(&alice_pubkey);
    process_instruction(&instruction)?;

    launch_missiles(keyed_accounts)
}
```

where `process_instruction()` is built into Solana's runtime and responsible for routing the given instruction to the `token` program via the instruction's `program_id` field. Before invoking `pay()`, the runtime must ensure that `acme` didn't modify any accounts owned by `token`. It does this by calling `runtime::verify_account_changes()` and then updating all the `pre_*` variables to tentatively commit `acme`'s account modifications. After `pay()` completes, the runtime must again ensure that `token` didn't modify any accounts owned by `acme`. It should call `verify_account_changes()` again, but this time with the `token` program ID. Lastly, after `pay_and_launch_missiles()` completes, the runtime must call `verify_account_changes()` one more time, where it normally would, but using all updated `pre_*` variables. If executing `pay_and_launch_missiles()` up to `pay()` made no invalid account changes, `pay()` made no invalid changes, and executing from `pay()` until `pay_and_launch_missiles()` returns made no invalid changes, then the runtime can transitively assume that `pay_and_launch_missiles()` as a whole made no invalid account changes, and therefore commit all account modifications.
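
The sequence of checks can be made concrete with a small sketch. This is illustrative only, not the actual runtime API: `verify_account_changes` here enforces a single simplified rule \(a program may only modify accounts it owns\), and all type and function names are stand-ins.

```text
type Pubkey = [u8; 32];

#[derive(Clone, PartialEq)]
struct Account { owner: Pubkey, lamports: u64, data: Vec<u8> }

/// Simplified check: any account that changed must be owned by `program_id`.
fn verify_account_changes(program_id: &Pubkey, pre: &[Account], post: &[Account]) -> Result<(), ()> {
    for (before, after) in pre.iter().zip(post.iter()) {
        if before != after && &after.owner != program_id {
            return Err(());
        }
    }
    Ok(())
}

fn pay_and_launch_missiles(accounts: &mut Vec<Account>, acme_id: &Pubkey, token_id: &Pubkey) -> Result<(), ()> {
    let mut pre = accounts.clone();
    // ... acme's logic runs up to the cross-program call ...
    verify_account_changes(acme_id, &pre, accounts)?;  // acme's changes so far are valid
    pre = accounts.clone();                            // tentatively commit them
    // ... the runtime routes token_instruction::pay() here ...
    verify_account_changes(token_id, &pre, accounts)?; // token's changes are valid
    pre = accounts.clone();
    // ... the rest of acme's logic runs ...
    verify_account_changes(acme_id, &pre, accounts)    // final check before committing all changes
}
```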

### Setting `KeyedAccount.is_signer`

When `process_instruction()` is invoked, the runtime must create a new `KeyedAccounts` parameter using the signatures from the _original_ transaction data. Since the `token` program is immutable and existed on-chain prior to the `acme` program, the runtime can safely treat the transaction signature as a signature of a transaction with a `token` instruction. When the runtime sees the given instruction references `alice_pubkey`, it looks up the key in the transaction to see if that key corresponds to a transaction signature. In this case it does, and so it sets `KeyedAccount.is_signer`, thereby authorizing the `token` program to modify Alice's account.
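
A sketch of that lookup, with illustrative stand-in types: the runtime marks an account as a signer exactly when its key carried a signature on the original transaction.

```text
type Pubkey = [u8; 32];

struct KeyedAccount { key: Pubkey, is_signer: bool }

fn keyed_accounts_for(instruction_keys: &[Pubkey], signed_keys: &[Pubkey]) -> Vec<KeyedAccount> {
    instruction_keys
        .iter()
        .map(|key| KeyedAccount {
            key: *key,
            // Signed iff the key corresponds to a signature on the original transaction.
            is_signer: signed_keys.contains(key),
        })
        .collect()
}
```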
# Ledger Replication

Replication behavior yet to be implemented.

## Storage epoch

The storage epoch should be the number of slots which results in around 100GB-1TB of ledger to be generated for archivers to store. Archivers will start storing ledger when a given fork has a high probability of not being rolled back.

## Validator behavior

1. Every NUM\_KEY\_ROTATION\_TICKS it also validates samples received from
   archivers. It signs the PoH hash at that point and uses the following
   algorithm with the signature as the input \(a sketch of this sampling loop
   follows the list\):
   * The low 5 bits of the first byte of the signature creates an index into
     another starting byte of the signature.
   * The validator then looks at the set of storage proofs where the byte of
     the proof's sha state vector starting from the low byte matches exactly
     with the chosen byte\(s\) of the signature.
   * If the set of proofs is larger than the validator can handle, then it
     increases to matching 2 bytes in the signature.
   * The validator continues to increase the number of matching bytes until a
     workable set is found.
   * It then creates a mask of valid proofs and fake proofs and sends it to
     the leader. This is a storage proof confirmation transaction.
2. After a lockout period of NUM\_SECONDS\_STORAGE\_LOCKOUT seconds, the
   validator then submits a storage proof claim transaction, which causes the
   distribution of the storage reward to the validators and archivers party to
   the proofs, provided no challenges were seen for the proof.
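
A sketch of the sampling loop referenced in step 1 above; all names and the exact byte-matching rule are illustrative assumptions, not a finalized specification.

```text
// Select storage proofs whose leading sha_state bytes match the signature
// bytes chosen by the low 5 bits of the signature's first byte. Widen the
// match one byte at a time until the selected set is small enough to handle.
fn select_proofs(signature: &[u8; 64], proof_sha_states: &[[u8; 32]], max_proofs: usize) -> Vec<usize> {
    let start = (signature[0] & 0x1f) as usize; // low 5 bits index another signature byte
    let mut num_bytes = 1;
    loop {
        let wanted = &signature[start..start + num_bytes];
        let selected: Vec<usize> = proof_sha_states
            .iter()
            .enumerate()
            .filter(|(_, sha_state)| &sha_state[..num_bytes] == wanted)
            .map(|(i, _)| i)
            .collect();
        if selected.len() <= max_proofs || num_bytes == 32 {
            return selected;
        }
        num_bytes += 1; // no workable set yet; require more matching bytes
    }
}
```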
## Archiver behavior

1. The archiver then generates another set of offsets for which it submits a fake
   proof with an incorrect sha state. It can be proven to be fake by providing the
   seed for the hash result.
   * A fake proof should consist of an archiver hash of a signature of a PoH
     value. That way when the archiver reveals the fake proof, it can be
     verified on chain.
2. The archiver monitors the ledger. If it sees a fake proof integrated, it
   creates a challenge transaction and submits it to the current leader. The
   transaction proves the validator incorrectly validated a fake storage proof.
   The archiver is rewarded and the validator's staking balance is slashed or
   frozen.
## Storage proof contract logic

Each archiver and validator will have their own storage account. The validator's account would be separate from their gossip id, similar to their vote account. These should be implemented as two programs: one which handles the validator as the key signer and one for the archiver. In that way when the programs reference other accounts, they can check the program id to ensure it is a validator or archiver account they are referencing.

### SubmitMiningProof

```text
SubmitMiningProof {
    slot: u64,
    sha_state: Hash,
    signature: Signature,
};
keys = [archiver_keypair]
```

Archivers create these after mining their stored ledger data for a certain hash value. The slot is the end slot of the segment of ledger they are storing, the sha\_state the result of the archiver using the hash function to sample their encrypted ledger segment. The signature is the signature that was created when they signed a PoH value for the current storage epoch. The list of proofs from the current storage epoch should be saved in the account state, and then transferred to a list of proofs for the previous epoch when the epoch passes. In a given storage epoch a given archiver should only submit proofs for one segment.

The program should have a list of slots which are valid storage mining slots. This list should be maintained by keeping track of slots which are rooted slots in which a significant portion of the network has voted on with a high lockout value, maybe 32-votes old. Every SLOTS\_PER\_SEGMENT number of slots would be added to this set. The program should check that the slot is in this set. The set can be maintained by receiving an AdvertiseStorageRecentBlockHash and checking with its bank/Tower BFT state.

The program should do a signature verify check on the signature, public key from the transaction submitter and the message of the previous storage epoch PoH value.
### ProofValidation

```text
ProofValidation {
    proof_mask: Vec<ProofStatus>,
}
keys = [validator_keypair, archiver_keypair(s) (unsigned)]
```

A validator will submit this transaction to indicate that a set of proofs for a given segment are valid, not valid, or skipped, where skipped means the validator did not look at the proof. The keypairs for the archivers that it looked at should be referenced in the keys so the program logic can go to those accounts and see that the proofs were generated in the previous epoch. The sampling of the storage proofs should be verified, ensuring that the correct proofs are skipped by the validator according to the sampling logic outlined in the validator behavior above.

The included archiver keys will indicate the storage samples which are being referenced; the length of the proof\_mask should be verified against the set of storage proofs in the referenced archiver account\(s\), and should match with the number of proofs submitted in the previous storage epoch in the state of said archiver account.
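
As a sketch of that length check \(types are illustrative stand-ins\):

```text
enum ProofStatus { Valid, NotValid, Skipped }

struct ArchiverStorageAccount { previous_epoch_proof_count: usize }

// The proof_mask must cover exactly the proofs the archiver submitted in the
// previous storage epoch; anything else is rejected.
fn verify_proof_mask(mask: &[ProofStatus], archiver: &ArchiverStorageAccount) -> Result<(), ()> {
    if mask.len() != archiver.previous_epoch_proof_count {
        return Err(());
    }
    Ok(())
}
```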
### ClaimStorageReward

```text
ClaimStorageReward {
}
keys = [validator_keypair or archiver_keypair, validator/archiver_keypairs (unsigned)]
```

Archivers and validators will use this transaction to get paid tokens from a program state where SubmitStorageProof, ProofValidation and ChallengeProofValidations are in a state where proofs have been submitted and validated and there are no ChallengeProofValidations referencing those proofs. For a validator, it should reference the archiver keypairs for which it has validated proofs in the relevant epoch. And for an archiver, it should reference the validator keypairs for which it has validated and wants to be rewarded.
### ChallengeProofValidation

```text
ChallengeProofValidation {
    proof_index: u64,
    hash_seed_value: Vec<u8>,
}
keys = [archiver_keypair, validator_keypair]
```

This transaction is for catching lazy validators who are not doing the work to validate proofs. An archiver will submit this transaction when it sees a validator has approved a fake SubmitMiningProof transaction. Since the archiver is a light client not looking at the full chain, it will have to ask a validator or some set of validators for this information, perhaps via RPC call, to obtain all ProofValidations for a certain segment in the previous storage epoch. The program will look in the validator account state, see that a ProofValidation was submitted in the previous storage epoch, hash the hash\_seed\_value, and check that the hash matches the SubmitMiningProof transaction and that the validator marked it as valid. If so, then it will save the challenge to the list of challenges that it has in its state.
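
A sketch of the challenge check, assuming the `sha2` crate and that a fake proof's `sha_state` is the hash of the revealed seed \(the exact preimage relation is an assumption here\):

```text
use sha2::{Digest, Sha256};

// The challenge stands if the revealed seed reproduces the proof's hash,
// proving the proof was fake, and the validator nevertheless marked it valid.
fn challenge_is_valid(hash_seed_value: &[u8], claimed_sha_state: &[u8; 32], validator_marked_valid: bool) -> bool {
    let revealed = Sha256::digest(hash_seed_value);
    revealed.as_slice() == &claimed_sha_state[..] && validator_marked_valid
}
```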
### AdvertiseStorageRecentBlockhash

```text
AdvertiseStorageRecentBlockhash {
    hash: Hash,
    slot: u64,
}
```

Validators and archivers will submit this to indicate that a new storage epoch has passed and that the storage proofs which are current proofs should now be for the previous epoch. Other transactions should check to see that the epoch that they are referencing is accurate according to current chain state.
# Simple Payment and State Verification

It is often useful to allow low resourced clients to participate in a Solana cluster. Be this participation economic or contract execution, verification that a client's activity has been accepted by the network is typically expensive. This proposal lays out a mechanism for such clients to confirm that their actions have been committed to the ledger state with minimal resource expenditure and third-party trust.

## A Naive Approach

Validators store the signatures of recently confirmed transactions for a short period of time to ensure that they are not processed more than once. Validators provide a JSON RPC endpoint, which clients can use to query the cluster if a transaction has been recently processed. Validators also provide a PubSub notification, whereby a client registers to be notified when a given signature is observed by the validator. While these two mechanisms allow a client to verify a payment, they are not a proof and rely on completely trusting a validator.

We will describe a way to minimize this trust using Merkle Proofs to anchor the validator's response in the ledger, allowing the client to confirm on their own that a sufficient number of their preferred validators have confirmed a transaction. Requiring multiple validator attestations further reduces trust in the validator, as it increases both the technical and economic difficulty of compromising several other network participants.

## Light Clients

A 'light client' is a cluster participant that does not itself run a validator. This light client would provide a level of security greater than trusting a remote validator, without requiring the light client to spend a lot of resources verifying the ledger.

Rather than providing transaction signatures directly to a light client, the validator instead generates a Merkle Proof from the transaction of interest to the root of a Merkle Tree of all transactions in the including block. This Merkle Root is stored in a ledger entry which is voted on by validators, providing it consensus legitimacy. The additional level of security for a light client depends on an initial canonical set of validators the light client considers to be the stakeholders of the cluster. As that set is changed, the client can update its internal set of known validators with [receipts](simple-payment-and-state-verification.md#receipts). This may become challenging with a large number of delegated stakes.

Validators themselves may want to use light client APIs for performance reasons. For example, during the initial launch of a validator, the validator may use a cluster provided checkpoint of the state and verify it with a receipt.

## Receipts

A receipt is a minimal proof that: a transaction has been included in a block, that the block has been voted on by the client's preferred set of validators, and that the votes have reached the desired confirmation depth.

The receipts for both state and payments start with a Merkle Path from the value into a Bank-Merkle that has been voted on and included in the ledger. A chain of PoH Entries containing subsequent validator votes, deriving from the Bank-Merkle, is the confirmation proof.

Clients can examine this ledger data and compute the finality using Solana's fork selection rules.

### Payment Merkle Path

A payment receipt is a data structure that contains a Merkle Path from a transaction to the required set of validator votes.

An Entry-Merkle is a Merkle Root including all transactions in the entry, sorted by signature.


A Block-Merkle is a Merkle root of all the Entry-Merkles sequenced in the block. Transaction status is necessary for the receipt because the state receipt is constructed for the block. Two transactions over the same state can appear in the block, and therefore, there is no way to infer from just the state whether a transaction that is committed to the ledger has succeeded or failed in modifying the intended state. It may not be necessary to encode the full status code; a single status bit indicating the transaction's success may suffice.
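
As a sketch of how such roots can be built \(assuming the `sha2` crate; padding and domain separation are simplified here\): the Entry-Merkle takes the entry's transaction signatures \(sorted\) as leaves, and the Block-Merkle takes the block's Entry-Merkles in sequence.

```text
use sha2::{Digest, Sha256};

// Reduce a layer of 32-byte leaves to a single Merkle root, duplicating the
// last node when a layer has odd length.
fn merkle_root(mut layer: Vec<[u8; 32]>) -> [u8; 32] {
    if layer.is_empty() {
        return [0u8; 32];
    }
    while layer.len() > 1 {
        if layer.len() % 2 == 1 {
            let last = *layer.last().unwrap();
            layer.push(last);
        }
        layer = layer
            .chunks(2)
            .map(|pair| {
                let mut hasher = Sha256::new();
                hasher.update(&pair[0]);
                hasher.update(&pair[1]);
                hasher.finalize().into()
            })
            .collect();
    }
    layer[0]
}
```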
### State Merkle Path

A state receipt provides a confirmation that a specific state is committed at the end of the block. Inter-block state transitions do not generate a receipt.

For example:

* A sends 5 Lamports to B
* B spends 5 Lamports
* C sends 5 Lamports to A

At the end of the block, A and B are in the exact same starting state, and any state receipt would point to the same value for A or B.

The Bank-Merkle is computed from the Merkle Tree of the new state changes, along with the Previous Bank-Merkle, and the Block-Merkle.



A state receipt contains only the state changes occurring in the block. A direct Merkle Path to the current Bank-Merkle guarantees the state value at that bank hash, but it cannot be used to generate a “current” receipt to the latest state if the state modification occurred in some previous block. There is no guarantee that the path provided by the validator is the latest one available out of all the previous Bank-Merkles.

Clients that want to query the chain for a receipt of the "latest" state would need to create a transaction that would update the Merkle Path for that account, such as a credit of 0 Lamports.
### Validator Votes

Leaders should coalesce the validator votes by stake weight into a single entry. This will reduce the number of entries necessary to create a receipt.
### Chain of Entries

A receipt has a PoH link from the payment or state Merkle Path root to a list of consecutive validation votes.

It contains the following:

* State -> Bank-Merkle

or

* Transaction -> Entry-Merkle -> Block-Merkle -> Bank-Merkle

And a vector of PoH entries:

* Validator vote entries
* Ticks
* Light entries

```text
/// This Entry definition skips over the transactions and only contains the
/// hash of the transactions used to modify PoH.
LightEntry {
    /// The number of hashes since the previous Entry ID.
    pub num_hashes: u64,
    /// The SHA-256 hash `num_hashes` after the previous Entry ID.
    hash: Hash,
    /// The Merkle Root of the transactions encoded into the Entry.
    entry_hash: Hash,
}
```

The light entries are reconstructed from Entries and simply show the entry Merkle Root that was mixed in to the PoH hash, instead of the full transaction set.
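
A sketch of replaying PoH over light entries, assuming the `sha2` crate and a simplified mixing rule \(tick `num_hashes - 1` times, then hash the running state together with the entry's Merkle root\); the actual hash schedule is an assumption here.

```text
use sha2::{Digest, Sha256};

struct LightEntry {
    num_hashes: u64,
    hash: [u8; 32],       // expected PoH hash after this entry
    entry_hash: [u8; 32], // Merkle root of the entry's transactions
}

fn verify_light_entries(mut poh: [u8; 32], entries: &[LightEntry]) -> bool {
    for entry in entries {
        for _ in 1..entry.num_hashes {
            poh = Sha256::digest(&poh).into(); // plain PoH ticks
        }
        let mut hasher = Sha256::new();
        hasher.update(&poh);
        hasher.update(&entry.entry_hash); // mix the Merkle root into PoH
        poh = hasher.finalize().into();
        if poh != entry.hash {
            return false; // the PoH chain does not reproduce this entry
        }
    }
    true
}
```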

Clients do not need the starting vote state. The [fork selection](https://github.com/solana-labs/solana/tree/aacead62c0eb052068172eba6b53fc85874d6d54/book/src/book/src/fork-selection.md) algorithm is defined such that only votes that appear after the transaction provide finality for the transaction, and finality is independent of the starting state.
### Verification

A light client that is aware of the supermajority set of validators can verify a receipt by following the Merkle Path to the PoH chain. The Bank-Merkle is the Merkle Root and will appear in votes included in an Entry. The light client can simulate [fork selection](https://github.com/solana-labs/solana/tree/aacead62c0eb052068172eba6b53fc85874d6d54/book/src/book/src/fork-selection.md) for the consecutive votes and verify that the receipt is confirmed at the desired lockout threshold.
### Synthetic State

Synthetic state should be computed into the Bank-Merkle along with the bank generated state.

For example:

* Epoch validator accounts and their stakes and weights.
* Computed fee rates

These values should have an entry in the Bank-Merkle. They should live under known accounts, and therefore have an exact address in the Merkle Path.
# Validator

## History

When we first started Solana, the goal was to de-risk our TPS claims. We knew that between optimistic concurrency control and sufficiently long leader slots, PoS consensus was not the biggest risk to TPS. It was GPU-based signature verification, software pipelining and concurrent banking. Thus, the TPU was born. After topping 100k TPS, we split the team into one group working toward 710k TPS and another to flesh out the validator pipeline. Hence, the TVU was born. The current architecture is a consequence of incremental development with that ordering and project priorities. It is not a reflection of what we ever believed was the most technically elegant cross-section of those technologies. In the context of leader rotation, the strong distinction between leading and validating is blurred.

## Difference between validating and leading

The fundamental difference between the pipelines is when the PoH is present. In a leader, we process transactions, removing bad ones, and then tag the result with a PoH hash. In the validator, we verify that hash, peel it off, and process the transactions in exactly the same way. The only difference is that if a validator sees a bad transaction, it can't simply remove it like the leader does, because that would cause the PoH hash to change. Instead, it rejects the whole block.

The other difference between the pipelines is what happens _after_ banking. The leader broadcasts entries to downstream validators, whereas the validator will have already done that in RetransmitStage, which is a confirmation time optimization. The validation pipeline, on the other hand, has one last step. Any time it finishes processing a block, it needs to weigh any forks it's observing, possibly cast a vote, and if so, reset its PoH hash to the block hash it just voted on.
## Proposed Design

We unwrap the many abstraction layers and build a single pipeline that can toggle leader mode on whenever the validator's ID shows up in the leader schedule.



## Notable changes

* No threads are shut down to switch out of leader mode. Instead, FetchStage
  should forward transactions to the next leader.
* Hoist FetchStage and BroadcastStage out of TPU
* Blocktree renamed to Blockstore
* BankForks renamed to Banktree
* TPU moves to new socket-free crate called solana-tpu.
* TPU's BankingStage absorbs ReplayStage
* TVU goes away
* New RepairStage absorbs Shred Fetch Stage and repair requests
* JSON RPC Service is optional - used for debugging. It should instead be part
  of a separate `solana-blockstreamer` executable.
* New MulticastStage absorbs retransmit part of RetransmitStage
* MulticastStage downstream of Blockstore
# Running an Archiver

This document describes how to set up an archiver in the testnet.

Please note some of the information and instructions described here may change in future releases.

## Overview

Archivers are specialized light clients. They download a part of the ledger \(a.k.a Segment\) and store it. They earn rewards for storing segments.

The testnet features a validator running at testnet.solana.com, which serves as the entrypoint to the cluster for your archiver node.

Additionally there is a blockexplorer available at [http://testnet.solana.com/](http://testnet.solana.com/).

The testnet is configured to reset the ledger daily, or sooner, should the hourly automated cluster sanity test fail.

## Machine Requirements

Archivers don't need specialized hardware. Anything with more than 128GB of disk space will be able to participate in the cluster as an archiver node.

Currently the disk space requirements are very low but we expect them to change in the future.

Prebuilt binaries are available for Linux x86\_64 \(Ubuntu 18.04 recommended\), macOS, and Windows.

### Confirm The Testnet Is Reachable

Before starting an archiver node, sanity check that the cluster is accessible to your machine by running some simple commands. If any of the commands fail, please retry 5-10 minutes later to confirm the testnet is not just restarting itself before debugging further.

Fetch the current transaction count over JSON RPC:

```bash
curl -X POST -H 'Content-Type: application/json' -d '{"jsonrpc":"2.0","id":1, "method":"getTransactionCount"}' http://testnet.solana.com:8899
```

Inspect the blockexplorer at [http://testnet.solana.com/](http://testnet.solana.com/) for activity.

View the [metrics dashboard](https://metrics.solana.com:3000/d/testnet-beta/testnet-monitor-beta?var-testnet=testnet) for more detail on cluster activity.
## Archiver Setup

#### Obtaining The Software

#### Bootstrap with `solana-install`

The `solana-install` tool can be used to easily install and upgrade the cluster software.

#### Linux and macOS

```bash
curl -sSf https://raw.githubusercontent.com/solana-labs/solana/v0.18.0/install/solana-install-init.sh | sh -s
```

Alternatively build the `solana-install` program from source and run the following command to obtain the same result:

```bash
solana-install init
```

#### Windows

Download and install **solana-install-init** from [https://github.com/solana-labs/solana/releases/latest](https://github.com/solana-labs/solana/releases/latest)

After a successful install, `solana-install update` may be used to easily update the software to a newer version at any time.

#### Download Prebuilt Binaries

If you would rather not use `solana-install` to manage the install, you can manually download and install the binaries.

#### Linux

Download the binaries by navigating to [https://github.com/solana-labs/solana/releases/latest](https://github.com/solana-labs/solana/releases/latest), download **solana-release-x86\_64-unknown-linux-gnu.tar.bz2**, then extract the archive:

```bash
tar jxf solana-release-x86_64-unknown-linux-gnu.tar.bz2
cd solana-release/
export PATH=$PWD/bin:$PATH
```

#### macOS

Download the binaries by navigating to [https://github.com/solana-labs/solana/releases/latest](https://github.com/solana-labs/solana/releases/latest), download **solana-release-x86\_64-apple-darwin.tar.bz2**, then extract the archive:

```bash
tar jxf solana-release-x86_64-apple-darwin.tar.bz2
cd solana-release/
export PATH=$PWD/bin:$PATH
```

#### Windows

Download the binaries by navigating to [https://github.com/solana-labs/solana/releases/latest](https://github.com/solana-labs/solana/releases/latest), download **solana-release-x86\_64-pc-windows-msvc.tar.bz2**, then extract it into a folder. It is a good idea to add this extracted folder to your Windows PATH.

## Starting The Archiver

Try running the following command to join the gossip network and view all the other nodes in the cluster:

```bash
solana-gossip --entrypoint testnet.solana.com:8001 spy
# Press ^C to exit
```

Now configure the keypairs for your archiver. On Windows, first navigate to the Solana install location and open a cmd prompt. Then run:

```bash
solana-keygen new -o archiver-keypair.json
solana-keygen new -o storage-keypair.json
```

Use solana-keygen to show the public keys for each of the keypairs; they will be needed in the next step:

* Windows

  ```bash
  # The archiver's identity
  solana-keygen pubkey archiver-keypair.json
  solana-keygen pubkey storage-keypair.json
  ```

* Linux and macOS

  ```bash
  export ARCHIVER_IDENTITY=$(solana-keygen pubkey archiver-keypair.json)
  export STORAGE_IDENTITY=$(solana-keygen pubkey storage-keypair.json)
  ```

Then set up the storage accounts for your archiver by running:

```bash
solana --keypair archiver-keypair.json airdrop 100000 lamports
solana --keypair archiver-keypair.json create-archiver-storage-account $ARCHIVER_IDENTITY $STORAGE_IDENTITY
```

Note: Every time the testnet restarts, run the steps to set up the archiver accounts again.

To start the archiver:

```bash
solana-archiver --entrypoint testnet.solana.com:8001 --identity-keypair archiver-keypair.json --storage-keypair storage-keypair.json --ledger archiver-ledger
```

## Verify Archiver Setup

From another console, confirm the IP address and **identity pubkey** of your archiver are visible in the gossip network by running:

```bash
solana-gossip --entrypoint testnet.solana.com:8001 spy
```

Provide the **storage account pubkey** to the `solana show-storage-account` command to view the recent mining activity from your archiver:

```bash
solana --keypair storage-keypair.json show-storage-account $STORAGE_IDENTITY
```
# Running a Validator

This document describes how to participate in the Solana testnet as a validator
node.

Please note some of the information and instructions described here may change
in future releases, and documentation will be updated for mainnet participation.

## Overview

Solana currently maintains several testnets, each featuring a validator that can
serve as the entrypoint to the cluster for your validator.

Current testnet entrypoints:

* Stable, testnet.solana.com
* Beta, beta.testnet.solana.com
* Edge, edge.testnet.solana.com

Solana may launch special testnets for validator participation; we will provide
you with a specific entrypoint URL to use.

Prior to mainnet, the testnets may be running different versions of solana
software, which may feature breaking changes. For information on choosing a
testnet and finding software version info, jump to [Choosing a Testnet](validator-testnet.md).

The testnets are configured to reset the ledger daily, or sooner, should the
hourly automated cluster sanity test fail.

There is a network explorer that shows the status of solana testnets available
at [http://explorer.solana.com/](https://explorer.solana.com/).

Also we'd love it if you choose to register your validator node with us at
[https://forms.gle/LfFscZqJELbuUP139](https://forms.gle/LfFscZqJELbuUP139).
# Installing the Validator Software

Install the Solana release
[v0.21.0](https://github.com/solana-labs/solana/releases/tag/v0.21.0) on your
machine by running:

```bash
curl -sSf https://raw.githubusercontent.com/solana-labs/solana/v0.21.0/install/solana-install-init.sh | sh -s - 0.21.0
```

If you are connecting to a different testnet, you can replace `0.21.0` with the
release tag matching the software version of your desired testnet, or replace it
with the named channel `stable`, `beta`, or `edge`.

The following output indicates a successful update:

```text
looking for latest release
downloading v0.21.0 installer
Configuration: /home/solana/.config/solana/install/config.yml
Active release directory: /home/solana/.local/share/solana/install/active_release
* Release version: 0.21.0
* Release URL: https://github.com/solana-labs/solana/releases/download/v0.21.0/solana-release-x86_64-unknown-linux-gnu.tar.bz2
Update successful
```

After a successful install, `solana-install update` may be used to easily update
the cluster software to a newer version at any time.

## Download Prebuilt Binaries

If you would rather not use `solana-install` to manage the install, you can
manually download and install the binaries.

### Linux

Download the binaries by navigating to
[https://github.com/solana-labs/solana/releases/latest](https://github.com/solana-labs/solana/releases/latest),
download **solana-release-x86\_64-unknown-linux-gnu.tar.bz2**, then extract the
archive:

```bash
tar jxf solana-release-x86_64-unknown-linux-gnu.tar.bz2
cd solana-release/
export PATH=$PWD/bin:$PATH
```

### macOS

Download the binaries by navigating to
[https://github.com/solana-labs/solana/releases/latest](https://github.com/solana-labs/solana/releases/latest),
download **solana-release-x86\_64-apple-darwin.tar.bz2**, then extract the
archive:

```bash
tar jxf solana-release-x86_64-apple-darwin.tar.bz2
cd solana-release/
export PATH=$PWD/bin:$PATH
```

## Build From Source

If you are unable to use the prebuilt binaries or prefer to build it yourself
from source, navigate to
[https://github.com/solana-labs/solana/releases/latest](https://github.com/solana-labs/solana/releases/latest),
and download the **Source Code** archive. Extract the code and build the
binaries with:

```bash
./scripts/cargo-install-all.sh .
export PATH=$PWD/bin:$PATH
```

You can then run the following command to obtain the same result as with
prebuilt binaries:

```bash
solana-install init
```
# Choosing a Testnet

Solana maintains several testnets, each featuring a Solana-owned validator
that serves as an entrypoint to the cluster.

Current testnet entrypoints:

* Stable: testnet.solana.com
* Beta: beta.testnet.solana.com

Application developers should target the Stable testnet. Key differences
between the Stable testnet and what will be mainnet:

* Stable testnet tokens are not real
* Stable testnet includes a token faucet for application testing
* Stable testnet may be subject to ledger resets
* Stable testnet typically runs a newer software version than mainnet
* Stable testnet may be maintained by different validators than mainnet

The Beta testnet is used to showcase and stabilize new features before they
are tagged for release. Application developers are free to target the Beta
testnet, but should expect instability and periodic ledger resets. Regarding
stability, all that can be said is that CI automation was successful.

### Get Testnet Version

You can submit a JSON-RPC request to see the specific software version of the
cluster. Use this to specify [the software version to install](validator-software.md).

```bash
curl -X POST -H 'Content-Type: application/json' -d '{"jsonrpc":"2.0","id":1, "method":"getVersion"}' testnet.solana.com:8899
```

Example result:
`{"jsonrpc":"2.0","result":{"solana-core":"0.21.0"},"id":1}`

## Using a Different Testnet

This guide is written in the context of testnet.solana.com, our most stable
cluster. To participate in another testnet, modify the commands in the following
pages, replacing `testnet.solana.com` with your desired testnet.
# Anatomy of a Transaction

Transactions encode lists of instructions that are executed sequentially, and only committed if all the instructions complete successfully. All account updates are reverted upon the failure of a transaction. Each transaction details the accounts used, including which must sign and which are read only, a recent blockhash, the instructions, and any signatures.

## Accounts and Signatures

Each transaction explicitly lists all account public keys referenced by the transaction's instructions. A subset of those public keys are each accompanied by a transaction signature. Those signatures signal on-chain programs that the account holder has authorized the transaction. Typically, the program uses the authorization to permit debiting the account or modifying its data.

The transaction also marks some accounts as _read-only accounts_. The runtime permits read-only accounts to be read concurrently. If a program attempts to modify a read-only account, the transaction is rejected by the runtime.

## Recent Blockhash

A transaction includes a recent blockhash to prevent duplication and to give transactions lifetimes. Any transaction that is completely identical to a previous one is rejected, so adding a newer blockhash allows multiple transactions to repeat the exact same action. Transactions also have lifetimes that are defined by the blockhash, as any transaction whose blockhash is too old will be rejected.

## Instructions

Each instruction specifies a single program account \(which must be marked executable\), a subset of the transaction's accounts that should be passed to the program, and a data byte array that is passed to the program. The program interprets the data array and operates on the accounts specified by the instructions. The program can return successfully, or with an error code. An error return causes the entire transaction to fail immediately.
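
The layout described above can be summarized with a self-contained sketch; the field and type names here are illustrative stand-ins, not the actual SDK definitions.

```text
type Pubkey = [u8; 32];
type Signature = [u8; 64];
type Hash = [u8; 32];

struct Instruction {
    program_id_index: u8,     // index into account_keys; the account must be executable
    account_indexes: Vec<u8>, // subset of account_keys passed to the program
    data: Vec<u8>,            // opaque byte array interpreted by the program
}

struct Transaction {
    signatures: Vec<Signature>,     // one per signing account key
    account_keys: Vec<Pubkey>,      // signing keys first, then read-only and other accounts
    recent_blockhash: Hash,         // deduplication plus transaction lifetime
    instructions: Vec<Instruction>, // executed sequentially, committed all-or-nothing
}
```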
# TVU


# Blocktree

After a block reaches finality, all blocks from that one on down to the genesis block form a linear chain with the familiar name blockchain. Until that point, however, the validator must maintain all potentially valid chains, called _forks_. The process by which forks naturally form as a result of leader rotation is described in [fork generation](../../cluster/fork-generation.md). The _blocktree_ data structure described here is how a validator copes with those forks until blocks are finalized.

The blocktree allows a validator to record every shred it observes on the network, in any order, as long as the shred is signed by the expected leader for a given slot.

Shreds are moved to a fork-able key space: the tuple of `leader slot` + `shred index` \(within the slot\). This permits the skip-list structure of the Solana protocol to be stored in its entirety, without a-priori choosing which fork to follow, which Entries to persist or when to persist them.

Repair requests for recent shreds are served out of RAM or recent files, and out of deeper storage for less recent shreds, as implemented by the store backing Blocktree.
## Functionalities of Blocktree

1. Persistence: the Blocktree lives in the front of the node's verification
   pipeline, right behind network receive and signature verification. If the
   shred received is consistent with the leader schedule \(i.e. was signed by the
   leader for the indicated slot\), it is immediately stored.
2. Repair: repair is the same as window repair above, but able to serve any
   shred that's been received. Blocktree stores shreds with signatures,
   preserving the chain of origination.
3. Forks: Blocktree supports random access of shreds, so it can support a
   validator's need to rollback and replay from a Bank checkpoint.
4. Restart: with proper pruning/culling, the Blocktree can be replayed by
   ordered enumeration of entries from slot 0. The logic of the replay stage
   \(i.e. dealing with forks\) will have to be used for the most recent entries in
   the Blocktree.
## Blocktree Design

1. Entries in the Blocktree are stored as key-value pairs, where the key is the concatenated slot index and shred index for an entry, and the value is the entry data \(a sketch of one possible key encoding follows this list\). Note shred indexes are zero-based for each slot \(i.e. they're slot-relative\).
2. The Blocktree maintains metadata for each slot, in the `SlotMeta` struct containing:
   * `slot_index` - The index of this slot
   * `num_blocks` - The number of blocks in the slot \(used for chaining to a previous slot\)
   * `consumed` - The highest shred index `n`, such that for all `m < n`, there exists a shred in this slot with shred index equal to `m` \(i.e. the highest consecutive shred index\).
   * `received` - The highest received shred index for the slot
   * `next_slots` - A list of future slots this slot could chain to. Used when rebuilding
     the ledger to find possible fork points.
   * `last_index` - The index of the shred that is flagged as the last shred for this slot. This flag on a shred will be set by the leader for a slot when they are transmitting the last shred for a slot.
   * `is_rooted` - True iff every block from 0...slot forms a full sequence without any holes. We can derive is\_rooted for each slot with the following rules. Let slot\(n\) be the slot with index `n`, and slot\(n\).is\_full\(\) is true if the slot with index `n` has all the ticks expected for that slot. Let is\_rooted\(n\) be the statement that "the slot\(n\).is\_rooted is true". Then:

     is\_rooted\(0\) is true, and is\_rooted\(n+1\) iff \(is\_rooted\(n\) and slot\(n\).is\_full\(\)\)
3. Chaining - When a shred for a new slot `x` arrives, we check the number of blocks \(`num_blocks`\) for that new slot \(this information is encoded in the shred\). We then know that this new slot chains to slot `x - num_blocks`.
4. Subscriptions - The Blocktree records a set of slots that have been "subscribed" to. This means entries that chain to these slots will be sent on the Blocktree channel for consumption by the ReplayStage. See the `Blocktree APIs` for details.
5. Update notifications - The Blocktree notifies listeners when slot\(n\).is\_rooted is flipped from false to true for any `n`.
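
A sketch of one possible key encoding for item 1 above \(an assumption, not the actual storage format\): big-endian byte order makes keys sort by slot, then by shred index within the slot.

```text
fn blocktree_key(slot_index: u64, shred_index: u64) -> [u8; 16] {
    let mut key = [0u8; 16];
    key[..8].copy_from_slice(&slot_index.to_be_bytes());
    key[8..].copy_from_slice(&shred_index.to_be_bytes());
    key
}
```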
## Blocktree APIs

The Blocktree offers a subscription based API that ReplayStage uses to ask for entries it's interested in. The entries will be sent on a channel exposed by the Blocktree. These subscription APIs are as follows:

1. `fn get_slots_since(slot_indexes: &[u64]) -> Vec<SlotMeta>`: Returns new slots connecting to any element of the list `slot_indexes`.
2. `fn get_slot_entries(slot_index: u64, entry_start_index: usize, max_entries: Option<u64>) -> Vec<Entry>`: Returns the entry vector for the slot starting with `entry_start_index`, capping the result at `max` if `max_entries == Some(max)`; otherwise, no upper limit on the length of the return vector is imposed.

Note: Cumulatively, this means that the replay stage will now have to know when a slot is finished, and subscribe to the next slot it's interested in to get the next set of entries. Previously, the burden of chaining slots fell on the Blocktree.
## Interfacing with Bank

The bank exposes to replay stage:

1. `prev_hash`: which PoH chain it's working on, as indicated by the hash of the last
   entry it processed
2. `tick_height`: the ticks in the PoH chain currently being verified by this
   bank
3. `votes`: a stack of records that contain:
   1. `prev_hashes`: what anything after this vote must chain to in PoH
   2. `tick_height`: the tick height at which this vote was cast
   3. `lockout period`: how long a chain must be observed to be in the ledger to
      be able to be chained below this vote

Replay stage uses Blocktree APIs to find the longest chain of entries it can hang off a previous vote. If that chain of entries does not hang off the latest vote, the replay stage rolls back the bank to that vote and replays the chain from there.
## Pruning Blocktree

Once Blocktree entries are old enough, representing all the possible forks becomes less useful, perhaps even problematic for replay upon restart. Once a validator's votes have reached max lockout, however, any Blocktree contents that are not on the PoH chain for that vote can be pruned, expunged.

Archiver nodes will be responsible for storing really old ledger contents, and validators need only persist their bank periodically.
[package]
name = "solana-chacha-sys"
version = "0.22.0"
description = "Solana chacha-sys"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
homepage = "https://solana.com/"
license = "Apache-2.0"
edition = "2018"

[build-dependencies]
cc = "1.0.48"
extern crate cc;

// Build script: compile the ChaCha C sources into a static library that is
// linked into this crate.
fn main() {
    cc::Build::new()
        .file("cpu-crypt/chacha20_core.c")
        .file("cpu-crypt/chacha_cbc.c")
        .compile("libcpu-crypt");
}
chacha-sys/cpu-crypt/.gitignore

release/
V:=debug

LIB:=cpu-crypt

CFLAGS_common:=-Wall -Werror -pedantic -fPIC
CFLAGS_release:=-march=native -O3 $(CFLAGS_common)
CFLAGS_debug:=-g $(CFLAGS_common)
CFLAGS:=$(CFLAGS_$V)

all: $V/lib$(LIB).a

$V/chacha20_core.o: chacha20_core.c chacha.h
	@mkdir -p $(@D)
	$(CC) $(CFLAGS) -c $< -o $@

$V/chacha_cbc.o: chacha_cbc.c chacha.h
	@mkdir -p $(@D)
	$(CC) $(CFLAGS) -c $< -o $@

$V/lib$(LIB).a: $V/chacha20_core.o $V/chacha_cbc.o
	$(AR) rcs $@ $^

.PHONY:clean
clean:
	rm -rf $V
#ifndef HEADER_CHACHA_H
# define HEADER_CHACHA_H

#include <string.h>
#include <inttypes.h>
# include <stddef.h>
# ifdef __cplusplus
extern "C" {
# endif

typedef unsigned int u32;

#define CHACHA_KEY_SIZE 32
#define CHACHA_NONCE_SIZE 12
#define CHACHA_BLOCK_SIZE 64
#define CHACHA_ROUNDS 500

void chacha20_encrypt(const u32 input[16],
                      unsigned char output[64],
                      int num_rounds);

void chacha20_encrypt_ctr(const uint8_t *in, uint8_t *out, size_t in_len,
                          const uint8_t key[CHACHA_KEY_SIZE], const uint8_t nonce[CHACHA_NONCE_SIZE],
                          uint32_t counter);

void chacha20_cbc128_encrypt(const unsigned char* in, unsigned char* out,
                             uint32_t len, const uint8_t* key,
                             unsigned char* ivec);

# ifdef __cplusplus
}
# endif

#endif
#include "chacha.h"

#define ROTL32(v, n) (((v) << (n)) | ((v) >> (32 - (n))))

#define ROTATE(v, c) ROTL32((v), (c))

#define XOR(v, w) ((v) ^ (w))

#define PLUS(x, y) ((x) + (y))

#define U32TO8_LITTLE(p, v) \
{ (p)[0] = ((v)      ) & 0xff; (p)[1] = ((v) >>  8) & 0xff; \
  (p)[2] = ((v) >> 16) & 0xff; (p)[3] = ((v) >> 24) & 0xff; }

#define U8TO32_LITTLE(p) \
  (((u32)((p)[0])      ) | ((u32)((p)[1]) <<  8) | \
   ((u32)((p)[2]) << 16) | ((u32)((p)[3]) << 24)   )

#define QUARTERROUND(a,b,c,d) \
  x[a] = PLUS(x[a],x[b]); x[d] = ROTATE(XOR(x[d],x[a]),16); \
  x[c] = PLUS(x[c],x[d]); x[b] = ROTATE(XOR(x[b],x[c]),12); \
  x[a] = PLUS(x[a],x[b]); x[d] = ROTATE(XOR(x[d],x[a]), 8); \
  x[c] = PLUS(x[c],x[d]); x[b] = ROTATE(XOR(x[b],x[c]), 7);

// sigma contains the ChaCha constants, which happen to be an ASCII string.
static const uint8_t sigma[16] = { 'e', 'x', 'p', 'a', 'n', 'd', ' ', '3',
                                   '2', '-', 'b', 'y', 't', 'e', ' ', 'k' };

void chacha20_encrypt(const u32 input[16],
                      unsigned char output[64],
                      int num_rounds)
{
    u32 x[16];
    int i;
    memcpy(x, input, sizeof(u32) * 16);
    for (i = num_rounds; i > 0; i -= 2) {
        QUARTERROUND( 0, 4, 8,12)
        QUARTERROUND( 1, 5, 9,13)
        QUARTERROUND( 2, 6,10,14)
        QUARTERROUND( 3, 7,11,15)
        QUARTERROUND( 0, 5,10,15)
        QUARTERROUND( 1, 6,11,12)
        QUARTERROUND( 2, 7, 8,13)
        QUARTERROUND( 3, 4, 9,14)
    }
    for (i = 0; i < 16; ++i) {
        x[i] = PLUS(x[i], input[i]);
    }
    for (i = 0; i < 16; ++i) {
        U32TO8_LITTLE(output + 4 * i, x[i]);
    }
}

void chacha20_encrypt_ctr(const uint8_t *in, uint8_t *out, size_t in_len,
                          const uint8_t key[CHACHA_KEY_SIZE],
                          const uint8_t nonce[CHACHA_NONCE_SIZE],
                          uint32_t counter)
{
    uint32_t input[16];
    uint8_t buf[64];
    size_t todo, i;

    input[0] = U8TO32_LITTLE(sigma + 0);
    input[1] = U8TO32_LITTLE(sigma + 4);
    input[2] = U8TO32_LITTLE(sigma + 8);
    input[3] = U8TO32_LITTLE(sigma + 12);

    input[4] = U8TO32_LITTLE(key + 0);
    input[5] = U8TO32_LITTLE(key + 4);
    input[6] = U8TO32_LITTLE(key + 8);
    input[7] = U8TO32_LITTLE(key + 12);

    input[8] = U8TO32_LITTLE(key + 16);
    input[9] = U8TO32_LITTLE(key + 20);
    input[10] = U8TO32_LITTLE(key + 24);
    input[11] = U8TO32_LITTLE(key + 28);

    input[12] = counter;
    input[13] = U8TO32_LITTLE(nonce + 0);
    input[14] = U8TO32_LITTLE(nonce + 4);
    input[15] = U8TO32_LITTLE(nonce + 8);

    while (in_len > 0) {
        todo = sizeof(buf);
        if (in_len < todo) {
            todo = in_len;
        }

        chacha20_encrypt(input, buf, 20);
        for (i = 0; i < todo; i++) {
            out[i] = in[i] ^ buf[i];
        }

        out += todo;
        in += todo;
        in_len -= todo;

        input[12]++;
    }
}
#include "chacha.h"

#if !defined(STRICT_ALIGNMENT) && !defined(PEDANTIC)
# define STRICT_ALIGNMENT 0
#endif

void chacha20_cbc128_encrypt(const unsigned char* in, unsigned char* out,
                             uint32_t len, const uint8_t* key,
                             unsigned char* ivec)
{
    size_t n;
    unsigned char *iv = ivec;
    (void)key;

    if (len == 0) {
        return;
    }

#if !defined(OPENSSL_SMALL_FOOTPRINT)
    if (STRICT_ALIGNMENT &&
        ((size_t)in | (size_t)out | (size_t)ivec) % sizeof(size_t) != 0) {
        while (len >= CHACHA_BLOCK_SIZE) {
            for (n = 0; n < CHACHA_BLOCK_SIZE; ++n) {
                out[n] = in[n] ^ iv[n];
                //printf("%x ", out[n]);
            }
            chacha20_encrypt((const u32*)out, out, CHACHA_ROUNDS);
            iv = out;
            len -= CHACHA_BLOCK_SIZE;
            in += CHACHA_BLOCK_SIZE;
            out += CHACHA_BLOCK_SIZE;
        }
    } else {
        while (len >= CHACHA_BLOCK_SIZE) {
            for (n = 0; n < CHACHA_BLOCK_SIZE; n += sizeof(size_t)) {
                *(size_t *)(out + n) =
                    *(size_t *)(in + n) ^ *(size_t *)(iv + n);
                //printf("%zu ", *(size_t *)(iv + n));
            }
            chacha20_encrypt((const u32*)out, out, CHACHA_ROUNDS);
            iv = out;
            len -= CHACHA_BLOCK_SIZE;
            in += CHACHA_BLOCK_SIZE;
            out += CHACHA_BLOCK_SIZE;
        }
    }
#endif
    while (len) {
        for (n = 0; n < CHACHA_BLOCK_SIZE && n < len; ++n) {
            out[n] = in[n] ^ iv[n];
        }
        for (; n < CHACHA_BLOCK_SIZE; ++n) {
            out[n] = iv[n];
        }
        chacha20_encrypt((const u32*)out, out, CHACHA_ROUNDS);
        iv = out;
        if (len <= CHACHA_BLOCK_SIZE) {
            break;
        }
        len -= CHACHA_BLOCK_SIZE;
        in += CHACHA_BLOCK_SIZE;
        out += CHACHA_BLOCK_SIZE;
    }
    memcpy(ivec, iv, CHACHA_BLOCK_SIZE);
}

void chacha20_cbc_encrypt(const uint8_t *in, uint8_t *out, size_t in_len,
                          const uint8_t key[CHACHA_KEY_SIZE], uint8_t* ivec)
{
    chacha20_cbc128_encrypt(in, out, in_len, key, ivec);
}
@@ -1,21 +0,0 @@
extern "C" {
    fn chacha20_cbc_encrypt(
        input: *const u8,
        output: *mut u8,
        in_len: usize,
        key: *const u8,
        ivec: *mut u8,
    );
}

pub fn chacha_cbc_encrypt(input: &[u8], output: &mut [u8], key: &[u8], ivec: &mut [u8]) {
    unsafe {
        chacha20_cbc_encrypt(
            input.as_ptr(),
            output.as_mut_ptr(),
            input.len(),
            key.as_ptr(),
            ivec.as_mut_ptr(),
        );
    }
}
ci/README.md
@@ -2,7 +2,7 @@
 Our CI infrastructure is built around [BuildKite](https://buildkite.com) with some
 additional GitHub integration provided by https://github.com/mvines/ci-gate
 
-## Agent Queues
+# Agent Queues
 
 We define two [Agent Queues](https://buildkite.com/docs/agent/v3/queues):
 `queue=default` and `queue=cuda`. The `default` queue should be favored and
@@ -12,9 +12,52 @@ be run on the `default` queue, and the [buildkite artifact
 system](https://buildkite.com/docs/builds/artifacts) used to transfer build
 products over to a GPU instance for testing.
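To make the queue/artifact handoff above concrete, here is a hedged sketch of how a build product typically crosses from a `default` agent to a `cuda` agent. The `buildkite-agent artifact` subcommands are real, but the commands and file names are illustrative, not taken from this repository's pipelines:

```bash
# On a queue=default agent: build, then publish the product as an artifact
cargo build --release
tar cjf build-products.tar.bz2 target/release
buildkite-agent artifact upload build-products.tar.bz2

# Later, on a queue=cuda agent: pull the artifact down and test against it
buildkite-agent artifact download build-products.tar.bz2 .
tar xjf build-products.tar.bz2
```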
 
-## Buildkite Agent Management
+# Buildkite Agent Management
 
-### Buildkite Azure Setup
+## Manual Node Setup for Colocated Hardware
+
+This section describes how to set up a new machine that does not have a
+pre-configured image with all the requirements installed. Used for custom-built
+hardware at a colocation or office facility. Also works for vanilla Ubuntu cloud
+instances.
+
+### Pre-Requisites
+
+- Install Ubuntu 18.04 LTS Server
+- Log in as a local or remote user with `sudo` privileges
+
+### Install Core Requirements
+
+##### Non-GPU enabled machines
+```bash
+sudo ./setup-new-buildkite-agent/setup-new-machine.sh
+```
+
+##### GPU-enabled machines
+- 1 or more NVIDIA GPUs should be installed in the machine (tested with 2080Ti)
+```bash
+sudo CUDA=1 ./setup-new-buildkite-agent/setup-new-machine.sh
+```
+
+### Configure Node for Buildkite-agent based CI
+
+- Install `buildkite-agent` and set up its user environment with:
+```bash
+sudo ./setup-new-buildkite-agent/setup-buildkite.sh
+```
+- Copy the pubkey contents from `~buildkite-agent/.ssh/id_ecdsa.pub` and
+  add the pubkey as an authorized SSH key on github.
+- Edit `/etc/buildkite-agent/buildkite-agent.cfg` and/or
+  `/etc/systemd/system/buildkite-agent@*` to the desired configuration of the agent(s)
+- Copy `ejson` keys from another CI node at `/opt/ejson/keys/` to the same
+  location on the new node.
+- Start the new agent(s) with `sudo systemctl enable --now buildkite-agent`
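Condensing the setup steps above into one place, provisioning a fresh node looks roughly like the following. This is an untested sketch (the `existing-ci-node` host name is a placeholder), not a script that ships in the repo:

```bash
#!/usr/bin/env bash
set -ex

# Base machine setup; prefix with CUDA=1 on GPU machines
sudo ./setup-new-buildkite-agent/setup-new-machine.sh

# Install buildkite-agent and its user environment
sudo ./setup-new-buildkite-agent/setup-buildkite.sh

# Print the pubkey that must be added as an authorized SSH key on github
sudo cat ~buildkite-agent/.ssh/id_ecdsa.pub

# Copy ejson keys from an existing CI node (hostname is a placeholder)
sudo scp -r existing-ci-node:/opt/ejson/keys /opt/ejson/

# Bring up the agent(s)
sudo systemctl enable --now buildkite-agent
```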
+
+# Reference
+
+This section contains details regarding previous CI setups that have been used,
+and that we may return to one day.
+
+## Buildkite Azure Setup
+
 Create a new Azure-based "queue=default" agent by running the following command:
 ```
@@ -35,7 +78,7 @@ Creating a "queue=cuda" agent follows the same process but additionally:
 2. Edit the tags field in /etc/buildkite-agent/buildkite-agent.cfg to `tags="queue=cuda,queue=default"`
    and decrease the value of the priority field by one
 
-#### Updating the CI Disk Image
+### Updating the CI Disk Image
 
 1. Create a new VM Instance as described above
 1. Modify it as required
@@ -48,12 +91,7 @@ Creating a "queue=cuda" agent follows the same process but additionally:
 1. Go to the `ci` resource group in the Azure portal and remove all resources
    with the XYZ name in them
 
-## Reference
-
-This section contains details regarding previous CI setups that have been used,
-and that we may return to one day.
-
-### Buildkite AWS CloudFormation Setup
+## Buildkite AWS CloudFormation Setup
 
 **AWS CloudFormation is currently inactive, although it may be restored in the
 future**
@@ -62,7 +100,7 @@ AWS CloudFormation can be used to scale machines up and down based on the
 current CI load. If no machine is currently running it can take up to 60
 seconds to spin up a new instance, please remain calm during this time.
 
-#### AMI
+### AMI
 We use a custom AWS AMI built via https://github.com/solana-labs/elastic-ci-stack-for-aws/tree/solana/cuda.
 
 Use the following process to update this AMI as dependencies change:
@@ -84,13 +122,13 @@ The new AMI should also now be visible in your EC2 Dashboard. Go to the desired
 AWS CloudFormation stack, update the **ImageId** field to the new AMI id, and
 *apply* the stack changes.
 
-### Buildkite GCP Setup
+## Buildkite GCP Setup
 
 CI runs on Google Cloud Platform via two Compute Engine Instance groups:
 `ci-default` and `ci-cuda`. Autoscaling is currently disabled and the number of
 VM Instances in each group is manually adjusted.
 
-#### Updating a CI Disk Image
+### Updating a CI Disk Image
 
 Each Instance group has its own disk image, `ci-default-vX` and
 `ci-cuda-vY`, where *X* and *Y* are incremented each time the image is changed.
ci/_
@@ -5,7 +5,13 @@
 # |source| me
 #
 
+base_dir=$(realpath --strip "$(dirname "$0")/..")
+
 _() {
+  if [[ $(pwd) = $base_dir ]]; then
     echo "--- $*"
+  else
+    echo "--- $* (wd: $(pwd))"
+  fi
   "$@"
 }
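The `_` helper above prints a Buildkite-style `--- …` section header (annotated with the working directory when it differs from the repo root) before executing its arguments. A minimal usage sketch:

```bash
#!/usr/bin/env bash
source ci/_

_ echo hello     # prints "--- echo hello", then "hello"
_ cargo build    # prints "--- cargo build", then runs the build
```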
@@ -9,7 +9,7 @@
 #   ./affects-files.sh ^snap/  -- anything under the snap/ subdirectory
 #   ./affects-files.sh snap/   -- also matches foo/snap/
 # Any pattern starting with the ! character will be negated:
-#   ./affects-files.sh !^book/ -- anything *not* under the book/ subdirectory
+#   ./affects-files.sh !^docs/ -- anything *not* under the docs/ subdirectory
 #
 set -e
 cd "$(dirname "$0")"/..
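Combining the documented patterns, a hedged example of gating a CI step on which files a change touches (this mirrors how `ci/maybe-trigger-tests.sh`, shown later in this diff, consumes the script):

```bash
# Run the heavy tests only when something outside docs/ changed
if ci/affects-files.sh \!^docs/; then
  echo "non-docs changes detected; running full test suite"
else
  echo "docs-only change; skipping tests"
fi
```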
@@ -2,19 +2,23 @@
 # Build steps that run after the primary pipeline on pushes and tags.
 # Pull requests do not run these steps.
 steps:
+  - command: "ci/publish-tarball.sh"
+    timeout_in_minutes: 60
+    name: "publish tarball"
+  - command: "ci/publish-docs.sh"
+    timeout_in_minutes: 15
+    name: "publish docs"
+  - command: "ci/publish-bpf-sdk.sh"
+    timeout_in_minutes: 5
+    name: "publish bpf sdk"
+  - wait
   - command: "sdk/docker-solana/build.sh"
     timeout_in_minutes: 60
     name: "publish docker"
   - command: "ci/publish-crate.sh"
-    timeout_in_minutes: 120
+    timeout_in_minutes: 240
     name: "publish crate"
     branches: "!master"
-  - command: "ci/publish-bpf-sdk.sh"
-    timeout_in_minutes: 5
-    name: "publish bpf sdk"
-  - command: "ci/publish-tarball.sh"
-    timeout_in_minutes: 60
-    name: "publish tarball"
-  - command: "ci/publish-book.sh"
-    timeout_in_minutes: 15
-    name: "publish book"
+  # - command: ". ci/rust-version.sh; ci/docker-run.sh $$rust_stable_docker_image ci/test-move.sh"
+  #   name: "move"
+  #   timeout_in_minutes: 20
ci/buildkite-tests.yml (new file, 26 lines)
@@ -0,0 +1,26 @@
# These steps are conditionally triggered by ci/buildkite.yml when files
# other than those in docs/ are modified

steps:
  - command: ". ci/rust-version.sh; ci/docker-run.sh $$rust_nightly_docker_image ci/test-coverage.sh"
    name: "coverage"
    timeout_in_minutes: 30
  - wait
  - command: ". ci/rust-version.sh; ci/docker-run.sh $$rust_stable_docker_image ci/test-stable.sh"
    name: "stable"
    timeout_in_minutes: 60
    artifact_paths: "log-*.txt"
  - wait
  - command: "ci/test-stable-perf.sh"
    name: "stable-perf"
    timeout_in_minutes: 40
    artifact_paths: "log-*.txt"
    agents:
      - "queue=cuda"
  - command: "ci/test-bench.sh"
    name: "bench"
    timeout_in_minutes: 30
  - command: ". ci/rust-version.sh; ci/docker-run.sh $$rust_stable_docker_image ci/test-local-cluster.sh"
    name: "local-cluster"
    timeout_in_minutes: 45
    artifact_paths: "log-*.txt"
@@ -1,38 +1,32 @@
 # Build steps that run on pushes and pull requests.
+# If files other than those in docs/ were modified, this will be followed up by
+# ci/buildkite-tests.yml
 #
 # Release tags use buildkite-release.yml instead
 
 steps:
-  - command: "ci/shellcheck.sh"
-    name: "shellcheck"
+  - command: "ci/dependabot-pr.sh"
+    name: "dependabot"
     timeout_in_minutes: 5
+    if: build.env("GITHUB_USER") == "dependabot-preview[bot]"
+
+  - wait
+
   - command: ". ci/rust-version.sh; ci/docker-run.sh $$rust_nightly_docker_image ci/test-checks.sh"
     name: "checks"
    timeout_in_minutes: 20
+  - command: "ci/shellcheck.sh"
+    name: "shellcheck"
+    timeout_in_minutes: 5
+
   - wait
-  - command: "ci/test-stable-perf.sh"
-    name: "stable-perf"
-    timeout_in_minutes: 40
-    artifact_paths: "log-*.txt"
-    agents:
-      - "queue=cuda"
-  - command: "ci/test-bench.sh"
-    name: "bench"
-    timeout_in_minutes: 30
-  - command: ". ci/rust-version.sh; ci/docker-run.sh $$rust_stable_docker_image ci/test-stable.sh"
-    name: "stable"
-    timeout_in_minutes: 40
-    artifact_paths: "log-*.txt"
-  - command: ". ci/rust-version.sh; ci/docker-run.sh $$rust_stable_docker_image ci/test-move.sh"
-    name: "move"
-    timeout_in_minutes: 20
-  - command: ". ci/rust-version.sh; ci/docker-run.sh $$rust_stable_docker_image ci/test-local-cluster.sh"
-    name: "local-cluster"
-    timeout_in_minutes: 30
-    artifact_paths: "log-*.txt"
-  - command: ". ci/rust-version.sh; ci/docker-run.sh $$rust_nightly_docker_image ci/test-coverage.sh"
-    name: "coverage"
-    timeout_in_minutes: 30
+
+  - command: "ci/maybe-trigger-tests.sh"
+    name: "maybe-trigger-tests"
+    timeout_in_minutes: 2
+
   - wait
 
   - trigger: "solana-secondary"
     branches: "!pull/*"
     async: true
ci/dependabot-pr.sh (new executable file, 36 lines)
@@ -0,0 +1,36 @@
#!/usr/bin/env bash

set -ex

cd "$(dirname "$0")/.."

if ! echo "$BUILDKITE_BRANCH" | grep -E '^pull/[0-9]+/head$'; then
  echo "not pull request!?" >&2
  exit 1
fi

source ci/rust-version.sh stable

ci/docker-run.sh $rust_nightly_docker_image ci/dependabot-updater.sh

if [[ $(git status --short :**/Cargo.lock | wc -l) -eq 0 ]]; then
  echo --- ok
  exit 0
fi

echo --- "(FAILING) Backpropagating dependabot-triggered Cargo.lock updates"

name="dependabot-buildkite"
api_base="https://api.github.com/repos/solana-labs/solana/pulls"
pr_num=$(echo "$BUILDKITE_BRANCH" | grep -Eo '[0-9]+')
branch=$(curl -s "$api_base/$pr_num" | python -c 'import json,sys;print json.load(sys.stdin)["head"]["ref"]')

git add :**/Cargo.lock
EMAIL="dependabot-buildkite@noreply.solana.com" \
GIT_AUTHOR_NAME="$name" \
GIT_COMMITTER_NAME="$name" \
  git commit -m "[auto-commit] Update all Cargo lock files"
git push origin "HEAD:$branch"

echo "Source branch is updated; failing this build for the next"
exit 1
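A standalone illustration of the `BUILDKITE_BRANCH` parsing used above (the branch value is made up):

```bash
BUILDKITE_BRANCH="pull/9876/head"                          # illustrative value
echo "$BUILDKITE_BRANCH" | grep -E '^pull/[0-9]+/head$'    # matches; echoes the branch
pr_num=$(echo "$BUILDKITE_BRANCH" | grep -Eo '[0-9]+')
echo "$pr_num"                                             # 9876
```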
ci/dependabot-updater.sh (new executable file, 35 lines)
@@ -0,0 +1,35 @@
#!/usr/bin/env bash

set -ex
cd "$(dirname "$0")/.."
source ci/_

commit_range="$(git merge-base HEAD origin/master)..HEAD"
parsed_update_args="$(
  git log "$commit_range" --author "dependabot-preview" --oneline -n1 |
  grep -o 'Bump.*$' |
  sed -r 's/Bump ([^ ]+) from ([^ ]+) to ([^ ]+)/-p \1:\2 --precise \3/'
)"
# relaxed_parsed_update_args is a temporary measure...
relaxed_parsed_update_args="$(
  git log "$commit_range" --author "dependabot-preview" --oneline -n1 |
  grep -o 'Bump.*$' |
  sed -r 's/Bump ([^ ]+) from [^ ]+ to ([^ ]+)/-p \1 --precise \2/'
)"
package=$(echo "$parsed_update_args" | awk '{print $2}' | grep -o "^[^:]*")
if [[ -n $parsed_update_args ]]; then
  # find other Cargo.lock files and update them, excluding the default Cargo.lock
  # shellcheck disable=SC2086
  for lock in $(git grep --files-with-matches '^name = "'$package'"$' :**/Cargo.lock); do
    # it's possible our current versions are out of sync across lock files,
    # in that case try to sync them up with $relaxed_parsed_update_args
    _ scripts/cargo-for-all-lock-files.sh \
      "$lock" -- \
      update $parsed_update_args ||
    _ scripts/cargo-for-all-lock-files.sh \
      "$lock" -- \
      update $relaxed_parsed_update_args
  done
fi

echo --- ok
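To see what the two `sed` transforms above produce, here is a standalone run against a made-up dependabot commit subject; the first form pins the old version for `cargo update`, the relaxed form drops it:

```bash
subject='Bump serde from 1.0.104 to 1.0.106'   # illustrative commit subject
echo "$subject" | sed -r 's/Bump ([^ ]+) from ([^ ]+) to ([^ ]+)/-p \1:\2 --precise \3/'
# -p serde:1.0.104 --precise 1.0.106
echo "$subject" | sed -r 's/Bump ([^ ]+) from [^ ]+ to ([^ ]+)/-p \1 --precise \2/'
# -p serde --precise 1.0.106
```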
@@ -49,7 +49,7 @@ else
   # ~/.cargo
   ARGS+=(--volume "$PWD:/home")
 fi
-ARGS+=(--env "CARGO_HOME=/home/.cargo")
+ARGS+=(--env "HOME=/home" --env "CARGO_HOME=/home/.cargo")
 
 # kcov tries to set the personality of the binary which docker
 # doesn't allow by default.
@@ -67,15 +67,22 @@ ARGS+=(
   --env BUILDKITE_JOB_ID
   --env CI
   --env CI_BRANCH
+  --env CI_TAG
   --env CI_BUILD_ID
   --env CI_COMMIT
   --env CI_JOB_ID
   --env CI_PULL_REQUEST
   --env CI_REPO_SLUG
-  --env CODECOV_TOKEN
   --env CRATES_IO_TOKEN
 )
 
+# Also propagate environment variables needed for codecov
+# https://docs.codecov.io/docs/testing-with-docker#section-codecov-inside-docker
+# We normalize CI to `1`; but codecov expects it to be `true` to detect Buildkite...
+# Unfortunately, codecov.io fails sometimes:
+#   curl: (7) Failed to connect to codecov.io port 443: Connection timed out
+CODECOV_ENVS=$(CI=true bash <(while ! curl -sS --retry 5 --retry-delay 2 --retry-connrefused https://codecov.io/env; do sleep 10; done))
+
 if $INTERACTIVE; then
   if [[ -n $1 ]]; then
     echo
@@ -83,8 +90,10 @@ if $INTERACTIVE; then
     echo
   fi
   set -x
-  exec docker run --interactive --tty "${ARGS[@]}" "$IMAGE" bash
+  # shellcheck disable=SC2086
+  exec docker run --interactive --tty "${ARGS[@]}" $CODECOV_ENVS "$IMAGE" bash
 fi
 
 set -x
-exec docker run "${ARGS[@]}" "$IMAGE" "$@"
+# shellcheck disable=SC2086
+exec docker run "${ARGS[@]}" $CODECOV_ENVS "$IMAGE" "$@"
@@ -1,4 +1,4 @@
-FROM solanalabs/rust:1.40.0
+FROM solanalabs/rust:1.43.0
 ARG date
 
 RUN set -x \

@@ -15,6 +15,8 @@ To update the pinned version:
 1. Run `ci/docker-rust-nightly/build.sh` to rebuild the nightly image locally,
    or potentially `ci/docker-rust-nightly/build.sh YYYY-MM-DD` if there's a
    specific YYYY-MM-DD that is desired (default is today's build).
+   Check https://rust-lang.github.io/rustup-components-history/ for build
+   status
 1. Update `ci/rust-version.sh` to reflect the new nightly `YYYY-MM-DD`
 1. Run `SOLANA_DOCKER_RUN_NOSETUID=1 ci/docker-run.sh --nopull solanalabs/rust-nightly:YYYY-MM-DD ci/test-coverage.sh`
    to confirm the new nightly image builds. Fix any issues as needed
@@ -1,6 +1,6 @@
 # Note: when the rust version is changed also modify
 # ci/rust-version.sh to pick up the new image tag
-FROM rust:1.40.0
+FROM rust:1.43.0
 
 # Add Google Protocol Buffers for Libra's metrics library.
 ENV PROTOC_VERSION 3.8.0
@@ -17,6 +17,7 @@ RUN set -x \
     clang-7 \
     cmake \
     lcov \
+    libudev-dev \
     libclang-common-7-dev \
     mscgen \
     net-tools \
@@ -31,6 +32,7 @@ RUN set -x \
     && cargo install cargo-audit \
     && cargo install svgbob_cli \
     && cargo install mdbook \
+    && cargo install mdbook-linkcheck \
     && rustc --version \
     && cargo --version \
     && curl -OL https://github.com/google/protobuf/releases/download/v$PROTOC_VERSION/$PROTOC_ZIP \
@@ -1,61 +0,0 @@
#!/usr/bin/env bash
set -e

testCmd="$*"
genPipeline=false

cd "$(dirname "$0")/.."

# Clear cached json keypair files
rm -rf "$HOME/.config/solana"

source ci/_
export RUST_BACKTRACE=1
export RUSTFLAGS="-D warnings"
export PATH=$PWD/target/debug:$PATH
export USE_INSTALL=1

if [[ -n $BUILDKITE && -z $testCmd ]]; then
  genPipeline=true
  echo "
steps:
"
fi

build() {
  $genPipeline && return
  source ci/rust-version.sh stable
  source scripts/ulimit-n.sh
  _ cargo +$rust_stable build
}

runTest() {
  declare runTestName="$1"
  declare runTestCmd="$2"
  if $genPipeline; then
    echo "
  - command: \"$0 '$runTestCmd'\"
    name: \"$runTestName\"
    timeout_in_minutes: 45
"
    return
  fi

  if [[ -n $testCmd && "$testCmd" != "$runTestCmd" ]]; then
    echo Skipped "$runTestName"...
    return
  fi
  #shellcheck disable=SC2068 # Don't want to double quote $runTestCmd
  $runTestCmd
}

build

runTest "basic" \
  "ci/localnet-sanity.sh -i 128"

runTest "restart" \
  "ci/localnet-sanity.sh -i 128 -k 16"

runTest "incremental restart, extra node" \
  "ci/localnet-sanity.sh -i 128 -k 16 -R -x"
@@ -29,7 +29,7 @@ Start a local cluster and run sanity on it
  -x  - Add an extra validator (may be supplied multiple times)
  -r  - Select the RPC endpoint hosted by a node that starts as
        a validator node. If unspecified the RPC endpoint hosted by
-       the bootstrap leader will be used.
+       the bootstrap validator will be used.
  -c  - Reuse existing node/ledger configuration from a previous sanity
        run
 
@@ -73,16 +73,15 @@ source scripts/configure-metrics.sh
 source multinode-demo/common.sh
 
 nodes=(
-  "multinode-demo/faucet.sh"
-  "multinode-demo/bootstrap-leader.sh \
+  "multinode-demo/bootstrap-validator.sh \
     --no-restart \
-    --init-complete-file init-complete-node1.log \
+    --init-complete-file init-complete-node0.log \
     --dynamic-port-range 8000-8050"
   "multinode-demo/validator.sh \
     --enable-rpc-exit \
     --no-restart \
     --dynamic-port-range 8050-8100
-    --init-complete-file init-complete-node2.log \
+    --init-complete-file init-complete-node1.log \
     --rpc-port 18899"
 )
 
@@ -95,7 +94,7 @@ if [[ extraNodes -gt 0 ]]; then
       --no-restart \
       --dynamic-port-range $portStart-$portEnd
       --label dyn$i \
-      --init-complete-file init-complete-node$((2 + i)).log"
+      --init-complete-file init-complete-node$((1 + i)).log"
   )
 done
 fi
@@ -160,17 +159,16 @@ startNodes() {
   for i in $(seq 0 $((${#nodes[@]} - 1))); do
     declare cmd=${nodes[$i]}
 
-    if [[ "$i" -ne 0 ]]; then # 0 == faucet, skip it
     declare initCompleteFile="init-complete-node$i.log"
     rm -f "$initCompleteFile"
     initCompleteFiles+=("$initCompleteFile")
-    fi
     startNode "$i" "$cmd $maybeExpectedGenesisHash"
     if $addLogs; then
       logs+=("$(getNodeLogFile "$i" "$cmd")")
     fi
 
-    # 1 == bootstrap leader, wait until it boots before starting
+    # 1 == bootstrap validator, wait until it boots before starting
     # other validators
     if [[ "$i" -eq 1 ]]; then
       SECONDS=
@@ -178,8 +176,8 @@ startNodes() {
 
       (
        set -x
-        $solana_cli --keypair config/bootstrap-leader/identity-keypair.json \
-          --url http://127.0.0.1:8899 get-genesis-hash
+        $solana_cli --keypair config/bootstrap-validator/identity.json \
+          --url http://127.0.0.1:8899 genesis-hash
      ) | tee genesis-hash.log
       maybeExpectedGenesisHash="--expected-genesis-hash $(tail -n1 genesis-hash.log)"
     fi
@@ -277,7 +275,7 @@ rollingNodeRestart() {
 }
 
 verifyLedger() {
-  for ledger in bootstrap-leader validator; do
+  for ledger in bootstrap-validator validator; do
     echo "--- $ledger ledger verification"
     (
       set -x
@@ -331,7 +329,7 @@ while [[ $iteration -le $iterations ]]; do
     rm -rf $client_keypair
   ) || flag_error
 
-  echo "--- RPC API: bootstrap-leader getTransactionCount ($iteration)"
+  echo "--- RPC API: bootstrap-validator getTransactionCount ($iteration)"
   (
     set -x
     curl --retry 5 --retry-delay 2 --retry-connrefused \
@@ -351,7 +349,7 @@ while [[ $iteration -le $iterations ]]; do
       http://localhost:18899
   ) || flag_error
 
-  # Verify transaction count as reported by the bootstrap-leader node is advancing
+  # Verify transaction count as reported by the bootstrap-validator node is advancing
   transactionCount=$(sed -e 's/{"jsonrpc":"2.0","result":\([0-9]*\),"id":1}/\1/' log-transactionCount.txt)
   if [[ -n $lastTransactionCount ]]; then
     echo "--- Transaction count check: $lastTransactionCount < $transactionCount"
ci/maybe-trigger-tests.sh (new executable file, 21 lines)
@@ -0,0 +1,21 @@
#!/usr/bin/env bash
set -e
cd "$(dirname "$0")/.."

annotate() {
  ${BUILDKITE:-false} && {
    buildkite-agent annotate "$@"
  }
}

# Skip if only the docs have been modified
ci/affects-files.sh \
  \!^docs/ \
|| {
  annotate --style info \
    "Skipping all further tests as only docs/ files were modified"
  exit 0
}

annotate --style info "Triggering tests"
buildkite-agent pipeline upload ci/buildkite-tests.yml
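The `annotate` wrapper above relies on `${BUILDKITE:-false}` expanding to the literal command `false` when the variable is unset, so the `buildkite-agent` call is skipped outside CI. A quick demonstration:

```bash
unset BUILDKITE
${BUILDKITE:-false} && echo annotated    # prints nothing (false short-circuits)

BUILDKITE=true
${BUILDKITE:-false} && echo annotated    # prints "annotated"
```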
ci/nits.sh
@@ -18,19 +18,22 @@ declare prints=(
 
 # Parts of the tree that are expected to be print free
 declare print_free_tree=(
-  'core/src'
-  'faucet/src'
-  'metrics/src'
-  'net-utils/src'
-  'runtime/src'
-  'sdk/bpf/rust/rust-utils'
-  'sdk/src'
-  'programs/bpf/rust'
-  'programs/stake/src'
-  'programs/vote/src'
+  ':core/src/**.rs'
+  ':faucet/src/**.rs'
+  ':ledger/src/**.rs'
+  ':metrics/src/**.rs'
+  ':net-utils/src/**.rs'
+  ':runtime/src/**.rs'
+  ':sdk/bpf/rust/rust-utils/**.rs'
+  ':sdk/**.rs'
+  ':programs/**.rs'
+  ':^**bin**.rs'
+  ':^**bench**.rs'
+  ':^**test**.rs'
+  ':^**/build.rs'
 )
 
-if _ git --no-pager grep -n --max-depth=0 "${prints[@]/#/-e }" -- "${print_free_tree[@]}"; then
+if _ git --no-pager grep -n "${prints[@]/#/-e}" -- "${print_free_tree[@]}"; then
   exit 1
 fi
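The rewritten `print_free_tree` entries above are git pathspecs rather than bare directory prefixes: `:dir/**.rs` matches `.rs` files recursively under `dir`, while the `:^…` entries exclude bin/bench/test/build files from the sweep. A hedged standalone illustration of the same mechanism (run from a repo root; paths are examples):

```bash
# List println! call sites under runtime/src, excluding anything with
# "test" in its path, using the same pathspec magic as ci/nits.sh
git --no-pager grep -n -e 'println!' -- ':runtime/src/**.rs' ':^**test**.rs'
```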
@@ -1,99 +0,0 @@
#!/usr/bin/env bash
set -e

cd "$(dirname "$0")/.."

me=$(basename "$0")

BOOK="book"

source ci/rust-version.sh stable
eval "$(ci/channel-info.sh)"

if [[ -n $PUBLISH_BOOK_TAG ]]; then
  CURRENT_TAG="$(git describe --tags)"
  COMMIT_TO_PUBLISH="$(git rev-list -n 1 "${PUBLISH_BOOK_TAG}")"

  # book is manually published at a specified release tag
  if [[ $PUBLISH_BOOK_TAG != "$CURRENT_TAG" ]]; then
    (
      cat <<EOF
steps:
  - trigger: "$BUILDKITE_PIPELINE_SLUG"
    async: true
    build:
      message: "$BUILDKITE_MESSAGE"
      commit: "$COMMIT_TO_PUBLISH"
      env:
        PUBLISH_BOOK_TAG: "$PUBLISH_BOOK_TAG"
EOF
    ) | buildkite-agent pipeline upload
    exit 0
  fi
  repo=git@github.com:solana-labs/book.git
else
  # book-edge and book-beta are published automatically on the tip of the branch
  case $CHANNEL in
  edge)
    repo=git@github.com:solana-labs/book-edge.git
    ;;
  beta)
    repo=git@github.com:solana-labs/book-beta.git
    ;;
  *)
    echo "--- publish skipped"
    exit 0
    ;;
  esac
  BOOK=$CHANNEL
fi

ci/docker-run.sh "$rust_stable_docker_image" bash -exc "book/build.sh"

echo --- create book repo
(
  set -x
  cd book/html/
  git init .
  git add ./* ./.nojekyll
  git config user.email maintainers@solana.com
  git config user.name "$me"
  git commit -m "${CI_COMMIT:-local}"
)

echo "--- publish $BOOK"
(
  cd book/html/
  git remote add origin $repo
  git fetch origin master
  if ! git diff HEAD origin/master --quiet; then
    git push -f origin HEAD:master
  else
    echo "Content unchanged, publish skipped"
  fi
)

echo --- update gitbook-cage
(
  if [[ -z $CI_BRANCH ]]; then
    exit 0
  fi

  set -x
  (
    . ci/rust-version.sh
    ci/docker-run.sh $rust_stable_docker_image make -Cbook -B svg
  )
  # make a local commit for the svgs
  git add -A -f book/src/.gitbook/assets/.
  if ! git diff-index --quiet HEAD; then
    git config user.email maintainers@solana.com
    git config user.name "$me"
    git commit -m "gitbook-cage update $(date -Is)"
    git push -f git@github.com:solana-labs/solana-gitbook-cage.git HEAD:refs/heads/"$CI_BRANCH"
    # pop off the local commit
    git reset --hard HEAD~
  fi
)

exit 0
@@ -4,7 +4,12 @@ set -e
 cd "$(dirname "$0")/.."
 eval "$(ci/channel-info.sh)"
 
-echo --- Creating tarball
+if [[ -n "$CI_TAG" ]]; then
+  CHANNEL_OR_TAG=$CI_TAG
+else
+  CHANNEL_OR_TAG=$CHANNEL
+fi
+
 (
   set -x
   sdk/bpf/scripts/package.sh
@@ -12,7 +17,7 @@ echo --- Creating tarball
 )
 
 echo --- AWS S3 Store
-if [[ -z $CHANNEL ]]; then
+if [[ -z $CHANNEL_OR_TAG ]]; then
   echo Skipped
 else
   (
@@ -24,7 +29,7 @@ else
       --volume "$PWD:/solana" \
       eremite/aws-cli:2018.12.18 \
       /usr/bin/s3cmd --acl-public put /solana/bpf-sdk.tar.bz2 \
-        s3://solana-sdk/"$CHANNEL"/bpf-sdk.tar.bz2
+        s3://solana-sdk/"$CHANNEL_OR_TAG"/bpf-sdk.tar.bz2
   )
 fi
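A side note on the `CHANNEL_OR_TAG` selection introduced above: because the test is simply whether `CI_TAG` is non-empty, it is equivalent to a default-value expansion, as this small demonstration shows (values are illustrative):

```bash
CI_TAG="" CHANNEL=edge
echo "${CI_TAG:-$CHANNEL}"    # edge (CI_TAG empty, falls back to the channel)

CI_TAG=v1.1.10
echo "${CI_TAG:-$CHANNEL}"    # v1.1.10 (the tag wins when set)
```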
ci/publish-docs.sh (new executable file, 32 lines)
@@ -0,0 +1,32 @@
#!/usr/bin/env bash
set -e

cd "$(dirname "$0")/.."

echo --- build docs
(
  set -x
  . ci/rust-version.sh stable
  ci/docker-run.sh "$rust_stable_docker_image" docs/build.sh
)

echo --- update gitbook-cage
if [[ -n $CI_BRANCH ]]; then
  (
    # make a local commit for the svgs and generated/updated markdown
    set -x
    git add -f docs/src
    if ! git diff-index --quiet HEAD; then
      git config user.email maintainers@solana.com
      git config user.name "$(basename "$0")"
      git commit -m "gitbook-cage update $(date -Is)"
      git push -f git@github.com:solana-labs/solana-gitbook-cage.git HEAD:refs/heads/"$CI_BRANCH"
      # pop off the local commit
      git reset --hard HEAD~
    fi
  )
else
  echo CI_BRANCH not set
fi

exit 0
@@ -45,7 +45,7 @@ linux)
   TARGET=x86_64-unknown-linux-gnu
   ;;
 windows)
-  TARGET=x86_64-pc-windows-msvc
+  TARGET=x86_64-pc-windows-gnu
   ;;
 *)
   echo CI_OS_NAME unset
@@ -53,7 +53,7 @@ windows)
   ;;
 esac
 
-echo --- Creating tarball
+echo --- Creating release tarball
 (
   set -x
   rm -rf solana-release/
@@ -71,16 +71,7 @@ echo --- Creating tarball
   export CHANNEL
 
   source ci/rust-version.sh stable
-  scripts/cargo-install-all.sh +"$rust_stable" --use-move solana-release
-
-  # Reduce the Windows archive size until
-  # https://github.com/appveyor/ci/issues/2997 is fixed
-  if [[ -n $APPVEYOR ]]; then
-    rm -f \
-      solana-release/bin/solana-validator.exe \
-      solana-release/bin/solana-bench-exchange.exe \
-
-  fi
+  scripts/cargo-install-all.sh +"$rust_stable" solana-release
 
   tar cvf solana-release-$TARGET.tar solana-release
   bzip2 solana-release-$TARGET.tar
@@ -89,18 +80,23 @@ echo --- Creating tarball
 )
 
 # Metrics tarball is platform agnostic, only publish it from Linux
-MAYBE_METRICS_TARBALL=
+MAYBE_TARBALLS=
 if [[ "$CI_OS_NAME" = linux ]]; then
   metrics/create-metrics-tarball.sh
-  MAYBE_METRICS_TARBALL=solana-metrics.tar.bz2
+  (
+    set -x
+    sdk/bpf/scripts/package.sh
+    [[ -f bpf-sdk.tar.bz2 ]]
+  )
+  MAYBE_TARBALLS="bpf-sdk.tar.bz2 solana-metrics.tar.bz2"
 fi
 
 source ci/upload-ci-artifact.sh
 
-for file in solana-release-$TARGET.tar.bz2 solana-release-$TARGET.yml solana-install-init-"$TARGET"* $MAYBE_METRICS_TARBALL; do
-  upload-ci-artifact "$file"
-
+for file in solana-release-$TARGET.tar.bz2 solana-release-$TARGET.yml solana-install-init-"$TARGET"* $MAYBE_TARBALLS; do
   if [[ -n $DO_NOT_PUBLISH_TAR ]]; then
+    upload-ci-artifact "$file"
     echo "Skipped $file due to DO_NOT_PUBLISH_TAR"
     continue
   fi
@@ -2,8 +2,10 @@
 set -e
 
 cd "$(dirname "$0")/.."
+# shellcheck source=multinode-demo/common.sh
+source multinode-demo/common.sh
 
-rm -f config/run/init-completed
+rm -rf config/run/init-completed config/ledger config/snapshot-ledger
 
 timeout 15 ./run.sh &
 pid=$!
@@ -17,6 +19,13 @@ while [[ ! -f config/run/init-completed ]]; do
   fi
 done
 
+while [[ $($solana_cli slot --commitment recent) -eq 0 ]]; do
+  sleep 1
+done
+
 curl -X POST -H 'Content-Type: application/json' -d '{"jsonrpc":"2.0","id":1, "method":"validatorExit"}' http://localhost:8899
 
 wait $pid
+
+$solana_ledger_tool create-snapshot --ledger config/ledger 1 config/snapshot-ledger
+cp config/ledger/genesis.tar.bz2 config/snapshot-ledger
+$solana_ledger_tool verify --ledger config/snapshot-ledger
@@ -1,28 +1,30 @@
 #
 # This file maintains the rust versions for use by CI.
 #
-# Build with stable rust, updating the stable toolchain if necessary:
-#   $ source ci/rust-version.sh stable
-#   $ cargo +"$rust_stable" build
-#
-# Build with nightly rust, updating the nightly toolchain if necessary:
-#   $ source ci/rust-version.sh nightly
-#   $ cargo +"$rust_nightly" build
-#
 # Obtain the environment variables without any automatic toolchain updating:
 #   $ source ci/rust-version.sh
 #
+# Obtain the environment variables updating both stable and nightly, only stable, or
+# only nightly:
+#   $ source ci/rust-version.sh all
+#   $ source ci/rust-version.sh stable
+#   $ source ci/rust-version.sh nightly
+#
+# Then to build with either stable or nightly:
+#   $ cargo +"$rust_stable" build
+#   $ cargo +"$rust_nightly" build
+#
 
 if [[ -n $RUST_STABLE_VERSION ]]; then
   stable_version="$RUST_STABLE_VERSION"
 else
-  stable_version=1.40.0
+  stable_version=1.43.0
 fi
 
 if [[ -n $RUST_NIGHTLY_VERSION ]]; then
   nightly_version="$RUST_NIGHTLY_VERSION"
 else
-  nightly_version=2019-12-19
+  nightly_version=2020-04-23
 fi
 
@@ -51,6 +53,10 @@ export rust_nightly_docker_image=solanalabs/rust-nightly:"$nightly_version"
   nightly)
     rustup_install "$rust_nightly"
     ;;
+  all)
+    rustup_install "$rust_stable"
+    rustup_install "$rust_nightly"
+    ;;
   *)
     echo "Note: ignoring unknown argument: $1"
     ;;
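Putting the documented interface together, a typical consumer does something like the following. The stable value matches the pin in this revision; the `nightly-` prefix on `$rust_nightly` is stated as an assumption, since the line that composes the toolchain name falls outside this hunk:

```bash
source ci/rust-version.sh all    # update both toolchains, export the variables

echo "$rust_stable"              # 1.43.0
echo "$rust_nightly"             # assumed: nightly-2020-04-23

cargo +"$rust_stable" build
cargo +"$rust_nightly" clippy
```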
@@ -2,7 +2,7 @@
 
 HERE="$(dirname "$0")"
 
-# shellcheck source=net/datacenter-node-install/utils.sh
+# shellcheck source=ci/setup-new-buildkite-agent/utils.sh
 source "$HERE"/utils.sh
 
 ensure_env || exit 1
Some files were not shown because too many files have changed in this diff.