Compare commits: dependabot ... v1.8 (934 commits)
934 commits are listed, beginning with 06850356ee and ending with bd9ce3590d.

Files changed:
@@ -12,7 +12,8 @@ export PS4="++"
# Restore target/ from the previous CI build on this machine
#
eval "$(ci/channel-info.sh)"
export CARGO_TARGET_CACHE=$HOME/cargo-target-cache/"$CHANNEL"-"$BUILDKITE_LABEL"
eval "$(ci/sbf-tools-info.sh)"
export CARGO_TARGET_CACHE=$HOME/cargo-target-cache/"$CHANNEL"-"$BUILDKITE_LABEL"-"$SBF_TOOLS_VERSION"
(
set -x
MAX_CACHE_SIZE=18 # gigabytes
.mergify.yml (23 changed lines)
@@ -65,20 +65,11 @@ pull_request_rules:
backport:
ignore_conflicts: true
branches:
- v1.5
- name: v1.6 backport
- v1.9

commands_restrictions:
# The author of copied PRs is the Mergify user.
# Restrict `copy` access to Core Contributors
copy:
conditions:
- label=v1.6
actions:
backport:
ignore_conflicts: true
branches:
- v1.6
- name: v1.7 backport
conditions:
- label=v1.7
actions:
backport:
ignore_conflicts: true
branches:
- v1.7
- author=@core-contributors
.travis.yml (13 changed lines)
@@ -29,6 +29,7 @@ jobs:
if: type IN (api, cron) OR tag IS present
name: "macOS release artifacts"
os: osx
osx_image: xcode12
language: rust
rust:
- stable
@@ -36,8 +37,12 @@ jobs:
- source ci/rust-version.sh
- PATH="/usr/local/opt/coreutils/libexec/gnubin:$PATH"
- readlink -f .
- brew install gnu-tar
- PATH="/usr/local/opt/gnu-tar/libexec/gnubin:$PATH"
- tar --version
script:
- source ci/env.sh
- rustup set profile default
- ci/publish-tarball.sh
deploy:
- provider: s3
@@ -60,6 +65,12 @@ jobs:
- <<: *release-artifacts
name: "Windows release artifacts"
os: windows
install:
- choco install openssl
- export OPENSSL_DIR="C:\Program Files\OpenSSL-Win64"
- source ci/rust-version.sh
- PATH="/usr/local/opt/coreutils/libexec/gnubin:$PATH"
- readlink -f .
# Linux release artifacts are still built by ci/buildkite-secondary.yml
#- <<: *release-artifacts
# name: "Linux release artifacts"
@@ -116,7 +127,7 @@ jobs:
if: type IN (push, pull_request) OR tag IS present
language: node_js
node_js:
- "node"
- "lts/*"

services:
- docker
Cargo.lock (generated, 2717 changed lines): diff suppressed because it is too large.
Cargo.toml (20 changed lines)
@@ -1,5 +1,8 @@
[workspace]
members = [
"accountsdb-plugin-interface",
"accountsdb-plugin-manager",
"accountsdb-plugin-postgres",
"accounts-cluster-bench",
"bench-exchange",
"bench-streamer",
@@ -9,6 +12,7 @@ members = [
"banks-client",
"banks-interface",
"banks-server",
"bloom",
"clap-utils",
"cli-config",
"cli-output",
@@ -39,19 +43,19 @@ members = [
"metrics",
"net-shaper",
"notifier",
"poh",
"poh-bench",
"program-test",
"programs/secp256k1",
"programs/bpf_loader",
"programs/budget",
"programs/compute-budget",
"programs/config",
"programs/exchange",
"programs/failure",
"programs/noop",
"programs/ownable",
"programs/ed25519",
"programs/secp256k1",
"programs/stake",
"programs/vote",
"remote-wallet",
"rpc",
"runtime",
"runtime/store-tool",
"sdk",
@@ -59,7 +63,6 @@ members = [
"sdk/cargo-test-bpf",
"scripts",
"stake-accounts",
"stake-monitor",
"sys-tuner",
"tokens",
"transaction-status",
@@ -76,5 +79,10 @@ exclude = [
"programs/bpf",
]

# TODO: Remove once the "simd-accel" feature from the reed-solomon-erasure
# dependency is supported on Apple M1. v2 of the feature resolver is needed to
# specify arch-specific features.
resolver = "2"

[profile.dev]
split-debuginfo = "unpacked"
@@ -1,6 +1,6 @@
<p align="center">
<a href="https://solana.com">
<img alt="Solana" src="https://i.imgur.com/uBVzyX3.png" width="250" />
<img alt="Solana" src="https://i.imgur.com/IKyzQ6T.png" width="250" />
</a>
</p>

@@ -19,7 +19,7 @@ $ source $HOME/.cargo/env
$ rustup component add rustfmt
```

Please sure you are always using the latest stable rust version by running:
Please make sure you are always using the latest stable rust version by running:

```bash
$ rustup update
@@ -51,11 +51,6 @@ $ cd solana
$ cargo build
```

## **4. Run a minimal local cluster.**
```bash
$ ./run.sh
```

# Testing

**Run the test suite:**
The "solana-account-decoder" package manifest:
@@ -1,6 +1,6 @@  version 1.7.0 → 1.8.17.
@@ -19,11 +19,10 @@ lazy_static = "1.4.0"  The solana-config-program, solana-sdk and
solana-vote-program path dependencies move from "=1.7.0" to "=1.8.17", the solana-stake-program
dependency is dropped, and spl-token-v2-0 (package "spl-token", version "=3.1.0", no-entrypoint)
becomes spl-token "=3.2.0" (no-entrypoint).
@@ -17,8 +17,10 @@ pub mod validator_info;  The solana_sdk imports are regrouped as
account::{ReadableAccount, WritableAccount}, clock::Epoch, fee_calculator::FeeCalculator and
pubkey::Pubkey, one item per line.
@@ -28,6 +30,7 @@  Adds pub const MAX_BASE58_BYTES: usize = 128; next to the StringAmount and
StringDecimals type aliases.
@@ -48,7 +51,7 @@  UiAccountEncoding additionally derives Eq and Hash.
@@ -60,6 +63,17 @@  Adds an encode_bs58<T: ReadableAccount>(account, data_slice_config) helper on
UiAccount that base58-encodes the (optionally sliced) account data, or returns
"error: data too large for bs58 encoding" when the data exceeds MAX_BASE58_BYTES.
@@ -68,33 +82,34 @@  UiAccount::encode now routes the Binary and Base58 arms through encode_bs58,
and the Base64, Base64Zstd and JsonParsed arms drop the redundant & from account.data() and
account.owner().
@@ -166,7 +181,7 @@  UiDataSliceConfig additionally derives Eq and Hash.
@@ -189,8 +204,10 @@  The test-module imports are merged into a single use { super::*, ... } block.
@@ -1,25 +1,27 @@  The top-level imports are merged into a single use { ... } block;
parse_token::{parse_token, spl_token_id_v2_0} becomes parse_token::{parse_token, spl_token_id},
and stake is now imported from solana_sdk. In the lazy_static block, STAKE_PROGRAM_ID is built
from stake::program::id() instead of solana_stake_program::id(), and TOKEN_PROGRAM_ID from
spl_token_id() instead of spl_token_id_v2_0().
@@ -112,12 +114,14 @@  The test-module imports are merged into a single use { ... } block.
@@ -1,9 +1,11 @@  The top-level imports (crate items, bincode, solana_sdk) are merged into a
single use { ... } block.
@@ -90,9 +92,7 @@  The test-module imports become use {super::*, bincode::serialize,
solana_sdk::pubkey::Pubkey};.
@@ -1,15 +1,19 @@  The top-level imports are merged into a single use { ... } block, and the
stake configuration now comes from solana_sdk::stake::config::{self as stake_config, Config as
StakeConfig} instead of solana_stake_program::config::Config; parse_config accordingly compares
the pubkey against stake_config::id() rather than solana_stake_program::config::id().
@@ -37,7 +41,7 @@  deserialize(&get_config_data(data).ok()?) drops the redundant &.
@@ -87,11 +91,10 @@  The test-module imports are merged into a single use { ... } block.
@@ -101,11 +104,7 @@ and @@ -125,7 +124,7 @@  The test assertions become
parse_config(stake_config_account.data(), &stake_config::id()) and
parse_config(validator_info_config_account.data(), &info_pubkey), dropping the redundant &.
@@ -1,7 +1,9 @@ and @@ -42,14 +44,16 @@  The top-level and test-module imports are merged into
single use { ... } blocks; no imported items change.
@@ -1,10 +1,14 @@  The top-level imports are merged into a single use { ... } block, and the
stake state types (Authorized, Delegation, Lockup, Meta, Stake, StakeState) now come from
solana_sdk::stake::state instead of solana_stake_program::stake_state.
@@ -132,8 +136,7 @@  The test-module imports become use {super::*, bincode::serialize};.
@@ -1,18 +1,20 @@ and @@ -212,10 +214,12 @@  The top-level and test-module imports are merged
into single use { ... } blocks; no imported items change.
@@ -1,35 +1,37 @@  The top-level imports are merged into a single use { ... } block and the
spl_token_v2_0 crate alias becomes plain spl_token. The pubkey-conversion helpers are renamed to
match: spl_token_id_v2_0() → spl_token_id(), spl_token_v2_0_native_mint() →
spl_token_native_mint(), spl_token_v2_0_pubkey() → spl_token_pubkey(), and
pubkey_from_spl_token_v2_0() → pubkey_from_spl_token(); their doc comments are updated to the new
names. The helpers still just convert between solana_sdk::pubkey::Pubkey and the SPL Pubkey type
via Pubkey::new_from_array(... .to_bytes()).
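As a quick illustration of the renamed helpers, here is a small usage sketch. It is hypothetical
driver code, not part of the diff; it assumes the solana-account-decoder and solana-sdk crates
from this workspace are available as dependencies.

```rust
use solana_account_decoder::parse_token::{pubkey_from_spl_token, spl_token_pubkey};
use solana_sdk::pubkey::Pubkey;

fn main() {
    // A runtime-side pubkey (the solana_sdk type).
    let sdk_key = Pubkey::new_unique();

    // Convert it to the SPL-token program's Pubkey type; the bytes are identical,
    // only the Rust type differs.
    let spl_key = spl_token_pubkey(&sdk_key);

    // ...and convert it back.
    let round_trip = pubkey_from_spl_token(&spl_key);
    assert_eq!(sdk_key, round_trip);
}
```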
@@ -1,9 +1,11 @@  The top-level imports are merged into a single use { ... } block; no imported
items change.
@@ -121,8 +123,7 @@  The test-module imports become use {super::*,
solana_vote_program::vote_state::VoteStateVersions};.
The "solana-accounts-bench" package manifest:
@@ -2,7 +2,7 @@  version 1.7.0 → 1.8.17.
@@ -11,11 +11,11 @@ publish = false  The solana-logger, solana-runtime, solana-measure,
solana-sdk and solana-version path dependencies move from "=1.7.0" to "=1.8.17".
@@ -1,16 +1,19 @@  The top-level imports are merged into a single use { ... } block, and
accounts_db::AccountShrinkThreshold is added to the solana_runtime imports.
@@ -64,6 +67,8 @@  The Accounts setup call (which passes &ClusterType::Testnet,
AccountSecondaryIndexes::default(), false) gains two arguments: AccountShrinkThreshold::default()
and None.
@@ -119,6 +124,8 @@  A second call later in main() gains two more arguments; its argument list now
ends with solana_sdk::clock::Slot::default(), &ancestors, None, false, None, and its result is
still compared against results_store.
The "solana-accounts-cluster-bench" package manifest:
@@ -2,7 +2,7 @@  version 1.7.0 → 1.8.17.
@@ -13,23 +13,24 @@ clap = "2.33.1"  All of its solana-* path dependencies (account-decoder,
clap-utils, client, core, faucet, gossip, logger, measure, net-utils, runtime, sdk,
transaction-status, version, plus the solana-local-cluster dev-dependency) move from "=1.7.0" to
"=1.8.17"; a solana-streamer = { path = "../streamer", version = "=1.8.17" } dependency is added;
and spl-token-v2-0 (package "spl-token", version "=3.1.0") becomes spl-token "=3.2.0".
@@ -1,35 +1,38 @@  The top-level imports are merged into a single use { ... } block;
spl_token_v2_0_pubkey, spl_token_v2_0_instruction and inline_spl_token_v2_0 become
spl_token_pubkey, spl_token_instruction and inline_spl_token, and
solana_streamer::socket::SocketAddrSpace is added.
@@ -55,7 +58,7 @@  request_airdrop_transaction(faucet_addr, ...) drops a redundant &.
@@ -273,7 +276,7 @@, @@ -290,12 +293,12 @@, @@ -321,7 +324,7 @@ and @@ -329,12 +332,12 @@  In
make_create_message and make_close_message the program id comes from inline_spl_token::id(), and
the initialize_account / close_account instructions are built with spl_token_instruction,
spl_token::instruction::*, spl_token::id() and spl_token_pubkey(...) in place of the *_v2_0 names.
@@ -363,7 +366,7 @@ through @@ -475,21 +479,25 @@  run_accounts_bench takes close_nth_batch: u64
instead of close_nth: u64, and accounts are now closed one whole batch at a time: every
close_nth_batch creation batches, the earliest remaining batch of created accounts is closed,
tracked through seed_tracker.max_closed (seeds in max_closed_seed..expected_closed). A
"// Create accounts" comment is added and the redundant & on payer_keypairs[...] is dropped.
@@ -572,14 +580,14 @@ through @@ -685,7 +694,7 @@  The close_nth CLI argument becomes
close_nth_batch, its help text is rewritten for the new batch semantics (and notes it should be
> 1 so close transactions are not submitted before the corresponding create transactions have
been confirmed), and discover() is called with an additional SocketAddrSpace::Unspecified
argument.
@@ -694,18 +703,20 @@ through @@ -731,7 +742,7 @@  In the test module, the imports are merged
into a single use { ... } block, ValidatorConfig::default() becomes
ValidatorConfig::default_for_test(), LocalCluster::new takes SocketAddrSpace::Unspecified, and
the test sets close_nth_batch = 100.
17
accountsdb-plugin-interface/Cargo.toml
Normal file
@@ -0,0 +1,17 @@
|
||||
[package]
|
||||
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
|
||||
edition = "2018"
|
||||
name = "solana-accountsdb-plugin-interface"
|
||||
description = "The Solana AccountsDb plugin interface."
|
||||
version = "1.8.17"
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
license = "Apache-2.0"
|
||||
homepage = "https://solana.com/"
|
||||
documentation = "https://docs.rs/solana-accountsdb-plugin-interface"
|
||||
|
||||
[dependencies]
|
||||
log = "0.4.11"
|
||||
thiserror = "1.0.29"
|
||||
|
||||
[package.metadata.docs.rs]
|
||||
targets = ["x86_64-unknown-linux-gnu"]
|
20
accountsdb-plugin-interface/README.md
Normal file
@@ -0,0 +1,20 @@
|
||||
<p align="center">
|
||||
<a href="https://solana.com">
|
||||
<img alt="Solana" src="https://i.imgur.com/IKyzQ6T.png" width="250" />
|
||||
</a>
|
||||
</p>
|
||||
|
||||
# Solana AccountsDb Plugin Interface
|
||||
|
||||
This crate enables an AccountsDb plugin to be plugged into the Solana Validator runtime to take actions
|
||||
at the time of each account update; for example, saving the account state to an external database. The plugin must implement the `AccountsDbPlugin` trait. Please see `accountsdb_plugin_interface.rs` for the interface definition.
|
||||
|
||||
The plugin should produce a `cdylib` dynamic library, which must expose a `C` function `_create_plugin()` that
|
||||
instantiates the implementation of the interface.
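
For orientation, here is a minimal sketch of such a plugin crate. The crate and type names below are hypothetical; only the trait methods and the `_create_plugin()` contract come from this interface.

```rust
use solana_accountsdb_plugin_interface::accountsdb_plugin_interface::{
    AccountsDbPlugin, ReplicaAccountInfoVersions, Result, SlotStatus,
};

/// A do-nothing plugin used only to illustrate the shape of an implementation.
#[derive(Debug, Default)]
struct NoopPlugin;

impl AccountsDbPlugin for NoopPlugin {
    fn name(&self) -> &'static str {
        "noop-plugin"
    }

    // `on_load` / `on_unload` have default implementations and are omitted here.

    fn update_account(
        &mut self,
        _account: ReplicaAccountInfoVersions,
        _slot: u64,
        _is_startup: bool,
    ) -> Result<()> {
        Ok(())
    }

    fn notify_end_of_startup(&mut self) -> Result<()> {
        Ok(())
    }

    fn update_slot_status(
        &mut self,
        _slot: u64,
        _parent: Option<u64>,
        _status: SlotStatus,
    ) -> Result<()> {
        Ok(())
    }
}

/// The symbol the validator resolves from the `cdylib`; it hands back a raw
/// pointer that the loader converts into a `Box<dyn AccountsDbPlugin>`.
#[no_mangle]
#[allow(improper_ctypes_definitions)]
pub unsafe extern "C" fn _create_plugin() -> *mut dyn AccountsDbPlugin {
    let plugin: Box<dyn AccountsDbPlugin> = Box::new(NoopPlugin::default());
    Box::into_raw(plugin)
}
```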
|
||||
|
||||
The `solana-accountsdb-plugin-postgres` crate provides an example of how to create a plugin which saves the accounts data into an
|
||||
external PostgreSQL database.
|
||||
|
||||
More information about Solana is available in the [Solana documentation](https://docs.solana.com/).
|
||||
|
||||
Still have questions? Ask us on [Discord](https://discordapp.com/invite/pquxPsq)
|
143
accountsdb-plugin-interface/src/accountsdb_plugin_interface.rs
Normal file
@@ -0,0 +1,143 @@
|
||||
/// The interface for AccountsDb plugins. A plugin must implement
|
||||
/// the AccountsDbPlugin trait to work with the runtime.
|
||||
/// In addition, the dynamic library must export a "C" function _create_plugin which
|
||||
/// creates the implementation of the plugin.
|
||||
use {
|
||||
std::{any::Any, error, io},
|
||||
thiserror::Error,
|
||||
};
|
||||
|
||||
impl Eq for ReplicaAccountInfo<'_> {}
|
||||
|
||||
#[derive(Clone, PartialEq, Debug)]
|
||||
/// Information about an account being updated
|
||||
pub struct ReplicaAccountInfo<'a> {
|
||||
/// The Pubkey for the account
|
||||
pub pubkey: &'a [u8],
|
||||
|
||||
/// The lamports for the account
|
||||
pub lamports: u64,
|
||||
|
||||
/// The Pubkey of the owner program account
|
||||
pub owner: &'a [u8],
|
||||
|
||||
/// This account's data contains a loaded program (and is now read-only)
|
||||
pub executable: bool,
|
||||
|
||||
/// The epoch at which this account will next owe rent
|
||||
pub rent_epoch: u64,
|
||||
|
||||
/// The data held in this account.
|
||||
pub data: &'a [u8],
|
||||
|
||||
/// A global monotonically increasing atomic number, which can be used
|
||||
/// to tell the order of the account update. For example, when an
|
||||
/// account is updated in the same slot multiple times, the update
|
||||
/// with higher write_version should supersede the one with lower
|
||||
/// write_version.
|
||||
pub write_version: u64,
|
||||
}
|
||||
|
||||
/// A wrapper to future-proof ReplicaAccountInfo handling.
|
||||
/// If there were a change to the structure of ReplicaAccountInfo,
|
||||
/// there would be a new enum entry for the newer version, forcing
|
||||
/// plugin implementations to handle the change.
|
||||
pub enum ReplicaAccountInfoVersions<'a> {
|
||||
V0_0_1(&'a ReplicaAccountInfo<'a>),
|
||||
}
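// Illustrative sketch, not part of the original interface: a plugin unpacks the
// versioned wrapper with an exhaustive match, so a future variant for a newer
// `ReplicaAccountInfo` layout becomes a compile error in the plugin rather than
// silently misread data.
#[allow(dead_code)]
fn example_account_lamports(account: ReplicaAccountInfoVersions) -> u64 {
    match account {
        ReplicaAccountInfoVersions::V0_0_1(info) => info.lamports,
    }
}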
|
||||
|
||||
/// Errors returned by plugin calls
|
||||
#[derive(Error, Debug)]
|
||||
pub enum AccountsDbPluginError {
|
||||
/// Error opening the configuration file; for example, when the file
|
||||
/// is not found or when the validator process has no permission to read it.
|
||||
#[error("Error opening config file. Error detail: ({0}).")]
|
||||
ConfigFileOpenError(#[from] io::Error),
|
||||
|
||||
/// Error in reading the content of the config file or the content
|
||||
/// is not in the expected format.
|
||||
#[error("Error reading config file. Error message: ({msg})")]
|
||||
ConfigFileReadError { msg: String },
|
||||
|
||||
/// Error when updating the account.
|
||||
#[error("Error updating account. Error message: ({msg})")]
|
||||
AccountsUpdateError { msg: String },
|
||||
|
||||
/// Error when updating the slot status
|
||||
#[error("Error updating slot status. Error message: ({msg})")]
|
||||
SlotStatusUpdateError { msg: String },
|
||||
|
||||
/// Any custom error defined by the plugin.
|
||||
#[error("Plugin-defined custom error. Error message: ({0})")]
|
||||
Custom(Box<dyn error::Error + Send + Sync>),
|
||||
}
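// Illustrative sketch, not part of the original interface: a plugin can report
// failures either through the structured variants above or through `Custom`,
// which boxes any error type that is `Send + Sync`. The message here is made up.
#[allow(dead_code)]
fn example_custom_error() -> Result<()> {
    Err(AccountsDbPluginError::Custom(Box::new(io::Error::new(
        io::ErrorKind::Other,
        "hypothetical datastore connection failure",
    ))))
}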
|
||||
|
||||
/// The current status of a slot
|
||||
#[derive(Debug, Clone)]
|
||||
pub enum SlotStatus {
|
||||
/// The highest slot of the heaviest fork processed by the node. Ledger state at this slot is
|
||||
/// not derived from a confirmed or finalized block, but if multiple forks are present, is from
|
||||
/// the fork the validator believes is most likely to finalize.
|
||||
Processed,
|
||||
|
||||
/// The highest slot having reached max vote lockout.
|
||||
Rooted,
|
||||
|
||||
/// The highest slot that has been voted on by a supermajority of the cluster, i.e. it is confirmed.
|
||||
Confirmed,
|
||||
}
|
||||
|
||||
impl SlotStatus {
|
||||
pub fn as_str(&self) -> &'static str {
|
||||
match self {
|
||||
SlotStatus::Confirmed => "confirmed",
|
||||
SlotStatus::Processed => "processed",
|
||||
SlotStatus::Rooted => "rooted",
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub type Result<T> = std::result::Result<T, AccountsDbPluginError>;
|
||||
|
||||
/// Defines an AccountsDb plugin, to stream data from the runtime.
|
||||
/// AccountsDb plugins must describe desired behavior for load and unload,
|
||||
/// as well as how they will handle streamed data.
|
||||
pub trait AccountsDbPlugin: Any + Send + Sync + std::fmt::Debug {
|
||||
fn name(&self) -> &'static str;
|
||||
|
||||
/// The callback called when a plugin is loaded by the system,
|
||||
/// used for doing whatever initialization is required by the plugin.
|
||||
/// The _config_file contains the name of the config file. The config must be in JSON format and
|
||||
/// include a field "libpath" indicating the full path
|
||||
/// name of the shared library implementing this interface.
|
||||
fn on_load(&mut self, _config_file: &str) -> Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// The callback called right before a plugin is unloaded by the system
|
||||
/// Used for doing cleanup before unload.
|
||||
fn on_unload(&mut self) {}
|
||||
|
||||
/// Called when an account is updated at a slot.
|
||||
/// When `is_startup` is true, it indicates the account is loaded from
|
||||
/// snapshots when the validator starts up. When `is_startup` is false,
|
||||
/// the account is updated during transaction processing.
|
||||
fn update_account(
|
||||
&mut self,
|
||||
account: ReplicaAccountInfoVersions,
|
||||
slot: u64,
|
||||
is_startup: bool,
|
||||
) -> Result<()>;
|
||||
|
||||
/// Called once all accounts have been notified at the end of validator startup.
|
||||
fn notify_end_of_startup(&mut self) -> Result<()>;
|
||||
|
||||
/// Called when a slot status is updated
|
||||
fn update_slot_status(
|
||||
&mut self,
|
||||
slot: u64,
|
||||
parent: Option<u64>,
|
||||
status: SlotStatus,
|
||||
) -> Result<()>;
|
||||
}
|
1
accountsdb-plugin-interface/src/lib.rs
Normal file
@@ -0,0 +1 @@
|
||||
pub mod accountsdb_plugin_interface;
|
30
accountsdb-plugin-manager/Cargo.toml
Normal file
@@ -0,0 +1,30 @@
|
||||
[package]
|
||||
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
|
||||
edition = "2018"
|
||||
name = "solana-accountsdb-plugin-manager"
|
||||
description = "The Solana AccountsDb plugin manager."
|
||||
version = "1.8.17"
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
license = "Apache-2.0"
|
||||
homepage = "https://solana.com/"
|
||||
documentation = "https://docs.rs/solana-validator"
|
||||
|
||||
[dependencies]
|
||||
bs58 = "0.4.0"
|
||||
crossbeam-channel = "0.4"
|
||||
libloading = "0.7.0"
|
||||
log = "0.4.11"
|
||||
serde = "1.0.130"
|
||||
serde_derive = "1.0.103"
|
||||
serde_json = "1.0.67"
|
||||
solana-accountsdb-plugin-interface = { path = "../accountsdb-plugin-interface", version = "=1.8.17" }
|
||||
solana-logger = { path = "../logger", version = "=1.8.17" }
|
||||
solana-measure = { path = "../measure", version = "=1.8.17" }
|
||||
solana-metrics = { path = "../metrics", version = "=1.8.17" }
|
||||
solana-rpc = { path = "../rpc", version = "=1.8.17" }
|
||||
solana-runtime = { path = "../runtime", version = "=1.8.17" }
|
||||
solana-sdk = { path = "../sdk", version = "=1.8.17" }
|
||||
thiserror = "1.0.21"
|
||||
|
||||
[package.metadata.docs.rs]
|
||||
targets = ["x86_64-unknown-linux-gnu"]
|
227
accountsdb-plugin-manager/src/accounts_update_notifier.rs
Normal file
@@ -0,0 +1,227 @@
|
||||
/// Module responsible for notifying plugins of account updates
|
||||
use {
|
||||
crate::accountsdb_plugin_manager::AccountsDbPluginManager,
|
||||
log::*,
|
||||
solana_accountsdb_plugin_interface::accountsdb_plugin_interface::{
|
||||
ReplicaAccountInfo, ReplicaAccountInfoVersions, SlotStatus,
|
||||
},
|
||||
solana_measure::measure::Measure,
|
||||
solana_metrics::*,
|
||||
solana_runtime::{
|
||||
accounts_update_notifier_interface::AccountsUpdateNotifierInterface,
|
||||
append_vec::{StoredAccountMeta, StoredMeta},
|
||||
},
|
||||
solana_sdk::{
|
||||
account::{AccountSharedData, ReadableAccount},
|
||||
clock::Slot,
|
||||
},
|
||||
std::sync::{Arc, RwLock},
|
||||
};
|
||||
#[derive(Debug)]
|
||||
pub(crate) struct AccountsUpdateNotifierImpl {
|
||||
plugin_manager: Arc<RwLock<AccountsDbPluginManager>>,
|
||||
}
|
||||
|
||||
impl AccountsUpdateNotifierInterface for AccountsUpdateNotifierImpl {
|
||||
fn notify_account_update(&self, slot: Slot, meta: &StoredMeta, account: &AccountSharedData) {
|
||||
if let Some(account_info) = self.accountinfo_from_shared_account_data(meta, account) {
|
||||
self.notify_plugins_of_account_update(account_info, slot, false);
|
||||
}
|
||||
}
|
||||
|
||||
fn notify_account_restore_from_snapshot(&self, slot: Slot, account: &StoredAccountMeta) {
|
||||
let mut measure_all = Measure::start("accountsdb-plugin-notify-account-restore-all");
|
||||
let mut measure_copy = Measure::start("accountsdb-plugin-copy-stored-account-info");
|
||||
|
||||
let account = self.accountinfo_from_stored_account_meta(account);
|
||||
measure_copy.stop();
|
||||
|
||||
inc_new_counter_debug!(
|
||||
"accountsdb-plugin-copy-stored-account-info-us",
|
||||
measure_copy.as_us() as usize,
|
||||
100000,
|
||||
100000
|
||||
);
|
||||
|
||||
if let Some(account_info) = account {
|
||||
self.notify_plugins_of_account_update(account_info, slot, true);
|
||||
}
|
||||
measure_all.stop();
|
||||
|
||||
inc_new_counter_debug!(
|
||||
"accountsdb-plugin-notify-account-restore-all-us",
|
||||
measure_all.as_us() as usize,
|
||||
100000,
|
||||
100000
|
||||
);
|
||||
}
|
||||
|
||||
fn notify_end_of_restore_from_snapshot(&self) {
|
||||
let mut plugin_manager = self.plugin_manager.write().unwrap();
|
||||
if plugin_manager.plugins.is_empty() {
|
||||
return;
|
||||
}
|
||||
|
||||
for plugin in plugin_manager.plugins.iter_mut() {
|
||||
let mut measure = Measure::start("accountsdb-plugin-end-of-restore-from-snapshot");
|
||||
match plugin.notify_end_of_startup() {
|
||||
Err(err) => {
|
||||
error!(
|
||||
"Failed to notify the end of restore from snapshot, error: {} to plugin {}",
|
||||
err,
|
||||
plugin.name()
|
||||
)
|
||||
}
|
||||
Ok(_) => {
|
||||
trace!(
|
||||
"Successfully notified the end of restore from snapshot to plugin {}",
|
||||
plugin.name()
|
||||
);
|
||||
}
|
||||
}
|
||||
measure.stop();
|
||||
inc_new_counter_debug!(
|
||||
"accountsdb-plugin-end-of-restore-from-snapshot",
|
||||
measure.as_us() as usize
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
fn notify_slot_confirmed(&self, slot: Slot, parent: Option<Slot>) {
|
||||
self.notify_slot_status(slot, parent, SlotStatus::Confirmed);
|
||||
}
|
||||
|
||||
fn notify_slot_processed(&self, slot: Slot, parent: Option<Slot>) {
|
||||
self.notify_slot_status(slot, parent, SlotStatus::Processed);
|
||||
}
|
||||
|
||||
fn notify_slot_rooted(&self, slot: Slot, parent: Option<Slot>) {
|
||||
self.notify_slot_status(slot, parent, SlotStatus::Rooted);
|
||||
}
|
||||
}
|
||||
|
||||
impl AccountsUpdateNotifierImpl {
|
||||
pub fn new(plugin_manager: Arc<RwLock<AccountsDbPluginManager>>) -> Self {
|
||||
AccountsUpdateNotifierImpl { plugin_manager }
|
||||
}
|
||||
|
||||
fn accountinfo_from_shared_account_data<'a>(
|
||||
&self,
|
||||
meta: &'a StoredMeta,
|
||||
account: &'a AccountSharedData,
|
||||
) -> Option<ReplicaAccountInfo<'a>> {
|
||||
Some(ReplicaAccountInfo {
|
||||
pubkey: meta.pubkey.as_ref(),
|
||||
lamports: account.lamports(),
|
||||
owner: account.owner().as_ref(),
|
||||
executable: account.executable(),
|
||||
rent_epoch: account.rent_epoch(),
|
||||
data: account.data(),
|
||||
write_version: meta.write_version,
|
||||
})
|
||||
}
|
||||
|
||||
fn accountinfo_from_stored_account_meta<'a>(
|
||||
&self,
|
||||
stored_account_meta: &'a StoredAccountMeta,
|
||||
) -> Option<ReplicaAccountInfo<'a>> {
|
||||
Some(ReplicaAccountInfo {
|
||||
pubkey: stored_account_meta.meta.pubkey.as_ref(),
|
||||
lamports: stored_account_meta.account_meta.lamports,
|
||||
owner: stored_account_meta.account_meta.owner.as_ref(),
|
||||
executable: stored_account_meta.account_meta.executable,
|
||||
rent_epoch: stored_account_meta.account_meta.rent_epoch,
|
||||
data: stored_account_meta.data,
|
||||
write_version: stored_account_meta.meta.write_version,
|
||||
})
|
||||
}
|
||||
|
||||
fn notify_plugins_of_account_update(
|
||||
&self,
|
||||
account: ReplicaAccountInfo,
|
||||
slot: Slot,
|
||||
is_startup: bool,
|
||||
) {
|
||||
let mut measure2 = Measure::start("accountsdb-plugin-notify_plugins_of_account_update");
|
||||
let mut plugin_manager = self.plugin_manager.write().unwrap();
|
||||
|
||||
if plugin_manager.plugins.is_empty() {
|
||||
return;
|
||||
}
|
||||
for plugin in plugin_manager.plugins.iter_mut() {
|
||||
let mut measure = Measure::start("accountsdb-plugin-update-account");
|
||||
match plugin.update_account(
|
||||
ReplicaAccountInfoVersions::V0_0_1(&account),
|
||||
slot,
|
||||
is_startup,
|
||||
) {
|
||||
Err(err) => {
|
||||
error!(
|
||||
"Failed to update account {} at slot {}, error: {} to plugin {}",
|
||||
bs58::encode(account.pubkey).into_string(),
|
||||
slot,
|
||||
err,
|
||||
plugin.name()
|
||||
)
|
||||
}
|
||||
Ok(_) => {
|
||||
trace!(
|
||||
"Successfully updated account {} at slot {} to plugin {}",
|
||||
bs58::encode(account.pubkey).into_string(),
|
||||
slot,
|
||||
plugin.name()
|
||||
);
|
||||
}
|
||||
}
|
||||
measure.stop();
|
||||
inc_new_counter_debug!(
|
||||
"accountsdb-plugin-update-account-us",
|
||||
measure.as_us() as usize,
|
||||
100000,
|
||||
100000
|
||||
);
|
||||
}
|
||||
measure2.stop();
|
||||
inc_new_counter_debug!(
|
||||
"accountsdb-plugin-notify_plugins_of_account_update-us",
|
||||
measure2.as_us() as usize,
|
||||
100000,
|
||||
100000
|
||||
);
|
||||
}
|
||||
|
||||
pub fn notify_slot_status(&self, slot: Slot, parent: Option<Slot>, slot_status: SlotStatus) {
|
||||
let mut plugin_manager = self.plugin_manager.write().unwrap();
|
||||
if plugin_manager.plugins.is_empty() {
|
||||
return;
|
||||
}
|
||||
|
||||
for plugin in plugin_manager.plugins.iter_mut() {
|
||||
let mut measure = Measure::start("accountsdb-plugin-update-slot");
|
||||
match plugin.update_slot_status(slot, parent, slot_status.clone()) {
|
||||
Err(err) => {
|
||||
error!(
|
||||
"Failed to update slot status at slot {}, error: {} to plugin {}",
|
||||
slot,
|
||||
err,
|
||||
plugin.name()
|
||||
)
|
||||
}
|
||||
Ok(_) => {
|
||||
trace!(
|
||||
"Successfully updated slot status at slot {} to plugin {}",
|
||||
slot,
|
||||
plugin.name()
|
||||
);
|
||||
}
|
||||
}
|
||||
measure.stop();
|
||||
inc_new_counter_debug!(
|
||||
"accountsdb-plugin-update-slot-us",
|
||||
measure.as_us() as usize,
|
||||
1000,
|
||||
1000
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
55
accountsdb-plugin-manager/src/accountsdb_plugin_manager.rs
Normal file
@@ -0,0 +1,55 @@
|
||||
/// Managing the AccountsDb plugins
|
||||
use {
|
||||
libloading::{Library, Symbol},
|
||||
log::*,
|
||||
solana_accountsdb_plugin_interface::accountsdb_plugin_interface::AccountsDbPlugin,
|
||||
std::error::Error,
|
||||
};
|
||||
|
||||
#[derive(Default, Debug)]
|
||||
pub struct AccountsDbPluginManager {
|
||||
pub plugins: Vec<Box<dyn AccountsDbPlugin>>,
|
||||
libs: Vec<Library>,
|
||||
}
|
||||
|
||||
impl AccountsDbPluginManager {
|
||||
pub fn new() -> Self {
|
||||
AccountsDbPluginManager {
|
||||
plugins: Vec::default(),
|
||||
libs: Vec::default(),
|
||||
}
|
||||
}
|
||||
|
||||
/// # Safety
|
||||
///
|
||||
/// This function loads the dynamically linked library specified in the path. The library
|
||||
/// must perform any necessary initialization.
|
||||
pub unsafe fn load_plugin(
|
||||
&mut self,
|
||||
libpath: &str,
|
||||
config_file: &str,
|
||||
) -> Result<(), Box<dyn Error>> {
|
||||
type PluginConstructor = unsafe fn() -> *mut dyn AccountsDbPlugin;
|
||||
let lib = Library::new(libpath)?;
|
||||
let constructor: Symbol<PluginConstructor> = lib.get(b"_create_plugin")?;
|
||||
let plugin_raw = constructor();
|
||||
let mut plugin = Box::from_raw(plugin_raw);
|
||||
plugin.on_load(config_file)?;
|
||||
self.plugins.push(plugin);
|
||||
self.libs.push(lib);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Unload all plugins and loaded plugin libraries, making sure to fire
|
||||
/// their `on_unload()` methods so they can do any necessary cleanup.
|
||||
pub fn unload(&mut self) {
|
||||
for mut plugin in self.plugins.drain(..) {
|
||||
info!("Unloading plugin for {:?}", plugin.name());
|
||||
plugin.on_unload();
|
||||
}
|
||||
|
||||
for lib in self.libs.drain(..) {
|
||||
drop(lib);
|
||||
}
|
||||
}
|
||||
}
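
// Illustrative sketch, not part of the original change: how a caller drives the
// manager. `load_plugin` is unsafe because the loaded library runs arbitrary
// initialization code, so the caller is responsible for trusting the library at
// `libpath`. The paths below are hypothetical.
#[allow(dead_code)]
fn example_manager_usage() -> Result<(), Box<dyn Error>> {
    let mut manager = AccountsDbPluginManager::new();
    unsafe {
        manager.load_plugin(
            "/path/to/libmy_accountsdb_plugin.so",
            "/path/to/my_plugin_config.json",
        )?;
    }
    // ... the validator runs and each loaded plugin receives account and slot updates ...
    manager.unload();
    Ok(())
}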
|
157
accountsdb-plugin-manager/src/accountsdb_plugin_service.rs
Normal file
@@ -0,0 +1,157 @@
|
||||
use {
|
||||
crate::{
|
||||
accounts_update_notifier::AccountsUpdateNotifierImpl,
|
||||
accountsdb_plugin_manager::AccountsDbPluginManager,
|
||||
slot_status_observer::SlotStatusObserver,
|
||||
},
|
||||
crossbeam_channel::Receiver,
|
||||
log::*,
|
||||
serde_json,
|
||||
solana_rpc::optimistically_confirmed_bank_tracker::BankNotification,
|
||||
solana_runtime::accounts_update_notifier_interface::AccountsUpdateNotifier,
|
||||
std::{
|
||||
fs::File,
|
||||
io::Read,
|
||||
path::{Path, PathBuf},
|
||||
sync::{Arc, RwLock},
|
||||
thread,
|
||||
},
|
||||
thiserror::Error,
|
||||
};
|
||||
|
||||
#[derive(Error, Debug)]
|
||||
pub enum AccountsdbPluginServiceError {
|
||||
#[error("Cannot open the the plugin config file")]
|
||||
CannotOpenConfigFile(String),
|
||||
|
||||
#[error("Cannot read the the plugin config file")]
|
||||
CannotReadConfigFile(String),
|
||||
|
||||
#[error("The config file is not in a valid Json format")]
|
||||
InvalidConfigFileFormat(String),
|
||||
|
||||
#[error("Plugin library path is not specified in the config file")]
|
||||
LibPathNotSet,
|
||||
|
||||
#[error("Invalid plugin path")]
|
||||
InvalidPluginPath,
|
||||
|
||||
#[error("Cannot load plugin shared library")]
|
||||
PluginLoadError(String),
|
||||
}
|
||||
|
||||
/// The service managing the AccountsDb plugin workflow.
|
||||
pub struct AccountsDbPluginService {
|
||||
slot_status_observer: SlotStatusObserver,
|
||||
plugin_manager: Arc<RwLock<AccountsDbPluginManager>>,
|
||||
accounts_update_notifier: AccountsUpdateNotifier,
|
||||
}
|
||||
|
||||
impl AccountsDbPluginService {
|
||||
/// Creates and returns the AccountsDbPluginService.
|
||||
/// # Arguments
|
||||
/// * `confirmed_bank_receiver` - The receiver for confirmed bank notifications
/// * `accountsdb_plugin_config_files` - The config file paths for the plugins. Each
/// config file controls the plugin responsible for transporting the data to external
/// data stores. It is defined in JSON format.
|
||||
/// The `libpath` field should point to the full path of the dynamic shared library
/// (.so file) to be loaded. The shared library must implement the `AccountsDbPlugin`
/// trait, and it shall export a `C` function `_create_plugin` which creates the
/// implementation of `AccountsDbPlugin` and returns it to the caller.
/// The definition of the remaining JSON fields is up to the concrete plugin implementation;
/// they are usually used to configure the connection information for the external data store.
|
||||
|
||||
pub fn new(
|
||||
confirmed_bank_receiver: Receiver<BankNotification>,
|
||||
accountsdb_plugin_config_files: &[PathBuf],
|
||||
) -> Result<Self, AccountsdbPluginServiceError> {
|
||||
info!(
|
||||
"Starting AccountsDbPluginService from config files: {:?}",
|
||||
accountsdb_plugin_config_files
|
||||
);
|
||||
let mut plugin_manager = AccountsDbPluginManager::new();
|
||||
|
||||
for accountsdb_plugin_config_file in accountsdb_plugin_config_files {
|
||||
Self::load_plugin(&mut plugin_manager, accountsdb_plugin_config_file)?;
|
||||
}
|
||||
|
||||
let plugin_manager = Arc::new(RwLock::new(plugin_manager));
|
||||
let accounts_update_notifier = Arc::new(RwLock::new(AccountsUpdateNotifierImpl::new(
|
||||
plugin_manager.clone(),
|
||||
)));
|
||||
let slot_status_observer =
|
||||
SlotStatusObserver::new(confirmed_bank_receiver, accounts_update_notifier.clone());
|
||||
|
||||
info!("Started AccountsDbPluginService");
|
||||
Ok(AccountsDbPluginService {
|
||||
slot_status_observer,
|
||||
plugin_manager,
|
||||
accounts_update_notifier,
|
||||
})
|
||||
}
|
||||
|
||||
fn load_plugin(
|
||||
plugin_manager: &mut AccountsDbPluginManager,
|
||||
accountsdb_plugin_config_file: &Path,
|
||||
) -> Result<(), AccountsdbPluginServiceError> {
|
||||
let mut file = match File::open(accountsdb_plugin_config_file) {
|
||||
Ok(file) => file,
|
||||
Err(err) => {
|
||||
return Err(AccountsdbPluginServiceError::CannotOpenConfigFile(format!(
|
||||
"Failed to open the plugin config file {:?}, error: {:?}",
|
||||
accountsdb_plugin_config_file, err
|
||||
)));
|
||||
}
|
||||
};
|
||||
|
||||
let mut contents = String::new();
|
||||
if let Err(err) = file.read_to_string(&mut contents) {
|
||||
return Err(AccountsdbPluginServiceError::CannotReadConfigFile(format!(
|
||||
"Failed to read the plugin config file {:?}, error: {:?}",
|
||||
accountsdb_plugin_config_file, err
|
||||
)));
|
||||
}
|
||||
|
||||
let result: serde_json::Value = match serde_json::from_str(&contents) {
|
||||
Ok(value) => value,
|
||||
Err(err) => {
|
||||
return Err(AccountsdbPluginServiceError::InvalidConfigFileFormat(
|
||||
format!(
|
||||
"The config file {:?} is not in a valid Json format, error: {:?}",
|
||||
accountsdb_plugin_config_file, err
|
||||
),
|
||||
));
|
||||
}
|
||||
};
|
||||
|
||||
let libpath = result["libpath"]
|
||||
.as_str()
|
||||
.ok_or(AccountsdbPluginServiceError::LibPathNotSet)?;
|
||||
let config_file = accountsdb_plugin_config_file
|
||||
.as_os_str()
|
||||
.to_str()
|
||||
.ok_or(AccountsdbPluginServiceError::InvalidPluginPath)?;
|
||||
|
||||
unsafe {
|
||||
let result = plugin_manager.load_plugin(libpath, config_file);
|
||||
if let Err(err) = result {
|
||||
let msg = format!(
|
||||
"Failed to load the plugin library: {:?}, error: {:?}",
|
||||
libpath, err
|
||||
);
|
||||
return Err(AccountsdbPluginServiceError::PluginLoadError(msg));
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn get_accounts_update_notifier(&self) -> AccountsUpdateNotifier {
|
||||
self.accounts_update_notifier.clone()
|
||||
}
|
||||
|
||||
pub fn join(mut self) -> thread::Result<()> {
|
||||
self.slot_status_observer.join()?;
|
||||
self.plugin_manager.write().unwrap().unload();
|
||||
Ok(())
|
||||
}
|
||||
}
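
// Illustrative sketch, not part of the original change: the shape of config file
// that `load_plugin` accepts. Only `libpath` is interpreted here; any remaining
// fields (such as the hypothetical `connection_str` below) are left for the
// plugin itself to read in `on_load`.
#[cfg(test)]
mod config_format_example {
    #[test]
    fn libpath_is_read_from_the_config_json() {
        let contents = r#"{
            "libpath": "/path/to/libsolana_accountsdb_plugin_postgres.so",
            "connection_str": "host=localhost user=solana"
        }"#;
        let value: serde_json::Value = serde_json::from_str(contents).unwrap();
        let libpath = value["libpath"].as_str().expect("libpath must be set");
        assert!(libpath.ends_with(".so"));
    }
}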
|
4
accountsdb-plugin-manager/src/lib.rs
Normal file
@@ -0,0 +1,4 @@
|
||||
pub mod accounts_update_notifier;
|
||||
pub mod accountsdb_plugin_manager;
|
||||
pub mod accountsdb_plugin_service;
|
||||
pub mod slot_status_observer;
|
80
accountsdb-plugin-manager/src/slot_status_observer.rs
Normal file
@@ -0,0 +1,80 @@
|
||||
use {
|
||||
crossbeam_channel::Receiver,
|
||||
solana_rpc::optimistically_confirmed_bank_tracker::BankNotification,
|
||||
solana_runtime::accounts_update_notifier_interface::AccountsUpdateNotifier,
|
||||
std::{
|
||||
sync::{
|
||||
atomic::{AtomicBool, Ordering},
|
||||
Arc,
|
||||
},
|
||||
thread::{self, Builder, JoinHandle},
|
||||
},
|
||||
};
|
||||
|
||||
#[derive(Debug)]
|
||||
pub(crate) struct SlotStatusObserver {
|
||||
bank_notification_receiver_service: Option<JoinHandle<()>>,
|
||||
exit_updated_slot_server: Arc<AtomicBool>,
|
||||
}
|
||||
|
||||
impl SlotStatusObserver {
|
||||
pub fn new(
|
||||
bank_notification_receiver: Receiver<BankNotification>,
|
||||
accounts_update_notifier: AccountsUpdateNotifier,
|
||||
) -> Self {
|
||||
let exit_updated_slot_server = Arc::new(AtomicBool::new(false));
|
||||
|
||||
Self {
|
||||
bank_notification_receiver_service: Some(Self::run_bank_notification_receiver(
|
||||
bank_notification_receiver,
|
||||
exit_updated_slot_server.clone(),
|
||||
accounts_update_notifier,
|
||||
)),
|
||||
exit_updated_slot_server,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn join(&mut self) -> thread::Result<()> {
|
||||
self.exit_updated_slot_server.store(true, Ordering::Relaxed);
|
||||
self.bank_notification_receiver_service
|
||||
.take()
|
||||
.map(JoinHandle::join)
|
||||
.unwrap()
|
||||
}
|
||||
|
||||
fn run_bank_notification_receiver(
|
||||
bank_notification_receiver: Receiver<BankNotification>,
|
||||
exit: Arc<AtomicBool>,
|
||||
accounts_update_notifier: AccountsUpdateNotifier,
|
||||
) -> JoinHandle<()> {
|
||||
Builder::new()
|
||||
.name("bank_notification_receiver".to_string())
|
||||
.spawn(move || {
|
||||
while !exit.load(Ordering::Relaxed) {
|
||||
if let Ok(slot) = bank_notification_receiver.recv() {
|
||||
match slot {
|
||||
BankNotification::OptimisticallyConfirmed(slot) => {
|
||||
accounts_update_notifier
|
||||
.read()
|
||||
.unwrap()
|
||||
.notify_slot_confirmed(slot, None);
|
||||
}
|
||||
BankNotification::Frozen(bank) => {
|
||||
accounts_update_notifier
|
||||
.read()
|
||||
.unwrap()
|
||||
.notify_slot_processed(bank.slot(), Some(bank.parent_slot()));
|
||||
}
|
||||
BankNotification::Root(bank) => {
|
||||
accounts_update_notifier
|
||||
.read()
|
||||
.unwrap()
|
||||
.notify_slot_rooted(bank.slot(), Some(bank.parent_slot()));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
})
|
||||
.unwrap()
|
||||
}
|
||||
}
|
33
accountsdb-plugin-postgres/Cargo.toml
Normal file
@@ -0,0 +1,33 @@
|
||||
[package]
|
||||
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
|
||||
edition = "2018"
|
||||
name = "solana-accountsdb-plugin-postgres"
|
||||
description = "The Solana AccountsDb plugin for PostgreSQL database."
|
||||
version = "1.8.17"
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
license = "Apache-2.0"
|
||||
homepage = "https://solana.com/"
|
||||
documentation = "https://docs.rs/solana-validator"
|
||||
|
||||
[lib]
|
||||
crate-type = ["cdylib", "rlib"]
|
||||
|
||||
[dependencies]
|
||||
bs58 = "0.4.0"
|
||||
chrono = { version = "0.4.11", features = ["serde"] }
|
||||
crossbeam-channel = "0.5"
|
||||
log = "0.4.14"
|
||||
postgres = { version = "0.19.1", features = ["with-chrono-0_4"] }
|
||||
serde = "1.0.130"
|
||||
serde_derive = "1.0.103"
|
||||
serde_json = "1.0.67"
|
||||
solana-accountsdb-plugin-interface = { path = "../accountsdb-plugin-interface", version = "=1.8.17" }
|
||||
solana-logger = { path = "../logger", version = "=1.8.17" }
|
||||
solana-measure = { path = "../measure", version = "=1.8.17" }
|
||||
solana-metrics = { path = "../metrics", version = "=1.8.17" }
|
||||
solana-sdk = { path = "../sdk", version = "=1.8.17" }
|
||||
thiserror = "1.0.21"
|
||||
tokio-postgres = "0.7.3"
|
||||
|
||||
[package.metadata.docs.rs]
|
||||
targets = ["x86_64-unknown-linux-gnu"]
|
5
accountsdb-plugin-postgres/README.md
Normal file
@@ -0,0 +1,5 @@
|
||||
This is an example implementation of the AccountsDb plugin for a PostgreSQL database.
Please see `src/accountsdb_plugin_postgres.rs` for the format of the plugin's configuration file.
|
||||
|
||||
To create the schema objects for the database, please use `scripts/create_schema.sql`.
|
||||
`scripts/drop_schema.sql` can be used to tear down the schema objects.
|
54
accountsdb-plugin-postgres/scripts/create_schema.sql
Normal file
@@ -0,0 +1,54 @@
|
||||
/**
|
||||
* This plugin implementation for PostgreSQL requires the following tables
|
||||
*/
|
||||
-- The table storing accounts
|
||||
|
||||
|
||||
CREATE TABLE account (
|
||||
pubkey BYTEA PRIMARY KEY,
|
||||
owner BYTEA,
|
||||
lamports BIGINT NOT NULL,
|
||||
slot BIGINT NOT NULL,
|
||||
executable BOOL NOT NULL,
|
||||
rent_epoch BIGINT NOT NULL,
|
||||
data BYTEA,
|
||||
write_version BIGINT NOT NULL,
|
||||
updated_on TIMESTAMP NOT NULL
|
||||
);
|
||||
|
||||
-- The table storing slot information
|
||||
CREATE TABLE slot (
|
||||
slot BIGINT PRIMARY KEY,
|
||||
parent BIGINT,
|
||||
status varchar(16) NOT NULL,
|
||||
updated_on TIMESTAMP NOT NULL
|
||||
);
|
||||
|
||||
/**
|
||||
* The following is for keeping historical data for accounts and is not required for the plugin to work.
|
||||
*/
|
||||
-- The table storing historical data for accounts
|
||||
CREATE TABLE account_audit (
|
||||
pubkey BYTEA,
|
||||
owner BYTEA,
|
||||
lamports BIGINT NOT NULL,
|
||||
slot BIGINT NOT NULL,
|
||||
executable BOOL NOT NULL,
|
||||
rent_epoch BIGINT NOT NULL,
|
||||
data BYTEA,
|
||||
write_version BIGINT NOT NULL,
|
||||
updated_on TIMESTAMP NOT NULL
|
||||
);
|
||||
|
||||
CREATE FUNCTION audit_account_update() RETURNS trigger AS $audit_account_update$
|
||||
BEGIN
|
||||
INSERT INTO account_audit (pubkey, owner, lamports, slot, executable, rent_epoch, data, write_version, updated_on)
|
||||
VALUES (OLD.pubkey, OLD.owner, OLD.lamports, OLD.slot,
|
||||
OLD.executable, OLD.rent_epoch, OLD.data, OLD.write_version, OLD.updated_on);
|
||||
RETURN NEW;
|
||||
END;
|
||||
|
||||
$audit_account_update$ LANGUAGE plpgsql;
|
||||
|
||||
CREATE TRIGGER account_update_trigger AFTER UPDATE OR DELETE ON account
|
||||
FOR EACH ROW EXECUTE PROCEDURE audit_account_update();
|
9
accountsdb-plugin-postgres/scripts/drop_schema.sql
Normal file
@@ -0,0 +1,9 @@
|
||||
/**
|
||||
* Script for cleaning up the schema for PostgreSQL used for the AccountsDb plugin.
|
||||
*/
|
||||
|
||||
DROP TRIGGER account_update_trigger ON account;
|
||||
DROP FUNCTION audit_account_update;
|
||||
DROP TABLE account_audit;
|
||||
DROP TABLE account;
|
||||
DROP TABLE slot;
|
802
accountsdb-plugin-postgres/scripts/postgresql.conf
Normal file
@@ -0,0 +1,802 @@
|
||||
# This is a reference configuration file for the PostgreSQL database, version 14.
|
||||
|
||||
# -----------------------------
|
||||
# PostgreSQL configuration file
|
||||
# -----------------------------
|
||||
#
|
||||
# This file consists of lines of the form:
|
||||
#
|
||||
# name = value
|
||||
#
|
||||
# (The "=" is optional.) Whitespace may be used. Comments are introduced with
|
||||
# "#" anywhere on a line. The complete list of parameter names and allowed
|
||||
# values can be found in the PostgreSQL documentation.
|
||||
#
|
||||
# The commented-out settings shown in this file represent the default values.
|
||||
# Re-commenting a setting is NOT sufficient to revert it to the default value;
|
||||
# you need to reload the server.
|
||||
#
|
||||
# This file is read on server startup and when the server receives a SIGHUP
|
||||
# signal. If you edit the file on a running system, you have to SIGHUP the
|
||||
# server for the changes to take effect, run "pg_ctl reload", or execute
|
||||
# "SELECT pg_reload_conf()". Some parameters, which are marked below,
|
||||
# require a server shutdown and restart to take effect.
|
||||
#
|
||||
# Any parameter can also be given as a command-line option to the server, e.g.,
|
||||
# "postgres -c log_connections=on". Some parameters can be changed at run time
|
||||
# with the "SET" SQL command.
|
||||
#
|
||||
# Memory units: B = bytes Time units: us = microseconds
|
||||
# kB = kilobytes ms = milliseconds
|
||||
# MB = megabytes s = seconds
|
||||
# GB = gigabytes min = minutes
|
||||
# TB = terabytes h = hours
|
||||
# d = days
|
||||
|
||||
|
||||
#------------------------------------------------------------------------------
|
||||
# FILE LOCATIONS
|
||||
#------------------------------------------------------------------------------
|
||||
|
||||
# The default values of these variables are driven from the -D command-line
|
||||
# option or PGDATA environment variable, represented here as ConfigDir.
|
||||
|
||||
data_directory = '/var/lib/postgresql/14/main' # use data in another directory
|
||||
# (change requires restart)
|
||||
|
||||
hba_file = '/etc/postgresql/14/main/pg_hba.conf' # host-based authentication file
|
||||
# (change requires restart)
|
||||
ident_file = '/etc/postgresql/14/main/pg_ident.conf' # ident configuration file
|
||||
# (change requires restart)
|
||||
|
||||
# If external_pid_file is not explicitly set, no extra PID file is written.
|
||||
external_pid_file = '/var/run/postgresql/14-main.pid' # write an extra PID file
|
||||
# (change requires restart)
|
||||
|
||||
|
||||
#------------------------------------------------------------------------------
|
||||
# CONNECTIONS AND AUTHENTICATION
|
||||
#------------------------------------------------------------------------------
|
||||
|
||||
# - Connection Settings -
|
||||
|
||||
#listen_addresses = 'localhost' # what IP address(es) to listen on;
|
||||
# comma-separated list of addresses;
|
||||
# defaults to 'localhost'; use '*' for all
|
||||
# (change requires restart)
|
||||
listen_addresses = '*'
|
||||
port = 5433 # (change requires restart)
|
||||
max_connections = 200 # (change requires restart)
|
||||
#superuser_reserved_connections = 3 # (change requires restart)
|
||||
unix_socket_directories = '/var/run/postgresql' # comma-separated list of directories
|
||||
# (change requires restart)
|
||||
#unix_socket_group = '' # (change requires restart)
|
||||
#unix_socket_permissions = 0777 # begin with 0 to use octal notation
|
||||
# (change requires restart)
|
||||
#bonjour = off # advertise server via Bonjour
|
||||
# (change requires restart)
|
||||
#bonjour_name = '' # defaults to the computer name
|
||||
# (change requires restart)
|
||||
|
||||
# - TCP settings -
|
||||
# see "man tcp" for details
|
||||
|
||||
#tcp_keepalives_idle = 0 # TCP_KEEPIDLE, in seconds;
|
||||
# 0 selects the system default
|
||||
#tcp_keepalives_interval = 0 # TCP_KEEPINTVL, in seconds;
|
||||
# 0 selects the system default
|
||||
#tcp_keepalives_count = 0 # TCP_KEEPCNT;
|
||||
# 0 selects the system default
|
||||
#tcp_user_timeout = 0 # TCP_USER_TIMEOUT, in milliseconds;
|
||||
# 0 selects the system default
|
||||
|
||||
#client_connection_check_interval = 0 # time between checks for client
|
||||
# disconnection while running queries;
|
||||
# 0 for never
|
||||
|
||||
# - Authentication -
|
||||
|
||||
#authentication_timeout = 1min # 1s-600s
|
||||
#password_encryption = scram-sha-256 # scram-sha-256 or md5
|
||||
#db_user_namespace = off
|
||||
|
||||
# GSSAPI using Kerberos
|
||||
#krb_server_keyfile = 'FILE:${sysconfdir}/krb5.keytab'
|
||||
#krb_caseins_users = off
|
||||
|
||||
# - SSL -
|
||||
|
||||
ssl = on
|
||||
#ssl_ca_file = ''
|
||||
ssl_cert_file = '/etc/ssl/certs/ssl-cert-snakeoil.pem'
|
||||
#ssl_crl_file = ''
|
||||
#ssl_crl_dir = ''
|
||||
ssl_key_file = '/etc/ssl/private/ssl-cert-snakeoil.key'
|
||||
#ssl_ciphers = 'HIGH:MEDIUM:+3DES:!aNULL' # allowed SSL ciphers
|
||||
#ssl_prefer_server_ciphers = on
|
||||
#ssl_ecdh_curve = 'prime256v1'
|
||||
#ssl_min_protocol_version = 'TLSv1.2'
|
||||
#ssl_max_protocol_version = ''
|
||||
#ssl_dh_params_file = ''
|
||||
#ssl_passphrase_command = ''
|
||||
#ssl_passphrase_command_supports_reload = off
|
||||
|
||||
|
||||
#------------------------------------------------------------------------------
|
||||
# RESOURCE USAGE (except WAL)
|
||||
#------------------------------------------------------------------------------
|
||||
|
||||
# - Memory -
|
||||
|
||||
shared_buffers = 1GB # min 128kB
|
||||
# (change requires restart)
|
||||
#huge_pages = try # on, off, or try
|
||||
# (change requires restart)
|
||||
#huge_page_size = 0 # zero for system default
|
||||
# (change requires restart)
|
||||
#temp_buffers = 8MB # min 800kB
|
||||
#max_prepared_transactions = 0 # zero disables the feature
|
||||
# (change requires restart)
|
||||
# Caution: it is not advisable to set max_prepared_transactions nonzero unless
|
||||
# you actively intend to use prepared transactions.
|
||||
#work_mem = 4MB # min 64kB
|
||||
#hash_mem_multiplier = 1.0 # 1-1000.0 multiplier on hash table work_mem
|
||||
#maintenance_work_mem = 64MB # min 1MB
|
||||
#autovacuum_work_mem = -1 # min 1MB, or -1 to use maintenance_work_mem
|
||||
#logical_decoding_work_mem = 64MB # min 64kB
|
||||
#max_stack_depth = 2MB # min 100kB
|
||||
#shared_memory_type = mmap # the default is the first option
|
||||
# supported by the operating system:
|
||||
# mmap
|
||||
# sysv
|
||||
# windows
|
||||
# (change requires restart)
|
||||
dynamic_shared_memory_type = posix # the default is the first option
|
||||
# supported by the operating system:
|
||||
# posix
|
||||
# sysv
|
||||
# windows
|
||||
# mmap
|
||||
# (change requires restart)
|
||||
#min_dynamic_shared_memory = 0MB # (change requires restart)
|
||||
|
||||
# - Disk -
|
||||
|
||||
#temp_file_limit = -1 # limits per-process temp file space
|
||||
# in kilobytes, or -1 for no limit
|
||||
|
||||
# - Kernel Resources -
|
||||
|
||||
#max_files_per_process = 1000 # min 64
|
||||
# (change requires restart)
|
||||
|
||||
# - Cost-Based Vacuum Delay -
|
||||
|
||||
#vacuum_cost_delay = 0 # 0-100 milliseconds (0 disables)
|
||||
#vacuum_cost_page_hit = 1 # 0-10000 credits
|
||||
#vacuum_cost_page_miss = 2 # 0-10000 credits
|
||||
#vacuum_cost_page_dirty = 20 # 0-10000 credits
|
||||
#vacuum_cost_limit = 200 # 1-10000 credits
|
||||
|
||||
# - Background Writer -
|
||||
|
||||
#bgwriter_delay = 200ms # 10-10000ms between rounds
|
||||
#bgwriter_lru_maxpages = 100 # max buffers written/round, 0 disables
|
||||
#bgwriter_lru_multiplier = 2.0 # 0-10.0 multiplier on buffers scanned/round
|
||||
#bgwriter_flush_after = 512kB # measured in pages, 0 disables
|
||||
|
||||
# - Asynchronous Behavior -
|
||||
|
||||
#backend_flush_after = 0 # measured in pages, 0 disables
|
||||
effective_io_concurrency = 1000 # 1-1000; 0 disables prefetching
|
||||
#maintenance_io_concurrency = 10 # 1-1000; 0 disables prefetching
|
||||
#max_worker_processes = 8 # (change requires restart)
|
||||
#max_parallel_workers_per_gather = 2 # taken from max_parallel_workers
|
||||
#max_parallel_maintenance_workers = 2 # taken from max_parallel_workers
|
||||
#max_parallel_workers = 8 # maximum number of max_worker_processes that
|
||||
# can be used in parallel operations
|
||||
#parallel_leader_participation = on
|
||||
#old_snapshot_threshold = -1 # 1min-60d; -1 disables; 0 is immediate
|
||||
# (change requires restart)
|
||||
|
||||
|
||||
#------------------------------------------------------------------------------
|
||||
# WRITE-AHEAD LOG
|
||||
#------------------------------------------------------------------------------
|
||||
|
||||
# - Settings -
|
||||
|
||||
wal_level = minimal # minimal, replica, or logical
|
||||
# (change requires restart)
|
||||
fsync = off # flush data to disk for crash safety
|
||||
# (turning this off can cause
|
||||
# unrecoverable data corruption)
|
||||
synchronous_commit = off # synchronization level;
|
||||
# off, local, remote_write, remote_apply, or on
|
||||
#wal_sync_method = fsync # the default is the first option
|
||||
# supported by the operating system:
|
||||
# open_datasync
|
||||
# fdatasync (default on Linux and FreeBSD)
|
||||
# fsync
|
||||
# fsync_writethrough
|
||||
# open_sync
|
||||
full_page_writes = off # recover from partial page writes
|
||||
#wal_log_hints = off # also do full page writes of non-critical updates
|
||||
# (change requires restart)
|
||||
#wal_compression = off # enable compression of full-page writes
|
||||
#wal_init_zero = on # zero-fill new WAL files
|
||||
#wal_recycle = on # recycle WAL files
|
||||
#wal_buffers = -1 # min 32kB, -1 sets based on shared_buffers
|
||||
# (change requires restart)
|
||||
#wal_writer_delay = 200ms # 1-10000 milliseconds
|
||||
#wal_writer_flush_after = 1MB # measured in pages, 0 disables
|
||||
#wal_skip_threshold = 2MB
|
||||
|
||||
#commit_delay = 0 # range 0-100000, in microseconds
|
||||
#commit_siblings = 5 # range 1-1000
|
||||
|
||||
# - Checkpoints -
|
||||
|
||||
#checkpoint_timeout = 5min # range 30s-1d
|
||||
#checkpoint_completion_target = 0.9 # checkpoint target duration, 0.0 - 1.0
|
||||
#checkpoint_flush_after = 256kB # measured in pages, 0 disables
|
||||
#checkpoint_warning = 30s # 0 disables
|
||||
max_wal_size = 1GB
|
||||
min_wal_size = 80MB
|
||||
|
||||
# - Archiving -
|
||||
|
||||
#archive_mode = off # enables archiving; off, on, or always
|
||||
# (change requires restart)
|
||||
#archive_command = '' # command to use to archive a logfile segment
|
||||
# placeholders: %p = path of file to archive
|
||||
# %f = file name only
|
||||
# e.g. 'test ! -f /mnt/server/archivedir/%f && cp %p /mnt/server/archivedir/%f'
|
||||
#archive_timeout = 0 # force a logfile segment switch after this
|
||||
# number of seconds; 0 disables
|
||||
|
||||
# - Archive Recovery -
|
||||
|
||||
# These are only used in recovery mode.
|
||||
|
||||
#restore_command = '' # command to use to restore an archived logfile segment
|
||||
# placeholders: %p = path of file to restore
|
||||
# %f = file name only
|
||||
# e.g. 'cp /mnt/server/archivedir/%f %p'
|
||||
#archive_cleanup_command = '' # command to execute at every restartpoint
|
||||
#recovery_end_command = '' # command to execute at completion of recovery
|
||||
|
||||
# - Recovery Target -
|
||||
|
||||
# Set these only when performing a targeted recovery.
|
||||
|
||||
#recovery_target = '' # 'immediate' to end recovery as soon as a
|
||||
# consistent state is reached
|
||||
# (change requires restart)
|
||||
#recovery_target_name = '' # the named restore point to which recovery will proceed
|
||||
# (change requires restart)
|
||||
#recovery_target_time = '' # the time stamp up to which recovery will proceed
|
||||
# (change requires restart)
|
||||
#recovery_target_xid = '' # the transaction ID up to which recovery will proceed
|
||||
# (change requires restart)
|
||||
#recovery_target_lsn = '' # the WAL LSN up to which recovery will proceed
|
||||
# (change requires restart)
|
||||
#recovery_target_inclusive = on # Specifies whether to stop:
|
||||
# just after the specified recovery target (on)
|
||||
# just before the recovery target (off)
|
||||
# (change requires restart)
|
||||
#recovery_target_timeline = 'latest' # 'current', 'latest', or timeline ID
|
||||
# (change requires restart)
|
||||
#recovery_target_action = 'pause' # 'pause', 'promote', 'shutdown'
|
||||
# (change requires restart)
|
||||
|
||||
|
||||
#------------------------------------------------------------------------------
|
||||
# REPLICATION
|
||||
#------------------------------------------------------------------------------
|
||||
|
||||
# - Sending Servers -
|
||||
|
||||
# Set these on the primary and on any standby that will send replication data.
|
||||
|
||||
max_wal_senders = 0 # max number of walsender processes
|
||||
# (change requires restart)
|
||||
#max_replication_slots = 10 # max number of replication slots
|
||||
# (change requires restart)
|
||||
#wal_keep_size = 0 # in megabytes; 0 disables
|
||||
#max_slot_wal_keep_size = -1 # in megabytes; -1 disables
|
||||
#wal_sender_timeout = 60s # in milliseconds; 0 disables
|
||||
#track_commit_timestamp = off # collect timestamp of transaction commit
|
||||
# (change requires restart)
|
||||
|
||||
# - Primary Server -
|
||||
|
||||
# These settings are ignored on a standby server.
|
||||
|
||||
#synchronous_standby_names = '' # standby servers that provide sync rep
|
||||
# method to choose sync standbys, number of sync standbys,
|
||||
# and comma-separated list of application_name
|
||||
# from standby(s); '*' = all
|
||||
#vacuum_defer_cleanup_age = 0 # number of xacts by which cleanup is delayed
|
||||
|
||||
# - Standby Servers -
|
||||
|
||||
# These settings are ignored on a primary server.
|
||||
|
||||
#primary_conninfo = '' # connection string to sending server
|
||||
#primary_slot_name = '' # replication slot on sending server
|
||||
#promote_trigger_file = '' # file name whose presence ends recovery
|
||||
#hot_standby = on # "off" disallows queries during recovery
|
||||
# (change requires restart)
|
||||
#max_standby_archive_delay = 30s # max delay before canceling queries
|
||||
# when reading WAL from archive;
|
||||
# -1 allows indefinite delay
|
||||
#max_standby_streaming_delay = 30s # max delay before canceling queries
|
||||
# when reading streaming WAL;
|
||||
# -1 allows indefinite delay
|
||||
#wal_receiver_create_temp_slot = off # create temp slot if primary_slot_name
|
||||
# is not set
|
||||
#wal_receiver_status_interval = 10s # send replies at least this often
|
||||
# 0 disables
|
||||
#hot_standby_feedback = off # send info from standby to prevent
|
||||
# query conflicts
|
||||
#wal_receiver_timeout = 60s # time that receiver waits for
|
||||
# communication from primary
|
||||
# in milliseconds; 0 disables
|
||||
#wal_retrieve_retry_interval = 5s # time to wait before retrying to
|
||||
# retrieve WAL after a failed attempt
|
||||
#recovery_min_apply_delay = 0 # minimum delay for applying changes during recovery
|
||||
|
||||
# - Subscribers -
|
||||
|
||||
# These settings are ignored on a publisher.
|
||||
|
||||
#max_logical_replication_workers = 4 # taken from max_worker_processes
|
||||
# (change requires restart)
|
||||
#max_sync_workers_per_subscription = 2 # taken from max_logical_replication_workers
|
||||
|
||||
|
||||
#------------------------------------------------------------------------------
|
||||
# QUERY TUNING
|
||||
#------------------------------------------------------------------------------
|
||||
|
||||
# - Planner Method Configuration -
|
||||
|
||||
#enable_async_append = on
|
||||
#enable_bitmapscan = on
|
||||
#enable_gathermerge = on
|
||||
#enable_hashagg = on
|
||||
#enable_hashjoin = on
|
||||
#enable_incremental_sort = on
|
||||
#enable_indexscan = on
|
||||
#enable_indexonlyscan = on
|
||||
#enable_material = on
|
||||
#enable_memoize = on
|
||||
#enable_mergejoin = on
|
||||
#enable_nestloop = on
|
||||
#enable_parallel_append = on
|
||||
#enable_parallel_hash = on
|
||||
#enable_partition_pruning = on
|
||||
#enable_partitionwise_join = off
|
||||
#enable_partitionwise_aggregate = off
|
||||
#enable_seqscan = on
|
||||
#enable_sort = on
|
||||
#enable_tidscan = on
|
||||
|
||||
# - Planner Cost Constants -
|
||||
|
||||
#seq_page_cost = 1.0 # measured on an arbitrary scale
|
||||
#random_page_cost = 4.0 # same scale as above
|
||||
#cpu_tuple_cost = 0.01 # same scale as above
|
||||
#cpu_index_tuple_cost = 0.005 # same scale as above
|
||||
#cpu_operator_cost = 0.0025 # same scale as above
|
||||
#parallel_setup_cost = 1000.0 # same scale as above
|
||||
#parallel_tuple_cost = 0.1 # same scale as above
|
||||
#min_parallel_table_scan_size = 8MB
|
||||
#min_parallel_index_scan_size = 512kB
|
||||
#effective_cache_size = 4GB
|
||||
|
||||
#jit_above_cost = 100000 # perform JIT compilation if available
|
||||
# and query more expensive than this;
|
||||
# -1 disables
|
||||
#jit_inline_above_cost = 500000 # inline small functions if query is
|
||||
# more expensive than this; -1 disables
|
||||
#jit_optimize_above_cost = 500000 # use expensive JIT optimizations if
|
||||
# query is more expensive than this;
|
||||
# -1 disables
|
||||
|
||||
# - Genetic Query Optimizer -
|
||||
|
||||
#geqo = on
|
||||
#geqo_threshold = 12
|
||||
#geqo_effort = 5 # range 1-10
|
||||
#geqo_pool_size = 0 # selects default based on effort
|
||||
#geqo_generations = 0 # selects default based on effort
|
||||
#geqo_selection_bias = 2.0 # range 1.5-2.0
|
||||
#geqo_seed = 0.0 # range 0.0-1.0
|
||||
|
||||
# - Other Planner Options -
|
||||
|
||||
#default_statistics_target = 100 # range 1-10000
|
||||
#constraint_exclusion = partition # on, off, or partition
|
||||
#cursor_tuple_fraction = 0.1 # range 0.0-1.0
|
||||
#from_collapse_limit = 8
|
||||
#jit = on # allow JIT compilation
|
||||
#join_collapse_limit = 8 # 1 disables collapsing of explicit
|
||||
# JOIN clauses
|
||||
#plan_cache_mode = auto # auto, force_generic_plan or
|
||||
# force_custom_plan
|
||||
|
||||
|
||||
#------------------------------------------------------------------------------
|
||||
# REPORTING AND LOGGING
|
||||
#------------------------------------------------------------------------------
|
||||
|
||||
# - Where to Log -
|
||||
|
||||
#log_destination = 'stderr' # Valid values are combinations of
|
||||
# stderr, csvlog, syslog, and eventlog,
|
||||
# depending on platform. csvlog
|
||||
# requires logging_collector to be on.
|
||||
|
||||
# This is used when logging to stderr:
|
||||
#logging_collector = off # Enable capturing of stderr and csvlog
|
||||
# into log files. Required to be on for
|
||||
# csvlogs.
|
||||
# (change requires restart)
|
||||
|
||||
# These are only used if logging_collector is on:
|
||||
#log_directory = 'log' # directory where log files are written,
|
||||
# can be absolute or relative to PGDATA
|
||||
#log_filename = 'postgresql-%Y-%m-%d_%H%M%S.log' # log file name pattern,
|
||||
# can include strftime() escapes
|
||||
#log_file_mode = 0600 # creation mode for log files,
|
||||
# begin with 0 to use octal notation
|
||||
#log_rotation_age = 1d # Automatic rotation of logfiles will
|
||||
# happen after that time. 0 disables.
|
||||
#log_rotation_size = 10MB # Automatic rotation of logfiles will
|
||||
# happen after that much log output.
|
||||
# 0 disables.
|
||||
#log_truncate_on_rotation = off # If on, an existing log file with the
|
||||
# same name as the new log file will be
|
||||
# truncated rather than appended to.
|
||||
# But such truncation only occurs on
|
||||
# time-driven rotation, not on restarts
|
||||
# or size-driven rotation. Default is
|
||||
# off, meaning append to existing files
|
||||
# in all cases.
|
||||
|
||||
# These are relevant when logging to syslog:
|
||||
#syslog_facility = 'LOCAL0'
|
||||
#syslog_ident = 'postgres'
|
||||
#syslog_sequence_numbers = on
|
||||
#syslog_split_messages = on
|
||||
|
||||
# This is only relevant when logging to eventlog (Windows):
|
||||
# (change requires restart)
|
||||
#event_source = 'PostgreSQL'
|
||||
|
||||
# - When to Log -
|
||||
|
||||
#log_min_messages = warning # values in order of decreasing detail:
|
||||
# debug5
|
||||
# debug4
|
||||
# debug3
|
||||
# debug2
|
||||
# debug1
|
||||
# info
|
||||
# notice
|
||||
# warning
|
||||
# error
|
||||
# log
|
||||
# fatal
|
||||
# panic
|
||||
|
||||
#log_min_error_statement = error # values in order of decreasing detail:
|
||||
# debug5
|
||||
# debug4
|
||||
# debug3
|
||||
# debug2
|
||||
# debug1
|
||||
# info
|
||||
# notice
|
||||
# warning
|
||||
# error
|
||||
# log
|
||||
# fatal
|
||||
# panic (effectively off)
|
||||
|
||||
#log_min_duration_statement = -1 # -1 is disabled, 0 logs all statements
|
||||
# and their durations, > 0 logs only
|
||||
# statements running at least this number
|
||||
# of milliseconds
|
||||
|
||||
#log_min_duration_sample = -1 # -1 is disabled, 0 logs a sample of statements
|
||||
# and their durations, > 0 logs only a sample of
|
||||
# statements running at least this number
|
||||
# of milliseconds;
|
||||
# sample fraction is determined by log_statement_sample_rate
|
||||
|
||||
#log_statement_sample_rate = 1.0 # fraction of logged statements exceeding
|
||||
# log_min_duration_sample to be logged;
|
||||
# 1.0 logs all such statements, 0.0 never logs
|
||||
|
||||
|
||||
#log_transaction_sample_rate = 0.0 # fraction of transactions whose statements
|
||||
# are logged regardless of their duration; 1.0 logs all
|
||||
# statements from all transactions, 0.0 never logs
|
||||
|
||||
# - What to Log -
|
||||
|
||||
#debug_print_parse = off
|
||||
#debug_print_rewritten = off
|
||||
#debug_print_plan = off
|
||||
#debug_pretty_print = on
|
||||
#log_autovacuum_min_duration = -1 # log autovacuum activity;
|
||||
# -1 disables, 0 logs all actions and
|
||||
# their durations, > 0 logs only
|
||||
# actions running at least this number
|
||||
# of milliseconds.
|
||||
#log_checkpoints = off
|
||||
#log_connections = off
|
||||
#log_disconnections = off
|
||||
#log_duration = off
|
||||
#log_error_verbosity = default # terse, default, or verbose messages
|
||||
#log_hostname = off
|
||||
log_line_prefix = '%m [%p] %q%u@%d ' # special values:
|
||||
# %a = application name
|
||||
# %u = user name
|
||||
# %d = database name
|
||||
# %r = remote host and port
|
||||
# %h = remote host
|
||||
# %b = backend type
|
||||
# %p = process ID
|
||||
# %P = process ID of parallel group leader
|
||||
# %t = timestamp without milliseconds
|
||||
# %m = timestamp with milliseconds
|
||||
# %n = timestamp with milliseconds (as a Unix epoch)
|
||||
# %Q = query ID (0 if none or not computed)
|
||||
# %i = command tag
|
||||
# %e = SQL state
|
||||
# %c = session ID
|
||||
# %l = session line number
|
||||
# %s = session start timestamp
|
||||
# %v = virtual transaction ID
|
||||
# %x = transaction ID (0 if none)
|
||||
# %q = stop here in non-session
|
||||
# processes
|
||||
# %% = '%'
|
||||
# e.g. '<%u%%%d> '
#log_lock_waits = off # log lock waits >= deadlock_timeout
#log_recovery_conflict_waits = off # log standby recovery conflict waits
# >= deadlock_timeout
#log_parameter_max_length = -1 # when logging statements, limit logged
# bind-parameter values to N bytes;
# -1 means print in full, 0 disables
#log_parameter_max_length_on_error = 0 # when logging an error, limit logged
# bind-parameter values to N bytes;
# -1 means print in full, 0 disables
#log_statement = 'none' # none, ddl, mod, all
#log_replication_commands = off
#log_temp_files = -1 # log temporary files equal or larger
# than the specified size in kilobytes;
# -1 disables, 0 logs all temp files
log_timezone = 'Etc/UTC'


#------------------------------------------------------------------------------
# PROCESS TITLE
#------------------------------------------------------------------------------

cluster_name = '14/main' # added to process titles if nonempty
# (change requires restart)
#update_process_title = on


#------------------------------------------------------------------------------
# STATISTICS
#------------------------------------------------------------------------------

# - Query and Index Statistics Collector -

#track_activities = on
#track_activity_query_size = 1024 # (change requires restart)
#track_counts = on
#track_io_timing = off
#track_wal_io_timing = off
#track_functions = none # none, pl, all
stats_temp_directory = '/var/run/postgresql/14-main.pg_stat_tmp'


# - Monitoring -

#compute_query_id = auto
#log_statement_stats = off
#log_parser_stats = off
#log_planner_stats = off
#log_executor_stats = off


#------------------------------------------------------------------------------
# AUTOVACUUM
#------------------------------------------------------------------------------

#autovacuum = on # Enable autovacuum subprocess? 'on'
# requires track_counts to also be on.
#autovacuum_max_workers = 3 # max number of autovacuum subprocesses
# (change requires restart)
#autovacuum_naptime = 1min # time between autovacuum runs
#autovacuum_vacuum_threshold = 50 # min number of row updates before
# vacuum
#autovacuum_vacuum_insert_threshold = 1000 # min number of row inserts
# before vacuum; -1 disables insert
# vacuums
#autovacuum_analyze_threshold = 50 # min number of row updates before
# analyze
#autovacuum_vacuum_scale_factor = 0.2 # fraction of table size before vacuum
#autovacuum_vacuum_insert_scale_factor = 0.2 # fraction of inserts over table
# size before insert vacuum
#autovacuum_analyze_scale_factor = 0.1 # fraction of table size before analyze
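
# For scale (a worked example of the documented formula
# threshold + scale_factor * number_of_rows): with the defaults above, a table
# with 1,000,000 live rows is vacuumed once roughly
#   50 + 0.2 * 1,000,000 = 200,050
# rows have been updated or deleted, and analyzed after about
#   50 + 0.1 * 1,000,000 = 100,050
# row changes.
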
#autovacuum_freeze_max_age = 200000000 # maximum XID age before forced vacuum
# (change requires restart)
#autovacuum_multixact_freeze_max_age = 400000000 # maximum multixact age
# before forced vacuum
# (change requires restart)
#autovacuum_vacuum_cost_delay = 2ms # default vacuum cost delay for
# autovacuum, in milliseconds;
# -1 means use vacuum_cost_delay
#autovacuum_vacuum_cost_limit = -1 # default vacuum cost limit for
# autovacuum, -1 means use
# vacuum_cost_limit


#------------------------------------------------------------------------------
# CLIENT CONNECTION DEFAULTS
#------------------------------------------------------------------------------

# - Statement Behavior -

#client_min_messages = notice # values in order of decreasing detail:
# debug5
# debug4
# debug3
# debug2
# debug1
# log
# notice
# warning
# error
#search_path = '"$user", public' # schema names
#row_security = on
#default_table_access_method = 'heap'
#default_tablespace = '' # a tablespace name, '' uses the default
#default_toast_compression = 'pglz' # 'pglz' or 'lz4'
#temp_tablespaces = '' # a list of tablespace names, '' uses
# only default tablespace
#check_function_bodies = on
#default_transaction_isolation = 'read committed'
#default_transaction_read_only = off
#default_transaction_deferrable = off
#session_replication_role = 'origin'
#statement_timeout = 0 # in milliseconds, 0 is disabled
#lock_timeout = 0 # in milliseconds, 0 is disabled
#idle_in_transaction_session_timeout = 0 # in milliseconds, 0 is disabled
#idle_session_timeout = 0 # in milliseconds, 0 is disabled
#vacuum_freeze_table_age = 150000000
#vacuum_freeze_min_age = 50000000
#vacuum_failsafe_age = 1600000000
#vacuum_multixact_freeze_table_age = 150000000
#vacuum_multixact_freeze_min_age = 5000000
#vacuum_multixact_failsafe_age = 1600000000
#bytea_output = 'hex' # hex, escape
#xmlbinary = 'base64'
#xmloption = 'content'
#gin_pending_list_limit = 4MB

# - Locale and Formatting -

datestyle = 'iso, mdy'
#intervalstyle = 'postgres'
timezone = 'Etc/UTC'
#timezone_abbreviations = 'Default' # Select the set of available time zone
# abbreviations. Currently, there are
# Default
# Australia (historical usage)
# India
# You can create your own file in
# share/timezonesets/.
#extra_float_digits = 1 # min -15, max 3; any value >0 actually
# selects precise output mode
#client_encoding = sql_ascii # actually, defaults to database
# encoding

# These settings are initialized by initdb, but they can be changed.
lc_messages = 'C.UTF-8' # locale for system error message
# strings
lc_monetary = 'C.UTF-8' # locale for monetary formatting
lc_numeric = 'C.UTF-8' # locale for number formatting
lc_time = 'C.UTF-8' # locale for time formatting

# default configuration for text search
default_text_search_config = 'pg_catalog.english'

# - Shared Library Preloading -

#local_preload_libraries = ''
#session_preload_libraries = ''
#shared_preload_libraries = '' # (change requires restart)
#jit_provider = 'llvmjit' # JIT library to use

# - Other Defaults -

#dynamic_library_path = '$libdir'
#extension_destdir = '' # prepend path when loading extensions
# and shared objects (added by Debian)
#gin_fuzzy_search_limit = 0


#------------------------------------------------------------------------------
# LOCK MANAGEMENT
#------------------------------------------------------------------------------

#deadlock_timeout = 1s
#max_locks_per_transaction = 64 # min 10
# (change requires restart)
#max_pred_locks_per_transaction = 64 # min 10
# (change requires restart)
#max_pred_locks_per_relation = -2 # negative values mean
# (max_pred_locks_per_transaction
# / -max_pred_locks_per_relation) - 1
#max_pred_locks_per_page = 2 # min 0


#------------------------------------------------------------------------------
# VERSION AND PLATFORM COMPATIBILITY
#------------------------------------------------------------------------------

# - Previous PostgreSQL Versions -

#array_nulls = on
#backslash_quote = safe_encoding # on, off, or safe_encoding
#escape_string_warning = on
#lo_compat_privileges = off
#quote_all_identifiers = off
#standard_conforming_strings = on
#synchronize_seqscans = on

# - Other Platforms and Clients -

#transform_null_equals = off


#------------------------------------------------------------------------------
# ERROR HANDLING
#------------------------------------------------------------------------------

#exit_on_error = off # terminate session on any error?
#restart_after_crash = on # reinitialize after backend crash?
#data_sync_retry = off # retry or panic on failure to fsync
# data?
# (change requires restart)
#recovery_init_sync_method = fsync # fsync, syncfs (Linux 5.8+)


#------------------------------------------------------------------------------
# CONFIG FILE INCLUDES
#------------------------------------------------------------------------------

# These options allow settings to be loaded from files other than the
# default postgresql.conf. Note that these are directives, not variable
# assignments, so they can usefully be given more than once.

include_dir = 'conf.d' # include files ending in '.conf' from
# a directory, e.g., 'conf.d'
#include_if_exists = '...' # include file only if it exists
#include = '...' # include file
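
# For example (an illustrative override, not shipped with this config): a file
# conf.d/99-tuning.conf containing
#   shared_buffers = '2GB'
#   work_mem = '64MB'
# would be read by the include_dir directive above; because it is processed
# later, it overrides any earlier assignment of the same parameters.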


#------------------------------------------------------------------------------
# CUSTOMIZED OPTIONS
#------------------------------------------------------------------------------

# Add settings for extensions here
69  accountsdb-plugin-postgres/src/accounts_selector.rs  Normal file
@@ -0,0 +1,69 @@
use {log::*, std::collections::HashSet};

#[derive(Debug)]
pub(crate) struct AccountsSelector {
    pub accounts: HashSet<Vec<u8>>,
    pub owners: HashSet<Vec<u8>>,
    pub select_all_accounts: bool,
}

impl AccountsSelector {
    pub fn default() -> Self {
        AccountsSelector {
            accounts: HashSet::default(),
            owners: HashSet::default(),
            select_all_accounts: true,
        }
    }

    pub fn new(accounts: &[String], owners: &[String]) -> Self {
        info!(
            "Creating AccountsSelector from accounts: {:?}, owners: {:?}",
            accounts, owners
        );

        let select_all_accounts = accounts.iter().any(|key| key == "*");
        if select_all_accounts {
            return AccountsSelector {
                accounts: HashSet::default(),
                owners: HashSet::default(),
                select_all_accounts,
            };
        }
        let accounts = accounts
            .iter()
            .map(|key| bs58::decode(key).into_vec().unwrap())
            .collect();
        let owners = owners
            .iter()
            .map(|key| bs58::decode(key).into_vec().unwrap())
            .collect();
        AccountsSelector {
            accounts,
            owners,
            select_all_accounts,
        }
    }

    pub fn is_account_selected(&self, account: &[u8], owner: &[u8]) -> bool {
        self.select_all_accounts || self.accounts.contains(account) || self.owners.contains(owner)
    }
}

#[cfg(test)]
pub(crate) mod tests {
    use super::*;

    #[test]
    fn test_create_accounts_selector() {
        AccountsSelector::new(
            &["9xQeWvG816bUx9EPjHmaT23yvVM2ZWbrrpZb9PusVFin".to_string()],
            &[],
        );

        AccountsSelector::new(
            &[],
            &["9xQeWvG816bUx9EPjHmaT23yvVM2ZWbrrpZb9PusVFin".to_string()],
        );
    }
}
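
A quick illustration of how the selector is meant to be queried (a sketch one could drop into the tests module above; it is not part of the committed file): keys are stored as raw decoded bytes, so callers compare byte slices rather than base58 strings.

    #[test]
    fn illustrate_owner_and_wildcard_selection() {
        // Owner-based selection: the pubkey itself is unknown, but the owner matches.
        let selector = AccountsSelector::new(
            &[],
            &["9xQeWvG816bUx9EPjHmaT23yvVM2ZWbrrpZb9PusVFin".to_string()],
        );
        let owner = bs58::decode("9xQeWvG816bUx9EPjHmaT23yvVM2ZWbrrpZb9PusVFin")
            .into_vec()
            .unwrap();
        assert!(selector.is_account_selected(&[0u8; 32], &owner));

        // A "*" entry in `accounts` selects everything regardless of owner.
        let select_all = AccountsSelector::new(&["*".to_string()], &[]);
        assert!(select_all.is_account_selected(&[0u8; 32], &[]));
    }
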
345  accountsdb-plugin-postgres/src/accountsdb_plugin_postgres.rs  Normal file
@@ -0,0 +1,345 @@
use solana_measure::measure::Measure;
/// Main entry for the PostgreSQL plugin
use {
    crate::{
        accounts_selector::AccountsSelector,
        postgres_client::{ParallelPostgresClient, PostgresClientBuilder},
    },
    bs58,
    log::*,
    serde_derive::{Deserialize, Serialize},
    serde_json,
    solana_accountsdb_plugin_interface::accountsdb_plugin_interface::{
        AccountsDbPlugin, AccountsDbPluginError, ReplicaAccountInfoVersions, Result, SlotStatus,
    },
    solana_metrics::*,
    std::{fs::File, io::Read},
    thiserror::Error,
};

#[derive(Default)]
pub struct AccountsDbPluginPostgres {
    client: Option<ParallelPostgresClient>,
    accounts_selector: Option<AccountsSelector>,
}

impl std::fmt::Debug for AccountsDbPluginPostgres {
    fn fmt(&self, _: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        Ok(())
    }
}

#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AccountsDbPluginPostgresConfig {
    pub host: Option<String>,
    pub user: Option<String>,
    pub port: Option<u16>,
    pub connection_str: Option<String>,
    pub threads: Option<usize>,
    pub batch_size: Option<usize>,
    pub panic_on_db_errors: Option<bool>,
}

#[derive(Error, Debug)]
pub enum AccountsDbPluginPostgresError {
    #[error("Error connecting to the backend data store. Error message: ({msg})")]
    DataStoreConnectionError { msg: String },

    #[error("Error preparing data store schema. Error message: ({msg})")]
    DataSchemaError { msg: String },

    #[error("Error preparing data store schema. Error message: ({msg})")]
    ConfigurationError { msg: String },
}

impl AccountsDbPlugin for AccountsDbPluginPostgres {
    fn name(&self) -> &'static str {
        "AccountsDbPluginPostgres"
    }

    /// Do initialization for the PostgreSQL plugin.
    ///
    /// # Format of the config file:
    /// * The `accounts_selector` section allows the user to control account selection.
    /// "accounts_selector" : {
    ///     "accounts" : \["pubkey-1", "pubkey-2", ..., "pubkey-n"\],
    /// }
    /// or:
    /// "accounts_selector" = {
    ///     "owners" : \["pubkey-1", "pubkey-2", ..., "pubkey-m"\]
    /// }
    /// Accounts satisfying either the accounts condition or the owners condition will be selected.
    /// When only owners is specified,
    /// all accounts belonging to the owners will be streamed.
    /// The accounts field supports a wildcard to select all accounts:
    /// "accounts_selector" : {
    ///     "accounts" : \["*"\],
    /// }
    /// * "host", optional, specifies the PostgreSQL server.
    /// * "user", optional, specifies the PostgreSQL user.
    /// * "port", optional, specifies the PostgreSQL server's port.
    /// * "connection_str", optional, the custom PostgreSQL connection string.
    /// Please refer to https://docs.rs/postgres/0.19.2/postgres/config/struct.Config.html for the connection configuration.
    /// When `connection_str` is set, the values in "host", "user" and "port" are ignored. If `connection_str` is not given,
    /// `host` and `user` must be given.
    /// * "threads", optional, specifies the number of worker threads for the plugin. A thread
    /// maintains a PostgreSQL connection to the server. The default is '10'.
    /// * "batch_size", optional, specifies the batch size of bulk insert when the AccountsDb is created
    /// from restoring a snapshot. The default is '10'.
    /// * "panic_on_db_errors", optional, controls whether to panic when there are errors replicating data to the
    /// PostgreSQL database. The default is 'false'.
    /// # Examples
    ///
    /// {
    ///    "libpath": "/home/solana/target/release/libsolana_accountsdb_plugin_postgres.so",
    ///    "host": "host_foo",
    ///    "user": "solana",
    ///    "threads": 10,
    ///    "accounts_selector" : {
    ///       "owners" : ["9oT9R5ZyRovSVnt37QvVoBttGpNqR3J7unkb567NP8k3"]
    ///    }
    /// }

    fn on_load(&mut self, config_file: &str) -> Result<()> {
        solana_logger::setup_with_default("info");
        info!(
            "Loading plugin {:?} from config_file {:?}",
            self.name(),
            config_file
        );
        let mut file = File::open(config_file)?;
        let mut contents = String::new();
        file.read_to_string(&mut contents)?;

        let result: serde_json::Value = serde_json::from_str(&contents).unwrap();
        self.accounts_selector = Some(Self::create_accounts_selector_from_config(&result));

        let result: serde_json::Result<AccountsDbPluginPostgresConfig> =
            serde_json::from_str(&contents);
        match result {
            Err(err) => {
                return Err(AccountsDbPluginError::ConfigFileReadError {
                    msg: format!(
                        "The config file is not in the JSON format expected: {:?}",
                        err
                    ),
                })
            }
            Ok(config) => {
                let client = PostgresClientBuilder::build_pararallel_postgres_client(&config)?;
                self.client = Some(client);
            }
        }

        Ok(())
    }

    fn on_unload(&mut self) {
        info!("Unloading plugin: {:?}", self.name());

        match &mut self.client {
            None => {}
            Some(client) => {
                client.join().unwrap();
            }
        }
    }

    fn update_account(
        &mut self,
        account: ReplicaAccountInfoVersions,
        slot: u64,
        is_startup: bool,
    ) -> Result<()> {
        let mut measure_all = Measure::start("accountsdb-plugin-postgres-update-account-main");
        match account {
            ReplicaAccountInfoVersions::V0_0_1(account) => {
                let mut measure_select =
                    Measure::start("accountsdb-plugin-postgres-update-account-select");
                if let Some(accounts_selector) = &self.accounts_selector {
                    if !accounts_selector.is_account_selected(account.pubkey, account.owner) {
                        return Ok(());
                    }
                } else {
                    return Ok(());
                }
                measure_select.stop();
                inc_new_counter_debug!(
                    "accountsdb-plugin-postgres-update-account-select-us",
                    measure_select.as_us() as usize,
                    100000,
                    100000
                );

                debug!(
                    "Updating account {:?} with owner {:?} at slot {:?} using account selector {:?}",
                    bs58::encode(account.pubkey).into_string(),
                    bs58::encode(account.owner).into_string(),
                    slot,
                    self.accounts_selector.as_ref().unwrap()
                );

                match &mut self.client {
                    None => {
                        return Err(AccountsDbPluginError::Custom(Box::new(
                            AccountsDbPluginPostgresError::DataStoreConnectionError {
                                msg: "There is no connection to the PostgreSQL database."
                                    .to_string(),
                            },
                        )));
                    }
                    Some(client) => {
                        let mut measure_update =
                            Measure::start("accountsdb-plugin-postgres-update-account-client");
                        let result = { client.update_account(account, slot, is_startup) };
                        measure_update.stop();

                        inc_new_counter_debug!(
                            "accountsdb-plugin-postgres-update-account-client-us",
                            measure_update.as_us() as usize,
                            100000,
                            100000
                        );

                        if let Err(err) = result {
                            return Err(AccountsDbPluginError::AccountsUpdateError {
                                msg: format!("Failed to persist the update of account to the PostgreSQL database. Error: {:?}", err)
                            });
                        }
                    }
                }
            }
        }

        measure_all.stop();

        inc_new_counter_debug!(
            "accountsdb-plugin-postgres-update-account-main-us",
            measure_all.as_us() as usize,
            100000,
            100000
        );

        Ok(())
    }

    fn update_slot_status(
        &mut self,
        slot: u64,
        parent: Option<u64>,
        status: SlotStatus,
    ) -> Result<()> {
        info!("Updating slot {:?} at with status {:?}", slot, status);

        match &mut self.client {
            None => {
                return Err(AccountsDbPluginError::Custom(Box::new(
                    AccountsDbPluginPostgresError::DataStoreConnectionError {
                        msg: "There is no connection to the PostgreSQL database.".to_string(),
                    },
                )));
            }
            Some(client) => {
                let result = client.update_slot_status(slot, parent, status);

                if let Err(err) = result {
                    return Err(AccountsDbPluginError::SlotStatusUpdateError{
                        msg: format!("Failed to persist the update of slot to the PostgreSQL database. Error: {:?}", err)
                    });
                }
            }
        }

        Ok(())
    }

    fn notify_end_of_startup(&mut self) -> Result<()> {
        info!("Notifying the end of startup for accounts notifications");
        match &mut self.client {
            None => {
                return Err(AccountsDbPluginError::Custom(Box::new(
                    AccountsDbPluginPostgresError::DataStoreConnectionError {
                        msg: "There is no connection to the PostgreSQL database.".to_string(),
                    },
                )));
            }
            Some(client) => {
                let result = client.notify_end_of_startup();

                if let Err(err) = result {
                    return Err(AccountsDbPluginError::SlotStatusUpdateError{
                        msg: format!("Failed to notify the end of startup for accounts notifications. Error: {:?}", err)
                    });
                }
            }
        }
        Ok(())
    }
}

impl AccountsDbPluginPostgres {
    fn create_accounts_selector_from_config(config: &serde_json::Value) -> AccountsSelector {
        let accounts_selector = &config["accounts_selector"];

        if accounts_selector.is_null() {
            AccountsSelector::default()
        } else {
            let accounts = &accounts_selector["accounts"];
            let accounts: Vec<String> = if accounts.is_array() {
                accounts
                    .as_array()
                    .unwrap()
                    .iter()
                    .map(|val| val.as_str().unwrap().to_string())
                    .collect()
            } else {
                Vec::default()
            };
            let owners = &accounts_selector["owners"];
            let owners: Vec<String> = if owners.is_array() {
                owners
                    .as_array()
                    .unwrap()
                    .iter()
                    .map(|val| val.as_str().unwrap().to_string())
                    .collect()
            } else {
                Vec::default()
            };
            AccountsSelector::new(&accounts, &owners)
        }
    }

    pub fn new() -> Self {
        AccountsDbPluginPostgres {
            client: None,
            accounts_selector: None,
        }
    }
}

#[no_mangle]
#[allow(improper_ctypes_definitions)]
/// # Safety
///
/// This function returns the AccountsDbPluginPostgres pointer as trait AccountsDbPlugin.
pub unsafe extern "C" fn _create_plugin() -> *mut dyn AccountsDbPlugin {
    let plugin = AccountsDbPluginPostgres::new();
    let plugin: Box<dyn AccountsDbPlugin> = Box::new(plugin);
    Box::into_raw(plugin)
}

#[cfg(test)]
pub(crate) mod tests {
    use {super::*, serde_json};

    #[test]
    fn test_accounts_selector_from_config() {
        let config = "{\"accounts_selector\" : { \
           \"owners\" : [\"9xQeWvG816bUx9EPjHmaT23yvVM2ZWbrrpZb9PusVFin\"] \
        }}";

        let config: serde_json::Value = serde_json::from_str(config).unwrap();
        AccountsDbPluginPostgres::create_accounts_selector_from_config(&config);
    }
}
3  accountsdb-plugin-postgres/src/lib.rs  Normal file
@@ -0,0 +1,3 @@
pub mod accounts_selector;
pub mod accountsdb_plugin_postgres;
pub mod postgres_client;
879  accountsdb-plugin-postgres/src/postgres_client.rs  Normal file
@@ -0,0 +1,879 @@
#![allow(clippy::integer_arithmetic)]

/// A concurrent implementation for writing accounts into PostgreSQL in parallel.
use {
    crate::accountsdb_plugin_postgres::{
        AccountsDbPluginPostgresConfig, AccountsDbPluginPostgresError,
    },
    chrono::Utc,
    crossbeam_channel::{bounded, Receiver, RecvTimeoutError, Sender},
    log::*,
    postgres::{Client, NoTls, Statement},
    solana_accountsdb_plugin_interface::accountsdb_plugin_interface::{
        AccountsDbPluginError, ReplicaAccountInfo, SlotStatus,
    },
    solana_measure::measure::Measure,
    solana_metrics::*,
    solana_sdk::timing::AtomicInterval,
    std::{
        sync::{
            atomic::{AtomicBool, AtomicUsize, Ordering},
            Arc, Mutex,
        },
        thread::{self, sleep, Builder, JoinHandle},
        time::Duration,
    },
    tokio_postgres::types::ToSql,
};

/// The maximum asynchronous requests allowed in the channel to avoid excessive
/// memory usage. The downside -- calls after this threshold is reached can get blocked.
const MAX_ASYNC_REQUESTS: usize = 40960;
const DEFAULT_POSTGRES_PORT: u16 = 5432;
const DEFAULT_THREADS_COUNT: usize = 100;
const DEFAULT_ACCOUNTS_INSERT_BATCH_SIZE: usize = 10;
const ACCOUNT_COLUMN_COUNT: usize = 9;
const DEFAULT_PANIC_ON_DB_ERROR: bool = false;

struct PostgresSqlClientWrapper {
    client: Client,
    update_account_stmt: Statement,
    bulk_account_insert_stmt: Statement,
    update_slot_with_parent_stmt: Statement,
    update_slot_without_parent_stmt: Statement,
}

pub struct SimplePostgresClient {
    batch_size: usize,
    pending_account_updates: Vec<DbAccountInfo>,
    client: Mutex<PostgresSqlClientWrapper>,
}

struct PostgresClientWorker {
    client: SimplePostgresClient,
    /// Indicating if accounts notification during startup is done.
    is_startup_done: bool,
}

impl Eq for DbAccountInfo {}

#[derive(Clone, PartialEq, Debug)]
pub struct DbAccountInfo {
    pub pubkey: Vec<u8>,
    pub lamports: i64,
    pub owner: Vec<u8>,
    pub executable: bool,
    pub rent_epoch: i64,
    pub data: Vec<u8>,
    pub slot: i64,
    pub write_version: i64,
}

pub(crate) fn abort() -> ! {
    #[cfg(not(test))]
    {
        // standard error is usually redirected to a log file, cry for help on standard output as
        // well
        eprintln!("Validator process aborted. The validator log may contain further details");
        std::process::exit(1);
    }

    #[cfg(test)]
    panic!("process::exit(1) is intercepted for friendly test failure...");
}

impl DbAccountInfo {
    fn new<T: ReadableAccountInfo>(account: &T, slot: u64) -> DbAccountInfo {
        let data = account.data().to_vec();
        Self {
            pubkey: account.pubkey().to_vec(),
            lamports: account.lamports() as i64,
            owner: account.owner().to_vec(),
            executable: account.executable(),
            rent_epoch: account.rent_epoch() as i64,
            data,
            slot: slot as i64,
            write_version: account.write_version(),
        }
    }
}

pub trait ReadableAccountInfo: Sized {
    fn pubkey(&self) -> &[u8];
    fn owner(&self) -> &[u8];
    fn lamports(&self) -> i64;
    fn executable(&self) -> bool;
    fn rent_epoch(&self) -> i64;
    fn data(&self) -> &[u8];
    fn write_version(&self) -> i64;
}

impl ReadableAccountInfo for DbAccountInfo {
    fn pubkey(&self) -> &[u8] {
        &self.pubkey
    }

    fn owner(&self) -> &[u8] {
        &self.owner
    }

    fn lamports(&self) -> i64 {
        self.lamports
    }

    fn executable(&self) -> bool {
        self.executable
    }

    fn rent_epoch(&self) -> i64 {
        self.rent_epoch
    }

    fn data(&self) -> &[u8] {
        &self.data
    }

    fn write_version(&self) -> i64 {
        self.write_version
    }
}

impl<'a> ReadableAccountInfo for ReplicaAccountInfo<'a> {
    fn pubkey(&self) -> &[u8] {
        self.pubkey
    }

    fn owner(&self) -> &[u8] {
        self.owner
    }

    fn lamports(&self) -> i64 {
        self.lamports as i64
    }

    fn executable(&self) -> bool {
        self.executable
    }

    fn rent_epoch(&self) -> i64 {
        self.rent_epoch as i64
    }

    fn data(&self) -> &[u8] {
        self.data
    }

    fn write_version(&self) -> i64 {
        self.write_version as i64
    }
}

pub trait PostgresClient {
    fn join(&mut self) -> thread::Result<()> {
        Ok(())
    }

    fn update_account(
        &mut self,
        account: DbAccountInfo,
        is_startup: bool,
    ) -> Result<(), AccountsDbPluginError>;

    fn update_slot_status(
        &mut self,
        slot: u64,
        parent: Option<u64>,
        status: SlotStatus,
    ) -> Result<(), AccountsDbPluginError>;

    fn notify_end_of_startup(&mut self) -> Result<(), AccountsDbPluginError>;
}

impl SimplePostgresClient {
    fn connect_to_db(
        config: &AccountsDbPluginPostgresConfig,
    ) -> Result<Client, AccountsDbPluginError> {
        let port = config.port.unwrap_or(DEFAULT_POSTGRES_PORT);

        let connection_str = if let Some(connection_str) = &config.connection_str {
            connection_str.clone()
        } else {
            if config.host.is_none() || config.user.is_none() {
                let msg = format!(
                    "\"connection_str\": {:?}, or \"host\": {:?} \"user\": {:?} must be specified",
                    config.connection_str, config.host, config.user
                );
                return Err(AccountsDbPluginError::Custom(Box::new(
                    AccountsDbPluginPostgresError::ConfigurationError { msg },
                )));
            }
            format!(
                "host={} user={} port={}",
                config.host.as_ref().unwrap(),
                config.user.as_ref().unwrap(),
                port
            )
        };

        match Client::connect(&connection_str, NoTls) {
            Err(err) => {
                let msg = format!(
                    "Error in connecting to the PostgreSQL database: {:?} connection_str: {:?}",
                    err, connection_str
                );
                error!("{}", msg);
                Err(AccountsDbPluginError::Custom(Box::new(
                    AccountsDbPluginPostgresError::DataStoreConnectionError { msg },
                )))
            }
            Ok(client) => Ok(client),
        }
    }

    fn build_bulk_account_insert_statement(
        client: &mut Client,
        config: &AccountsDbPluginPostgresConfig,
    ) -> Result<Statement, AccountsDbPluginError> {
        let batch_size = config
            .batch_size
            .unwrap_or(DEFAULT_ACCOUNTS_INSERT_BATCH_SIZE);
        let mut stmt = String::from("INSERT INTO account AS acct (pubkey, slot, owner, lamports, executable, rent_epoch, data, write_version, updated_on) VALUES");
        for j in 0..batch_size {
            let row = j * ACCOUNT_COLUMN_COUNT;
            let val_str = format!(
                "(${}, ${}, ${}, ${}, ${}, ${}, ${}, ${}, ${})",
                row + 1,
                row + 2,
                row + 3,
                row + 4,
                row + 5,
                row + 6,
                row + 7,
                row + 8,
                row + 9,
            );

            if j == 0 {
                stmt = format!("{} {}", &stmt, val_str);
            } else {
                stmt = format!("{}, {}", &stmt, val_str);
            }
        }

        let handle_conflict = "ON CONFLICT (pubkey) DO UPDATE SET slot=excluded.slot, owner=excluded.owner, lamports=excluded.lamports, executable=excluded.executable, rent_epoch=excluded.rent_epoch, \
            data=excluded.data, write_version=excluded.write_version, updated_on=excluded.updated_on WHERE acct.slot < excluded.slot OR (\
            acct.slot = excluded.slot AND acct.write_version < excluded.write_version)";

        stmt = format!("{} {}", stmt, handle_conflict);

        info!("{}", stmt);
        let bulk_stmt = client.prepare(&stmt);

        match bulk_stmt {
            Err(err) => {
                return Err(AccountsDbPluginError::Custom(Box::new(AccountsDbPluginPostgresError::DataSchemaError {
                    msg: format!(
                        "Error in preparing for the accounts update PostgreSQL database: {} host: {:?} user: {:?} config: {:?}",
                        err, config.host, config.user, config
                    ),
                })));
            }
            Ok(update_account_stmt) => Ok(update_account_stmt),
        }
    }
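
    // An illustrative expansion (annotation only, not in the committed source): with
    // batch_size = 2 the loop above yields nine-parameter groups, so the prepared
    // statement reads roughly
    //
    //   INSERT INTO account AS acct (pubkey, slot, owner, lamports, executable,
    //       rent_epoch, data, write_version, updated_on)
    //   VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9),
    //          ($10, $11, $12, $13, $14, $15, $16, $17, $18)
    //   ON CONFLICT (pubkey) DO UPDATE SET ...
    //   WHERE acct.slot < excluded.slot
    //      OR (acct.slot = excluded.slot AND acct.write_version < excluded.write_version)
    //
    // i.e. a row only overwrites an existing one when it carries a newer slot, or the
    // same slot with a higher write_version.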

    fn build_single_account_upsert_statement(
        client: &mut Client,
        config: &AccountsDbPluginPostgresConfig,
    ) -> Result<Statement, AccountsDbPluginError> {
        let stmt = "INSERT INTO account AS acct (pubkey, slot, owner, lamports, executable, rent_epoch, data, write_version, updated_on) \
            VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9) \
            ON CONFLICT (pubkey) DO UPDATE SET slot=excluded.slot, owner=excluded.owner, lamports=excluded.lamports, executable=excluded.executable, rent_epoch=excluded.rent_epoch, \
            data=excluded.data, write_version=excluded.write_version, updated_on=excluded.updated_on WHERE acct.slot < excluded.slot OR (\
            acct.slot = excluded.slot AND acct.write_version < excluded.write_version)";

        let stmt = client.prepare(stmt);

        match stmt {
            Err(err) => {
                return Err(AccountsDbPluginError::Custom(Box::new(AccountsDbPluginPostgresError::DataSchemaError {
                    msg: format!(
                        "Error in preparing for the accounts update PostgreSQL database: {} host: {:?} user: {:?} config: {:?}",
                        err, config.host, config.user, config
                    ),
                })));
            }
            Ok(update_account_stmt) => Ok(update_account_stmt),
        }
    }

    fn build_slot_upsert_statement_with_parent(
        client: &mut Client,
        config: &AccountsDbPluginPostgresConfig,
    ) -> Result<Statement, AccountsDbPluginError> {
        let stmt = "INSERT INTO slot (slot, parent, status, updated_on) \
            VALUES ($1, $2, $3, $4) \
            ON CONFLICT (slot) DO UPDATE SET parent=excluded.parent, status=excluded.status, updated_on=excluded.updated_on";

        let stmt = client.prepare(stmt);

        match stmt {
            Err(err) => {
                return Err(AccountsDbPluginError::Custom(Box::new(AccountsDbPluginPostgresError::DataSchemaError {
                    msg: format!(
                        "Error in preparing for the slot update PostgreSQL database: {} host: {:?} user: {:?} config: {:?}",
                        err, config.host, config.user, config
                    ),
                })));
            }
            Ok(stmt) => Ok(stmt),
        }
    }

    fn build_slot_upsert_statement_without_parent(
        client: &mut Client,
        config: &AccountsDbPluginPostgresConfig,
    ) -> Result<Statement, AccountsDbPluginError> {
        let stmt = "INSERT INTO slot (slot, status, updated_on) \
            VALUES ($1, $2, $3) \
            ON CONFLICT (slot) DO UPDATE SET status=excluded.status, updated_on=excluded.updated_on";

        let stmt = client.prepare(stmt);

        match stmt {
            Err(err) => {
                return Err(AccountsDbPluginError::Custom(Box::new(AccountsDbPluginPostgresError::DataSchemaError {
                    msg: format!(
                        "Error in preparing for the slot update PostgreSQL database: {} host: {:?} user: {:?} config: {:?}",
                        err, config.host, config.user, config
                    ),
                })));
            }
            Ok(stmt) => Ok(stmt),
        }
    }

    /// Internal function for updating or inserting a single account
    fn upsert_account_internal(
        account: &DbAccountInfo,
        statement: &Statement,
        client: &mut Client,
    ) -> Result<(), AccountsDbPluginError> {
        let lamports = account.lamports() as i64;
        let rent_epoch = account.rent_epoch() as i64;
        let updated_on = Utc::now().naive_utc();
        let result = client.query(
            statement,
            &[
                &account.pubkey(),
                &account.slot,
                &account.owner(),
                &lamports,
                &account.executable(),
                &rent_epoch,
                &account.data(),
                &account.write_version(),
                &updated_on,
            ],
        );

        if let Err(err) = result {
            let msg = format!(
                "Failed to persist the update of account to the PostgreSQL database. Error: {:?}",
                err
            );
            error!("{}", msg);
            return Err(AccountsDbPluginError::AccountsUpdateError { msg });
        }

        Ok(())
    }

    /// Update or insert a single account
    fn upsert_account(&mut self, account: &DbAccountInfo) -> Result<(), AccountsDbPluginError> {
        let client = self.client.get_mut().unwrap();
        let statement = &client.update_account_stmt;
        let client = &mut client.client;
        Self::upsert_account_internal(account, statement, client)
    }

    /// Insert accounts in batch to reduce network overhead
    fn insert_accounts_in_batch(
        &mut self,
        account: DbAccountInfo,
    ) -> Result<(), AccountsDbPluginError> {
        self.pending_account_updates.push(account);

        if self.pending_account_updates.len() == self.batch_size {
            let mut measure = Measure::start("accountsdb-plugin-postgres-prepare-values");

            let mut values: Vec<&(dyn ToSql + Sync)> =
                Vec::with_capacity(self.batch_size * ACCOUNT_COLUMN_COUNT);
            let updated_on = Utc::now().naive_utc();
            for j in 0..self.batch_size {
                let account = &self.pending_account_updates[j];

                values.push(&account.pubkey);
                values.push(&account.slot);
                values.push(&account.owner);
                values.push(&account.lamports);
                values.push(&account.executable);
                values.push(&account.rent_epoch);
                values.push(&account.data);
                values.push(&account.write_version);
                values.push(&updated_on);
            }
            measure.stop();
            inc_new_counter_debug!(
                "accountsdb-plugin-postgres-prepare-values-us",
                measure.as_us() as usize,
                10000,
                10000
            );

            let mut measure = Measure::start("accountsdb-plugin-postgres-update-account");
            let client = self.client.get_mut().unwrap();
            let result = client
                .client
                .query(&client.bulk_account_insert_stmt, &values);

            self.pending_account_updates.clear();
            if let Err(err) = result {
                let msg = format!(
                    "Failed to persist the update of account to the PostgreSQL database. Error: {:?}",
                    err
                );
                error!("{}", msg);
                return Err(AccountsDbPluginError::AccountsUpdateError { msg });
            }
            measure.stop();
            inc_new_counter_debug!(
                "accountsdb-plugin-postgres-update-account-us",
                measure.as_us() as usize,
                10000,
                10000
            );
            inc_new_counter_debug!(
                "accountsdb-plugin-postgres-update-account-count",
                self.batch_size,
                10000,
                10000
            );
        }
        Ok(())
    }

    /// Flush any left over accounts in batch which are not processed in the last batch
    fn flush_buffered_writes(&mut self) -> Result<(), AccountsDbPluginError> {
        if self.pending_account_updates.is_empty() {
            return Ok(());
        }

        let client = self.client.get_mut().unwrap();
        let statement = &client.update_account_stmt;
        let client = &mut client.client;

        for account in self.pending_account_updates.drain(..) {
            Self::upsert_account_internal(&account, statement, client)?;
        }

        Ok(())
    }

    pub fn new(config: &AccountsDbPluginPostgresConfig) -> Result<Self, AccountsDbPluginError> {
        info!("Creating SimplePostgresClient...");
        let mut client = Self::connect_to_db(config)?;
        let bulk_account_insert_stmt =
            Self::build_bulk_account_insert_statement(&mut client, config)?;
        let update_account_stmt = Self::build_single_account_upsert_statement(&mut client, config)?;

        let update_slot_with_parent_stmt =
            Self::build_slot_upsert_statement_with_parent(&mut client, config)?;
        let update_slot_without_parent_stmt =
            Self::build_slot_upsert_statement_without_parent(&mut client, config)?;

        let batch_size = config
            .batch_size
            .unwrap_or(DEFAULT_ACCOUNTS_INSERT_BATCH_SIZE);
        info!("Created SimplePostgresClient.");
        Ok(Self {
            batch_size,
            pending_account_updates: Vec::with_capacity(batch_size),
            client: Mutex::new(PostgresSqlClientWrapper {
                client,
                update_account_stmt,
                bulk_account_insert_stmt,
                update_slot_with_parent_stmt,
                update_slot_without_parent_stmt,
            }),
        })
    }
}

impl PostgresClient for SimplePostgresClient {
    fn update_account(
        &mut self,
        account: DbAccountInfo,
        is_startup: bool,
    ) -> Result<(), AccountsDbPluginError> {
        trace!(
            "Updating account {} with owner {} at slot {}",
            bs58::encode(account.pubkey()).into_string(),
            bs58::encode(account.owner()).into_string(),
            account.slot,
        );
        if !is_startup {
            return self.upsert_account(&account);
        }
        self.insert_accounts_in_batch(account)
    }

    fn update_slot_status(
        &mut self,
        slot: u64,
        parent: Option<u64>,
        status: SlotStatus,
    ) -> Result<(), AccountsDbPluginError> {
        info!("Updating slot {:?} at with status {:?}", slot, status);

        let slot = slot as i64; // postgres only supports i64
        let parent = parent.map(|parent| parent as i64);
        let updated_on = Utc::now().naive_utc();
        let status_str = status.as_str();
        let client = self.client.get_mut().unwrap();

        let result = match parent {
            Some(parent) => client.client.execute(
                &client.update_slot_with_parent_stmt,
                &[&slot, &parent, &status_str, &updated_on],
            ),
            None => client.client.execute(
                &client.update_slot_without_parent_stmt,
                &[&slot, &status_str, &updated_on],
            ),
        };

        match result {
            Err(err) => {
                let msg = format!(
                    "Failed to persist the update of slot to the PostgreSQL database. Error: {:?}",
                    err
                );
                error!("{:?}", msg);
                return Err(AccountsDbPluginError::SlotStatusUpdateError { msg });
            }
            Ok(rows) => {
                assert_eq!(1, rows, "Expected one row to be updated at a time");
            }
        }

        Ok(())
    }

    fn notify_end_of_startup(&mut self) -> Result<(), AccountsDbPluginError> {
        self.flush_buffered_writes()
    }
}

struct UpdateAccountRequest {
    account: DbAccountInfo,
    is_startup: bool,
}

struct UpdateSlotRequest {
    slot: u64,
    parent: Option<u64>,
    slot_status: SlotStatus,
}

enum DbWorkItem {
    UpdateAccount(UpdateAccountRequest),
    UpdateSlot(UpdateSlotRequest),
}

impl PostgresClientWorker {
    fn new(config: AccountsDbPluginPostgresConfig) -> Result<Self, AccountsDbPluginError> {
        let result = SimplePostgresClient::new(&config);
        match result {
            Ok(client) => Ok(PostgresClientWorker {
                client,
                is_startup_done: false,
            }),
            Err(err) => {
                error!("Error in creating SimplePostgresClient: {}", err);
                Err(err)
            }
        }
    }

    fn do_work(
        &mut self,
        receiver: Receiver<DbWorkItem>,
        exit_worker: Arc<AtomicBool>,
        is_startup_done: Arc<AtomicBool>,
        startup_done_count: Arc<AtomicUsize>,
        panic_on_db_errors: bool,
    ) -> Result<(), AccountsDbPluginError> {
        while !exit_worker.load(Ordering::Relaxed) {
            let mut measure = Measure::start("accountsdb-plugin-postgres-worker-recv");
            let work = receiver.recv_timeout(Duration::from_millis(500));
            measure.stop();
            inc_new_counter_debug!(
                "accountsdb-plugin-postgres-worker-recv-us",
                measure.as_us() as usize,
                100000,
                100000
            );
            match work {
                Ok(work) => match work {
                    DbWorkItem::UpdateAccount(request) => {
                        if let Err(err) = self
                            .client
                            .update_account(request.account, request.is_startup)
                        {
                            error!("Failed to update account: ({})", err);
                            if panic_on_db_errors {
                                abort();
                            }
                        }
                    }
                    DbWorkItem::UpdateSlot(request) => {
                        if let Err(err) = self.client.update_slot_status(
                            request.slot,
                            request.parent,
                            request.slot_status,
                        ) {
                            error!("Failed to update slot: ({})", err);
                            if panic_on_db_errors {
                                abort();
                            }
                        }
                    }
                },
                Err(err) => match err {
                    RecvTimeoutError::Timeout => {
                        if !self.is_startup_done && is_startup_done.load(Ordering::Relaxed) {
                            if let Err(err) = self.client.notify_end_of_startup() {
                                error!("Error in notifying end of startup: ({})", err);
                                if panic_on_db_errors {
                                    abort();
                                }
                            }
                            self.is_startup_done = true;
                            startup_done_count.fetch_add(1, Ordering::Relaxed);
                        }

                        continue;
                    }
                    _ => {
                        error!("Error in receiving the item {:?}", err);
                        if panic_on_db_errors {
                            abort();
                        }
                        break;
                    }
                },
            }
        }
        Ok(())
    }
}
pub struct ParallelPostgresClient {
    workers: Vec<JoinHandle<Result<(), AccountsDbPluginError>>>,
    exit_worker: Arc<AtomicBool>,
    is_startup_done: Arc<AtomicBool>,
    startup_done_count: Arc<AtomicUsize>,
    initialized_worker_count: Arc<AtomicUsize>,
    sender: Sender<DbWorkItem>,
    last_report: AtomicInterval,
}

impl ParallelPostgresClient {
    pub fn new(config: &AccountsDbPluginPostgresConfig) -> Result<Self, AccountsDbPluginError> {
        info!("Creating ParallelPostgresClient...");
        let (sender, receiver) = bounded(MAX_ASYNC_REQUESTS);
        let exit_worker = Arc::new(AtomicBool::new(false));
        let mut workers = Vec::default();
        let is_startup_done = Arc::new(AtomicBool::new(false));
        let startup_done_count = Arc::new(AtomicUsize::new(0));
        let worker_count = config.threads.unwrap_or(DEFAULT_THREADS_COUNT);
        let initialized_worker_count = Arc::new(AtomicUsize::new(0));
        for i in 0..worker_count {
            let cloned_receiver = receiver.clone();
            let exit_clone = exit_worker.clone();
            let is_startup_done_clone = is_startup_done.clone();
            let startup_done_count_clone = startup_done_count.clone();
            let initialized_worker_count_clone = initialized_worker_count.clone();
            let config = config.clone();
            let worker = Builder::new()
                .name(format!("worker-{}", i))
                .spawn(move || -> Result<(), AccountsDbPluginError> {
                    let panic_on_db_errors = *config
                        .panic_on_db_errors
                        .as_ref()
                        .unwrap_or(&DEFAULT_PANIC_ON_DB_ERROR);
                    let result = PostgresClientWorker::new(config);

                    match result {
                        Ok(mut worker) => {
                            initialized_worker_count_clone.fetch_add(1, Ordering::Relaxed);
                            worker.do_work(
                                cloned_receiver,
                                exit_clone,
                                is_startup_done_clone,
                                startup_done_count_clone,
                                panic_on_db_errors,
                            )?;
                            Ok(())
                        }
                        Err(err) => {
                            error!("Error when making connection to database: ({})", err);
                            if panic_on_db_errors {
                                abort();
                            }
                            Err(err)
                        }
                    }
                })
                .unwrap();

            workers.push(worker);
        }

        info!("Created ParallelPostgresClient.");
        Ok(Self {
            last_report: AtomicInterval::default(),
            workers,
            exit_worker,
            is_startup_done,
            startup_done_count,
            initialized_worker_count,
            sender,
        })
    }

    pub fn join(&mut self) -> thread::Result<()> {
        self.exit_worker.store(true, Ordering::Relaxed);
        while !self.workers.is_empty() {
            let worker = self.workers.pop();
            if worker.is_none() {
                break;
            }
            let worker = worker.unwrap();
            let result = worker.join().unwrap();
            if result.is_err() {
                error!("The worker thread has failed: {:?}", result);
            }
        }

        Ok(())
    }

    pub fn update_account(
        &mut self,
        account: &ReplicaAccountInfo,
        slot: u64,
        is_startup: bool,
    ) -> Result<(), AccountsDbPluginError> {
        if self.last_report.should_update(30000) {
            datapoint_debug!(
                "postgres-plugin-stats",
                ("message-queue-length", self.sender.len() as i64, i64),
            );
        }
        let mut measure = Measure::start("accountsdb-plugin-posgres-create-work-item");
        let wrk_item = DbWorkItem::UpdateAccount(UpdateAccountRequest {
            account: DbAccountInfo::new(account, slot),
            is_startup,
        });

        measure.stop();

        inc_new_counter_debug!(
            "accountsdb-plugin-posgres-create-work-item-us",
            measure.as_us() as usize,
            100000,
            100000
        );

        let mut measure = Measure::start("accountsdb-plugin-posgres-send-msg");

        if let Err(err) = self.sender.send(wrk_item) {
            return Err(AccountsDbPluginError::AccountsUpdateError {
                msg: format!(
                    "Failed to update the account {:?}, error: {:?}",
                    bs58::encode(account.pubkey()).into_string(),
                    err
                ),
            });
        }

        measure.stop();
        inc_new_counter_debug!(
            "accountsdb-plugin-posgres-send-msg-us",
            measure.as_us() as usize,
            100000,
            100000
        );

        Ok(())
    }

    pub fn update_slot_status(
        &mut self,
        slot: u64,
        parent: Option<u64>,
        status: SlotStatus,
    ) -> Result<(), AccountsDbPluginError> {
        if let Err(err) = self.sender.send(DbWorkItem::UpdateSlot(UpdateSlotRequest {
            slot,
            parent,
            slot_status: status,
        })) {
            return Err(AccountsDbPluginError::SlotStatusUpdateError {
                msg: format!("Failed to update the slot {:?}, error: {:?}", slot, err),
            });
        }
        Ok(())
    }

    pub fn notify_end_of_startup(&mut self) -> Result<(), AccountsDbPluginError> {
        info!("Notifying the end of startup");
        // Ensure all items in the queue have been received by the workers
        while !self.sender.is_empty() {
            sleep(Duration::from_millis(100));
        }
        self.is_startup_done.store(true, Ordering::Relaxed);

        // Wait for all worker threads to be done with flushing
        while self.startup_done_count.load(Ordering::Relaxed)
            != self.initialized_worker_count.load(Ordering::Relaxed)
        {
            info!(
                "Startup done count: {}, good worker thread count: {}",
                self.startup_done_count.load(Ordering::Relaxed),
                self.initialized_worker_count.load(Ordering::Relaxed)
            );
            sleep(Duration::from_millis(100));
        }

        info!("Done with notifying the end of startup");
        Ok(())
    }
}

pub struct PostgresClientBuilder {}

impl PostgresClientBuilder {
    pub fn build_pararallel_postgres_client(
        config: &AccountsDbPluginPostgresConfig,
    ) -> Result<ParallelPostgresClient, AccountsDbPluginError> {
        ParallelPostgresClient::new(config)
    }

    pub fn build_simple_postgres_client(
        config: &AccountsDbPluginPostgresConfig,
    ) -> Result<SimplePostgresClient, AccountsDbPluginError> {
        SimplePostgresClient::new(config)
    }
}
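
To make the moving parts above concrete, here is a minimal stand-alone sketch of driving the builder directly (assumptions: the crate is importable as solana_accountsdb_plugin_postgres, and a PostgreSQL server with the plugin's schema is reachable on localhost; none of this is in the committed sources):

    use solana_accountsdb_plugin_postgres::{
        accountsdb_plugin_postgres::AccountsDbPluginPostgresConfig,
        postgres_client::PostgresClientBuilder,
    };

    fn main() {
        let config = AccountsDbPluginPostgresConfig {
            host: Some("localhost".to_string()),
            user: Some("solana".to_string()),
            port: None,                   // falls back to DEFAULT_POSTGRES_PORT (5432)
            connection_str: None,         // host/user are used because this is None
            threads: Some(4),             // number of PostgresClientWorker threads
            batch_size: Some(10),         // rows per bulk INSERT during startup
            panic_on_db_errors: Some(false),
        };
        // Spawns the worker pool; each worker opens its own PostgreSQL connection
        // and drains the bounded crossbeam channel fed by update_account().
        let mut client = PostgresClientBuilder::build_pararallel_postgres_client(&config)
            .expect("failed to build the parallel client");
        // Signal exit and wait for the workers to finish.
        client.join().unwrap();
    }
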
|
@@ -2,7 +2,7 @@
|
||||
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
|
||||
edition = "2018"
|
||||
name = "solana-banking-bench"
|
||||
version = "1.7.0"
|
||||
version = "1.8.17"
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
license = "Apache-2.0"
|
||||
homepage = "https://solana.com/"
|
||||
@@ -14,17 +14,18 @@ crossbeam-channel = "0.4"
|
||||
log = "0.4.11"
|
||||
rand = "0.7.0"
|
||||
rayon = "1.5.0"
|
||||
solana-core = { path = "../core", version = "=1.7.0" }
|
||||
solana-clap-utils = { path = "../clap-utils", version = "=1.7.0" }
|
||||
solana-gossip = { path = "../gossip", version = "=1.7.0" }
|
||||
solana-ledger = { path = "../ledger", version = "=1.7.0" }
|
||||
solana-logger = { path = "../logger", version = "=1.7.0" }
|
||||
solana-measure = { path = "../measure", version = "=1.7.0" }
|
||||
solana-perf = { path = "../perf", version = "=1.7.0" }
|
||||
solana-runtime = { path = "../runtime", version = "=1.7.0" }
|
||||
solana-streamer = { path = "../streamer", version = "=1.7.0" }
|
||||
solana-sdk = { path = "../sdk", version = "=1.7.0" }
|
||||
solana-version = { path = "../version", version = "=1.7.0" }
|
||||
solana-core = { path = "../core", version = "=1.8.17" }
|
||||
solana-clap-utils = { path = "../clap-utils", version = "=1.8.17" }
|
||||
solana-gossip = { path = "../gossip", version = "=1.8.17" }
|
||||
solana-ledger = { path = "../ledger", version = "=1.8.17" }
|
||||
solana-logger = { path = "../logger", version = "=1.8.17" }
|
||||
solana-measure = { path = "../measure", version = "=1.8.17" }
|
||||
solana-perf = { path = "../perf", version = "=1.8.17" }
|
||||
solana-poh = { path = "../poh", version = "=1.8.17" }
|
||||
solana-runtime = { path = "../runtime", version = "=1.8.17" }
|
||||
solana-streamer = { path = "../streamer", version = "=1.8.17" }
|
||||
solana-sdk = { path = "../sdk", version = "=1.8.17" }
|
||||
solana-version = { path = "../version", version = "=1.8.17" }
|
||||
|
||||
[package.metadata.docs.rs]
|
||||
targets = ["x86_64-unknown-linux-gnu"]
|
||||
|
@@ -1,37 +1,37 @@
|
||||
#![allow(clippy::integer_arithmetic)]
|
||||
use clap::{crate_description, crate_name, value_t, App, Arg};
|
||||
use crossbeam_channel::unbounded;
use log::*;
use rand::{thread_rng, Rng};
use rayon::prelude::*;
use solana_core::{
banking_stage::{create_test_recorder, BankingStage},
poh_recorder::PohRecorder,
poh_recorder::WorkingBankEntry,
};
use solana_gossip::{cluster_info::ClusterInfo, cluster_info::Node};
use solana_ledger::{
blockstore::Blockstore,
genesis_utils::{create_genesis_config, GenesisConfigInfo},
get_tmp_ledger_path,
};
use solana_measure::measure::Measure;
use solana_perf::packet::to_packets_chunked;
use solana_runtime::{
accounts_background_service::AbsRequestSender, bank::Bank, bank_forks::BankForks,
};
use solana_sdk::{
hash::Hash,
signature::Keypair,
signature::Signature,
system_transaction,
timing::{duration_as_us, timestamp},
transaction::Transaction,
};
use std::{
sync::{atomic::Ordering, mpsc::Receiver, Arc, Mutex},
thread::sleep,
time::{Duration, Instant},
use {
clap::{crate_description, crate_name, value_t, App, Arg},
crossbeam_channel::unbounded,
log::*,
rand::{thread_rng, Rng},
rayon::prelude::*,
solana_core::banking_stage::BankingStage,
solana_gossip::cluster_info::{ClusterInfo, Node},
solana_ledger::{
blockstore::Blockstore,
genesis_utils::{create_genesis_config, GenesisConfigInfo},
get_tmp_ledger_path,
},
solana_measure::measure::Measure,
solana_perf::packet::to_packet_batches,
solana_poh::poh_recorder::{create_test_recorder, PohRecorder, WorkingBankEntry},
solana_runtime::{
accounts_background_service::AbsRequestSender, bank::Bank, bank_forks::BankForks,
cost_model::CostModel,
},
solana_sdk::{
hash::Hash,
signature::{Keypair, Signature},
system_transaction,
timing::{duration_as_us, timestamp},
transaction::Transaction,
},
solana_streamer::socket::SocketAddrSpace,
std::{
sync::{atomic::Ordering, mpsc::Receiver, Arc, Mutex, RwLock},
thread::sleep,
time::{Duration, Instant},
},
};

fn check_txs(
@@ -77,7 +77,7 @@ fn make_accounts_txs(
.into_par_iter()
.map(|_| {
let mut new = dummy.clone();
let sig: Vec<u8> = (0..64).map(|_| thread_rng().gen()).collect();
let sig: Vec<u8> = (0..64).map(|_| thread_rng().gen::<u8>()).collect();
if !same_payer {
new.message.account_keys[0] = solana_sdk::pubkey::new_rand();
}
@@ -168,6 +168,7 @@ fn main() {

let (verified_sender, verified_receiver) = unbounded();
let (vote_sender, vote_receiver) = unbounded();
let (tpu_vote_sender, tpu_vote_receiver) = unbounded();
let (replay_vote_sender, _replay_vote_receiver) = unbounded();
let bank0 = Bank::new(&genesis_config);
let mut bank_forks = BankForks::new(bank0);
@@ -188,7 +189,7 @@ fn main() {
genesis_config.hash(),
);
// Ignore any pesky duplicate signature errors in the case we are using single-payer
let sig: Vec<u8> = (0..64).map(|_| thread_rng().gen()).collect();
let sig: Vec<u8> = (0..64).map(|_| thread_rng().gen::<u8>()).collect();
fund.signatures = vec![Signature::new(&sig[0..64])];
let x = bank.process_transaction(&fund);
x.unwrap();
@@ -198,7 +199,7 @@ fn main() {
if !skip_sanity {
//sanity check, make sure all the transactions can execute sequentially
transactions.iter().for_each(|tx| {
let res = bank.process_transaction(&tx);
let res = bank.process_transaction(tx);
assert!(res.is_ok(), "sanity test transactions error: {:?}", res);
});
bank.clear_signatures();
@@ -210,7 +211,7 @@ fn main() {
bank.clear_signatures();
}

let mut verified: Vec<_> = to_packets_chunked(&transactions, packets_per_chunk);
let mut verified: Vec<_> = to_packet_batches(&transactions, packets_per_chunk);
let ledger_path = get_tmp_ledger_path!();
{
let blockstore = Arc::new(
@@ -218,15 +219,21 @@ fn main() {
);
let (exit, poh_recorder, poh_service, signal_receiver) =
create_test_recorder(&bank, &blockstore, None);
let cluster_info = ClusterInfo::new_with_invalid_keypair(Node::new_localhost().info);
let cluster_info = ClusterInfo::new(
Node::new_localhost().info,
Arc::new(Keypair::new()),
SocketAddrSpace::Unspecified,
);
let cluster_info = Arc::new(cluster_info);
let banking_stage = BankingStage::new(
&cluster_info,
&poh_recorder,
verified_receiver,
tpu_vote_receiver,
vote_receiver,
None,
replay_vote_sender,
Arc::new(RwLock::new(CostModel::default())),
);
poh_recorder.lock().unwrap().set_bank(&bank);

@@ -354,10 +361,10 @@ fn main() {
if bank.slot() > 0 && bank.slot() % 16 == 0 {
for tx in transactions.iter_mut() {
tx.message.recent_blockhash = bank.last_blockhash();
let sig: Vec<u8> = (0..64).map(|_| thread_rng().gen()).collect();
let sig: Vec<u8> = (0..64).map(|_| thread_rng().gen::<u8>()).collect();
tx.signatures[0] = Signature::new(&sig[0..64]);
}
verified = to_packets_chunked(&transactions.clone(), packets_per_chunk);
verified = to_packet_batches(&transactions.clone(), packets_per_chunk);
}

start += chunk_len;
@@ -379,6 +386,7 @@ fn main() {
);

drop(verified_sender);
drop(tpu_vote_sender);
drop(vote_sender);
exit.store(true, Ordering::Relaxed);
banking_stage.join().unwrap();

@@ -1,6 +1,6 @@
[package]
name = "solana-banks-client"
version = "1.7.0"
version = "1.8.17"
description = "Solana banks client"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
@@ -11,20 +11,20 @@ edition = "2018"

[dependencies]
bincode = "1.3.1"
borsh = "0.8.1"
borsh-derive = "0.8.1"
borsh = "0.9.0"
borsh-derive = "0.9.0"
futures = "0.3"
mio = "0.7.6"
solana-banks-interface = { path = "../banks-interface", version = "=1.7.0" }
solana-program = { path = "../sdk/program", version = "=1.7.0" }
solana-sdk = { path = "../sdk", version = "=1.7.0" }
solana-banks-interface = { path = "../banks-interface", version = "=1.8.17" }
solana-program = { path = "../sdk/program", version = "=1.8.17" }
solana-sdk = { path = "../sdk", version = "=1.8.17" }
tarpc = { version = "0.24.1", features = ["full"] }
tokio = { version = "1", features = ["full"] }
tokio-serde = { version = "0.8", features = ["bincode"] }

[dev-dependencies]
solana-runtime = { path = "../runtime", version = "=1.7.0" }
solana-banks-server = { path = "../banks-server", version = "=1.7.0" }
solana-runtime = { path = "../runtime", version = "=1.8.17" }
solana-banks-server = { path = "../banks-server", version = "=1.8.17" }

[lib]
crate-type = ["lib"]

@@ -5,31 +5,38 @@
//! but they are undocumented, may change over time, and are generally more
//! cumbersome to use.

use borsh::BorshDeserialize;
use futures::{future::join_all, Future, FutureExt};
pub use solana_banks_interface::{BanksClient as TarpcClient, TransactionStatus};
use solana_banks_interface::{BanksRequest, BanksResponse};
use solana_program::{
clock::Slot, fee_calculator::FeeCalculator, hash::Hash, program_pack::Pack, pubkey::Pubkey,
rent::Rent, sysvar,
use {
borsh::BorshDeserialize,
futures::{future::join_all, Future, FutureExt},
solana_banks_interface::{BanksRequest, BanksResponse},
solana_program::{
clock::{Clock, Slot},
fee_calculator::FeeCalculator,
hash::Hash,
program_pack::Pack,
pubkey::Pubkey,
rent::Rent,
sysvar::{self, Sysvar},
},
solana_sdk::{
account::{from_account, Account},
commitment_config::CommitmentLevel,
signature::Signature,
transaction::{self, Transaction},
transport,
},
std::io::{self, Error, ErrorKind},
tarpc::{
client::{self, channel::RequestDispatch, NewClient},
context::{self, Context},
rpc::{ClientMessage, Response},
serde_transport::tcp,
Transport,
},
tokio::{net::ToSocketAddrs, time::Duration},
tokio_serde::formats::Bincode,
};
use solana_sdk::{
account::{from_account, Account},
commitment_config::CommitmentLevel,
signature::Signature,
transaction::{self, Transaction},
transport,
};
use std::io::{self, Error, ErrorKind};
use tarpc::{
client::{self, channel::RequestDispatch, NewClient},
context::{self, Context},
rpc::{ClientMessage, Response},
serde_transport::tcp,
Transport,
};
use tokio::{net::ToSocketAddrs, time::Duration};
use tokio_serde::formats::Bincode;

// This exists only for backward compatibility
pub trait BanksClientExt {}
@@ -63,7 +70,7 @@ impl BanksClient {
&mut self,
ctx: Context,
commitment: CommitmentLevel,
) -> impl Future<Output = io::Result<(FeeCalculator, Hash, Slot)>> + '_ {
) -> impl Future<Output = io::Result<(FeeCalculator, Hash, u64)>> + '_ {
self.inner
.get_fees_with_commitment_and_context(ctx, commitment)
}
@@ -85,6 +92,14 @@ impl BanksClient {
self.inner.get_slot_with_context(ctx, commitment)
}

pub fn get_block_height_with_context(
&mut self,
ctx: Context,
commitment: CommitmentLevel,
) -> impl Future<Output = io::Result<Slot>> + '_ {
self.inner.get_block_height_with_context(ctx, commitment)
}

pub fn process_transaction_with_commitment_and_context(
&mut self,
ctx: Context,
@@ -115,24 +130,39 @@ impl BanksClient {
self.send_transaction_with_context(context::current(), transaction)
}

/// Return the cluster clock
pub fn get_clock(&mut self) -> impl Future<Output = io::Result<Clock>> + '_ {
self.get_account(sysvar::clock::id()).map(|result| {
let clock_sysvar = result?
.ok_or_else(|| io::Error::new(io::ErrorKind::Other, "Clock sysvar not present"))?;
from_account::<Clock, _>(&clock_sysvar).ok_or_else(|| {
io::Error::new(io::ErrorKind::Other, "Failed to deserialize Clock sysvar")
})
})
}

/// Return the fee parameters associated with a recent, rooted blockhash. The cluster
/// will use the transaction's blockhash to look up these same fee parameters and
/// use them to calculate the transaction fee.
pub fn get_fees(
&mut self,
) -> impl Future<Output = io::Result<(FeeCalculator, Hash, Slot)>> + '_ {
) -> impl Future<Output = io::Result<(FeeCalculator, Hash, u64)>> + '_ {
self.get_fees_with_commitment_and_context(context::current(), CommitmentLevel::default())
}

/// Return the cluster Sysvar
pub fn get_sysvar<T: Sysvar>(&mut self) -> impl Future<Output = io::Result<T>> + '_ {
self.get_account(T::id()).map(|result| {
let sysvar = result?
.ok_or_else(|| io::Error::new(io::ErrorKind::Other, "Sysvar not present"))?;
from_account::<T, _>(&sysvar)
.ok_or_else(|| io::Error::new(io::ErrorKind::Other, "Failed to deserialize sysvar"))
})
}

/// Return the cluster rent
pub fn get_rent(&mut self) -> impl Future<Output = io::Result<Rent>> + '_ {
self.get_account(sysvar::rent::id()).map(|result| {
let rent_sysvar = result?
.ok_or_else(|| io::Error::new(io::ErrorKind::Other, "Rent sysvar not present"))?;
from_account::<Rent, _>(&rent_sysvar).ok_or_else(|| {
io::Error::new(io::ErrorKind::Other, "Failed to deserialize Rent sysvar")
})
})
self.get_sysvar::<Rent>()
}

/// Return a recent, rooted blockhash from the server. The cluster will only accept
@@ -192,12 +222,18 @@ impl BanksClient {
self.process_transactions_with_commitment(transactions, CommitmentLevel::default())
}

/// Return the most recent rooted slot height. All transactions at or below this height
/// are said to be finalized. The cluster will not fork to a higher slot height.
/// Return the most recent rooted slot. All transactions at or below this slot
/// are said to be finalized. The cluster will not fork to a higher slot.
pub fn get_root_slot(&mut self) -> impl Future<Output = io::Result<Slot>> + '_ {
self.get_slot_with_context(context::current(), CommitmentLevel::default())
}

/// Return the most recent rooted block height. All transactions at or below this height
/// are said to be finalized. The cluster will not fork to a higher block height.
pub fn get_root_block_height(&mut self) -> impl Future<Output = io::Result<Slot>> + '_ {
self.get_block_height_with_context(context::current(), CommitmentLevel::default())
}

/// Return the account at the given address at the slot corresponding to the given
/// commitment level. If the account is not found, None is returned.
pub fn get_account_with_commitment(
@@ -313,16 +349,18 @@ pub async fn start_tcp_client<T: ToSocketAddrs>(addr: T) -> io::Result<BanksClie

#[cfg(test)]
mod tests {
use super::*;
use solana_banks_server::banks_server::start_local_server;
use solana_runtime::{
bank::Bank, bank_forks::BankForks, commitment::BlockCommitmentCache,
genesis_utils::create_genesis_config,
use {
super::*,
solana_banks_server::banks_server::start_local_server,
solana_runtime::{
bank::Bank, bank_forks::BankForks, commitment::BlockCommitmentCache,
genesis_utils::create_genesis_config,
},
solana_sdk::{message::Message, signature::Signer, system_instruction},
std::sync::{Arc, RwLock},
tarpc::transport,
tokio::{runtime::Runtime, time::sleep},
};
use solana_sdk::{message::Message, signature::Signer, system_instruction};
use std::sync::{Arc, RwLock};
use tarpc::transport;
use tokio::{runtime::Runtime, time::sleep};

#[test]
fn test_banks_client_new() {
@@ -350,7 +388,9 @@ mod tests {
let message = Message::new(&[instruction], Some(&mint_pubkey));

Runtime::new()?.block_on(async {
let client_transport = start_local_server(bank_forks, block_commitment_cache).await;
let client_transport =
start_local_server(bank_forks, block_commitment_cache, Duration::from_millis(1))
.await;
let mut banks_client = start_client(client_transport).await?;

let recent_blockhash = banks_client.get_recent_blockhash().await?;
@@ -377,13 +417,15 @@ mod tests {

let mint_pubkey = &genesis.mint_keypair.pubkey();
let bob_pubkey = solana_sdk::pubkey::new_rand();
let instruction = system_instruction::transfer(&mint_pubkey, &bob_pubkey, 1);
let message = Message::new(&[instruction], Some(&mint_pubkey));
let instruction = system_instruction::transfer(mint_pubkey, &bob_pubkey, 1);
let message = Message::new(&[instruction], Some(mint_pubkey));

Runtime::new()?.block_on(async {
let client_transport = start_local_server(bank_forks, block_commitment_cache).await;
let client_transport =
start_local_server(bank_forks, block_commitment_cache, Duration::from_millis(1))
.await;
let mut banks_client = start_client(client_transport).await?;
let (_, recent_blockhash, last_valid_slot) = banks_client.get_fees().await?;
let (_, recent_blockhash, last_valid_block_height) = banks_client.get_fees().await?;
let transaction = Transaction::new(&[&genesis.mint_keypair], message, recent_blockhash);
let signature = transaction.signatures[0];
banks_client.send_transaction(transaction).await?;
@@ -391,8 +433,8 @@ mod tests {
let mut status = banks_client.get_transaction_status(signature).await?;

while status.is_none() {
let root_slot = banks_client.get_root_slot().await?;
if root_slot > last_valid_slot {
let root_block_height = banks_client.get_root_block_height().await?;
if root_block_height > last_valid_block_height {
break;
}
sleep(Duration::from_millis(100)).await;

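Note: the generic `get_sysvar::<T>()` helper added above subsumes the per-sysvar getters (`get_rent` now simply delegates to it), and `get_fees` now reports a last valid block height rather than a slot. A minimal usage sketch, assuming a `banks_client` already obtained via `start_client` inside a Tokio runtime as in the tests above:

// Sketch only: `banks_client` is assumed to come from start_client(...) as in the tests above.
let rent: Rent = banks_client.get_sysvar::<Rent>().await?;
let clock = banks_client.get_clock().await?;
let (_fee_calculator, recent_blockhash, last_valid_block_height) = banks_client.get_fees().await?;
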
@@ -1,6 +1,6 @@
[package]
name = "solana-banks-interface"
version = "1.7.0"
version = "1.8.17"
description = "Solana banks RPC interface"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
@@ -12,7 +12,7 @@ edition = "2018"
[dependencies]
mio = "0.7.6"
serde = { version = "1.0.122", features = ["derive"] }
solana-sdk = { path = "../sdk", version = "=1.7.0" }
solana-sdk = { path = "../sdk", version = "=1.8.17" }
tarpc = { version = "0.24.1", features = ["full"] }

[dev-dependencies]

@@ -1,13 +1,15 @@
use serde::{Deserialize, Serialize};
use solana_sdk::{
account::Account,
clock::Slot,
commitment_config::CommitmentLevel,
fee_calculator::FeeCalculator,
hash::Hash,
pubkey::Pubkey,
signature::Signature,
transaction::{self, Transaction, TransactionError},
use {
serde::{Deserialize, Serialize},
solana_sdk::{
account::Account,
clock::Slot,
commitment_config::CommitmentLevel,
fee_calculator::FeeCalculator,
hash::Hash,
pubkey::Pubkey,
signature::Signature,
transaction::{self, Transaction, TransactionError},
},
};

#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
@@ -34,6 +36,7 @@ pub trait Banks {
async fn get_transaction_status_with_context(signature: Signature)
-> Option<TransactionStatus>;
async fn get_slot_with_context(commitment: CommitmentLevel) -> Slot;
async fn get_block_height_with_context(commitment: CommitmentLevel) -> u64;
async fn process_transaction_with_commitment_and_context(
transaction: Transaction,
commitment: CommitmentLevel,
@@ -46,8 +49,10 @@ pub trait Banks {

#[cfg(test)]
mod tests {
use super::*;
use tarpc::{client, transport};
use {
super::*,
tarpc::{client, transport},
};

#[test]
fn test_banks_client_new() {

@@ -1,6 +1,6 @@
[package]
name = "solana-banks-server"
version = "1.7.0"
version = "1.8.17"
description = "Solana banks server"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
@@ -14,10 +14,10 @@ bincode = "1.3.1"
futures = "0.3"
log = "0.4.11"
mio = "0.7.6"
solana-banks-interface = { path = "../banks-interface", version = "=1.7.0" }
solana-runtime = { path = "../runtime", version = "=1.7.0" }
solana-sdk = { path = "../sdk", version = "=1.7.0" }
solana-metrics = { path = "../metrics", version = "=1.7.0" }
solana-banks-interface = { path = "../banks-interface", version = "=1.8.17" }
solana-runtime = { path = "../runtime", version = "=1.8.17" }
solana-sdk = { path = "../sdk", version = "=1.8.17" }
solana-metrics = { path = "../metrics", version = "=1.8.17" }
tarpc = { version = "0.24.1", features = ["full"] }
tokio = { version = "1", features = ["full"] }
tokio-serde = { version = "0.8", features = ["bincode"] }

@@ -1,48 +1,52 @@
use crate::send_transaction_service::{SendTransactionService, TransactionInfo};
use bincode::{deserialize, serialize};
use futures::{
future,
prelude::stream::{self, StreamExt},
};
use solana_banks_interface::{
Banks, BanksRequest, BanksResponse, TransactionConfirmationStatus, TransactionStatus,
};
use solana_runtime::{bank::Bank, bank_forks::BankForks, commitment::BlockCommitmentCache};
use solana_sdk::{
account::Account,
clock::Slot,
commitment_config::CommitmentLevel,
fee_calculator::FeeCalculator,
hash::Hash,
pubkey::Pubkey,
signature::Signature,
transaction::{self, Transaction},
};
use std::{
io,
net::{Ipv4Addr, SocketAddr},
sync::{
mpsc::{channel, Receiver, Sender},
Arc, RwLock,
use {
crate::send_transaction_service::{SendTransactionService, TransactionInfo},
bincode::{deserialize, serialize},
futures::{
future,
prelude::stream::{self, StreamExt},
},
thread::Builder,
time::Duration,
solana_banks_interface::{
Banks, BanksRequest, BanksResponse, TransactionConfirmationStatus, TransactionStatus,
},
solana_runtime::{bank::Bank, bank_forks::BankForks, commitment::BlockCommitmentCache},
solana_sdk::{
account::Account,
clock::Slot,
commitment_config::CommitmentLevel,
feature_set::FeatureSet,
fee_calculator::FeeCalculator,
hash::Hash,
pubkey::Pubkey,
signature::Signature,
transaction::{self, Transaction},
},
std::{
io,
net::{Ipv4Addr, SocketAddr},
sync::{
mpsc::{channel, Receiver, Sender},
Arc, RwLock,
},
thread::Builder,
time::Duration,
},
tarpc::{
context::Context,
rpc::{transport::channel::UnboundedChannel, ClientMessage, Response},
serde_transport::tcp,
server::{self, Channel, Handler},
transport,
},
tokio::time::sleep,
tokio_serde::formats::Bincode,
};
use tarpc::{
context::Context,
rpc::{transport::channel::UnboundedChannel, ClientMessage, Response},
serde_transport::tcp,
server::{self, Channel, Handler},
transport,
};
use tokio::time::sleep;
use tokio_serde::formats::Bincode;

#[derive(Clone)]
struct BanksServer {
bank_forks: Arc<RwLock<BankForks>>,
block_commitment_cache: Arc<RwLock<BlockCommitmentCache>>,
transaction_sender: Sender<TransactionInfo>,
poll_signature_status_sleep_duration: Duration,
}

impl BanksServer {
@@ -54,11 +58,13 @@ impl BanksServer {
bank_forks: Arc<RwLock<BankForks>>,
block_commitment_cache: Arc<RwLock<BlockCommitmentCache>>,
transaction_sender: Sender<TransactionInfo>,
poll_signature_status_sleep_duration: Duration,
) -> Self {
Self {
bank_forks,
block_commitment_cache,
transaction_sender,
poll_signature_status_sleep_duration,
}
}

@@ -81,6 +87,7 @@ impl BanksServer {
fn new_loopback(
bank_forks: Arc<RwLock<BankForks>>,
block_commitment_cache: Arc<RwLock<BlockCommitmentCache>>,
poll_signature_status_sleep_duration: Duration,
) -> Self {
let (transaction_sender, transaction_receiver) = channel();
let bank = bank_forks.read().unwrap().working_bank();
@@ -95,7 +102,12 @@ impl BanksServer {
.name("solana-bank-forks-client".to_string())
.spawn(move || Self::run(server_bank_forks, transaction_receiver))
.unwrap();
Self::new(bank_forks, block_commitment_cache, transaction_sender)
Self::new(
bank_forks,
block_commitment_cache,
transaction_sender,
poll_signature_status_sleep_duration,
)
}

fn slot(&self, commitment: CommitmentLevel) -> Slot {
@@ -113,16 +125,16 @@ impl BanksServer {
self,
signature: &Signature,
blockhash: &Hash,
last_valid_slot: Slot,
last_valid_block_height: u64,
commitment: CommitmentLevel,
) -> Option<transaction::Result<()>> {
let mut status = self
.bank(commitment)
.get_signature_status_with_blockhash(signature, blockhash);
while status.is_none() {
sleep(Duration::from_millis(200)).await;
sleep(self.poll_signature_status_sleep_duration).await;
let bank = self.bank(commitment);
if bank.slot() > last_valid_slot {
if bank.block_height() > last_valid_block_height {
break;
}
status = bank.get_signature_status_with_blockhash(signature, blockhash);
@@ -131,10 +143,13 @@ impl BanksServer {
}
}

fn verify_transaction(transaction: &Transaction) -> transaction::Result<()> {
fn verify_transaction(
transaction: &Transaction,
feature_set: &Arc<FeatureSet>,
) -> transaction::Result<()> {
if let Err(err) = transaction.verify() {
Err(err)
} else if let Err(err) = transaction.verify_precompiles() {
} else if let Err(err) = transaction.verify_precompiles(feature_set) {
Err(err)
} else {
Ok(())
@@ -145,16 +160,19 @@ fn verify_transaction(transaction: &Transaction) -> transaction::Result<()> {
impl Banks for BanksServer {
async fn send_transaction_with_context(self, _: Context, transaction: Transaction) {
let blockhash = &transaction.message.recent_blockhash;
let last_valid_slot = self
let last_valid_block_height = self
.bank_forks
.read()
.unwrap()
.root_bank()
.get_blockhash_last_valid_slot(&blockhash)
.get_blockhash_last_valid_block_height(blockhash)
.unwrap();
let signature = transaction.signatures.get(0).cloned().unwrap_or_default();
let info =
TransactionInfo::new(signature, serialize(&transaction).unwrap(), last_valid_slot);
let info = TransactionInfo::new(
signature,
serialize(&transaction).unwrap(),
last_valid_block_height,
);
self.transaction_sender.send(info).unwrap();
}

@@ -162,11 +180,13 @@ impl Banks for BanksServer {
self,
_: Context,
commitment: CommitmentLevel,
) -> (FeeCalculator, Hash, Slot) {
) -> (FeeCalculator, Hash, u64) {
let bank = self.bank(commitment);
let (blockhash, fee_calculator) = bank.last_blockhash_with_fee_calculator();
let last_valid_slot = bank.get_blockhash_last_valid_slot(&blockhash).unwrap();
(fee_calculator, blockhash, last_valid_slot)
let last_valid_block_height = bank
.get_blockhash_last_valid_block_height(&blockhash)
.unwrap();
(fee_calculator, blockhash, last_valid_block_height)
}

async fn get_transaction_status_with_context(
@@ -209,29 +229,33 @@ impl Banks for BanksServer {
self.slot(commitment)
}

async fn get_block_height_with_context(self, _: Context, commitment: CommitmentLevel) -> u64 {
self.bank(commitment).block_height()
}

async fn process_transaction_with_commitment_and_context(
self,
_: Context,
transaction: Transaction,
commitment: CommitmentLevel,
) -> Option<transaction::Result<()>> {
if let Err(err) = verify_transaction(&transaction) {
if let Err(err) = verify_transaction(&transaction, &self.bank(commitment).feature_set) {
return Some(Err(err));
}

let blockhash = &transaction.message.recent_blockhash;
let last_valid_slot = self
.bank_forks
.read()
.unwrap()
.root_bank()
.get_blockhash_last_valid_slot(blockhash)
let last_valid_block_height = self
.bank(commitment)
.get_blockhash_last_valid_block_height(blockhash)
.unwrap();
let signature = transaction.signatures.get(0).cloned().unwrap_or_default();
let info =
TransactionInfo::new(signature, serialize(&transaction).unwrap(), last_valid_slot);
let info = TransactionInfo::new(
signature,
serialize(&transaction).unwrap(),
last_valid_block_height,
);
self.transaction_sender.send(info).unwrap();
self.poll_signature_status(&signature, blockhash, last_valid_slot, commitment)
self.poll_signature_status(&signature, blockhash, last_valid_block_height, commitment)
.await
}

@@ -249,8 +273,13 @@ impl Banks for BanksServer {
pub async fn start_local_server(
bank_forks: Arc<RwLock<BankForks>>,
block_commitment_cache: Arc<RwLock<BlockCommitmentCache>>,
poll_signature_status_sleep_duration: Duration,
) -> UnboundedChannel<Response<BanksResponse>, ClientMessage<BanksRequest>> {
let banks_server = BanksServer::new_loopback(bank_forks, block_commitment_cache);
let banks_server = BanksServer::new_loopback(
bank_forks,
block_commitment_cache,
poll_signature_status_sleep_duration,
);
let (client_transport, server_transport) = transport::channel::unbounded();
let server = server::new(server::Config::default())
.incoming(stream::once(future::ready(server_transport)))
@@ -285,8 +314,12 @@ pub async fn start_tcp_server(

SendTransactionService::new(tpu_addr, &bank_forks, receiver);

let server =
BanksServer::new(bank_forks.clone(), block_commitment_cache.clone(), sender);
let server = BanksServer::new(
bank_forks.clone(),
block_commitment_cache.clone(),
sender,
Duration::from_millis(200),
);
chan.respond_with(server.serve()).execute()
})
// Max 10 channels.

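Note: with the changes above, `start_local_server` takes the signature-status polling interval explicitly, and `verify_transaction` consults the bank's `FeatureSet` when checking precompiles. A minimal call sketch, assuming `bank_forks` and `block_commitment_cache` are already wrapped in `Arc<RwLock<...>>` as in the client tests earlier in this compare:

// Sketch only: mirrors the call sites shown in this compare.
let client_transport =
    start_local_server(bank_forks, block_commitment_cache, Duration::from_millis(1)).await;
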
@@ -1,21 +1,23 @@
//! The `rpc_banks_service` module implements the Solana Banks RPC API.

use crate::banks_server::start_tcp_server;
use futures::{future::FutureExt, pin_mut, prelude::stream::StreamExt, select};
use solana_runtime::{bank_forks::BankForks, commitment::BlockCommitmentCache};
use std::{
net::SocketAddr,
sync::{
atomic::{AtomicBool, Ordering},
Arc, RwLock,
use {
crate::banks_server::start_tcp_server,
futures::{future::FutureExt, pin_mut, prelude::stream::StreamExt, select},
solana_runtime::{bank_forks::BankForks, commitment::BlockCommitmentCache},
std::{
net::SocketAddr,
sync::{
atomic::{AtomicBool, Ordering},
Arc, RwLock,
},
thread::{self, Builder, JoinHandle},
},
thread::{self, Builder, JoinHandle},
tokio::{
runtime::Runtime,
time::{self, Duration},
},
tokio_stream::wrappers::IntervalStream,
};
use tokio::{
runtime::Runtime,
time::{self, Duration},
};
use tokio_stream::wrappers::IntervalStream;

pub struct RpcBanksService {
thread_hdl: JoinHandle<()>,
@@ -101,8 +103,7 @@ impl RpcBanksService {

#[cfg(test)]
mod tests {
use super::*;
use solana_runtime::bank::Bank;
use {super::*, solana_runtime::bank::Bank};

#[test]
fn test_rpc_banks_server_exit() {

@@ -1,17 +1,19 @@
// TODO: Merge this implementation with the one at `core/src/send_transaction_service.rs`
use log::*;
use solana_metrics::{datapoint_warn, inc_new_counter_info};
use solana_runtime::{bank::Bank, bank_forks::BankForks};
use solana_sdk::{clock::Slot, signature::Signature};
use std::{
collections::HashMap,
net::{SocketAddr, UdpSocket},
sync::{
mpsc::{Receiver, RecvTimeoutError},
Arc, RwLock,
use {
log::*,
solana_metrics::{datapoint_warn, inc_new_counter_info},
solana_runtime::{bank::Bank, bank_forks::BankForks},
solana_sdk::signature::Signature,
std::{
collections::HashMap,
net::{SocketAddr, UdpSocket},
sync::{
mpsc::{Receiver, RecvTimeoutError},
Arc, RwLock,
},
thread::{self, Builder, JoinHandle},
time::{Duration, Instant},
},
thread::{self, Builder, JoinHandle},
time::{Duration, Instant},
};

/// Maximum size of the transaction queue
@@ -24,15 +26,19 @@ pub struct SendTransactionService {
pub struct TransactionInfo {
pub signature: Signature,
pub wire_transaction: Vec<u8>,
pub last_valid_slot: Slot,
pub last_valid_block_height: u64,
}

impl TransactionInfo {
pub fn new(signature: Signature, wire_transaction: Vec<u8>, last_valid_slot: Slot) -> Self {
pub fn new(
signature: Signature,
wire_transaction: Vec<u8>,
last_valid_block_height: u64,
) -> Self {
Self {
signature,
wire_transaction,
last_valid_slot,
last_valid_block_height,
}
}
}
@@ -124,7 +130,7 @@ impl SendTransactionService {
result.rooted += 1;
inc_new_counter_info!("send_transaction_service-rooted", 1);
false
} else if transaction_info.last_valid_slot < root_bank.slot() {
} else if transaction_info.last_valid_block_height < root_bank.block_height() {
info!("Dropping expired transaction: {}", signature);
result.expired += 1;
inc_new_counter_info!("send_transaction_service-expired", 1);
@@ -138,8 +144,8 @@ impl SendTransactionService {
result.retried += 1;
inc_new_counter_info!("send_transaction_service-retry", 1);
Self::send_transaction(
&send_socket,
&tpu_address,
send_socket,
tpu_address,
&transaction_info.wire_transaction,
);
true
@@ -179,12 +185,14 @@ impl SendTransactionService {

#[cfg(test)]
mod test {
use super::*;
use solana_sdk::{
genesis_config::create_genesis_config, pubkey::Pubkey, signature::Signer,
system_transaction,
use {
super::*,
solana_sdk::{
genesis_config::create_genesis_config, pubkey::Pubkey, signature::Signer,
system_transaction,
},
std::sync::mpsc::channel,
};
use std::sync::mpsc::channel;

#[test]
fn service_exit() {

@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2018"
name = "solana-bench-exchange"
version = "1.7.0"
version = "1.8.17"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -18,22 +18,23 @@ rand = "0.7.0"
rayon = "1.5.0"
serde_json = "1.0.56"
serde_yaml = "0.8.13"
solana-clap-utils = { path = "../clap-utils", version = "=1.7.0" }
solana-core = { path = "../core", version = "=1.7.0" }
solana-genesis = { path = "../genesis", version = "=1.7.0" }
solana-client = { path = "../client", version = "=1.7.0" }
solana-exchange-program = { path = "../programs/exchange", version = "=1.7.0" }
solana-faucet = { path = "../faucet", version = "=1.7.0" }
solana-gossip = { path = "../gossip", version = "=1.7.0" }
solana-logger = { path = "../logger", version = "=1.7.0" }
solana-metrics = { path = "../metrics", version = "=1.7.0" }
solana-net-utils = { path = "../net-utils", version = "=1.7.0" }
solana-runtime = { path = "../runtime", version = "=1.7.0" }
solana-sdk = { path = "../sdk", version = "=1.7.0" }
solana-version = { path = "../version", version = "=1.7.0" }
solana-clap-utils = { path = "../clap-utils", version = "=1.8.17" }
solana-core = { path = "../core", version = "=1.8.17" }
solana-genesis = { path = "../genesis", version = "=1.8.17" }
solana-client = { path = "../client", version = "=1.8.17" }
solana-exchange-program = { path = "../programs/exchange", version = "=1.8.17" }
solana-faucet = { path = "../faucet", version = "=1.8.17" }
solana-gossip = { path = "../gossip", version = "=1.8.17" }
solana-logger = { path = "../logger", version = "=1.8.17" }
solana-metrics = { path = "../metrics", version = "=1.8.17" }
solana-net-utils = { path = "../net-utils", version = "=1.8.17" }
solana-runtime = { path = "../runtime", version = "=1.8.17" }
solana-sdk = { path = "../sdk", version = "=1.8.17" }
solana-streamer = { path = "../streamer", version = "=1.8.17" }
solana-version = { path = "../version", version = "=1.8.17" }

[dev-dependencies]
solana-local-cluster = { path = "../local-cluster", version = "=1.7.0" }
solana-local-cluster = { path = "../local-cluster", version = "=1.8.17" }

[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]

@@ -1,43 +1,45 @@
#![allow(clippy::useless_attribute)]
#![allow(clippy::integer_arithmetic)]

use crate::order_book::*;
use itertools::izip;
use log::*;
use rand::{thread_rng, Rng};
use rayon::prelude::*;
use solana_client::perf_utils::{sample_txs, SampleStats};
use solana_core::gen_keys::GenKeys;
use solana_exchange_program::{exchange_instruction, exchange_state::*, id};
use solana_faucet::faucet::request_airdrop_transaction;
use solana_genesis::Base64Account;
use solana_metrics::datapoint_info;
use solana_sdk::{
client::{Client, SyncClient},
commitment_config::CommitmentConfig,
message::Message,
pubkey::Pubkey,
signature::{Keypair, Signer},
timing::{duration_as_ms, duration_as_s},
transaction::Transaction,
{system_instruction, system_program},
};
use std::{
cmp,
collections::{HashMap, VecDeque},
fs::File,
io::prelude::*,
mem,
net::SocketAddr,
path::Path,
process::exit,
sync::{
atomic::{AtomicBool, AtomicUsize, Ordering},
mpsc::{channel, Receiver, Sender},
Arc, RwLock,
use {
crate::order_book::*,
itertools::izip,
log::*,
rand::{thread_rng, Rng},
rayon::prelude::*,
solana_client::perf_utils::{sample_txs, SampleStats},
solana_core::gen_keys::GenKeys,
solana_exchange_program::{exchange_instruction, exchange_state::*, id},
solana_faucet::faucet::request_airdrop_transaction,
solana_genesis::Base64Account,
solana_metrics::datapoint_info,
solana_sdk::{
client::{Client, SyncClient},
commitment_config::CommitmentConfig,
message::Message,
pubkey::Pubkey,
signature::{Keypair, Signer},
system_instruction, system_program,
timing::{duration_as_ms, duration_as_s},
transaction::Transaction,
},
std::{
cmp,
collections::{HashMap, VecDeque},
fs::File,
io::prelude::*,
mem,
net::SocketAddr,
path::Path,
process::exit,
sync::{
atomic::{AtomicBool, AtomicUsize, Ordering},
mpsc::{channel, Receiver, Sender},
Arc, RwLock,
},
thread::{sleep, Builder},
time::{Duration, Instant},
},
thread::{sleep, Builder},
time::{Duration, Instant},
};

// TODO Chunk length as specified results in a bunch of failures, divide by 10 helps...
@@ -451,13 +453,13 @@ fn swapper<T>(
let to_swap_txs: Vec<_> = to_swap
.par_iter()
.map(|(signer, swap, profit)| {
let s: &Keypair = &signer;
let s: &Keypair = signer;
let owner = &signer.pubkey();
let instruction = exchange_instruction::swap_request(
owner,
&swap.0.pubkey,
&swap.1.pubkey,
&profit,
profit,
);
let message = Message::new(&[instruction], Some(&s.pubkey()));
Transaction::new(&[s], message, blockhash)
@@ -600,7 +602,7 @@ fn trader<T>(
src,
),
];
let message = Message::new(&instructions, Some(&owner_pubkey));
let message = Message::new(&instructions, Some(owner_pubkey));
Transaction::new(&[owner.as_ref(), trade], message, blockhash)
})
.collect();
@@ -739,7 +741,7 @@ pub fn fund_keys<T: Client>(client: &T, source: &Keypair, dests: &[Arc<Keypair>]
let mut to_fund_txs: Vec<_> = chunk
.par_iter()
.map(|(k, m)| {
let instructions = system_instruction::transfer_many(&k.pubkey(), &m);
let instructions = system_instruction::transfer_many(&k.pubkey(), m);
let message = Message::new(&instructions, Some(&k.pubkey()));
(k.clone(), Transaction::new_unsigned(message))
})
@@ -777,7 +779,7 @@ pub fn fund_keys<T: Client>(client: &T, source: &Keypair, dests: &[Arc<Keypair>]
let mut waits = 0;
loop {
sleep(Duration::from_millis(200));
to_fund_txs.retain(|(_, tx)| !verify_funding_transfer(client, &tx, amount));
to_fund_txs.retain(|(_, tx)| !verify_funding_transfer(client, tx, amount));
if to_fund_txs.is_empty() {
break;
}
@@ -836,7 +838,7 @@ pub fn create_token_accounts<T: Client>(
);
let request_ix =
exchange_instruction::account_request(owner_pubkey, &new_keypair.pubkey());
let message = Message::new(&[create_ix, request_ix], Some(&owner_pubkey));
let message = Message::new(&[create_ix, request_ix], Some(owner_pubkey));
(
(from_keypair, new_keypair),
Transaction::new_unsigned(message),
@@ -872,7 +874,7 @@ pub fn create_token_accounts<T: Client>(
let mut waits = 0;
while !to_create_txs.is_empty() {
sleep(Duration::from_millis(200));
to_create_txs.retain(|(_, tx)| !verify_transaction(client, &tx));
to_create_txs.retain(|(_, tx)| !verify_transaction(client, tx));
if to_create_txs.is_empty() {
break;
}
@@ -958,7 +960,7 @@ fn compute_and_report_stats(maxes: &Arc<RwLock<Vec<(String, SampleStats)>>>, tot

fn generate_keypairs(num: u64) -> Vec<Keypair> {
let mut seed = [0_u8; 32];
seed.copy_from_slice(&Keypair::new().pubkey().as_ref());
seed.copy_from_slice(Keypair::new().pubkey().as_ref());
let mut rnd = GenKeys::new(seed);
rnd.gen_n_keypairs(num)
}
@@ -989,7 +991,7 @@ pub fn airdrop_lamports<T: Client>(
let (blockhash, _fee_calculator, _last_valid_slot) = client
.get_recent_blockhash_with_commitment(CommitmentConfig::processed())
.expect("Failed to get blockhash");
match request_airdrop_transaction(&faucet_addr, &id.pubkey(), amount_to_drop, blockhash) {
match request_airdrop_transaction(faucet_addr, &id.pubkey(), amount_to_drop, blockhash) {
Ok(transaction) => {
let signature = client.async_send_transaction(transaction).unwrap();

@@ -1,10 +1,10 @@
use clap::{crate_description, crate_name, value_t, App, Arg, ArgMatches};
use solana_core::gen_keys::GenKeys;
use solana_faucet::faucet::FAUCET_PORT;
use solana_sdk::signature::{read_keypair_file, Keypair};
use std::net::SocketAddr;
use std::process::exit;
use std::time::Duration;
use {
clap::{crate_description, crate_name, value_t, App, Arg, ArgMatches},
solana_core::gen_keys::GenKeys,
solana_faucet::faucet::FAUCET_PORT,
solana_sdk::signature::{read_keypair_file, Keypair},
std::{net::SocketAddr, process::exit, time::Duration},
};

pub struct Config {
pub entrypoint_addr: SocketAddr,

@@ -3,10 +3,13 @@ pub mod bench;
mod cli;
pub mod order_book;

use crate::bench::{airdrop_lamports, create_client_accounts_file, do_bench_exchange, Config};
use log::*;
use solana_gossip::gossip_service::{discover_cluster, get_multi_client};
use solana_sdk::signature::Signer;
use {
crate::bench::{airdrop_lamports, create_client_accounts_file, do_bench_exchange, Config},
log::*,
solana_gossip::gossip_service::{discover_cluster, get_multi_client},
solana_sdk::signature::Signer,
solana_streamer::socket::SocketAddrSpace,
};

fn main() {
solana_logger::setup();
@@ -55,11 +58,12 @@ fn main() {
);
} else {
info!("Connecting to the cluster");
let nodes = discover_cluster(&entrypoint_addr, num_nodes).unwrap_or_else(|_| {
panic!("Failed to discover nodes");
});
let nodes = discover_cluster(&entrypoint_addr, num_nodes, SocketAddrSpace::Unspecified)
.unwrap_or_else(|_| {
panic!("Failed to discover nodes");
});

let (client, num_clients) = get_multi_client(&nodes);
let (client, num_clients) = get_multi_client(&nodes, &SocketAddrSpace::Unspecified);

info!("{} nodes found", num_clients);
if num_clients < num_nodes {

@@ -1,11 +1,13 @@
use itertools::EitherOrBoth::{Both, Left, Right};
use itertools::Itertools;
use log::*;
use solana_exchange_program::exchange_state::*;
use solana_sdk::pubkey::Pubkey;
use std::cmp::Ordering;
use std::collections::BinaryHeap;
use std::{error, fmt};
use {
itertools::{
EitherOrBoth::{Both, Left, Right},
Itertools,
},
log::*,
solana_exchange_program::exchange_state::*,
solana_sdk::pubkey::Pubkey,
std::{cmp::Ordering, collections::BinaryHeap, error, fmt},
};

#[derive(Clone, Debug, Eq, PartialEq)]
pub struct ToOrder {

@@ -1,21 +1,24 @@
use log::*;
use solana_bench_exchange::bench::{airdrop_lamports, do_bench_exchange, Config};
use solana_core::validator::ValidatorConfig;
use solana_exchange_program::{
exchange_processor::process_instruction, id, solana_exchange_program,
use {
log::*,
solana_bench_exchange::bench::{airdrop_lamports, do_bench_exchange, Config},
solana_core::validator::ValidatorConfig,
solana_exchange_program::{
exchange_processor::process_instruction, id, solana_exchange_program,
},
solana_faucet::faucet::run_local_faucet_with_port,
solana_gossip::gossip_service::{discover_cluster, get_multi_client},
solana_local_cluster::{
local_cluster::{ClusterConfig, LocalCluster},
validator_configs::make_identical_validator_configs,
},
solana_runtime::{bank::Bank, bank_client::BankClient},
solana_sdk::{
genesis_config::create_genesis_config,
signature::{Keypair, Signer},
},
solana_streamer::socket::SocketAddrSpace,
std::{process::exit, sync::mpsc::channel, time::Duration},
};
use solana_faucet::faucet::run_local_faucet_with_port;
use solana_gossip::gossip_service::{discover_cluster, get_multi_client};
use solana_local_cluster::{
local_cluster::{ClusterConfig, LocalCluster},
validator_configs::make_identical_validator_configs,
};
use solana_runtime::{bank::Bank, bank_client::BankClient};
use solana_sdk::{
genesis_config::create_genesis_config,
signature::{Keypair, Signer},
};
use std::{process::exit, sync::mpsc::channel, time::Duration};

#[test]
#[ignore]
@@ -43,13 +46,19 @@ fn test_exchange_local_cluster() {
} = config;
let accounts_in_groups = batch_size * account_groups;

let cluster = LocalCluster::new(&mut ClusterConfig {
node_stakes: vec![100_000; NUM_NODES],
cluster_lamports: 100_000_000_000_000,
validator_configs: make_identical_validator_configs(&ValidatorConfig::default(), NUM_NODES),
native_instruction_processors: [solana_exchange_program!()].to_vec(),
..ClusterConfig::default()
});
let cluster = LocalCluster::new(
&mut ClusterConfig {
node_stakes: vec![100_000; NUM_NODES],
cluster_lamports: 100_000_000_000_000,
validator_configs: make_identical_validator_configs(
&ValidatorConfig::default_for_test(),
NUM_NODES,
),
native_instruction_processors: [solana_exchange_program!()].to_vec(),
..ClusterConfig::default()
},
SocketAddrSpace::Unspecified,
);

let faucet_keypair = Keypair::new();
cluster.transfer(
@@ -66,13 +75,17 @@ fn test_exchange_local_cluster() {
.expect("faucet_addr");

info!("Connecting to the cluster");
let nodes =
discover_cluster(&cluster.entry_point_info.gossip, NUM_NODES).unwrap_or_else(|err| {
error!("Failed to discover {} nodes: {:?}", NUM_NODES, err);
exit(1);
});
let nodes = discover_cluster(
&cluster.entry_point_info.gossip,
NUM_NODES,
SocketAddrSpace::Unspecified,
)
.unwrap_or_else(|err| {
error!("Failed to discover {} nodes: {:?}", NUM_NODES, err);
exit(1);
});

let (client, num_clients) = get_multi_client(&nodes);
let (client, num_clients) = get_multi_client(&nodes, &SocketAddrSpace::Unspecified);

info!("clients: {}", num_clients);
assert!(num_clients >= NUM_NODES);

@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2018"
name = "solana-bench-streamer"
version = "1.7.0"
version = "1.8.17"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -10,11 +10,11 @@ publish = false

[dependencies]
clap = "2.33.1"
solana-clap-utils = { path = "../clap-utils", version = "=1.7.0" }
solana-streamer = { path = "../streamer", version = "=1.7.0" }
solana-logger = { path = "../logger", version = "=1.7.0" }
solana-net-utils = { path = "../net-utils", version = "=1.7.0" }
solana-version = { path = "../version", version = "=1.7.0" }
solana-clap-utils = { path = "../clap-utils", version = "=1.8.17" }
solana-streamer = { path = "../streamer", version = "=1.8.17" }
solana-logger = { path = "../logger", version = "=1.8.17" }
solana-net-utils = { path = "../net-utils", version = "=1.8.17" }
solana-version = { path = "../version", version = "=1.8.17" }

[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]

@@ -1,32 +1,38 @@
#![allow(clippy::integer_arithmetic)]
use clap::{crate_description, crate_name, App, Arg};
use solana_streamer::packet::{Packet, Packets, PacketsRecycler, PACKET_DATA_SIZE};
use solana_streamer::streamer::{receiver, PacketReceiver};
use std::cmp::max;
use std::net::{IpAddr, Ipv4Addr, SocketAddr, UdpSocket};
use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
use std::sync::mpsc::channel;
use std::sync::Arc;
use std::thread::sleep;
use std::thread::{spawn, JoinHandle, Result};
use std::time::Duration;
use std::time::SystemTime;
use {
clap::{crate_description, crate_name, App, Arg},
solana_streamer::{
packet::{Packet, PacketBatch, PacketBatchRecycler, PACKET_DATA_SIZE},
streamer::{receiver, PacketBatchReceiver},
},
std::{
cmp::max,
net::{IpAddr, Ipv4Addr, SocketAddr, UdpSocket},
sync::{
atomic::{AtomicBool, AtomicUsize, Ordering},
mpsc::channel,
Arc,
},
thread::{sleep, spawn, JoinHandle, Result},
time::{Duration, SystemTime},
},
};

fn producer(addr: &SocketAddr, exit: Arc<AtomicBool>) -> JoinHandle<()> {
let send = UdpSocket::bind("0.0.0.0:0").unwrap();
let mut msgs = Packets::default();
msgs.packets.resize(10, Packet::default());
for w in msgs.packets.iter_mut() {
let mut packet_batch = PacketBatch::default();
packet_batch.packets.resize(10, Packet::default());
for w in packet_batch.packets.iter_mut() {
w.meta.size = PACKET_DATA_SIZE;
w.meta.set_addr(&addr);
w.meta.set_addr(addr);
}
let msgs = Arc::new(msgs);
let packet_batch = Arc::new(packet_batch);
spawn(move || loop {
if exit.load(Ordering::Relaxed) {
return;
}
let mut num = 0;
for p in &msgs.packets {
for p in &packet_batch.packets {
let a = p.meta.addr();
assert!(p.meta.size <= PACKET_DATA_SIZE);
send.send_to(&p.data[..p.meta.size], &a).unwrap();
@@ -36,14 +42,14 @@ fn producer(addr: &SocketAddr, exit: Arc<AtomicBool>) -> JoinHandle<()> {
})
}

fn sink(exit: Arc<AtomicBool>, rvs: Arc<AtomicUsize>, r: PacketReceiver) -> JoinHandle<()> {
fn sink(exit: Arc<AtomicBool>, rvs: Arc<AtomicUsize>, r: PacketBatchReceiver) -> JoinHandle<()> {
spawn(move || loop {
if exit.load(Ordering::Relaxed) {
return;
}
let timer = Duration::new(1, 0);
if let Ok(msgs) = r.recv_timeout(timer) {
rvs.fetch_add(msgs.packets.len(), Ordering::Relaxed);
if let Ok(packet_batch) = r.recv_timeout(timer) {
rvs.fetch_add(packet_batch.packets.len(), Ordering::Relaxed);
}
})
}
@@ -75,7 +81,7 @@ fn main() -> Result<()> {

let mut read_channels = Vec::new();
let mut read_threads = Vec::new();
let recycler = PacketsRecycler::default();
let recycler = PacketBatchRecycler::default();
for _ in 0..num_sockets {
let read = solana_net_utils::bind_to(ip_addr, port, false).unwrap();
read.set_read_timeout(Some(Duration::new(1, 0))).unwrap();
@@ -92,6 +98,7 @@ fn main() -> Result<()> {
recycler.clone(),
"bench-streamer-test",
1,
true,
));
}

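Note: the `Packets` type and its helpers are renamed to `PacketBatch`, `PacketBatchRecycler`, and `PacketBatchReceiver` throughout the streamer callers in this compare. A minimal sketch of building a batch the way `producer` above does (the batch size is a placeholder, not part of the original change):

// Sketch only: follows the renamed API used in producer() above.
let mut packet_batch = PacketBatch::default();
packet_batch.packets.resize(10, Packet::default());
for p in packet_batch.packets.iter_mut() {
    p.meta.size = PACKET_DATA_SIZE;
}
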
@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2018"
name = "solana-bench-tps"
version = "1.7.0"
version = "1.8.17"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -15,23 +15,24 @@ log = "0.4.11"
rayon = "1.5.0"
serde_json = "1.0.56"
serde_yaml = "0.8.13"
solana-clap-utils = { path = "../clap-utils", version = "=1.7.0" }
solana-core = { path = "../core", version = "=1.7.0" }
solana-genesis = { path = "../genesis", version = "=1.7.0" }
solana-client = { path = "../client", version = "=1.7.0" }
solana-faucet = { path = "../faucet", version = "=1.7.0" }
solana-gossip = { path = "../gossip", version = "=1.7.0" }
solana-logger = { path = "../logger", version = "=1.7.0" }
solana-metrics = { path = "../metrics", version = "=1.7.0" }
solana-measure = { path = "../measure", version = "=1.7.0" }
solana-net-utils = { path = "../net-utils", version = "=1.7.0" }
solana-runtime = { path = "../runtime", version = "=1.7.0" }
solana-sdk = { path = "../sdk", version = "=1.7.0" }
solana-version = { path = "../version", version = "=1.7.0" }
solana-clap-utils = { path = "../clap-utils", version = "=1.8.17" }
solana-core = { path = "../core", version = "=1.8.17" }
solana-genesis = { path = "../genesis", version = "=1.8.17" }
solana-client = { path = "../client", version = "=1.8.17" }
solana-faucet = { path = "../faucet", version = "=1.8.17" }
solana-gossip = { path = "../gossip", version = "=1.8.17" }
solana-logger = { path = "../logger", version = "=1.8.17" }
solana-metrics = { path = "../metrics", version = "=1.8.17" }
solana-measure = { path = "../measure", version = "=1.8.17" }
solana-net-utils = { path = "../net-utils", version = "=1.8.17" }
solana-runtime = { path = "../runtime", version = "=1.8.17" }
solana-sdk = { path = "../sdk", version = "=1.8.17" }
solana-streamer = { path = "../streamer", version = "=1.8.17" }
solana-version = { path = "../version", version = "=1.8.17" }

[dev-dependencies]
serial_test = "0.4.0"
solana-local-cluster = { path = "../local-cluster", version = "=1.7.0" }
solana-local-cluster = { path = "../local-cluster", version = "=1.8.17" }

[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]

@@ -1,34 +1,36 @@
use crate::cli::Config;
use log::*;
use rayon::prelude::*;
use solana_client::perf_utils::{sample_txs, SampleStats};
use solana_core::gen_keys::GenKeys;
use solana_faucet::faucet::request_airdrop_transaction;
use solana_measure::measure::Measure;
use solana_metrics::{self, datapoint_info};
use solana_sdk::{
client::Client,
clock::{DEFAULT_S_PER_SLOT, MAX_PROCESSING_AGE},
commitment_config::CommitmentConfig,
fee_calculator::FeeCalculator,
hash::Hash,
message::Message,
pubkey::Pubkey,
signature::{Keypair, Signer},
system_instruction, system_transaction,
timing::{duration_as_ms, duration_as_s, duration_as_us, timestamp},
transaction::Transaction,
};
use std::{
collections::{HashSet, VecDeque},
net::SocketAddr,
process::exit,
sync::{
atomic::{AtomicBool, AtomicIsize, AtomicUsize, Ordering},
Arc, Mutex, RwLock,
use {
crate::cli::Config,
log::*,
rayon::prelude::*,
solana_client::perf_utils::{sample_txs, SampleStats},
solana_core::gen_keys::GenKeys,
solana_faucet::faucet::request_airdrop_transaction,
solana_measure::measure::Measure,
solana_metrics::{self, datapoint_info},
solana_sdk::{
client::Client,
clock::{DEFAULT_S_PER_SLOT, MAX_PROCESSING_AGE},
commitment_config::CommitmentConfig,
fee_calculator::FeeCalculator,
hash::Hash,
message::Message,
pubkey::Pubkey,
signature::{Keypair, Signer},
system_instruction, system_transaction,
timing::{duration_as_ms, duration_as_s, duration_as_us, timestamp},
transaction::Transaction,
},
std::{
collections::{HashSet, VecDeque},
net::SocketAddr,
process::exit,
sync::{
atomic::{AtomicBool, AtomicIsize, AtomicUsize, Ordering},
Arc, Mutex, RwLock,
},
thread::{sleep, Builder, JoinHandle},
time::{Duration, Instant},
},
thread::{sleep, Builder, JoinHandle},
time::{Duration, Instant},
};

// The point at which transactions become "too old", in seconds.
@@ -544,12 +546,12 @@ impl<'a> FundingTransactions<'a> for Vec<(&'a Keypair, Transaction)> {

// re-sign retained to_fund_txes with updated blockhash
self.sign(blockhash);
self.send(&client);
self.send(client);

// Sleep a few slots to allow transactions to process
sleep(Duration::from_secs(1));

self.verify(&client, to_lamports);
self.verify(client, to_lamports);

// retry anything that seems to have dropped through cracks
// again since these txs are all or nothing, they're fine to
@@ -564,7 +566,7 @@ impl<'a> FundingTransactions<'a> for Vec<(&'a Keypair, Transaction)> {
let to_fund_txs: Vec<(&Keypair, Transaction)> = to_fund
.par_iter()
.map(|(k, t)| {
let instructions = system_instruction::transfer_many(&k.pubkey(), &t);
let instructions = system_instruction::transfer_many(&k.pubkey(), t);
let message = Message::new(&instructions, Some(&k.pubkey()));
(*k, Transaction::new_unsigned(message))
})
@@ -617,7 +619,7 @@ impl<'a> FundingTransactions<'a> for Vec<(&'a Keypair, Transaction)> {
return None;
}

let verified = if verify_funding_transfer(&client, &tx, to_lamports) {
let verified = if verify_funding_transfer(&client, tx, to_lamports) {
verified_txs.fetch_add(1, Ordering::Relaxed);
Some(k.pubkey())
} else {
@@ -733,7 +735,7 @@ pub fn airdrop_lamports<T: Client>(
);

let (blockhash, _fee_calculator) = get_recent_blockhash(client);
match request_airdrop_transaction(&faucet_addr, &id.pubkey(), airdrop_amount, blockhash) {
match request_airdrop_transaction(faucet_addr, &id.pubkey(), airdrop_amount, blockhash) {
Ok(transaction) => {
let mut tries = 0;
loop {
@@ -924,12 +926,14 @@ pub fn generate_and_fund_keypairs<T: 'static + Client + Send + Sync>(

#[cfg(test)]
mod tests {
use super::*;
use solana_runtime::bank::Bank;
use solana_runtime::bank_client::BankClient;
use solana_sdk::client::SyncClient;
use solana_sdk::fee_calculator::FeeRateGovernor;
use solana_sdk::genesis_config::create_genesis_config;
use {
super::*,
solana_runtime::{bank::Bank, bank_client::BankClient},
solana_sdk::{
client::SyncClient, fee_calculator::FeeRateGovernor,
genesis_config::create_genesis_config,
},
};

#[test]
fn test_bench_tps_bank_client() {

@@ -1,11 +1,13 @@
|
||||
use clap::{crate_description, crate_name, App, Arg, ArgMatches};
|
||||
use solana_faucet::faucet::FAUCET_PORT;
|
||||
use solana_sdk::fee_calculator::FeeRateGovernor;
|
||||
use solana_sdk::{
|
||||
pubkey::Pubkey,
|
||||
signature::{read_keypair_file, Keypair},
|
||||
use {
|
||||
clap::{crate_description, crate_name, App, Arg, ArgMatches},
|
||||
solana_faucet::faucet::FAUCET_PORT,
|
||||
solana_sdk::{
|
||||
fee_calculator::FeeRateGovernor,
|
||||
pubkey::Pubkey,
|
||||
signature::{read_keypair_file, Keypair},
|
||||
},
|
||||
std::{net::SocketAddr, process::exit, time::Duration},
|
||||
};
|
||||
use std::{net::SocketAddr, process::exit, time::Duration};
|
||||
|
||||
const NUM_LAMPORTS_PER_ACCOUNT_DEFAULT: u64 = solana_sdk::native_token::LAMPORTS_PER_SOL;
|
||||
|
||||
|
@@ -1,13 +1,20 @@
|
||||
#![allow(clippy::integer_arithmetic)]
|
||||
use log::*;
|
||||
use solana_bench_tps::bench::{do_bench_tps, generate_and_fund_keypairs, generate_keypairs};
|
||||
use solana_bench_tps::cli;
|
||||
use solana_genesis::Base64Account;
|
||||
use solana_gossip::gossip_service::{discover_cluster, get_client, get_multi_client};
|
||||
use solana_sdk::fee_calculator::FeeRateGovernor;
|
||||
use solana_sdk::signature::{Keypair, Signer};
|
||||
use solana_sdk::system_program;
|
||||
use std::{collections::HashMap, fs::File, io::prelude::*, path::Path, process::exit, sync::Arc};
|
||||
use {
|
||||
log::*,
|
||||
solana_bench_tps::{
|
||||
bench::{do_bench_tps, generate_and_fund_keypairs, generate_keypairs},
|
||||
cli,
|
||||
},
|
||||
solana_genesis::Base64Account,
|
||||
solana_gossip::gossip_service::{discover_cluster, get_client, get_multi_client},
|
||||
solana_sdk::{
|
||||
fee_calculator::FeeRateGovernor,
|
||||
signature::{Keypair, Signer},
|
||||
system_program,
|
||||
},
|
||||
solana_streamer::socket::SocketAddrSpace,
|
||||
std::{collections::HashMap, fs::File, io::prelude::*, path::Path, process::exit, sync::Arc},
|
||||
};
|
||||
|
||||
/// Number of signatures for all transactions in ~1 week at ~100K TPS
|
||||
pub const NUM_SIGNATURES_FOR_TXS: u64 = 100_000 * 60 * 60 * 24 * 7;
|
||||
@@ -39,7 +46,7 @@ fn main() {
|
||||
let keypair_count = *tx_count * keypair_multiplier;
|
||||
if *write_to_client_file {
|
||||
info!("Generating {} keypairs", keypair_count);
|
||||
let (keypairs, _) = generate_keypairs(&id, keypair_count as u64);
|
||||
let (keypairs, _) = generate_keypairs(id, keypair_count as u64);
|
||||
let num_accounts = keypairs.len() as u64;
|
||||
let max_fee =
|
||||
FeeRateGovernor::new(*target_lamports_per_signature, 0).max_lamports_per_signature;
|
||||
@@ -68,13 +75,14 @@ fn main() {
|
||||
}
|
||||
|
||||
info!("Connecting to the cluster");
|
||||
let nodes = discover_cluster(&entrypoint_addr, *num_nodes).unwrap_or_else(|err| {
|
||||
eprintln!("Failed to discover {} nodes: {:?}", num_nodes, err);
|
||||
exit(1);
|
||||
});
|
||||
let nodes = discover_cluster(entrypoint_addr, *num_nodes, SocketAddrSpace::Unspecified)
|
||||
.unwrap_or_else(|err| {
|
||||
eprintln!("Failed to discover {} nodes: {:?}", num_nodes, err);
|
||||
exit(1);
|
||||
});
|
||||
|
||||
let client = if *multi_client {
|
||||
let (client, num_clients) = get_multi_client(&nodes);
|
||||
let (client, num_clients) = get_multi_client(&nodes, &SocketAddrSpace::Unspecified);
|
||||
if nodes.len() < num_clients {
|
||||
eprintln!(
|
||||
"Error: Insufficient nodes discovered. Expecting {} or more",
|
||||
@@ -88,7 +96,7 @@ fn main() {
|
||||
let mut target_client = None;
|
||||
for node in nodes {
|
||||
if node.id == *target_node {
|
||||
target_client = Some(Arc::new(get_client(&[node])));
|
||||
target_client = Some(Arc::new(get_client(&[node], &SocketAddrSpace::Unspecified)));
|
||||
break;
|
||||
}
|
||||
}
|
||||
@@ -97,7 +105,7 @@ fn main() {
|
||||
exit(1);
|
||||
})
|
||||
} else {
|
||||
Arc::new(get_client(&nodes))
|
||||
Arc::new(get_client(&nodes, &SocketAddrSpace::Unspecified))
|
||||
};
|
||||
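The gossip-service entry points gained a `SocketAddrSpace` parameter in 1.8. A hedged sketch of the resulting call shape, mirroring the hunk above (the surrounding function and its error handling are invented for illustration):

```rust
use {
    solana_gossip::gossip_service::{discover_cluster, get_multi_client},
    solana_streamer::socket::SocketAddrSpace,
    std::{net::SocketAddr, process::exit},
};

fn connect(entrypoint_addr: &SocketAddr, num_nodes: usize) {
    // `Unspecified` keeps the pre-1.8 behavior of accepting any address;
    // stricter variants let callers filter out private address ranges.
    let nodes = discover_cluster(entrypoint_addr, num_nodes, SocketAddrSpace::Unspecified)
        .unwrap_or_else(|err| {
            eprintln!("Failed to discover {} nodes: {:?}", num_nodes, err);
            exit(1);
        });
    let (client, num_clients) = get_multi_client(&nodes, &SocketAddrSpace::Unspecified);
    println!("aggregated {} clients from {} nodes", num_clients, nodes.len());
    drop(client);
}
```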
|
||||
let keypairs = if *read_from_client_file {
|
||||
@@ -135,7 +143,7 @@ fn main() {
|
||||
generate_and_fund_keypairs(
|
||||
client.clone(),
|
||||
Some(*faucet_addr),
|
||||
&id,
|
||||
id,
|
||||
keypair_count,
|
||||
*num_lamports_per_account,
|
||||
)
|
||||
|
@@ -1,21 +1,24 @@
|
||||
#![allow(clippy::integer_arithmetic)]
|
||||
use serial_test::serial;
|
||||
use solana_bench_tps::{
|
||||
bench::{do_bench_tps, generate_and_fund_keypairs},
|
||||
cli::Config,
|
||||
};
|
||||
use solana_client::thin_client::create_client;
|
||||
use solana_core::validator::ValidatorConfig;
|
||||
use solana_faucet::faucet::run_local_faucet_with_port;
|
||||
use solana_gossip::cluster_info::VALIDATOR_PORT_RANGE;
|
||||
use solana_local_cluster::{
|
||||
local_cluster::{ClusterConfig, LocalCluster},
|
||||
validator_configs::make_identical_validator_configs,
|
||||
};
|
||||
use solana_sdk::signature::{Keypair, Signer};
|
||||
use std::{
|
||||
sync::{mpsc::channel, Arc},
|
||||
time::Duration,
|
||||
use {
|
||||
serial_test::serial,
|
||||
solana_bench_tps::{
|
||||
bench::{do_bench_tps, generate_and_fund_keypairs},
|
||||
cli::Config,
|
||||
},
|
||||
solana_client::thin_client::create_client,
|
||||
solana_core::validator::ValidatorConfig,
|
||||
solana_faucet::faucet::run_local_faucet_with_port,
|
||||
solana_gossip::cluster_info::VALIDATOR_PORT_RANGE,
|
||||
solana_local_cluster::{
|
||||
local_cluster::{ClusterConfig, LocalCluster},
|
||||
validator_configs::make_identical_validator_configs,
|
||||
},
|
||||
solana_sdk::signature::{Keypair, Signer},
|
||||
solana_streamer::socket::SocketAddrSpace,
|
||||
std::{
|
||||
sync::{mpsc::channel, Arc},
|
||||
time::Duration,
|
||||
},
|
||||
};
|
||||
|
||||
fn test_bench_tps_local_cluster(config: Config) {
|
||||
@@ -23,13 +26,19 @@ fn test_bench_tps_local_cluster(config: Config) {
|
||||
|
||||
solana_logger::setup();
|
||||
const NUM_NODES: usize = 1;
|
||||
let cluster = LocalCluster::new(&mut ClusterConfig {
|
||||
node_stakes: vec![999_990; NUM_NODES],
|
||||
cluster_lamports: 200_000_000,
|
||||
validator_configs: make_identical_validator_configs(&ValidatorConfig::default(), NUM_NODES),
|
||||
native_instruction_processors,
|
||||
..ClusterConfig::default()
|
||||
});
|
||||
let cluster = LocalCluster::new(
|
||||
&mut ClusterConfig {
|
||||
node_stakes: vec![999_990; NUM_NODES],
|
||||
cluster_lamports: 200_000_000,
|
||||
validator_configs: make_identical_validator_configs(
|
||||
&ValidatorConfig::default_for_test(),
|
||||
NUM_NODES,
|
||||
),
|
||||
native_instruction_processors,
|
||||
..ClusterConfig::default()
|
||||
},
|
||||
SocketAddrSpace::Unspecified,
|
||||
);
|
||||
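`LocalCluster::new` likewise takes a `SocketAddrSpace` now, and the test switches to `ValidatorConfig::default_for_test()`. A minimal sketch of the new constructor shape, with the stake and lamport figures copied from the test above (the wrapper function is illustrative):

```rust
use {
    solana_core::validator::ValidatorConfig,
    solana_local_cluster::{
        local_cluster::{ClusterConfig, LocalCluster},
        validator_configs::make_identical_validator_configs,
    },
    solana_streamer::socket::SocketAddrSpace,
};

fn spin_up_single_node_cluster() -> LocalCluster {
    const NUM_NODES: usize = 1;
    LocalCluster::new(
        &mut ClusterConfig {
            node_stakes: vec![999_990; NUM_NODES],
            cluster_lamports: 200_000_000,
            validator_configs: make_identical_validator_configs(
                &ValidatorConfig::default_for_test(),
                NUM_NODES,
            ),
            ..ClusterConfig::default()
        },
        SocketAddrSpace::Unspecified,
    )
}
```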
|
||||
let faucet_keypair = Keypair::new();
|
||||
cluster.transfer(
|
||||
|
bloom/Cargo.toml (new file)
@@ -0,0 +1,32 @@
[package]
name = "solana-bloom"
version = "1.8.17"
description = "Solana bloom filter"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
documentation = "https://docs.rs/solana-bloom"
edition = "2018"

[dependencies]
bv = { version = "0.11.1", features = ["serde"] }
fnv = "1.0.7"
rand = "0.7.0"
serde = { version = "1.0.133", features = ["rc"] }
rayon = "1.5.1"
serde_derive = "1.0.103"
solana-frozen-abi = { path = "../frozen-abi", version = "=1.8.17" }
solana-frozen-abi-macro = { path = "../frozen-abi/macro", version = "=1.8.17" }
solana-sdk = { path = "../sdk", version = "=1.8.17" }
log = "0.4.14"

[lib]
crate-type = ["lib"]
name = "solana_bloom"

[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]

[build-dependencies]
rustc_version = "0.4"
@@ -1,17 +1,18 @@
|
||||
#![feature(test)]
|
||||
|
||||
extern crate test;
|
||||
use bv::BitVec;
|
||||
use fnv::FnvHasher;
|
||||
use rand::Rng;
|
||||
use solana_runtime::bloom::{AtomicBloom, Bloom, BloomHashIndex};
|
||||
use solana_sdk::{
|
||||
hash::{hash, Hash},
|
||||
signature::Signature,
|
||||
use {
|
||||
bv::BitVec,
|
||||
fnv::FnvHasher,
|
||||
rand::Rng,
|
||||
solana_bloom::bloom::{AtomicBloom, Bloom, BloomHashIndex},
|
||||
solana_sdk::{
|
||||
hash::{hash, Hash},
|
||||
signature::Signature,
|
||||
},
|
||||
std::{collections::HashSet, hash::Hasher},
|
||||
test::Bencher,
|
||||
};
|
||||
use std::collections::HashSet;
|
||||
use std::hash::Hasher;
|
||||
use test::Bencher;
|
||||
|
||||
#[bench]
|
||||
#[ignore]
|
bloom/build.rs (new symbolic link)
@@ -0,0 +1 @@
../frozen-abi/build.rs
|
@@ -1,12 +1,19 @@
|
||||
//! Simple Bloom Filter
|
||||
use bv::BitVec;
|
||||
use fnv::FnvHasher;
|
||||
use rand::{self, Rng};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use solana_sdk::sanitize::{Sanitize, SanitizeError};
|
||||
use std::fmt;
|
||||
use std::sync::atomic::{AtomicU64, Ordering};
|
||||
use std::{cmp, hash::Hasher, marker::PhantomData};
|
||||
use {
|
||||
bv::BitVec,
|
||||
fnv::FnvHasher,
|
||||
rand::{self, Rng},
|
||||
serde::{Deserialize, Serialize},
|
||||
solana_sdk::sanitize::{Sanitize, SanitizeError},
|
||||
std::{
|
||||
cmp,
|
||||
convert::TryFrom,
|
||||
fmt,
|
||||
hash::Hasher,
|
||||
marker::PhantomData,
|
||||
sync::atomic::{AtomicU64, Ordering},
|
||||
},
|
||||
};
|
||||
|
||||
/// Generate a stable hash of `self` for each `hash_index`
|
||||
/// Best effort can be made for uniqueness of each hash.
|
||||
@@ -94,7 +101,7 @@ impl<T: BloomHashIndex> Bloom<T> {
|
||||
}
|
||||
}
|
||||
fn pos(&self, key: &T, k: u64) -> u64 {
|
||||
key.hash_at_index(k) % self.bits.len()
|
||||
key.hash_at_index(k).wrapping_rem(self.bits.len())
|
||||
}
|
||||
pub fn clear(&mut self) {
|
||||
self.bits = BitVec::new_fill(false, self.bits.len());
|
||||
@@ -104,7 +111,7 @@ impl<T: BloomHashIndex> Bloom<T> {
|
||||
for k in &self.keys {
|
||||
let pos = self.pos(key, *k);
|
||||
if !self.bits.get(pos) {
|
||||
self.num_bits_set += 1;
|
||||
self.num_bits_set = self.num_bits_set.saturating_add(1);
|
||||
self.bits.set(pos, true);
|
||||
}
|
||||
}
|
||||
@@ -157,21 +164,26 @@ impl<T: BloomHashIndex> From<Bloom<T>> for AtomicBloom<T> {
|
||||
|
||||
impl<T: BloomHashIndex> AtomicBloom<T> {
|
||||
fn pos(&self, key: &T, hash_index: u64) -> (usize, u64) {
|
||||
let pos = key.hash_at_index(hash_index) % self.num_bits;
|
||||
let pos = key.hash_at_index(hash_index).wrapping_rem(self.num_bits);
|
||||
// Divide by 64 to figure out which of the
|
||||
// AtomicU64 bit chunks we need to modify.
|
||||
let index = pos >> 6;
|
||||
let index = pos.wrapping_shr(6);
|
||||
// (pos & 63) is equivalent to mod 64 so that we can find
|
||||
// the index of the bit within the AtomicU64 to modify.
|
||||
let mask = 1u64 << (pos & 63);
|
||||
let mask = 1u64.wrapping_shl(u32::try_from(pos & 63).unwrap());
|
||||
(index as usize, mask)
|
||||
}
|
||||
|
||||
pub fn add(&self, key: &T) {
|
||||
/// Adds an item to the bloom filter and returns true if the item
|
||||
/// was not in the filter before.
|
||||
pub fn add(&self, key: &T) -> bool {
|
||||
let mut added = false;
|
||||
for k in &self.keys {
|
||||
let (index, mask) = self.pos(key, *k);
|
||||
self.bits[index].fetch_or(mask, Ordering::Relaxed);
|
||||
let prev_val = self.bits[index].fetch_or(mask, Ordering::Relaxed);
|
||||
added = added || prev_val & mask == 0u64;
|
||||
}
|
||||
added
|
||||
}
|
||||
|
||||
pub fn contains(&self, key: &T) -> bool {
|
||||
@@ -182,6 +194,12 @@ impl<T: BloomHashIndex> AtomicBloom<T> {
|
||||
})
|
||||
}
|
||||
|
||||
pub fn clear_for_tests(&mut self) {
|
||||
self.bits.iter().for_each(|bit| {
|
||||
bit.store(0u64, Ordering::Relaxed);
|
||||
});
|
||||
}
|
||||
|
||||
// Only for tests and simulations.
|
||||
pub fn mock_clone(&self) -> Self {
|
||||
Self {
|
||||
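Both `Bloom::pos` and `AtomicBloom::pos` swap `%`, `>>`, and `<<` for their `wrapping_*` spellings so the crate builds under `clippy::integer_arithmetic`; the values are unchanged, since the modulus is never zero here and the shift amounts are bounded. A self-contained sketch of the word-index/mask arithmetic used above (the `num_bits` value is illustrative):

```rust
use std::convert::TryFrom;

/// Map a hash onto (u64-word index, one-bit mask) within a bit set of
/// `num_bits` bits stored as `[u64]`, mirroring the arithmetic in
/// `AtomicBloom::pos` above.
fn word_and_mask(hash: u64, num_bits: u64) -> (usize, u64) {
    let pos = hash.wrapping_rem(num_bits); // same value as `%`; lint-friendly spelling
    let index = pos.wrapping_shr(6); // divide by 64: which u64 word holds the bit
    let mask = 1u64.wrapping_shl(u32::try_from(pos & 63).unwrap()); // bit within that word
    (index as usize, mask)
}

fn main() {
    let (index, mask) = word_and_mask(0xdead_beef, 6168);
    assert_eq!(mask.count_ones(), 1);
    println!("word {}, mask {:#018x}", index, mask);
}
```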
@@ -217,9 +235,11 @@ impl<T: BloomHashIndex> From<AtomicBloom<T>> for Bloom<T> {
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use super::*;
|
||||
use rayon::prelude::*;
|
||||
use solana_sdk::hash::{hash, Hash};
|
||||
use {
|
||||
super::*,
|
||||
rayon::prelude::*,
|
||||
solana_sdk::hash::{hash, Hash},
|
||||
};
|
||||
|
||||
#[test]
|
||||
fn test_bloom_filter() {
|
||||
@@ -311,7 +331,9 @@ mod test {
|
||||
assert_eq!(bloom.keys.len(), 3);
|
||||
assert_eq!(bloom.num_bits, 6168);
|
||||
assert_eq!(bloom.bits.len(), 97);
|
||||
hash_values.par_iter().for_each(|v| bloom.add(v));
|
||||
hash_values.par_iter().for_each(|v| {
|
||||
bloom.add(v);
|
||||
});
|
||||
let bloom: Bloom<Hash> = bloom.into();
|
||||
assert_eq!(bloom.keys.len(), 3);
|
||||
assert_eq!(bloom.bits.len(), 6168);
|
||||
@@ -353,7 +375,9 @@ mod test {
|
||||
}
|
||||
// Round trip, re-inserting the same hash values.
|
||||
let bloom: AtomicBloom<_> = bloom.into();
|
||||
hash_values.par_iter().for_each(|v| bloom.add(v));
|
||||
hash_values.par_iter().for_each(|v| {
|
||||
bloom.add(v);
|
||||
});
|
||||
for hash_value in &hash_values {
|
||||
assert!(bloom.contains(hash_value));
|
||||
}
|
||||
@@ -371,7 +395,9 @@ mod test {
|
||||
let bloom: AtomicBloom<_> = bloom.into();
|
||||
assert_eq!(bloom.num_bits, 9731);
|
||||
assert_eq!(bloom.bits.len(), (9731 + 63) / 64);
|
||||
more_hash_values.par_iter().for_each(|v| bloom.add(v));
|
||||
more_hash_values.par_iter().for_each(|v| {
|
||||
bloom.add(v);
|
||||
});
|
||||
for hash_value in &hash_values {
|
||||
assert!(bloom.contains(hash_value));
|
||||
}
|
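Because `AtomicBloom::add` now returns whether any bit was newly set, the parallel test loops above wrap the call in a block to drop the value. A caller that wants the information can count first-time insertions instead; a hedged sketch, assuming the module's `Bloom::random(num_items, false_rate, max_bits)` constructor and the `From<Bloom<_>>` conversion shown above:

```rust
use {
    rayon::prelude::*,
    solana_bloom::bloom::{AtomicBloom, Bloom},
    solana_sdk::hash::{hash, Hash},
    std::sync::atomic::{AtomicUsize, Ordering},
};

fn main() {
    let hash_values: Vec<Hash> = (0..1000u64).map(|i| hash(&i.to_le_bytes())).collect();
    // Sizing parameters are illustrative; `Bloom::random` picks bits/keys for
    // an expected item count and false-positive rate.
    let bloom: AtomicBloom<Hash> = Bloom::random(1000, 0.1, 1 << 16).into();

    // Count how many inserts genuinely set a new bit while adding in parallel.
    let newly_added = AtomicUsize::new(0);
    hash_values.par_iter().for_each(|v| {
        if bloom.add(v) {
            newly_added.fetch_add(1, Ordering::Relaxed);
        }
    });
    println!(
        "{} of {} inserts set at least one new bit",
        newly_added.load(Ordering::Relaxed),
        hash_values.len()
    );
}
```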
bloom/src/lib.rs (new file)
@@ -0,0 +1,5 @@
#![cfg_attr(RUSTC_WITH_SPECIALIZATION, feature(min_specialization))]
pub mod bloom;

#[macro_use]
extern crate solana_frozen_abi_macro;
@@ -102,6 +102,8 @@ command_step() {
|
||||
command: "$2"
|
||||
timeout_in_minutes: $3
|
||||
artifact_paths: "log-*.txt"
|
||||
agents:
|
||||
- "queue=solana"
|
||||
EOF
|
||||
}
|
||||
|
||||
@@ -137,7 +139,7 @@ all_test_steps() {
|
||||
^ci/test-coverage.sh \
|
||||
^scripts/coverage.sh \
|
||||
; then
|
||||
command_step coverage ". ci/rust-version.sh; ci/docker-run.sh \$\$rust_nightly_docker_image ci/test-coverage.sh" 30
|
||||
command_step coverage ". ci/rust-version.sh; ci/docker-run.sh \$\$rust_nightly_docker_image ci/test-coverage.sh" 40
|
||||
wait_step
|
||||
else
|
||||
annotate --style info --context test-coverage \
|
||||
@@ -148,6 +150,33 @@ all_test_steps() {
|
||||
command_step stable ". ci/rust-version.sh; ci/docker-run.sh \$\$rust_stable_docker_image ci/test-stable.sh" 60
|
||||
wait_step
|
||||
|
||||
# BPF test suite
|
||||
if affects \
|
||||
.rs$ \
|
||||
Cargo.lock$ \
|
||||
Cargo.toml$ \
|
||||
^ci/rust-version.sh \
|
||||
^ci/test-stable-bpf.sh \
|
||||
^ci/test-stable.sh \
|
||||
^ci/test-local-cluster.sh \
|
||||
^core/build.rs \
|
||||
^fetch-perf-libs.sh \
|
||||
^programs/ \
|
||||
^sdk/ \
|
||||
; then
|
||||
cat >> "$output_file" <<"EOF"
|
||||
- command: "ci/test-stable-bpf.sh"
|
||||
name: "stable-bpf"
|
||||
timeout_in_minutes: 20
|
||||
artifact_paths: "bpf-dumps.tar.bz2"
|
||||
agents:
|
||||
- "queue=solana"
|
||||
EOF
|
||||
else
|
||||
annotate --style info \
|
||||
"Stable-BPF skipped as no relevant files were modified"
|
||||
fi
|
||||
|
||||
# Perf test suite
|
||||
if affects \
|
||||
.rs$ \
|
||||
@@ -165,7 +194,7 @@ all_test_steps() {
|
||||
cat >> "$output_file" <<"EOF"
|
||||
- command: "ci/test-stable-perf.sh"
|
||||
name: "stable-perf"
|
||||
timeout_in_minutes: 40
|
||||
timeout_in_minutes: 20
|
||||
artifact_paths: "log-*.txt"
|
||||
agents:
|
||||
- "queue=cuda"
|
||||
@@ -194,6 +223,8 @@ EOF
|
||||
- command: "scripts/build-downstream-projects.sh"
|
||||
name: "downstream-projects"
|
||||
timeout_in_minutes: 30
|
||||
agents:
|
||||
- "queue=solana"
|
||||
EOF
|
||||
else
|
||||
annotate --style info \
|
||||
@@ -216,7 +247,15 @@ EOF
|
||||
|
||||
command_step "local-cluster" \
|
||||
". ci/rust-version.sh; ci/docker-run.sh \$\$rust_stable_docker_image ci/test-local-cluster.sh" \
|
||||
45
|
||||
40
|
||||
|
||||
command_step "local-cluster-flakey" \
|
||||
". ci/rust-version.sh; ci/docker-run.sh \$\$rust_stable_docker_image ci/test-local-cluster-flakey.sh" \
|
||||
10
|
||||
|
||||
command_step "local-cluster-slow" \
|
||||
". ci/rust-version.sh; ci/docker-run.sh \$\$rust_stable_docker_image ci/test-local-cluster-slow.sh" \
|
||||
30
|
||||
}
|
||||
|
||||
pull_or_push_steps() {
|
||||
|
@@ -3,13 +3,24 @@
|
||||
# Pull requests do not run these steps.
|
||||
steps:
|
||||
- command: "ci/publish-tarball.sh"
|
||||
agents:
|
||||
- "queue=release-build"
|
||||
timeout_in_minutes: 60
|
||||
name: "publish tarball"
|
||||
- wait
|
||||
- command: "sdk/docker-solana/build.sh"
|
||||
agents:
|
||||
- "queue=release-build"
|
||||
timeout_in_minutes: 60
|
||||
name: "publish docker"
|
||||
- command: "ci/publish-crate.sh"
|
||||
agents:
|
||||
- "queue=release-build"
|
||||
timeout_in_minutes: 240
|
||||
name: "publish crate"
|
||||
branches: "!master"
|
||||
- command: "ci/publish-tarball.sh"
|
||||
agents:
|
||||
- "queue=release-build-aarch64-apple-darwin"
|
||||
timeout_in_minutes: 60
|
||||
name: "publish tarball (aarch64-apple-darwin)"
|
||||
|
@@ -28,16 +28,29 @@ cargo_audit_ignores=(
|
||||
# Blocked on multiple crates updating `time` to >= 0.2.23
|
||||
--ignore RUSTSEC-2020-0071
|
||||
|
||||
# difference is unmaintained
|
||||
#
|
||||
# Blocked on predicates v1.0.6 removing its dependency on `difference`
|
||||
--ignore RUSTSEC-2020-0095
|
||||
|
||||
# generic-array: arr! macro erases lifetimes
|
||||
#
|
||||
# Blocked on libsecp256k1 releasing with upgraded dependencies
|
||||
# https://github.com/paritytech/libsecp256k1/issues/66
|
||||
--ignore RUSTSEC-2020-0146
|
||||
|
||||
# hyper: Lenient `hyper` header parsing of `Content-Length` could allow request smuggling
|
||||
#
|
||||
# Blocked on jsonrpc removing dependency on unmaintained `websocket`
|
||||
# https://github.com/paritytech/jsonrpc/issues/605
|
||||
--ignore RUSTSEC-2021-0078
|
||||
|
||||
# hyper: Integer overflow in `hyper`'s parsing of the `Transfer-Encoding` header leads to data loss
|
||||
#
|
||||
# Blocked on jsonrpc removing dependency on unmaintained `websocket`
|
||||
# https://github.com/paritytech/jsonrpc/issues/605
|
||||
--ignore RUSTSEC-2021-0079
|
||||
|
||||
# chrono: Potential segfault in `localtime_r` invocations
|
||||
#
|
||||
# Blocked due to no safe upgrade
|
||||
# https://github.com/chronotope/chrono/issues/499
|
||||
--ignore RUSTSEC-2020-0159
|
||||
|
||||
)
|
||||
scripts/cargo-for-all-lock-files.sh stable audit "${cargo_audit_ignores[@]}"
|
||||
|
ci/env.sh
@@ -23,6 +23,9 @@ if [[ -n $CI ]]; then
|
||||
elif [[ -n $BUILDKITE ]]; then
|
||||
export CI_BRANCH=$BUILDKITE_BRANCH
|
||||
export CI_BUILD_ID=$BUILDKITE_BUILD_ID
|
||||
if [[ $BUILDKITE_COMMIT = HEAD ]]; then
|
||||
BUILDKITE_COMMIT="$(git rev-parse HEAD)"
|
||||
fi
|
||||
export CI_COMMIT=$BUILDKITE_COMMIT
|
||||
export CI_JOB_ID=$BUILDKITE_JOB_ID
|
||||
# The standard BUILDKITE_PULL_REQUEST environment variable is always "false" due
|
||||
@@ -35,7 +38,18 @@ if [[ -n $CI ]]; then
|
||||
export CI_BASE_BRANCH=$BUILDKITE_BRANCH
|
||||
export CI_PULL_REQUEST=
|
||||
fi
|
||||
export CI_OS_NAME=linux
|
||||
|
||||
case "$(uname -s)" in
|
||||
Linux)
|
||||
export CI_OS_NAME=linux
|
||||
;;
|
||||
Darwin)
|
||||
export CI_OS_NAME=osx
|
||||
;;
|
||||
*)
|
||||
;;
|
||||
esac
|
||||
|
||||
if [[ -n $BUILDKITE_TRIGGERED_FROM_BUILD_PIPELINE_SLUG ]]; then
|
||||
# The solana-secondary pipeline should use the slug of the pipeline that
|
||||
# triggered it
|
||||
@@ -74,10 +88,13 @@ else
|
||||
export CI_BUILD_ID=
|
||||
export CI_COMMIT=
|
||||
export CI_JOB_ID=
|
||||
export CI_OS_NAME=
|
||||
export CI_PULL_REQUEST=
|
||||
export CI_REPO_SLUG=
|
||||
export CI_TAG=
|
||||
# Don't override ci/run-local.sh
|
||||
if [[ -z $CI_LOCAL_RUN ]]; then
|
||||
export CI_OS_NAME=
|
||||
fi
|
||||
fi
|
||||
|
||||
cat <<EOF
|
||||
|
@@ -127,7 +127,7 @@ startNode() {
|
||||
waitForNodeToInit() {
|
||||
declare initCompleteFile=$1
|
||||
while [[ ! -r $initCompleteFile ]]; do
|
||||
if [[ $SECONDS -ge 240 ]]; then
|
||||
if [[ $SECONDS -ge 300 ]]; then
|
||||
echo "^^^ +++"
|
||||
echo "Error: $initCompleteFile not found in $SECONDS seconds"
|
||||
exit 1
|
||||
|
@@ -12,10 +12,14 @@ import json
|
||||
import subprocess
|
||||
import sys;
|
||||
|
||||
real_file = os.path.realpath(__file__)
|
||||
ci_path = os.path.dirname(real_file)
|
||||
src_root = os.path.dirname(ci_path)
|
||||
|
||||
def load_metadata():
|
||||
cmd = f'{src_root}/cargo metadata --no-deps --format-version=1'
|
||||
return json.loads(subprocess.Popen(
|
||||
'cargo metadata --no-deps --format-version=1',
|
||||
shell=True, stdout=subprocess.PIPE).communicate()[0])
|
||||
cmd, shell=True, stdout=subprocess.PIPE).communicate()[0])
|
||||
|
||||
def get_packages():
|
||||
metadata = load_metadata()
|
||||
|
@@ -39,7 +39,11 @@ fi
|
||||
|
||||
case "$CI_OS_NAME" in
|
||||
osx)
|
||||
TARGET=x86_64-apple-darwin
|
||||
_cputype="$(uname -m)"
|
||||
if [[ $_cputype = arm64 ]]; then
|
||||
_cputype=aarch64
|
||||
fi
|
||||
TARGET=${_cputype}-apple-darwin
|
||||
;;
|
||||
linux)
|
||||
TARGET=x86_64-unknown-linux-gnu
|
||||
@@ -146,7 +150,7 @@ elif [[ -n $BUILDKITE ]]; then
|
||||
cat > release.solana.com-install <<EOF
|
||||
SOLANA_RELEASE=$CHANNEL_OR_TAG
|
||||
SOLANA_INSTALL_INIT_ARGS=$CHANNEL_OR_TAG
|
||||
SOLANA_DOWNLOAD_ROOT=http://release.solana.com
|
||||
SOLANA_DOWNLOAD_ROOT=https://release.solana.com
|
||||
EOF
|
||||
cat install/solana-install-init.sh >> release.solana.com-install
|
||||
|
||||
|
ci/run-local.sh (new executable file)
@@ -0,0 +1,57 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
cd "$(dirname "$0")/.."
|
||||
|
||||
export CI_LOCAL_RUN=true
|
||||
|
||||
set -e
|
||||
|
||||
case $(uname -o) in
|
||||
*/Linux)
|
||||
export CI_OS_NAME=linux
|
||||
;;
|
||||
*)
|
||||
echo "local CI runs are only supported on Linux" 1>&2
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
|
||||
steps=()
|
||||
steps+=(test-sanity)
|
||||
steps+=(shellcheck)
|
||||
steps+=(test-checks)
|
||||
steps+=(test-coverage)
|
||||
steps+=(test-stable)
|
||||
steps+=(test-stable-bpf)
|
||||
steps+=(test-stable-perf)
|
||||
steps+=(test-downstream-builds)
|
||||
steps+=(test-bench)
|
||||
steps+=(test-local-cluster)
|
||||
steps+=(test-local-cluster-flakey)
|
||||
steps+=(test-local-cluster-slow)
|
||||
|
||||
step_index=0
|
||||
if [[ -n "$1" ]]; then
|
||||
start_step="$1"
|
||||
while [[ $step_index -lt ${#steps[@]} ]]; do
|
||||
step="${steps[$step_index]}"
|
||||
if [[ "$step" = "$start_step" ]]; then
|
||||
break
|
||||
fi
|
||||
step_index=$((step_index + 1))
|
||||
done
|
||||
if [[ $step_index -eq ${#steps[@]} ]]; then
|
||||
echo "unexpected start step: \"$start_step\"" 1>&2
|
||||
exit 1
|
||||
else
|
||||
echo "** starting at step: \"$start_step\" **"
|
||||
echo
|
||||
fi
|
||||
fi
|
||||
|
||||
while [[ $step_index -lt ${#steps[@]} ]]; do
|
||||
step="${steps[$step_index]}"
|
||||
cmd="ci/${step}.sh"
|
||||
$cmd
|
||||
step_index=$((step_index + 1))
|
||||
done
|
@@ -7,7 +7,7 @@ source multinode-demo/common.sh
|
||||
|
||||
rm -rf config/run/init-completed config/ledger config/snapshot-ledger
|
||||
|
||||
SOLANA_RUN_SH_VALIDATOR_ARGS="--snapshot-interval-slots 200" timeout 120 ./run.sh &
|
||||
SOLANA_RUN_SH_VALIDATOR_ARGS="--snapshot-interval-slots 200" timeout 120 ./scripts/run.sh &
|
||||
pid=$!
|
||||
|
||||
attempts=20
|
||||
|
ci/sbf-tools-info.sh (new executable file)
@@ -0,0 +1,24 @@
|
||||
#!/usr/bin/env bash
|
||||
#
|
||||
# Finds the version of sbf-tools used by this source tree.
|
||||
#
|
||||
# stdout of this script may be eval-ed.
|
||||
#
|
||||
|
||||
here="$(dirname "$0")"
|
||||
|
||||
SBF_TOOLS_VERSION=unknown
|
||||
|
||||
cargo_build_bpf_main="${here}/../sdk/cargo-build-bpf/src/main.rs"
|
||||
if [[ -f "${cargo_build_bpf_main}" ]]; then
|
||||
version=$(sed -e 's/^.*bpf_tools_version\s*=\s*"\(v[0-9.]\+\)".*/\1/;t;d' "${cargo_build_bpf_main}")
|
||||
if [[ ${version} != '' ]]; then
|
||||
SBF_TOOLS_VERSION="${version}"
|
||||
else
|
||||
echo '--- unable to parse SBF_TOOLS_VERSION'
|
||||
fi
|
||||
else
|
||||
echo "--- '${cargo_build_bpf_main}' not present"
|
||||
fi
|
||||
|
||||
echo SBF_TOOLS_VERSION="${SBF_TOOLS_VERSION}"
|
@@ -76,7 +76,7 @@ RestartForceExitStatus=SIGPIPE
|
||||
TimeoutStartSec=10
|
||||
TimeoutStopSec=0
|
||||
KillMode=process
|
||||
LimitNOFILE=700000
|
||||
LimitNOFILE=1000000
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
|
@@ -8,5 +8,5 @@ source "$HERE"/utils.sh
|
||||
ensure_env || exit 1
|
||||
|
||||
# Allow more files to be opened by a user
|
||||
echo "* - nofile 700000" > /etc/security/limits.d/90-solana-nofiles.conf
|
||||
echo "* - nofile 1000000" > /etc/security/limits.d/90-solana-nofiles.conf
|
||||
|
||||
|
@@ -49,6 +49,10 @@ _ "$cargo" nightly bench --manifest-path runtime/Cargo.toml ${V:+--verbose} \
|
||||
_ "$cargo" nightly bench --manifest-path gossip/Cargo.toml ${V:+--verbose} \
|
||||
-- -Z unstable-options --format=json | tee -a "$BENCH_FILE"
|
||||
|
||||
# Run poh benches
|
||||
_ "$cargo" nightly bench --manifest-path poh/Cargo.toml ${V:+--verbose} \
|
||||
-- -Z unstable-options --format=json | tee -a "$BENCH_FILE"
|
||||
|
||||
# Run core benches
|
||||
_ "$cargo" nightly bench --manifest-path core/Cargo.toml ${V:+--verbose} \
|
||||
-- -Z unstable-options --format=json | tee -a "$BENCH_FILE"
|
||||
|
@@ -14,7 +14,7 @@ scripts/increment-cargo-version.sh check
|
||||
|
||||
# Disallow uncommitted Cargo.lock changes
|
||||
(
|
||||
_ scripts/cargo-for-all-lock-files.sh tree
|
||||
_ scripts/cargo-for-all-lock-files.sh tree >/dev/null
|
||||
set +e
|
||||
if ! _ git diff --exit-code; then
|
||||
echo -e "\nError: Uncommitted Cargo.lock changes" 1>&2
|
||||
@@ -35,8 +35,10 @@ echo --- build environment
|
||||
"$cargo" stable clippy --version --verbose
|
||||
"$cargo" nightly clippy --version --verbose
|
||||
|
||||
# audit is done only with stable
|
||||
# audit is done only with "$cargo stable"
|
||||
"$cargo" stable audit --version
|
||||
|
||||
grcov --version
|
||||
)
|
||||
|
||||
export RUST_BACKTRACE=1
|
||||
@@ -65,7 +67,8 @@ _ ci/order-crates-for-publishing.py
|
||||
|
||||
# -Z... is needed because of clippy bug: https://github.com/rust-lang/rust-clippy/issues/4612
|
||||
# run nightly clippy for `sdk/` as there's a moderate amount of nightly-only code there
|
||||
_ "$cargo" nightly clippy -Zunstable-options --workspace --all-targets -- --deny=warnings --deny=clippy::integer_arithmetic
|
||||
_ "$cargo" nightly clippy -Zunstable-options --workspace --all-targets -- \
|
||||
--deny=warnings --deny=clippy::integer_arithmetic --allow=clippy::inconsistent_struct_constructor
|
||||
|
||||
_ "$cargo" stable fmt --all -- --check
|
||||
|
||||
|
ci/test-downstream-builds.sh (new executable file)
@@ -0,0 +1,9 @@
#!/usr/bin/env bash

cd "$(dirname "$0")/.."

export CI_LOCAL_RUN=true

set -ex

scripts/build-downstream-projects.sh
|
ci/test-local-cluster-flakey.sh (new symbolic link)
@@ -0,0 +1 @@
test-stable.sh
|
ci/test-local-cluster-slow.sh (new symbolic link)
@@ -0,0 +1 @@
test-stable.sh
|
ci/test-stable-bpf.sh (new symbolic link)
@@ -0,0 +1 @@
test-stable.sh
|
@@ -21,10 +21,6 @@ export RUST_BACKTRACE=1
|
||||
export RUSTFLAGS="-D warnings"
|
||||
source scripts/ulimit-n.sh
|
||||
|
||||
# Clear the C dependency files, if dependency moves these files are not regenerated
|
||||
test -d target/debug/bpf && find target/debug/bpf -name '*.d' -delete
|
||||
test -d target/release/bpf && find target/release/bpf -name '*.d' -delete
|
||||
|
||||
# Limit compiler jobs to reduce memory usage
|
||||
# on machines with 2gb/thread of memory
|
||||
NPROC=$(nproc)
|
||||
@@ -35,17 +31,25 @@ case $testName in
|
||||
test-stable)
|
||||
_ "$cargo" stable test --jobs "$NPROC" --all --exclude solana-local-cluster ${V:+--verbose} -- --nocapture
|
||||
;;
|
||||
test-stable-perf)
|
||||
test-stable-bpf)
|
||||
# Clear the C dependency files, if dependency moves these files are not regenerated
|
||||
test -d target/debug/bpf && find target/debug/bpf -name '*.d' -delete
|
||||
test -d target/release/bpf && find target/release/bpf -name '*.d' -delete
|
||||
|
||||
# rustfilt required for dumping BPF assembly listings
|
||||
"$cargo" install rustfilt
|
||||
|
||||
# solana-keygen required when building C programs
|
||||
_ "$cargo" build --manifest-path=keygen/Cargo.toml
|
||||
export PATH="$PWD/target/debug":$PATH
|
||||
cargo_build_bpf="$(realpath ./cargo-build-bpf)"
|
||||
|
||||
# BPF solana-sdk legacy compile test
|
||||
./cargo-build-bpf --manifest-path sdk/Cargo.toml
|
||||
"$cargo_build_bpf" --manifest-path sdk/Cargo.toml
|
||||
|
||||
# BPF Program unit tests
|
||||
"$cargo" test --manifest-path programs/bpf/Cargo.toml
|
||||
cargo-build-bpf --manifest-path programs/bpf/Cargo.toml --bpf-sdk sdk/bpf
|
||||
"$cargo_build_bpf" --manifest-path programs/bpf/Cargo.toml --bpf-sdk sdk/bpf
|
||||
|
||||
# BPF program system tests
|
||||
_ make -C programs/bpf/c tests
|
||||
@@ -53,11 +57,32 @@ test-stable-perf)
|
||||
--manifest-path programs/bpf/Cargo.toml \
|
||||
--no-default-features --features=bpf_c,bpf_rust -- --nocapture
|
||||
|
||||
# Dump BPF program assembly listings
|
||||
for bpf_test in programs/bpf/rust/*; do
|
||||
if pushd "$bpf_test"; then
|
||||
"$cargo_build_bpf" --dump
|
||||
popd
|
||||
fi
|
||||
done
|
||||
|
||||
# BPF program instruction count assertion
|
||||
bpf_target_path=programs/bpf/target
|
||||
_ "$cargo" stable test \
|
||||
--manifest-path programs/bpf/Cargo.toml \
|
||||
--no-default-features --features=bpf_c,bpf_rust assert_instruction_count \
|
||||
-- --nocapture &> "${bpf_target_path}"/deploy/instuction_counts.txt
|
||||
|
||||
bpf_dump_archive="bpf-dumps.tar.bz2"
|
||||
rm -f "$bpf_dump_archive"
|
||||
tar cjvf "$bpf_dump_archive" "${bpf_target_path}"/{deploy/*.txt,bpfel-unknown-unknown/release/*.so}
|
||||
exit 0
|
||||
;;
|
||||
test-stable-perf)
|
||||
if [[ $(uname) = Linux ]]; then
|
||||
# Enable persistence mode to keep the CUDA kernel driver loaded, avoiding a
|
||||
# lengthy and unexpected delay the first time CUDA is involved when the driver
|
||||
# is not yet loaded.
|
||||
sudo --non-interactive ./net/scripts/enable-nvidia-persistence-mode.sh
|
||||
sudo --non-interactive ./net/scripts/enable-nvidia-persistence-mode.sh || true
|
||||
|
||||
rm -rf target/perf-libs
|
||||
./fetch-perf-libs.sh
|
||||
@@ -75,7 +100,17 @@ test-stable-perf)
|
||||
;;
|
||||
test-local-cluster)
|
||||
_ "$cargo" stable build --release --bins ${V:+--verbose}
|
||||
_ "$cargo" stable test --release --package solana-local-cluster ${V:+--verbose} -- --nocapture --test-threads=1
|
||||
_ "$cargo" stable test --release --package solana-local-cluster --test local_cluster ${V:+--verbose} -- --nocapture --test-threads=1
|
||||
exit 0
|
||||
;;
|
||||
test-local-cluster-flakey)
|
||||
_ "$cargo" stable build --release --bins ${V:+--verbose}
|
||||
_ "$cargo" stable test --release --package solana-local-cluster --test local_cluster_flakey ${V:+--verbose} -- --nocapture --test-threads=1
|
||||
exit 0
|
||||
;;
|
||||
test-local-cluster-slow)
|
||||
_ "$cargo" stable build --release --bins ${V:+--verbose}
|
||||
_ "$cargo" stable test --release --package solana-local-cluster --test local_cluster_slow ${V:+--verbose} -- --nocapture --test-threads=1
|
||||
exit 0
|
||||
;;
|
||||
*)
|
||||
|
@@ -19,13 +19,24 @@ upload-ci-artifact() {
|
||||
upload-s3-artifact() {
|
||||
echo "--- artifact: $1 to $2"
|
||||
(
|
||||
set -x
|
||||
docker run \
|
||||
--rm \
|
||||
--env AWS_ACCESS_KEY_ID \
|
||||
--env AWS_SECRET_ACCESS_KEY \
|
||||
--volume "$PWD:/solana" \
|
||||
eremite/aws-cli:2018.12.18 \
|
||||
args=(
|
||||
--rm
|
||||
--env AWS_ACCESS_KEY_ID
|
||||
--env AWS_SECRET_ACCESS_KEY
|
||||
--volume "$PWD:/solana"
|
||||
|
||||
)
|
||||
if [[ $(uname -m) = arm64 ]]; then
|
||||
# Ref: https://blog.jaimyn.dev/how-to-build-multi-architecture-docker-images-on-an-m1-mac/#tldr
|
||||
args+=(
|
||||
--platform linux/amd64
|
||||
)
|
||||
fi
|
||||
args+=(
|
||||
eremite/aws-cli:2018.12.18
|
||||
/usr/bin/s3cmd --acl-public put "$1" "$2"
|
||||
)
|
||||
set -x
|
||||
docker run "${args[@]}"
|
||||
)
|
||||
}
|
||||
|
@@ -1,6 +1,6 @@
[package]
name = "solana-clap-utils"
version = "1.7.0"
version = "1.8.17"
description = "Solana utilities for the clap"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
@@ -12,10 +12,11 @@ edition = "2018"
[dependencies]
clap = "2.33.0"
rpassword = "4.0"
solana-remote-wallet = { path = "../remote-wallet", version = "=1.7.0" }
solana-sdk = { path = "../sdk", version = "=1.7.0" }
solana-perf = { path = "../perf", version = "=1.8.17" }
solana-remote-wallet = { path = "../remote-wallet", version = "=1.8.17" }
solana-sdk = { path = "../sdk", version = "=1.8.17" }
thiserror = "1.0.21"
tiny-bip39 = "0.8.0"
tiny-bip39 = "0.8.1"
uriparse = "0.6.3"
url = "2.1.0"
chrono = "0.4"
|
@@ -196,10 +196,12 @@ pub fn commitment_of(matches: &ArgMatches<'_>, name: &str) -> Option<CommitmentC
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use clap::{App, Arg};
|
||||
use solana_sdk::signature::write_keypair_file;
|
||||
use std::fs;
|
||||
use {
|
||||
super::*,
|
||||
clap::{App, Arg},
|
||||
solana_sdk::signature::write_keypair_file,
|
||||
std::fs,
|
||||
};
|
||||
|
||||
fn app<'ab, 'v>() -> App<'ab, 'v> {
|
||||
App::new("test")
|
||||
|
@@ -7,8 +7,7 @@ use {
|
||||
pubkey::{Pubkey, MAX_SEED_LEN},
|
||||
signature::{read_keypair_file, Signature},
|
||||
},
|
||||
std::fmt::Display,
|
||||
std::str::FromStr,
|
||||
std::{fmt::Display, str::FromStr},
|
||||
};
|
||||
|
||||
fn is_parsable_generic<U, T>(string: T) -> Result<(), String>
|
||||
@@ -354,6 +353,27 @@ where
|
||||
}
|
||||
}
|
||||
|
||||
pub fn is_niceness_adjustment_valid<T>(value: T) -> Result<(), String>
|
||||
where
|
||||
T: AsRef<str> + Display,
|
||||
{
|
||||
let adjustment = value.as_ref().parse::<i8>().map_err(|err| {
|
||||
format!(
|
||||
"error parsing niceness adjustment value '{}': {}",
|
||||
value, err
|
||||
)
|
||||
})?;
|
||||
if solana_perf::thread::is_renice_allowed(adjustment) {
|
||||
Ok(())
|
||||
} else {
|
||||
Err(String::from(
|
||||
"niceness adjustment supported only on Linux; negative adjustment \
|
||||
(priority increase) requires root or CAP_SYS_NICE (see `man 7 capabilities` \
|
||||
for details)",
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
@@ -370,4 +390,11 @@ mod tests {
|
||||
assert!(is_derivation("a/b").is_err());
|
||||
assert!(is_derivation("0/4294967296").is_err());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_is_niceness_adjustment_valid() {
|
||||
assert_eq!(is_niceness_adjustment_valid("0"), Ok(()));
|
||||
assert!(is_niceness_adjustment_valid("128").is_err());
|
||||
assert!(is_niceness_adjustment_valid("-129").is_err());
|
||||
}
|
||||
}
|
||||
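The new `is_niceness_adjustment_valid` helper follows the shape of the other `is_*` validators in this module, so it can be passed straight to clap 2.x. A hedged sketch of wiring it into an argument (the argument name and program are invented for illustration):

```rust
use {
    clap::{App, Arg},
    solana_clap_utils::input_validators::is_niceness_adjustment_valid,
};

fn main() {
    let matches = App::new("demo")
        .arg(
            Arg::with_name("rpc_niceness_adjustment")
                .long("rpc-niceness-adjustment")
                .takes_value(true)
                .default_value("0")
                // clap 2.x hands the validator a String; the helper accepts any
                // AsRef<str> + Display, so the function can be passed directly.
                .validator(is_niceness_adjustment_valid)
                .help("niceness adjustment applied to service threads"),
        )
        .get_matches();
    println!("{:?}", matches.value_of("rpc_niceness_adjustment"));
}
```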
|
@@ -1,3 +1,14 @@
|
||||
//! Loading signers and keypairs from the command line.
|
||||
//!
|
||||
//! This module contains utilities for loading [Signer]s and [Keypair]s from
|
||||
//! standard signing sources, from the command line, as in the Solana CLI.
|
||||
//!
|
||||
//! The key function here is [`signer_from_path`], which loads a `Signer` from
|
||||
//! one of several possible sources by interpreting a "path" command line
|
||||
//! argument. Its documentation includes a description of all possible signing
|
||||
//! sources supported by the Solana CLI. Many other functions here are
|
||||
//! variations on, or delegate to, `signer_from_path`.
|
||||
|
||||
use {
|
||||
crate::{
|
||||
input_parsers::{pubkeys_sigs_of, STDOUT_OUTFILE_TOKEN},
|
||||
@@ -24,9 +35,11 @@ use {
|
||||
},
|
||||
},
|
||||
std::{
|
||||
cell::RefCell,
|
||||
convert::TryFrom,
|
||||
error,
|
||||
io::{stdin, stdout, Write},
|
||||
ops::Deref,
|
||||
process::exit,
|
||||
str::FromStr,
|
||||
sync::Arc,
|
||||
@@ -89,33 +102,142 @@ impl CliSignerInfo {
|
||||
.collect()
|
||||
}
|
||||
}
|
||||
#[derive(Debug)]
|
||||
|
||||
/// A command line argument that loads a default signer in absence of other signers.
|
||||
///
|
||||
/// This type manages a default signing source which may be overridden by other
|
||||
/// signing sources via its [`generate_unique_signers`] method.
|
||||
///
|
||||
/// [`generate_unique_signers`]: DefaultSigner::generate_unique_signers
|
||||
///
|
||||
/// `path` is a signing source as documented by [`signer_from_path`], and
|
||||
/// `arg_name` is the name of its [clap] command line argument, which is passed
|
||||
/// to `signer_from_path` as its `keypair_name` argument.
|
||||
#[derive(Debug, Default)]
|
||||
pub struct DefaultSigner {
|
||||
/// The name of the signers command line argument.
|
||||
pub arg_name: String,
|
||||
/// The signing source.
|
||||
pub path: String,
|
||||
is_path_checked: RefCell<bool>,
|
||||
}
|
||||
|
||||
impl DefaultSigner {
|
||||
pub fn new(path: String) -> Self {
|
||||
/// Create a new `DefaultSigner`.
|
||||
///
|
||||
/// `path` is a signing source as documented by [`signer_from_path`], and
|
||||
/// `arg_name` is the name of its [clap] command line argument, which is
|
||||
/// passed to `signer_from_path` as its `keypair_name` argument.
|
||||
///
|
||||
/// [clap]: https://docs.rs/clap
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```no_run
|
||||
/// use clap::{App, Arg, value_t_or_exit};
|
||||
/// use solana_clap_utils::keypair::DefaultSigner;
|
||||
/// use solana_clap_utils::offline::OfflineArgs;
|
||||
///
|
||||
/// let clap_app = App::new("my-program")
|
||||
/// // The argument we'll parse as a signer "path"
|
||||
/// .arg(Arg::with_name("keypair")
|
||||
/// .required(true)
|
||||
/// .help("The default signer"))
|
||||
/// .offline_args();
|
||||
///
|
||||
/// let clap_matches = clap_app.get_matches();
|
||||
/// let keypair_str = value_t_or_exit!(clap_matches, "keypair", String);
|
||||
///
|
||||
/// let default_signer = DefaultSigner::new("keypair", &keypair_str);
|
||||
/// # assert!(default_signer.arg_name.len() > 0);
|
||||
/// assert_eq!(default_signer.path, keypair_str);
|
||||
/// # Ok::<(), Box<dyn std::error::Error>>(())
|
||||
/// ```
|
||||
pub fn new<AN: AsRef<str>, P: AsRef<str>>(arg_name: AN, path: P) -> Self {
|
||||
let arg_name = arg_name.as_ref().to_string();
|
||||
let path = path.as_ref().to_string();
|
||||
Self {
|
||||
arg_name: "keypair".to_string(),
|
||||
arg_name,
|
||||
path,
|
||||
..Self::default()
|
||||
}
|
||||
}
|
||||
pub fn from_path(path: String) -> Result<Self, Box<dyn error::Error>> {
|
||||
std::fs::metadata(&path)
|
||||
.map_err(|_| {
|
||||
std::io::Error::new(
|
||||
std::io::ErrorKind::Other,
|
||||
format!(
|
||||
"No default signer found, run \"solana-keygen new -o {}\" to create a new one",
|
||||
path
|
||||
),
|
||||
)
|
||||
.into()
|
||||
})
|
||||
.map(|_| Self::new(path))
|
||||
|
||||
fn path(&self) -> Result<&str, Box<dyn std::error::Error>> {
|
||||
if !self.is_path_checked.borrow().deref() {
|
||||
parse_signer_source(&self.path)
|
||||
.and_then(|s| {
|
||||
if let SignerSourceKind::Filepath(path) = &s.kind {
|
||||
std::fs::metadata(path).map(|_| ()).map_err(|e| e.into())
|
||||
} else {
|
||||
Ok(())
|
||||
}
|
||||
})
|
||||
.map_err(|_| {
|
||||
std::io::Error::new(
|
||||
std::io::ErrorKind::Other,
|
||||
format!(
|
||||
"No default signer found, run \"solana-keygen new -o {}\" to create a new one",
|
||||
self.path
|
||||
),
|
||||
)
|
||||
})?;
|
||||
*self.is_path_checked.borrow_mut() = true;
|
||||
}
|
||||
Ok(&self.path)
|
||||
}
|
||||
|
||||
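`DefaultSigner` now defers the existence check on its signing source until the path is first needed, memoizing the result in a `RefCell<bool>` so `--help` and offline flows never touch the filesystem. A stripped-down sketch of the same lazy-check pattern (the type and names are illustrative, not the crate's API):

```rust
use std::{cell::RefCell, fs, io};

struct LazyCheckedPath {
    path: String,
    checked: RefCell<bool>,
}

impl LazyCheckedPath {
    fn new(path: String) -> Self {
        Self { path, checked: RefCell::new(false) }
    }

    /// Validate on first access only; later calls skip the filesystem.
    fn path(&self) -> io::Result<&str> {
        if !*self.checked.borrow() {
            fs::metadata(&self.path)?; // errors if the file is missing
            *self.checked.borrow_mut() = true;
        }
        Ok(&self.path)
    }
}

fn main() {
    let p = LazyCheckedPath::new("/tmp/does-not-need-to-exist-yet".to_string());
    // Nothing is checked until the path is actually requested.
    match p.path() {
        Ok(path) => println!("usable signing source: {}", path),
        Err(err) => println!("no default signer: {}", err),
    }
}
```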
/// Generate a unique set of signers, possibly excluding this default signer.
|
||||
///
|
||||
/// This function allows a command line application to have a default
|
||||
/// signer, perhaps representing a default wallet, but to override that
|
||||
/// signer and instead sign with one or more other signers.
|
||||
///
|
||||
/// `bulk_signers` is a vector of signers, all of which are optional. If any
|
||||
/// of those signers is `None`, then the default signer will be loaded; if
|
||||
/// all of those signers are `Some`, then the default signer will not be
|
||||
/// loaded.
|
||||
///
|
||||
/// The returned value includes all of the `bulk_signers` that were not
|
||||
/// `None`, and maybe the default signer, if it was loaded.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```no_run
|
||||
/// use clap::{App, Arg, value_t_or_exit};
|
||||
/// use solana_clap_utils::keypair::{DefaultSigner, signer_from_path};
|
||||
/// use solana_clap_utils::offline::OfflineArgs;
|
||||
/// use solana_sdk::signer::Signer;
|
||||
///
|
||||
/// let clap_app = App::new("my-program")
|
||||
/// // The argument we'll parse as a signer "path"
|
||||
/// .arg(Arg::with_name("keypair")
|
||||
/// .required(true)
|
||||
/// .help("The default signer"))
|
||||
/// .arg(Arg::with_name("payer")
|
||||
/// .long("payer")
|
||||
/// .help("The account paying for the transaction"))
|
||||
/// .offline_args();
|
||||
///
|
||||
/// let mut wallet_manager = None;
|
||||
///
|
||||
/// let clap_matches = clap_app.get_matches();
|
||||
/// let keypair_str = value_t_or_exit!(clap_matches, "keypair", String);
|
||||
/// let maybe_payer = clap_matches.value_of("payer");
|
||||
///
|
||||
/// let default_signer = DefaultSigner::new("keypair", &keypair_str);
|
||||
/// let maybe_payer_signer = maybe_payer.map(|payer| {
|
||||
/// signer_from_path(&clap_matches, payer, "payer", &mut wallet_manager)
|
||||
/// }).transpose()?;
|
||||
/// let bulk_signers = vec![maybe_payer_signer];
|
||||
///
|
||||
/// let unique_signers = default_signer.generate_unique_signers(
|
||||
/// bulk_signers,
|
||||
/// &clap_matches,
|
||||
/// &mut wallet_manager,
|
||||
/// )?;
|
||||
/// # Ok::<(), Box<dyn std::error::Error>>(())
|
||||
/// ```
|
||||
pub fn generate_unique_signers(
|
||||
&self,
|
||||
bulk_signers: Vec<Option<Box<dyn Signer>>>,
|
||||
@@ -140,21 +262,111 @@ impl DefaultSigner {
|
||||
})
|
||||
}
|
||||
|
||||
/// Loads the default [Signer] from one of several possible sources.
|
||||
///
|
||||
/// The `path` is not strictly a file system path, but is interpreted as
|
||||
/// various types of _signing source_, depending on its format, one of which
|
||||
/// is a path to a keypair file. Some sources may require user interaction
|
||||
/// in the course of calling this function.
|
||||
///
|
||||
/// This simply delegates to the [`signer_from_path`] free function, passing
|
||||
/// it the `DefaultSigner`s `path` and `arg_name` fields as the `path` and
|
||||
/// `keypair_name` arguments.
|
||||
///
|
||||
/// See the [`signer_from_path`] free function for full documentation of how
|
||||
/// this function interprets its arguments.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```no_run
|
||||
/// use clap::{App, Arg, value_t_or_exit};
|
||||
/// use solana_clap_utils::keypair::DefaultSigner;
|
||||
/// use solana_clap_utils::offline::OfflineArgs;
|
||||
///
|
||||
/// let clap_app = App::new("my-program")
|
||||
/// // The argument we'll parse as a signer "path"
|
||||
/// .arg(Arg::with_name("keypair")
|
||||
/// .required(true)
|
||||
/// .help("The default signer"))
|
||||
/// .offline_args();
|
||||
///
|
||||
/// let clap_matches = clap_app.get_matches();
|
||||
/// let keypair_str = value_t_or_exit!(clap_matches, "keypair", String);
|
||||
/// let default_signer = DefaultSigner::new("keypair", &keypair_str);
|
||||
/// let mut wallet_manager = None;
|
||||
///
|
||||
/// let signer = default_signer.signer_from_path(
|
||||
/// &clap_matches,
|
||||
/// &mut wallet_manager,
|
||||
/// )?;
|
||||
/// # Ok::<(), Box<dyn std::error::Error>>(())
|
||||
/// ```
|
||||
pub fn signer_from_path(
|
||||
&self,
|
||||
matches: &ArgMatches,
|
||||
wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
|
||||
) -> Result<Box<dyn Signer>, Box<dyn std::error::Error>> {
|
||||
signer_from_path(matches, &self.path, &self.arg_name, wallet_manager)
|
||||
signer_from_path(matches, self.path()?, &self.arg_name, wallet_manager)
|
||||
}
|
||||
|
||||
/// Loads the default [Signer] from one of several possible sources.
|
||||
///
|
||||
/// The `path` is not strictly a file system path, but is interpreted as
|
||||
/// various types of _signing source_, depending on its format, one of which
|
||||
/// is a path to a keypair file. Some sources may require user interaction
|
||||
/// in the course of calling this function.
|
||||
///
|
||||
/// This simply delegates to the [`signer_from_path_with_config`] free
|
||||
/// function, passing it the `DefaultSigner`s `path` and `arg_name` fields
|
||||
/// as the `path` and `keypair_name` arguments.
|
||||
///
|
||||
/// See the [`signer_from_path`] free function for full documentation of how
|
||||
/// this function interprets its arguments.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```no_run
|
||||
/// use clap::{App, Arg, value_t_or_exit};
|
||||
/// use solana_clap_utils::keypair::{SignerFromPathConfig, DefaultSigner};
|
||||
/// use solana_clap_utils::offline::OfflineArgs;
|
||||
///
|
||||
/// let clap_app = App::new("my-program")
|
||||
/// // The argument we'll parse as a signer "path"
|
||||
/// .arg(Arg::with_name("keypair")
|
||||
/// .required(true)
|
||||
/// .help("The default signer"))
|
||||
/// .offline_args();
|
||||
///
|
||||
/// let clap_matches = clap_app.get_matches();
|
||||
/// let keypair_str = value_t_or_exit!(clap_matches, "keypair", String);
|
||||
/// let default_signer = DefaultSigner::new("keypair", &keypair_str);
|
||||
/// let mut wallet_manager = None;
|
||||
///
|
||||
/// // Allow pubkey signers without accompanying signatures
|
||||
/// let config = SignerFromPathConfig {
|
||||
/// allow_null_signer: true,
|
||||
/// };
|
||||
///
|
||||
/// let signer = default_signer.signer_from_path_with_config(
|
||||
/// &clap_matches,
|
||||
/// &mut wallet_manager,
|
||||
/// &config,
|
||||
/// )?;
|
||||
/// # Ok::<(), Box<dyn std::error::Error>>(())
|
||||
/// ```
|
||||
pub fn signer_from_path_with_config(
|
||||
&self,
|
||||
matches: &ArgMatches,
|
||||
wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
|
||||
config: &SignerFromPathConfig,
|
||||
) -> Result<Box<dyn Signer>, Box<dyn std::error::Error>> {
|
||||
signer_from_path_with_config(matches, &self.path, &self.arg_name, wallet_manager, config)
|
||||
signer_from_path_with_config(
|
||||
matches,
|
||||
self.path()?,
|
||||
&self.arg_name,
|
||||
wallet_manager,
|
||||
config,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -234,6 +446,15 @@ pub(crate) fn parse_signer_source<S: AsRef<str>>(
|
||||
let source = {
|
||||
#[cfg(target_family = "windows")]
|
||||
{
|
||||
// trim matched single-quotes since cmd.exe won't
|
||||
let mut source = source;
|
||||
while let Some(trimmed) = source.strip_prefix('\'') {
|
||||
source = if let Some(trimmed) = trimmed.strip_suffix('\'') {
|
||||
trimmed
|
||||
} else {
|
||||
break;
|
||||
}
|
||||
}
|
||||
source.replace("\\", "/")
|
||||
}
|
||||
#[cfg(not(target_family = "windows"))]
|
||||
@@ -277,7 +498,9 @@ pub(crate) fn parse_signer_source<S: AsRef<str>>(
|
||||
ASK_KEYWORD => Ok(SignerSource::new_legacy(SignerSourceKind::Prompt)),
|
||||
_ => match Pubkey::from_str(source.as_str()) {
|
||||
Ok(pubkey) => Ok(SignerSource::new(SignerSourceKind::Pubkey(pubkey))),
|
||||
Err(_) => Ok(SignerSource::new(SignerSourceKind::Filepath(source))),
|
||||
Err(_) => std::fs::metadata(source.as_str())
|
||||
.map(|_| SignerSource::new(SignerSourceKind::Filepath(source)))
|
||||
.map_err(|err| err.into()),
|
||||
},
|
||||
}
|
||||
}
|
||||
@@ -298,19 +521,167 @@ pub fn presigner_from_pubkey_sigs(
|
||||
})
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
#[derive(Debug, Default)]
|
||||
pub struct SignerFromPathConfig {
|
||||
pub allow_null_signer: bool,
|
||||
}
|
||||
|
||||
impl Default for SignerFromPathConfig {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
allow_null_signer: false,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Loads a [Signer] from one of several possible sources.
|
||||
///
|
||||
/// The `path` is not strictly a file system path, but is interpreted as various
|
||||
/// types of _signing source_, depending on its format, one of which is a path
|
||||
/// to a keypair file. Some sources may require user interaction in the course
|
||||
/// of calling this function.
|
||||
///
|
||||
/// The result of this function is a boxed object of the [Signer] trait. To load
|
||||
/// a concrete [Keypair], use the [keypair_from_path] function, though note that
|
||||
/// it does not support all signer sources.
|
||||
///
|
||||
/// The `matches` argument is the same set of parsed [clap] matches from which
|
||||
/// `path` was parsed. It is used to parse various additional command line
|
||||
/// arguments, depending on which signing source is requested, as described
|
||||
/// below in "Signing sources".
|
||||
///
|
||||
/// [clap]: https://docs.rs/clap
|
||||
///
|
||||
/// The `keypair_name` argument is the "name" of the signer, and is typically
|
||||
/// the name of the clap argument from which the `path` argument was parsed,
|
||||
/// like "keypair", "from", or "fee-payer". It is used solely for interactively
|
||||
/// prompting the user, either when entering seed phrases or selecting from
|
||||
/// multiple hardware wallets.
|
||||
///
|
||||
/// The `wallet_manager` is used for establishing connections to a hardware
|
||||
/// device such as Ledger. If `wallet_manager` is a reference to `None`, and a
|
||||
/// hardware signer is requested, then this function will attempt to create a
|
||||
/// wallet manager, assigning it to the mutable `wallet_manager` reference. This
|
||||
/// argument is typically a reference to `None`.
|
||||
///
|
||||
/// # Signing sources
|
||||
///
|
||||
/// The `path` argument can simply be a path to a keypair file, but it may also
|
||||
/// be interpreted in several other ways, in the following order.
|
||||
///
|
||||
/// Firstly, the `path` argument may be interpreted as a [URI], with the URI
|
||||
/// scheme indicating where to load the signer from. If it parses as a URI, then
|
||||
/// the following schemes are supported:
|
||||
///
|
||||
/// - `file:` — Read the keypair from a JSON keypair file. The path portion
|
||||
/// of the URI is the file path.
|
||||
///
|
||||
/// - `stdin:` — Read the keypair from stdin, in the JSON format used by
|
||||
/// the keypair file.
|
||||
///
|
||||
/// Non-scheme parts of the URI are ignored.
|
||||
///
|
||||
/// - `prompt:` — The user will be prompted at the command line
|
||||
/// for their seed phrase and passphrase.
|
||||
///
|
||||
/// In this URI the [query string][qs] may contain zero or one of the
|
||||
/// following key/value pairs that determine the [BIP44 derivation path][dp]
|
||||
/// of the private key from the seed:
|
||||
///
|
||||
/// - `key` — In this case the value is either one or two numerical
|
||||
/// indexes separated by a slash, which represent the "account", and
|
||||
/// "change" components of the BIP44 derivation path. Example: `key=0/0`.
|
||||
///
|
||||
/// - `full-path` — In this case the value is a full derivation path,
|
||||
/// and the user is responsible for ensuring it is correct. Example:
|
||||
/// `full-path=m/44/501/0/0/0`.
|
||||
///
|
||||
/// If neither is provided, then the default derivation path is used.
|
||||
///
|
||||
/// Note that when specifying derivation paths, this routine will convert all
|
||||
/// indexes into ["hardened"] indexes, even if written as "normal" indexes.
|
||||
///
|
||||
/// Other components of the URI besides the scheme and query string are ignored.
|
||||
///
|
||||
/// If the "skip_seed_phrase_validation" argument, as defined in
|
||||
/// [SKIP_SEED_PHRASE_VALIDATION_ARG] is found in `matches`, then the keypair
|
||||
/// seed will be generated directly from the seed phrase, without parsing or
|
||||
/// validating it as a BIP39 seed phrase. This allows the use of non-BIP39 seed
|
||||
/// phrases.
|
||||
///
|
||||
/// - `usb:` — Use a USB hardware device as the signer. In this case, the
|
||||
/// URI host indicates the device type, and is required. The only currently valid host
|
||||
/// value is "ledger".
|
||||
///
|
||||
/// Optionally, the first segment of the URI path indicates the base-58
|
||||
/// encoded pubkey of the wallet, and the "account" and "change" indices of
|
||||
/// the derivation path can be specified with the `key=` query parameter, as
|
||||
/// with the `prompt:` URI.
|
||||
///
|
||||
/// Examples:
|
||||
///
|
||||
/// - `usb://ledger`
|
||||
/// - `usb://ledger?key=0/0`
|
||||
/// - `usb://ledger/9rPVSygg3brqghvdZ6wsL2i5YNQTGhXGdJzF65YxaCQd`
|
||||
/// - `usb://ledger/9rPVSygg3brqghvdZ6wsL2i5YNQTGhXGdJzF65YxaCQd?key=0/0`
|
||||
///
|
||||
/// Next the `path` argument may be one of the following strings:
|
||||
///
|
||||
/// - `-` — Read the keypair from stdin. This is the same as the `stdin:`
|
||||
/// URI scheme.
|
||||
///
|
||||
/// - `ASK` — The user will be prompted at the command line for their seed
|
||||
/// phrase and passphrase. _This uses a legacy key derivation method and should
|
||||
/// usually be avoided in favor of `prompt:`._
|
||||
///
|
||||
/// Next, if the `path` argument parses as a base-58 public key, then the signer
|
||||
/// is created without a private key, but with presigned signatures, each parsed
|
||||
/// from the additional command line arguments, provided by the `matches`
|
||||
/// argument.
|
||||
///
|
||||
/// In this case, the remaining command line arguments are searched for clap
|
||||
/// arguments named "signer", as defined by [SIGNER_ARG], and each is parsed as
|
||||
/// a key-value pair of the form "pubkey=signature", where `pubkey` is the same
|
||||
/// base-58 public key, and `signature` is a serialized signature produced by
|
||||
/// the corresponding keypair. One of the "signer" signatures must be for the
|
||||
/// pubkey specified in `path` or this function will return an error; unless the
|
||||
/// "sign_only" clap argument, as defined by [SIGN_ONLY_ARG], is present in
|
||||
/// `matches`, in which case the signer will be created with no associated
|
||||
/// signatures.
|
||||
///
|
||||
/// Finally, if `path`, interpreted as a file path, represents a file on disk,
|
||||
/// then the signer is created by reading that file as a JSON-serialized
|
||||
/// keypair. This is the same as the `file:` URI scheme.
|
||||
///
|
||||
/// [qs]: https://en.wikipedia.org/wiki/Query_string
|
||||
/// [dp]: https://github.com/bitcoin/bips/blob/master/bip-0044.mediawiki
|
||||
/// [URI]: https://en.wikipedia.org/wiki/Uniform_Resource_Identifier
|
||||
/// ["hardened"]: https://wiki.trezor.io/Hardened_and_non-hardened_derivation
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// This shows a reasonable way to set up clap to parse all possible signer
|
||||
/// sources. Note the use of the [`OfflineArgs::offline_args`] method to add
|
||||
/// correct clap definitions of the `--signer` and `--sign-only` arguments, as
|
||||
/// required by the base-58 pubkey offline signing method.
|
||||
///
|
||||
/// [`OfflineArgs::offline_args`]: crate::offline::OfflineArgs::offline_args
|
||||
///
|
||||
/// ```no_run
|
||||
/// use clap::{App, Arg, value_t_or_exit};
|
||||
/// use solana_clap_utils::keypair::signer_from_path;
|
||||
/// use solana_clap_utils::offline::OfflineArgs;
|
||||
///
|
||||
/// let clap_app = App::new("my-program")
|
||||
/// // The argument we'll parse as a signer "path"
|
||||
/// .arg(Arg::with_name("keypair")
|
||||
/// .required(true)
|
||||
/// .help("The default signer"))
|
||||
/// .offline_args();
|
||||
///
|
||||
/// let clap_matches = clap_app.get_matches();
|
||||
/// let keypair_str = value_t_or_exit!(clap_matches, "keypair", String);
|
||||
/// let mut wallet_manager = None;
|
||||
/// let signer = signer_from_path(
|
||||
/// &clap_matches,
|
||||
/// &keypair_str,
|
||||
/// "keypair",
|
||||
/// &mut wallet_manager,
|
||||
/// )?;
|
||||
/// # Ok::<(), Box<dyn std::error::Error>>(())
|
||||
/// ```
|
||||
pub fn signer_from_path(
|
||||
matches: &ArgMatches,
|
||||
path: &str,
|
||||
@@ -321,6 +692,63 @@ pub fn signer_from_path(
|
||||
signer_from_path_with_config(matches, path, keypair_name, wallet_manager, &config)
|
||||
}
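
The base-58 pubkey case described above is easiest to see with a concrete invocation. Below is a minimal sketch, not part of the diff, assuming the same clap setup as the rustdoc example; the program name and argument values are illustrative, and the pubkey is simply borrowed from the `usb://` examples. With `--sign-only` present the signer is created with no associated signatures; a real offline flow would also pass `--signer <PUBKEY>=<SIGNATURE>` pairs as described.

```rust
use clap::{value_t_or_exit, App, Arg};
use solana_clap_utils::{keypair::signer_from_path, offline::OfflineArgs};
use solana_sdk::signature::Signer;

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Hypothetical offline invocation: the signer "path" is a bare base-58
    // pubkey. A real offline flow would also supply presigned signatures via
    // `--signer <PUBKEY>=<SIGNATURE>`; here `--sign-only` allows the signer to
    // be created with no associated signatures.
    let args = vec![
        "my-program",
        "9rPVSygg3brqghvdZ6wsL2i5YNQTGhXGdJzF65YxaCQd",
        "--sign-only",
    ];

    let clap_matches = App::new("my-program")
        // The argument parsed as the signer "path"
        .arg(Arg::with_name("keypair").required(true))
        // Adds the --signer and --sign-only arguments described above
        .offline_args()
        .get_matches_from(args);

    let keypair_str = value_t_or_exit!(clap_matches, "keypair", String);
    let mut wallet_manager = None;
    let signer = signer_from_path(&clap_matches, &keypair_str, "keypair", &mut wallet_manager)?;

    // The resulting signer reports the pubkey given on the command line.
    println!("{}", signer.pubkey());
    Ok(())
}
```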
|
||||
|
||||
/// Loads a [Signer] from one of several possible sources.
|
||||
///
|
||||
/// The `path` is not strictly a file system path, but is interpreted as various
|
||||
/// types of _signing source_, depending on its format, one of which is a path
|
||||
/// to a keypair file. Some sources may require user interaction in the course
|
||||
/// of calling this function.
///
/// This is the same as [`signer_from_path`] except that it additionally
/// accepts a [`SignerFromPathConfig`] argument.
///
/// If the `allow_null_signer` field of `config` is `true`, then pubkey signers
/// are allowed to have zero associated signatures via additional "signer"
/// command line arguments. It has the same effect as if the "sign_only" clap
/// argument is present.
///
/// See [`signer_from_path`] for full documentation of how this function
/// interprets its arguments.
///
|
||||
/// # Examples
|
||||
///
|
||||
/// This shows a reasonable way to set up clap to parse all possible signer
|
||||
/// sources. Note the use of the [`OfflineArgs::offline_args`] method to add
|
||||
/// correct clap definitions of the `--signer` and `--sign-only` arguments, as
|
||||
/// required by the base-58 pubkey offline signing method.
|
||||
///
|
||||
/// [`OfflineArgs::offline_args`]: crate::offline::OfflineArgs::offline_args
|
||||
///
|
||||
/// ```no_run
|
||||
/// use clap::{App, Arg, value_t_or_exit};
|
||||
/// use solana_clap_utils::keypair::{signer_from_path_with_config, SignerFromPathConfig};
|
||||
/// use solana_clap_utils::offline::OfflineArgs;
|
||||
///
|
||||
/// let clap_app = App::new("my-program")
|
||||
/// // The argument we'll parse as a signer "path"
|
||||
/// .arg(Arg::with_name("keypair")
|
||||
/// .required(true)
|
||||
/// .help("The default signer"))
|
||||
/// .offline_args();
|
||||
///
|
||||
/// let clap_matches = clap_app.get_matches();
|
||||
/// let keypair_str = value_t_or_exit!(clap_matches, "keypair", String);
|
||||
/// let mut wallet_manager = None;
|
||||
///
|
||||
/// // Allow pubkey signers without accompanying signatures
|
||||
/// let config = SignerFromPathConfig {
|
||||
/// allow_null_signer: true,
|
||||
/// };
|
||||
///
|
||||
/// let signer = signer_from_path_with_config(
|
||||
/// &clap_matches,
|
||||
/// &keypair_str,
|
||||
/// "keypair",
|
||||
/// &mut wallet_manager,
|
||||
/// &config,
|
||||
/// )?;
|
||||
/// # Ok::<(), Box<dyn std::error::Error>>(())
|
||||
/// ```
|
||||
pub fn signer_from_path_with_config(
|
||||
matches: &ArgMatches,
|
||||
path: &str,
|
||||
@@ -391,6 +819,43 @@ pub fn signer_from_path_with_config(
|
||||
}
|
||||
}
|
||||
|
||||
/// Loads the pubkey of a [Signer] from one of several possible sources.
|
||||
///
|
||||
/// The `path` is not strictly a file system path, but is interpreted as various
|
||||
/// types of _signing source_, depending on its format, one of which is a path
|
||||
/// to a keypair file. Some sources may require user interaction in the course
|
||||
/// of calling this function.
|
||||
///
|
||||
/// The only difference between this function and [`signer_from_path`] is in the
|
||||
/// case of a "pubkey" path: this function does not require that accompanying
|
||||
/// command line arguments contain an offline signature.
|
||||
///
|
||||
/// See [`signer_from_path`] for full documentation of how this function
|
||||
/// interprets its arguments.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```no_run
|
||||
/// use clap::{App, Arg, value_t_or_exit};
|
||||
/// use solana_clap_utils::keypair::pubkey_from_path;
|
||||
///
|
||||
/// let clap_app = App::new("my-program")
|
||||
/// // The argument we'll parse as a signer "path"
|
||||
/// .arg(Arg::with_name("keypair")
|
||||
/// .required(true)
|
||||
/// .help("The default signer"));
|
||||
///
|
||||
/// let clap_matches = clap_app.get_matches();
|
||||
/// let keypair_str = value_t_or_exit!(clap_matches, "keypair", String);
|
||||
/// let mut wallet_manager = None;
|
||||
/// let pubkey = pubkey_from_path(
|
||||
/// &clap_matches,
|
||||
/// &keypair_str,
|
||||
/// "keypair",
|
||||
/// &mut wallet_manager,
|
||||
/// )?;
|
||||
/// # Ok::<(), Box<dyn std::error::Error>>(())
|
||||
/// ```
|
||||
pub fn pubkey_from_path(
|
||||
matches: &ArgMatches,
|
||||
path: &str,
|
||||
@@ -480,7 +945,7 @@ pub const SKIP_SEED_PHRASE_VALIDATION_ARG: ArgConstant<'static> = ArgConstant {
|
||||
|
||||
/// Prompts the user for a passphrase and then asks for confirmation to check for mistakes
|
||||
pub fn prompt_passphrase(prompt: &str) -> Result<String, Box<dyn error::Error>> {
|
||||
let passphrase = prompt_password_stderr(&prompt)?;
|
||||
let passphrase = prompt_password_stderr(prompt)?;
|
||||
if !passphrase.is_empty() {
|
||||
let confirmed = rpassword::prompt_password_stderr("Enter same passphrase again: ")?;
|
||||
if confirmed != passphrase {
|
||||
@@ -490,7 +955,46 @@ pub fn prompt_passphrase(prompt: &str) -> Result<String, Box<dyn error::Error>>
|
||||
Ok(passphrase)
|
||||
}
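
A minimal usage sketch for `prompt_passphrase`, assuming an interactive terminal; the prompt text is an arbitrary example and not taken from the codebase.

```rust
use solana_clap_utils::keypair::prompt_passphrase;

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Prompts on stderr; for a non-empty passphrase the user is asked to
    // re-enter it, and a mismatch is reported as an error.
    let passphrase = prompt_passphrase("Enter a BIP39 passphrase (empty for none): ")?;
    eprintln!("captured a passphrase of {} characters", passphrase.chars().count());
    Ok(())
}
```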
|
||||
|
||||
/// Parses a path into a SignerSource and returns a Keypair for supporting SignerSourceKinds
|
||||
/// Loads a [Keypair] from one of several possible sources.
|
||||
///
|
||||
/// The `path` is not strictly a file system path, but is interpreted as various
|
||||
/// types of _signing source_, depending on its format, one of which is a path
|
||||
/// to a keypair file. Some sources may require user interaction in the course
|
||||
/// of calling this function.
|
||||
///
|
||||
/// This is the same as [`signer_from_path`] except that it only supports
|
||||
/// signing sources that can result in a [Keypair]: prompt for seed phrase,
|
||||
/// keypair file, and stdin.
|
||||
///
|
||||
/// If `confirm_pubkey` is `true` then after deriving the pubkey, the user will
|
||||
/// be prompted to confirm that the pubkey is as expected.
|
||||
///
|
||||
/// See [`signer_from_path`] for full documentation of how this function
|
||||
/// interprets its arguments.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```no_run
|
||||
/// use clap::{App, Arg, value_t_or_exit};
|
||||
/// use solana_clap_utils::keypair::keypair_from_path;
|
||||
///
|
||||
/// let clap_app = App::new("my-program")
|
||||
/// // The argument we'll parse as a signer "path"
|
||||
/// .arg(Arg::with_name("keypair")
|
||||
/// .required(true)
|
||||
/// .help("The default signer"));
|
||||
///
|
||||
/// let clap_matches = clap_app.get_matches();
|
||||
/// let keypair_str = value_t_or_exit!(clap_matches, "keypair", String);
|
||||
///
|
||||
/// let signer = keypair_from_path(
|
||||
/// &clap_matches,
|
||||
/// &keypair_str,
|
||||
/// "keypair",
|
||||
/// false,
|
||||
/// )?;
|
||||
/// # Ok::<(), Box<dyn std::error::Error>>(())
|
||||
/// ```
|
||||
pub fn keypair_from_path(
|
||||
matches: &ArgMatches,
|
||||
path: &str,
|
||||
@@ -540,9 +1044,10 @@ pub fn keypair_from_path(
|
||||
}
|
||||
}
|
||||
|
||||
/// Reads user input from stdin to retrieve a seed phrase and passphrase for keypair derivation
|
||||
/// Optionally skips validation of seed phrase
|
||||
/// Optionally confirms recovered public key
|
||||
/// Reads user input from stdin to retrieve a seed phrase and passphrase for keypair derivation.
|
||||
///
|
||||
/// Optionally skips validation of seed phrase. Optionally confirms recovered
|
||||
/// public key.
|
||||
pub fn keypair_from_seed_phrase(
|
||||
keypair_name: &str,
|
||||
skip_validation: bool,
|
||||
@@ -560,9 +1065,9 @@ pub fn keypair_from_seed_phrase(
|
||||
let keypair = if skip_validation {
|
||||
let passphrase = prompt_passphrase(&passphrase_prompt)?;
|
||||
if legacy {
|
||||
keypair_from_seed_phrase_and_passphrase(&seed_phrase, &passphrase)?
|
||||
keypair_from_seed_phrase_and_passphrase(seed_phrase, &passphrase)?
|
||||
} else {
|
||||
let seed = generate_seed_from_seed_phrase_and_passphrase(&seed_phrase, &passphrase);
|
||||
let seed = generate_seed_from_seed_phrase_and_passphrase(seed_phrase, &passphrase);
|
||||
keypair_from_seed_and_derivation_path(&seed, derivation_path)?
|
||||
}
|
||||
} else {
|
||||
@@ -590,7 +1095,7 @@ pub fn keypair_from_seed_phrase(
|
||||
if legacy {
|
||||
keypair_from_seed(seed.as_bytes())?
|
||||
} else {
|
||||
keypair_from_seed_and_derivation_path(&seed.as_bytes(), derivation_path)?
|
||||
keypair_from_seed_and_derivation_path(seed.as_bytes(), derivation_path)?
|
||||
}
|
||||
};
|
||||
|
||||
@@ -618,10 +1123,14 @@ fn sanitize_seed_phrase(seed_phrase: &str) -> String {
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use solana_remote_wallet::locator::Manufacturer;
|
||||
use solana_sdk::system_instruction;
|
||||
use tempfile::NamedTempFile;
|
||||
use {
|
||||
super::*,
|
||||
crate::offline::OfflineArgs,
|
||||
clap::{value_t_or_exit, App, Arg},
|
||||
solana_remote_wallet::{locator::Manufacturer, remote_wallet::initialize_wallet_manager},
|
||||
solana_sdk::{signer::keypair::write_keypair_file, system_instruction},
|
||||
tempfile::{NamedTempFile, TempDir},
|
||||
};
|
||||
|
||||
#[test]
|
||||
fn test_sanitize_seed_phrase() {
|
||||
@@ -751,6 +1260,10 @@ mod tests {
|
||||
// Catchall into SignerSource::Filepath fails
|
||||
let junk = "sometextthatisnotapubkeyorfile".to_string();
|
||||
assert!(Pubkey::from_str(&junk).is_err());
|
||||
assert!(matches!(
|
||||
parse_signer_source(&junk),
|
||||
Err(SignerSourceError::IoError(_))
|
||||
));
|
||||
|
||||
let prompt = "prompt:".to_string();
|
||||
assert!(matches!(
|
||||
@@ -776,4 +1289,41 @@ mod tests {
|
||||
} if p == relative_path_str)
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn signer_from_path_with_file() -> Result<(), Box<dyn std::error::Error>> {
|
||||
let dir = TempDir::new()?;
|
||||
let dir = dir.path();
|
||||
let keypair_path = dir.join("id.json");
|
||||
let keypair_path_str = keypair_path.to_str().expect("utf-8");
|
||||
|
||||
let keypair = Keypair::new();
|
||||
write_keypair_file(&keypair, &keypair_path)?;
|
||||
|
||||
let args = vec!["program", keypair_path_str];
|
||||
|
||||
let clap_app = App::new("my-program")
|
||||
.arg(
|
||||
Arg::with_name("keypair")
|
||||
.required(true)
|
||||
.help("The signing keypair"),
|
||||
)
|
||||
.offline_args();
|
||||
|
||||
let clap_matches = clap_app.get_matches_from(args);
|
||||
let keypair_str = value_t_or_exit!(clap_matches, "keypair", String);
|
||||
|
||||
let wallet_manager = initialize_wallet_manager()?;
|
||||
|
||||
let signer = signer_from_path(
|
||||
&clap_matches,
|
||||
&keypair_str,
|
||||
"signer",
|
||||
&mut Some(wallet_manager),
|
||||
)?;
|
||||
|
||||
assert_eq!(keypair.pubkey(), signer.pubkey());
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.foundation>"]
|
||||
edition = "2018"
|
||||
name = "solana-cli-config"
|
||||
description = "Blockchain, Rebuilt for Scale"
|
||||
version = "1.7.0"
|
||||
version = "1.8.17"
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
license = "Apache-2.0"
|
||||
homepage = "https://solana.com/"
|
||||
|
@@ -1,7 +1,9 @@
|
||||
// Wallet settings that can be configured for long-term use
|
||||
use serde_derive::{Deserialize, Serialize};
|
||||
use std::{collections::HashMap, io, path::Path};
|
||||
use url::Url;
|
||||
use {
|
||||
serde_derive::{Deserialize, Serialize},
|
||||
std::{collections::HashMap, io, path::Path},
|
||||
url::Url,
|
||||
};
|
||||
|
||||
lazy_static! {
|
||||
pub static ref CONFIG_FILE: Option<String> = {
|
||||
@@ -107,24 +109,24 @@ mod test {
|
||||
#[test]
|
||||
fn compute_websocket_url() {
|
||||
assert_eq!(
|
||||
Config::compute_websocket_url(&"http://api.devnet.solana.com"),
|
||||
Config::compute_websocket_url("http://api.devnet.solana.com"),
|
||||
"ws://api.devnet.solana.com/".to_string()
|
||||
);
|
||||
|
||||
assert_eq!(
|
||||
Config::compute_websocket_url(&"https://api.devnet.solana.com"),
|
||||
Config::compute_websocket_url("https://api.devnet.solana.com"),
|
||||
"wss://api.devnet.solana.com/".to_string()
|
||||
);
|
||||
|
||||
assert_eq!(
|
||||
Config::compute_websocket_url(&"http://example.com:8899"),
|
||||
Config::compute_websocket_url("http://example.com:8899"),
|
||||
"ws://example.com:8900/".to_string()
|
||||
);
|
||||
assert_eq!(
|
||||
Config::compute_websocket_url(&"https://example.com:1234"),
|
||||
Config::compute_websocket_url("https://example.com:1234"),
|
||||
"wss://example.com:1235/".to_string()
|
||||
);
|
||||
|
||||
assert_eq!(Config::compute_websocket_url(&"garbage"), String::new());
|
||||
assert_eq!(Config::compute_websocket_url("garbage"), String::new());
|
||||
}
|
||||
}
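
The websocket tests above encode a small mapping that is easy to miss when reading the assertions alone: the RPC scheme is swapped for its websocket counterpart (`http` to `ws`, `https` to `wss`) and an explicitly specified port is bumped by one. The sketch below is an illustrative stand-in for that behavior only, not the crate's actual `compute_websocket_url` implementation; the function name is invented for the example.

```rust
use url::Url;

// Mirror the asserted behavior: swap the scheme, bump an explicit port by one,
// and return an empty string for input that does not parse as a URL.
fn websocket_url_sketch(json_rpc_url: &str) -> String {
    let json_rpc_url: Url = match json_rpc_url.parse() {
        Ok(url) => url,
        Err(_) => return String::new(),
    };
    let is_secure = json_rpc_url.scheme().eq_ignore_ascii_case("https");
    let mut ws_url = json_rpc_url.clone();
    ws_url
        .set_scheme(if is_secure { "wss" } else { "ws" })
        .expect("ws/wss are valid schemes");
    if let Some(port) = json_rpc_url.port() {
        let _ = ws_url.set_port(Some(port + 1));
    }
    ws_url.to_string()
}

fn main() {
    assert_eq!(
        websocket_url_sketch("https://example.com:1234"),
        "wss://example.com:1235/"
    );
    assert_eq!(
        websocket_url_sketch("http://api.devnet.solana.com"),
        "ws://api.devnet.solana.com/"
    );
    assert_eq!(websocket_url_sketch("garbage"), String::new());
}
```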
|
||||
|
@@ -3,7 +3,6 @@ extern crate lazy_static;
|
||||
|
||||
mod config;
|
||||
pub use config::{Config, CONFIG_FILE};
|
||||
|
||||
use std::{
|
||||
fs::{create_dir_all, File},
|
||||
io::{self, Write},
|
||||
|
@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.foundation>"]
|
||||
edition = "2018"
|
||||
name = "solana-cli-output"
|
||||
description = "Blockchain, Rebuilt for Scale"
|
||||
version = "1.7.0"
|
||||
version = "1.8.17"
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
license = "Apache-2.0"
|
||||
homepage = "https://solana.com/"
|
||||
@@ -12,20 +12,20 @@ documentation = "https://docs.rs/solana-cli-output"
|
||||
[dependencies]
|
||||
base64 = "0.13.0"
|
||||
chrono = { version = "0.4.11", features = ["serde"] }
|
||||
console = "0.11.3"
|
||||
clap = "2.33.0"
|
||||
console = "0.14.1"
|
||||
humantime = "2.0.1"
|
||||
Inflector = "0.11.4"
|
||||
indicatif = "0.15.0"
|
||||
serde = "1.0.122"
|
||||
serde_derive = "1.0.103"
|
||||
serde_json = "1.0.56"
|
||||
solana-account-decoder = { path = "../account-decoder", version = "=1.7.0" }
|
||||
solana-clap-utils = { path = "../clap-utils", version = "=1.7.0" }
|
||||
solana-client = { path = "../client", version = "=1.7.0" }
|
||||
solana-sdk = { path = "../sdk", version = "=1.7.0" }
|
||||
solana-stake-program = { path = "../programs/stake", version = "=1.7.0" }
|
||||
solana-transaction-status = { path = "../transaction-status", version = "=1.7.0" }
|
||||
solana-vote-program = { path = "../programs/vote", version = "=1.7.0" }
|
||||
solana-account-decoder = { path = "../account-decoder", version = "=1.8.17" }
|
||||
solana-clap-utils = { path = "../clap-utils", version = "=1.8.17" }
|
||||
solana-client = { path = "../client", version = "=1.8.17" }
|
||||
solana-sdk = { path = "../sdk", version = "=1.8.17" }
|
||||
solana-transaction-status = { path = "../transaction-status", version = "=1.8.17" }
|
||||
solana-vote-program = { path = "../programs/vote", version = "=1.8.17" }
|
||||
spl-memo = { version = "=3.0.1", features = ["no-entrypoint"] }
|
||||
|
||||
[package.metadata.docs.rs]
|
||||
|
@@ -8,6 +8,7 @@ use {
|
||||
QuietDisplay, VerboseDisplay,
|
||||
},
|
||||
chrono::{Local, TimeZone},
|
||||
clap::ArgMatches,
|
||||
console::{style, Emoji},
|
||||
inflector::cases::titlecase::to_title_case,
|
||||
serde::{Deserialize, Serialize},
|
||||
@@ -25,10 +26,10 @@ use {
|
||||
native_token::lamports_to_sol,
|
||||
pubkey::Pubkey,
|
||||
signature::Signature,
|
||||
stake::state::{Authorized, Lockup},
|
||||
stake_history::StakeHistoryEntry,
|
||||
transaction::{Transaction, TransactionError},
|
||||
},
|
||||
solana_stake_program::stake_state::{Authorized, Lockup},
|
||||
solana_transaction_status::{
|
||||
EncodedConfirmedBlock, EncodedTransaction, TransactionConfirmationStatus,
|
||||
UiTransactionStatusMeta,
|
||||
@@ -45,9 +46,11 @@ use {
|
||||
},
|
||||
};
|
||||
|
||||
static CHECK_MARK: Emoji = Emoji("✅ ", "");
|
||||
static CROSS_MARK: Emoji = Emoji("❌ ", "");
|
||||
static WARNING: Emoji = Emoji("⚠️", "!");
|
||||
|
||||
#[derive(PartialEq)]
|
||||
#[derive(PartialEq, Debug)]
|
||||
pub enum OutputFormat {
|
||||
Display,
|
||||
Json,
|
||||
@@ -77,6 +80,21 @@ impl OutputFormat {
|
||||
OutputFormat::JsonCompact => serde_json::to_value(item).unwrap().to_string(),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn from_matches(matches: &ArgMatches<'_>, output_name: &str, verbose: bool) -> Self {
|
||||
matches
|
||||
.value_of(output_name)
|
||||
.map(|value| match value {
|
||||
"json" => OutputFormat::Json,
|
||||
"json-compact" => OutputFormat::JsonCompact,
|
||||
_ => unreachable!(),
|
||||
})
|
||||
.unwrap_or(if verbose {
|
||||
OutputFormat::DisplayVerbose
|
||||
} else {
|
||||
OutputFormat::Display
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize)]
|
||||
@@ -233,6 +251,10 @@ pub struct CliEpochInfo {
|
||||
pub epoch_info: EpochInfo,
|
||||
#[serde(skip)]
|
||||
pub average_slot_time_ms: u64,
|
||||
#[serde(skip)]
|
||||
pub start_block_time: Option<UnixTimestamp>,
|
||||
#[serde(skip)]
|
||||
pub current_block_time: Option<UnixTimestamp>,
|
||||
}
|
||||
|
||||
impl QuietDisplay for CliEpochInfo {}
|
||||
@@ -277,21 +299,41 @@ impl fmt::Display for CliEpochInfo {
|
||||
remaining_slots_in_epoch
|
||||
),
|
||||
)?;
|
||||
let (time_elapsed, annotation) = if let (Some(start_block_time), Some(current_block_time)) =
|
||||
(self.start_block_time, self.current_block_time)
|
||||
{
|
||||
(
|
||||
Duration::from_secs((current_block_time - start_block_time) as u64),
|
||||
None,
|
||||
)
|
||||
} else {
|
||||
(
|
||||
slot_to_duration(self.epoch_info.slot_index, self.average_slot_time_ms),
|
||||
Some("* estimated based on current slot durations"),
|
||||
)
|
||||
};
|
||||
let time_remaining = slot_to_duration(remaining_slots_in_epoch, self.average_slot_time_ms);
|
||||
writeln_name_value(
|
||||
f,
|
||||
"Epoch Completed Time:",
|
||||
&format!(
|
||||
"{}/{} ({} remaining)",
|
||||
slot_to_human_time(self.epoch_info.slot_index, self.average_slot_time_ms),
|
||||
slot_to_human_time(self.epoch_info.slots_in_epoch, self.average_slot_time_ms),
|
||||
slot_to_human_time(remaining_slots_in_epoch, self.average_slot_time_ms)
|
||||
"{}{}/{} ({} remaining)",
|
||||
humantime::format_duration(time_elapsed).to_string(),
|
||||
if annotation.is_some() { "*" } else { "" },
|
||||
humantime::format_duration(time_elapsed + time_remaining).to_string(),
|
||||
humantime::format_duration(time_remaining).to_string(),
|
||||
),
|
||||
)
|
||||
)?;
|
||||
if let Some(annotation) = annotation {
|
||||
writeln!(f)?;
|
||||
writeln!(f, "{}", annotation)?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
fn slot_to_human_time(slot: Slot, slot_time_ms: u64) -> String {
|
||||
humantime::format_duration(Duration::from_secs((slot * slot_time_ms) / 1000)).to_string()
|
||||
fn slot_to_duration(slot: Slot, slot_time_ms: u64) -> Duration {
|
||||
Duration::from_secs((slot * slot_time_ms) / 1000)
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Default)]
|
||||
@@ -323,6 +365,8 @@ pub struct CliValidators {
|
||||
pub total_current_stake: u64,
|
||||
pub total_delinquent_stake: u64,
|
||||
pub validators: Vec<CliValidator>,
|
||||
pub average_skip_rate: f64,
|
||||
pub average_stake_weighted_skip_rate: f64,
|
||||
#[serde(skip_serializing)]
|
||||
pub validators_sort_order: CliValidatorsSortOrder,
|
||||
#[serde(skip_serializing)]
|
||||
@@ -486,6 +530,18 @@ impl fmt::Display for CliValidators {
|
||||
writeln!(f, "{}", header)?;
|
||||
}
|
||||
|
||||
writeln!(f)?;
|
||||
writeln_name_value(
|
||||
f,
|
||||
"Average Stake-Weighted Skip Rate:",
|
||||
&format!("{:.2}%", self.average_stake_weighted_skip_rate,),
|
||||
)?;
|
||||
writeln_name_value(
|
||||
f,
|
||||
"Average Unweighted Skip Rate: ",
|
||||
&format!("{:.2}%", self.average_skip_rate),
|
||||
)?;
|
||||
|
||||
writeln!(f)?;
|
||||
writeln_name_value(
|
||||
f,
|
||||
@@ -522,7 +578,7 @@ impl fmt::Display for CliValidators {
|
||||
for (version, info) in self.stake_by_version.iter() {
|
||||
writeln!(
|
||||
f,
|
||||
"{:<8} - {:3} current validators ({:>5.2}%){}",
|
||||
"{:<8} - {:4} current validators ({:>5.2}%){}",
|
||||
version,
|
||||
info.current_validators,
|
||||
100. * info.current_active_stake as f64 / self.total_active_stake as f64,
|
||||
@@ -733,6 +789,7 @@ pub struct CliEpochReward {
|
||||
pub post_balance: u64, // lamports
|
||||
pub percent_change: f64,
|
||||
pub apr: Option<f64>,
|
||||
pub commission: Option<u8>,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize)]
|
||||
@@ -777,23 +834,27 @@ impl fmt::Display for CliKeyedEpochRewards {
|
||||
writeln!(f, "Epoch Rewards:")?;
|
||||
writeln!(
|
||||
f,
|
||||
" {:<44} {:<18} {:<18} {:>14} {:>14}",
|
||||
"Address", "Amount", "New Balance", "Percent Change", "APR"
|
||||
" {:<44} {:<18} {:<18} {:>14} {:>14} {:>10}",
|
||||
"Address", "Amount", "New Balance", "Percent Change", "APR", "Commission"
|
||||
)?;
|
||||
for keyed_reward in &self.rewards {
|
||||
match &keyed_reward.reward {
|
||||
Some(reward) => {
|
||||
writeln!(
|
||||
f,
|
||||
" {:<44} ◎{:<17.9} ◎{:<17.9} {:>13.2}% {}",
|
||||
" {:<44} ◎{:<17.9} ◎{:<17.9} {:>13.9}% {:>14} {:>10}",
|
||||
keyed_reward.address,
|
||||
lamports_to_sol(reward.amount),
|
||||
lamports_to_sol(reward.post_balance),
|
||||
reward.percent_change,
|
||||
reward
|
||||
.apr
|
||||
.map(|apr| format!("{:>13.2}%", apr))
|
||||
.map(|apr| format!("{:.2}%", apr))
|
||||
.unwrap_or_default(),
|
||||
reward
|
||||
.commission
|
||||
.map(|commission| format!("{}%", commission))
|
||||
.unwrap_or_else(|| "-".to_string())
|
||||
)?;
|
||||
}
|
||||
None => {
|
||||
@@ -910,13 +971,13 @@ fn show_epoch_rewards(
|
||||
writeln!(f, "Epoch Rewards:")?;
|
||||
writeln!(
|
||||
f,
|
||||
" {:<6} {:<11} {:<18} {:<18} {:>14} {:>14}",
|
||||
"Epoch", "Reward Slot", "Amount", "New Balance", "Percent Change", "APR"
|
||||
" {:<6} {:<11} {:<18} {:<18} {:>14} {:>14} {:>10}",
|
||||
"Epoch", "Reward Slot", "Amount", "New Balance", "Percent Change", "APR", "Commission"
|
||||
)?;
|
||||
for reward in epoch_rewards {
|
||||
writeln!(
|
||||
f,
|
||||
" {:<6} {:<11} ◎{:<17.9} ◎{:<17.9} {:>13.2}% {}",
|
||||
" {:<6} {:<11} ◎{:<17.9} ◎{:<17.9} {:>13.9}% {:>14} {:>10}",
|
||||
reward.epoch,
|
||||
reward.effective_slot,
|
||||
lamports_to_sol(reward.amount),
|
||||
@@ -924,8 +985,12 @@ fn show_epoch_rewards(
|
||||
reward.percent_change,
|
||||
reward
|
||||
.apr
|
||||
.map(|apr| format!("{:>13.2}%", apr))
|
||||
.map(|apr| format!("{:.2}%", apr))
|
||||
.unwrap_or_default(),
|
||||
reward
|
||||
.commission
|
||||
.map(|commission| format!("{}%", commission))
|
||||
.unwrap_or_else(|| "-".to_string())
|
||||
)?;
|
||||
}
|
||||
}
|
||||
@@ -1287,7 +1352,7 @@ impl fmt::Display for CliValidatorInfo {
|
||||
writeln_name_value(
|
||||
f,
|
||||
&format!(" {}:", to_title_case(key)),
|
||||
&value.as_str().unwrap_or("?"),
|
||||
value.as_str().unwrap_or("?"),
|
||||
)?;
|
||||
}
|
||||
Ok(())
|
||||
@@ -1325,8 +1390,8 @@ impl fmt::Display for CliVoteAccount {
|
||||
build_balance_message(self.account_balance, self.use_lamports_unit, true)
|
||||
)?;
|
||||
writeln!(f, "Validator Identity: {}", self.validator_identity)?;
|
||||
writeln!(f, "Authorized Voters: {}", self.authorized_voters)?;
|
||||
writeln!(f, "Authorized Withdrawer: {}", self.authorized_withdrawer)?;
|
||||
writeln!(f, "Vote Authority: {}", self.authorized_voters)?;
|
||||
writeln!(f, "Withdraw Authority: {}", self.authorized_withdrawer)?;
|
||||
writeln!(f, "Credits: {}", self.credits)?;
|
||||
writeln!(f, "Commission: {}%", self.commission)?;
|
||||
writeln!(
|
||||
@@ -1513,15 +1578,19 @@ impl fmt::Display for CliInflation {
|
||||
"Staking rate: {:>5.2}%",
|
||||
self.current_rate.validator * 100.
|
||||
)?;
|
||||
writeln!(
|
||||
f,
|
||||
"Foundation rate: {:>5.2}%",
|
||||
self.current_rate.foundation * 100.
|
||||
)
|
||||
|
||||
if self.current_rate.foundation > 0. {
|
||||
writeln!(
|
||||
f,
|
||||
"Foundation rate: {:>5.2}%",
|
||||
self.current_rate.foundation * 100.
|
||||
)?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Default)]
|
||||
#[derive(Serialize, Deserialize, Default, Debug, PartialEq)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct CliSignOnlyData {
|
||||
pub blockhash: String,
|
||||
@@ -1669,6 +1738,7 @@ pub struct CliFeesInner {
|
||||
pub blockhash: String,
|
||||
pub lamports_per_signature: u64,
|
||||
pub last_valid_slot: Option<Slot>,
|
||||
pub last_valid_block_height: Option<Slot>,
|
||||
}
|
||||
|
||||
impl QuietDisplay for CliFeesInner {}
|
||||
@@ -1682,11 +1752,11 @@ impl fmt::Display for CliFeesInner {
|
||||
"Lamports per signature:",
|
||||
&self.lamports_per_signature.to_string(),
|
||||
)?;
|
||||
let last_valid_slot = self
|
||||
.last_valid_slot
|
||||
let last_valid_block_height = self
|
||||
.last_valid_block_height
|
||||
.map(|s| s.to_string())
|
||||
.unwrap_or_default();
|
||||
writeln_name_value(f, "Last valid slot:", &last_valid_slot)
|
||||
writeln_name_value(f, "Last valid block height:", &last_valid_block_height)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1715,6 +1785,7 @@ impl CliFees {
|
||||
blockhash: Hash,
|
||||
lamports_per_signature: u64,
|
||||
last_valid_slot: Option<Slot>,
|
||||
last_valid_block_height: Option<Slot>,
|
||||
) -> Self {
|
||||
Self {
|
||||
inner: Some(CliFeesInner {
|
||||
@@ -1722,6 +1793,7 @@ impl CliFees {
|
||||
blockhash: blockhash.to_string(),
|
||||
lamports_per_signature,
|
||||
last_valid_slot,
|
||||
last_valid_block_height,
|
||||
}),
|
||||
}
|
||||
}
|
||||
@@ -1768,7 +1840,7 @@ impl fmt::Display for CliTokenAccount {
|
||||
writeln_name_value(
|
||||
f,
|
||||
"Close authority:",
|
||||
&account.close_authority.as_ref().unwrap_or(&String::new()),
|
||||
account.close_authority.as_ref().unwrap_or(&String::new()),
|
||||
)?;
|
||||
Ok(())
|
||||
}
|
||||
@@ -1860,6 +1932,9 @@ pub struct CliUpgradeableProgram {
|
||||
pub authority: String,
|
||||
pub last_deploy_slot: u64,
|
||||
pub data_len: usize,
|
||||
pub lamports: u64,
|
||||
#[serde(skip_serializing)]
|
||||
pub use_lamports_unit: bool,
|
||||
}
|
||||
impl QuietDisplay for CliUpgradeableProgram {}
|
||||
impl VerboseDisplay for CliUpgradeableProgram {}
|
||||
@@ -1880,12 +1955,78 @@ impl fmt::Display for CliUpgradeableProgram {
|
||||
"Data Length:",
|
||||
&format!("{:?} ({:#x?}) bytes", self.data_len, self.data_len),
|
||||
)?;
|
||||
writeln_name_value(
|
||||
f,
|
||||
"Balance:",
|
||||
&build_balance_message(self.lamports, self.use_lamports_unit, true),
|
||||
)?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct CliUpgradeablePrograms {
|
||||
pub programs: Vec<CliUpgradeableProgram>,
|
||||
#[serde(skip_serializing)]
|
||||
pub use_lamports_unit: bool,
|
||||
}
|
||||
impl QuietDisplay for CliUpgradeablePrograms {}
|
||||
impl VerboseDisplay for CliUpgradeablePrograms {}
|
||||
impl fmt::Display for CliUpgradeablePrograms {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
writeln!(f)?;
|
||||
writeln!(
|
||||
f,
|
||||
"{}",
|
||||
style(format!(
|
||||
"{:<44} | {:<9} | {:<44} | {}",
|
||||
"Program Id", "Slot", "Authority", "Balance"
|
||||
))
|
||||
.bold()
|
||||
)?;
|
||||
for program in self.programs.iter() {
|
||||
writeln!(
|
||||
f,
|
||||
"{}",
|
||||
&format!(
|
||||
"{:<44} | {:<9} | {:<44} | {}",
|
||||
program.program_id,
|
||||
program.last_deploy_slot,
|
||||
program.authority,
|
||||
build_balance_message(program.lamports, self.use_lamports_unit, true)
|
||||
)
|
||||
)?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct CliUpgradeableProgramClosed {
|
||||
pub program_id: String,
|
||||
pub lamports: u64,
|
||||
#[serde(skip_serializing)]
|
||||
pub use_lamports_unit: bool,
|
||||
}
|
||||
impl QuietDisplay for CliUpgradeableProgramClosed {}
|
||||
impl VerboseDisplay for CliUpgradeableProgramClosed {}
|
||||
impl fmt::Display for CliUpgradeableProgramClosed {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
writeln!(f)?;
|
||||
writeln!(
|
||||
f,
|
||||
"Closed Program Id {}, {} reclaimed",
|
||||
&self.program_id,
|
||||
&build_balance_message(self.lamports, self.use_lamports_unit, true)
|
||||
)?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct CliUpgradeableBuffer {
|
||||
pub address: String,
|
||||
pub authority: String,
|
||||
@@ -1970,6 +2111,11 @@ pub fn return_signers_with_config(
|
||||
output_format: &OutputFormat,
|
||||
config: &ReturnSignersConfig,
|
||||
) -> Result<String, Box<dyn std::error::Error>> {
|
||||
let cli_command = return_signers_data(tx, config);
|
||||
Ok(output_format.formatted_string(&cli_command))
|
||||
}
|
||||
|
||||
pub fn return_signers_data(tx: &Transaction, config: &ReturnSignersConfig) -> CliSignOnlyData {
|
||||
let verify_results = tx.verify_with_results();
|
||||
let mut signers = Vec::new();
|
||||
let mut absent = Vec::new();
|
||||
@@ -1994,19 +2140,17 @@ pub fn return_signers_with_config(
|
||||
None
|
||||
};
|
||||
|
||||
let cli_command = CliSignOnlyData {
|
||||
CliSignOnlyData {
|
||||
blockhash: tx.message.recent_blockhash.to_string(),
|
||||
message,
|
||||
signers,
|
||||
absent,
|
||||
bad_sig,
|
||||
};
|
||||
|
||||
Ok(output_format.formatted_string(&cli_command))
|
||||
}
|
||||
}
|
||||
|
||||
pub fn parse_sign_only_reply_string(reply: &str) -> SignOnly {
|
||||
let object: Value = serde_json::from_str(&reply).unwrap();
|
||||
let object: Value = serde_json::from_str(reply).unwrap();
|
||||
let blockhash_str = object.get("blockhash").unwrap().as_str().unwrap();
|
||||
let blockhash = blockhash_str.parse::<Hash>().unwrap();
|
||||
let mut present_signers: Vec<(Pubkey, Signature)> = Vec::new();
|
||||
@@ -2136,8 +2280,8 @@ impl fmt::Display for CliBlock {
|
||||
writeln!(f, "Rewards:")?;
|
||||
writeln!(
|
||||
f,
|
||||
" {:<44} {:^15} {:<15} {:<20} {:>14}",
|
||||
"Address", "Type", "Amount", "New Balance", "Percent Change"
|
||||
" {:<44} {:^15} {:<15} {:<20} {:>14} {:>10}",
|
||||
"Address", "Type", "Amount", "New Balance", "Percent Change", "Commission"
|
||||
)?;
|
||||
for reward in rewards {
|
||||
let sign = if reward.lamports < 0 { "-" } else { "" };
|
||||
@@ -2145,7 +2289,7 @@ impl fmt::Display for CliBlock {
|
||||
total_rewards += reward.lamports;
|
||||
writeln!(
|
||||
f,
|
||||
" {:<44} {:^15} {:>15} {}",
|
||||
" {:<44} {:^15} {:>15} {} {}",
|
||||
reward.pubkey,
|
||||
if let Some(reward_type) = reward.reward_type {
|
||||
format!("{}", reward_type)
|
||||
@@ -2167,7 +2311,11 @@ impl fmt::Display for CliBlock {
|
||||
/ (reward.post_balance as f64 - reward.lamports as f64))
|
||||
* 100.0
|
||||
)
|
||||
}
|
||||
},
|
||||
reward
|
||||
.commission
|
||||
.map(|commission| format!("{:>9}%", commission))
|
||||
.unwrap_or_else(|| " -".to_string())
|
||||
)?;
|
||||
}
|
||||
|
||||
@@ -2377,15 +2525,184 @@ impl fmt::Display for CliGossipNodes {
|
||||
impl QuietDisplay for CliGossipNodes {}
|
||||
impl VerboseDisplay for CliGossipNodes {}
|
||||
|
||||
#[derive(Serialize, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct CliPing {
|
||||
pub source_pubkey: String,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub fixed_blockhash: Option<String>,
|
||||
#[serde(skip_serializing)]
|
||||
pub blockhash_from_cluster: bool,
|
||||
pub pings: Vec<CliPingData>,
|
||||
pub transaction_stats: CliPingTxStats,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub confirmation_stats: Option<CliPingConfirmationStats>,
|
||||
}
|
||||
|
||||
impl fmt::Display for CliPing {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
writeln!(f)?;
|
||||
writeln_name_value(f, "Source Account:", &self.source_pubkey)?;
|
||||
if let Some(fixed_blockhash) = &self.fixed_blockhash {
|
||||
let blockhash_origin = if self.blockhash_from_cluster {
|
||||
"fetched from cluster"
|
||||
} else {
|
||||
"supplied from cli arguments"
|
||||
};
|
||||
writeln!(
|
||||
f,
|
||||
"Fixed blockhash is used: {} ({})",
|
||||
fixed_blockhash, blockhash_origin
|
||||
)?;
|
||||
}
|
||||
writeln!(f)?;
|
||||
for ping in &self.pings {
|
||||
write!(f, "{}", ping)?;
|
||||
}
|
||||
writeln!(f)?;
|
||||
writeln!(f, "--- transaction statistics ---")?;
|
||||
write!(f, "{}", self.transaction_stats)?;
|
||||
if let Some(confirmation_stats) = &self.confirmation_stats {
|
||||
write!(f, "{}", confirmation_stats)?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl QuietDisplay for CliPing {}
|
||||
impl VerboseDisplay for CliPing {}
|
||||
|
||||
#[derive(Serialize, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct CliPingData {
|
||||
pub success: bool,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub signature: Option<String>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub ms: Option<u64>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub error: Option<String>,
|
||||
#[serde(skip_serializing)]
|
||||
pub print_timestamp: bool,
|
||||
pub timestamp: String,
|
||||
pub sequence: u64,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub lamports: Option<u64>,
|
||||
}
|
||||
impl fmt::Display for CliPingData {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
let (mark, msg) = if let Some(signature) = &self.signature {
|
||||
if self.success {
|
||||
(
|
||||
CHECK_MARK,
|
||||
format!(
|
||||
"{} lamport(s) transferred: seq={:<3} time={:>4}ms signature={}",
|
||||
self.lamports.unwrap(),
|
||||
self.sequence,
|
||||
self.ms.unwrap(),
|
||||
signature
|
||||
),
|
||||
)
|
||||
} else if let Some(error) = &self.error {
|
||||
(
|
||||
CROSS_MARK,
|
||||
format!(
|
||||
"Transaction failed: seq={:<3} error={:?} signature={}",
|
||||
self.sequence, error, signature
|
||||
),
|
||||
)
|
||||
} else {
|
||||
(
|
||||
CROSS_MARK,
|
||||
format!(
|
||||
"Confirmation timeout: seq={:<3} signature={}",
|
||||
self.sequence, signature
|
||||
),
|
||||
)
|
||||
}
|
||||
} else {
|
||||
(
|
||||
CROSS_MARK,
|
||||
format!(
|
||||
"Submit failed: seq={:<3} error={:?}",
|
||||
self.sequence,
|
||||
self.error.as_ref().unwrap(),
|
||||
),
|
||||
)
|
||||
};
|
||||
|
||||
writeln!(
|
||||
f,
|
||||
"{}{}{}",
|
||||
if self.print_timestamp {
|
||||
&self.timestamp
|
||||
} else {
|
||||
""
|
||||
},
|
||||
mark,
|
||||
msg
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
impl QuietDisplay for CliPingData {}
|
||||
impl VerboseDisplay for CliPingData {}
|
||||
|
||||
#[derive(Serialize, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct CliPingTxStats {
|
||||
pub num_transactions: u32,
|
||||
pub num_transaction_confirmed: u32,
|
||||
}
|
||||
impl fmt::Display for CliPingTxStats {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
writeln!(
|
||||
f,
|
||||
"{} transactions submitted, {} transactions confirmed, {:.1}% transaction loss",
|
||||
self.num_transactions,
|
||||
self.num_transaction_confirmed,
|
||||
(100.
|
||||
- f64::from(self.num_transaction_confirmed) / f64::from(self.num_transactions)
|
||||
* 100.)
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
impl QuietDisplay for CliPingTxStats {}
|
||||
impl VerboseDisplay for CliPingTxStats {}
|
||||
|
||||
#[derive(Serialize, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct CliPingConfirmationStats {
|
||||
pub min: f64,
|
||||
pub mean: f64,
|
||||
pub max: f64,
|
||||
pub std_dev: f64,
|
||||
}
|
||||
impl fmt::Display for CliPingConfirmationStats {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
writeln!(
|
||||
f,
|
||||
"confirmation min/mean/max/stddev = {:.0}/{:.0}/{:.0}/{:.0} ms",
|
||||
self.min, self.mean, self.max, self.std_dev,
|
||||
)
|
||||
}
|
||||
}
|
||||
impl QuietDisplay for CliPingConfirmationStats {}
|
||||
impl VerboseDisplay for CliPingConfirmationStats {}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use solana_sdk::{
|
||||
message::Message,
|
||||
pubkey::Pubkey,
|
||||
signature::{keypair_from_seed, NullSigner, Signature, Signer, SignerError},
|
||||
system_instruction,
|
||||
transaction::Transaction,
|
||||
use {
|
||||
super::*,
|
||||
clap::{App, Arg},
|
||||
solana_sdk::{
|
||||
message::Message,
|
||||
pubkey::Pubkey,
|
||||
signature::{keypair_from_seed, NullSigner, Signature, Signer, SignerError},
|
||||
system_instruction,
|
||||
transaction::Transaction,
|
||||
},
|
||||
};
|
||||
|
||||
#[test]
|
||||
@@ -2408,6 +2725,10 @@ mod tests {
|
||||
fn try_sign_message(&self, _message: &[u8]) -> Result<Signature, SignerError> {
|
||||
Ok(Signature::new(&[1u8; 64]))
|
||||
}
|
||||
|
||||
fn is_interactive(&self) -> bool {
|
||||
false
|
||||
}
|
||||
}
|
||||
|
||||
let present: Box<dyn Signer> = Box::new(keypair_from_seed(&[2u8; 32]).unwrap());
|
||||
@@ -2436,6 +2757,22 @@ mod tests {
|
||||
assert_eq!(sign_only.absent_signers[0], absent.pubkey());
|
||||
assert_eq!(sign_only.bad_signers[0], bad.pubkey());
|
||||
|
||||
let res_data = return_signers_data(&tx, &ReturnSignersConfig::default());
|
||||
assert_eq!(
|
||||
res_data,
|
||||
CliSignOnlyData {
|
||||
blockhash: blockhash.to_string(),
|
||||
message: None,
|
||||
signers: vec![format!(
|
||||
"{}={}",
|
||||
present.pubkey().to_string(),
|
||||
tx.signatures[1]
|
||||
)],
|
||||
absent: vec![absent.pubkey().to_string()],
|
||||
bad_sig: vec![bad.pubkey().to_string()],
|
||||
}
|
||||
);
|
||||
|
||||
let expected_msg = "AwECBwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDgTl3Dqh9\
|
||||
F19Wo1Rmw0x+zMuNipG07jeiXfYPW4/Js5QEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQE\
|
||||
BAQEBAYGBgYGBgYGBgYGBgYGBgYGBgYGBgYGBgYGBgYGBgYGBQUFBQUFBQUFBQUFBQUFBQUF\
|
||||
@@ -2449,10 +2786,26 @@ mod tests {
|
||||
let res = return_signers_with_config(&tx, &OutputFormat::JsonCompact, &config).unwrap();
|
||||
let sign_only = parse_sign_only_reply_string(&res);
|
||||
assert_eq!(sign_only.blockhash, blockhash);
|
||||
assert_eq!(sign_only.message, Some(expected_msg));
|
||||
assert_eq!(sign_only.message, Some(expected_msg.clone()));
|
||||
assert_eq!(sign_only.present_signers[0].0, present.pubkey());
|
||||
assert_eq!(sign_only.absent_signers[0], absent.pubkey());
|
||||
assert_eq!(sign_only.bad_signers[0], bad.pubkey());
|
||||
|
||||
let res_data = return_signers_data(&tx, &config);
|
||||
assert_eq!(
|
||||
res_data,
|
||||
CliSignOnlyData {
|
||||
blockhash: blockhash.to_string(),
|
||||
message: Some(expected_msg),
|
||||
signers: vec![format!(
|
||||
"{}={}",
|
||||
present.pubkey().to_string(),
|
||||
tx.signatures[1]
|
||||
)],
|
||||
absent: vec![absent.pubkey().to_string()],
|
||||
bad_sig: vec![bad.pubkey().to_string()],
|
||||
}
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
@@ -2501,4 +2854,50 @@ mod tests {
|
||||
"verbose"
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_output_format_from_matches() {
|
||||
let app = App::new("test").arg(
|
||||
Arg::with_name("output_format")
|
||||
.long("output")
|
||||
.value_name("FORMAT")
|
||||
.global(true)
|
||||
.takes_value(true)
|
||||
.possible_values(&["json", "json-compact"])
|
||||
.help("Return information in specified output format"),
|
||||
);
|
||||
let matches = app
|
||||
.clone()
|
||||
.get_matches_from(vec!["test", "--output", "json"]);
|
||||
assert_eq!(
|
||||
OutputFormat::from_matches(&matches, "output_format", false),
|
||||
OutputFormat::Json
|
||||
);
|
||||
assert_eq!(
|
||||
OutputFormat::from_matches(&matches, "output_format", true),
|
||||
OutputFormat::Json
|
||||
);
|
||||
|
||||
let matches = app
|
||||
.clone()
|
||||
.get_matches_from(vec!["test", "--output", "json-compact"]);
|
||||
assert_eq!(
|
||||
OutputFormat::from_matches(&matches, "output_format", false),
|
||||
OutputFormat::JsonCompact
|
||||
);
|
||||
assert_eq!(
|
||||
OutputFormat::from_matches(&matches, "output_format", true),
|
||||
OutputFormat::JsonCompact
|
||||
);
|
||||
|
||||
let matches = app.clone().get_matches_from(vec!["test"]);
|
||||
assert_eq!(
|
||||
OutputFormat::from_matches(&matches, "output_format", false),
|
||||
OutputFormat::Display
|
||||
);
|
||||
assert_eq!(
|
||||
OutputFormat::from_matches(&matches, "output_format", true),
|
||||
OutputFormat::DisplayVerbose
|
||||
);
|
||||
}
|
||||
}
|
||||
|
@@ -5,11 +5,10 @@ use {
|
||||
indicatif::{ProgressBar, ProgressStyle},
|
||||
solana_sdk::{
|
||||
clock::UnixTimestamp, hash::Hash, message::Message, native_token::lamports_to_sol,
|
||||
program_utils::limited_deserialize, pubkey::Pubkey, transaction::Transaction,
|
||||
program_utils::limited_deserialize, pubkey::Pubkey, stake, transaction::Transaction,
|
||||
},
|
||||
solana_transaction_status::UiTransactionStatusMeta,
|
||||
spl_memo::id as spl_memo_id,
|
||||
spl_memo::v1::id as spl_memo_v1_id,
|
||||
spl_memo::{id as spl_memo_id, v1::id as spl_memo_v1_id},
|
||||
std::{collections::HashMap, fmt, io},
|
||||
};
|
||||
|
||||
@@ -140,7 +139,7 @@ fn format_account_mode(message: &Message, index: usize) -> String {
|
||||
} else {
|
||||
"-"
|
||||
},
|
||||
if message.is_writable(index, /*demote_sysvar_write_locks=*/ true) {
|
||||
if message.is_writable(index, /*demote_program_write_locks=*/ true) {
|
||||
"w" // comment for consistent rust fmt (no joking; lol)
|
||||
} else {
|
||||
"-"
|
||||
@@ -244,10 +243,9 @@ pub fn write_transaction<W: io::Write>(
|
||||
writeln!(w, "{} {:?}", prefix, vote_instruction)?;
|
||||
raw = false;
|
||||
}
|
||||
} else if program_pubkey == solana_stake_program::id() {
|
||||
if let Ok(stake_instruction) = limited_deserialize::<
|
||||
solana_stake_program::stake_instruction::StakeInstruction,
|
||||
>(&instruction.data)
|
||||
} else if program_pubkey == stake::program::id() {
|
||||
if let Ok(stake_instruction) =
|
||||
limited_deserialize::<stake::instruction::StakeInstruction>(&instruction.data)
|
||||
{
|
||||
writeln!(w, "{} {:?}", prefix, stake_instruction)?;
|
||||
raw = false;
|
||||
@@ -432,8 +430,7 @@ pub fn unix_timestamp_to_string(unix_timestamp: UnixTimestamp) -> String {
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use super::*;
|
||||
use solana_sdk::pubkey::Pubkey;
|
||||
use {super::*, solana_sdk::pubkey::Pubkey};
|
||||
|
||||
#[test]
|
||||
fn test_format_labeled_address() {
|
||||
|
@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.foundation>"]
|
||||
edition = "2018"
|
||||
name = "solana-cli"
|
||||
description = "Blockchain, Rebuilt for Scale"
|
||||
version = "1.7.0"
|
||||
version = "1.8.17"
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
license = "Apache-2.0"
|
||||
homepage = "https://solana.com/"
|
||||
@@ -16,7 +16,8 @@ chrono = { version = "0.4.11", features = ["serde"] }
|
||||
clap = "2.33.1"
|
||||
criterion-stats = "0.3.0"
|
||||
ctrlc = { version = "3.1.5", features = ["termination"] }
|
||||
console = "0.11.3"
|
||||
console = "0.14.1"
|
||||
const_format = "0.2.14"
|
||||
dirs-next = "2.0.0"
|
||||
log = "0.4.11"
|
||||
Inflector = "0.11.4"
|
||||
@@ -25,33 +26,34 @@ humantime = "2.0.1"
|
||||
num-traits = "0.2"
|
||||
pretty-hex = "0.2.1"
|
||||
reqwest = { version = "0.11.2", default-features = false, features = ["blocking", "rustls-tls", "json"] }
|
||||
semver = "1.0.4"
|
||||
serde = "1.0.122"
|
||||
serde_derive = "1.0.103"
|
||||
serde_json = "1.0.56"
|
||||
solana-account-decoder = { path = "../account-decoder", version = "=1.7.0" }
|
||||
solana-bpf-loader-program = { path = "../programs/bpf_loader", version = "=1.7.0" }
|
||||
solana-clap-utils = { path = "../clap-utils", version = "=1.7.0" }
|
||||
solana-cli-config = { path = "../cli-config", version = "=1.7.0" }
|
||||
solana-cli-output = { path = "../cli-output", version = "=1.7.0" }
|
||||
solana-client = { path = "../client", version = "=1.7.0" }
|
||||
solana-config-program = { path = "../programs/config", version = "=1.7.0" }
|
||||
solana-faucet = { path = "../faucet", version = "=1.7.0" }
|
||||
solana-logger = { path = "../logger", version = "=1.7.0" }
|
||||
solana-net-utils = { path = "../net-utils", version = "=1.7.0" }
|
||||
solana_rbpf = "=0.2.11"
|
||||
solana-remote-wallet = { path = "../remote-wallet", version = "=1.7.0" }
|
||||
solana-sdk = { path = "../sdk", version = "=1.7.0" }
|
||||
solana-stake-program = { path = "../programs/stake", version = "=1.7.0" }
|
||||
solana-transaction-status = { path = "../transaction-status", version = "=1.7.0" }
|
||||
solana-version = { path = "../version", version = "=1.7.0" }
|
||||
solana-vote-program = { path = "../programs/vote", version = "=1.7.0" }
|
||||
solana-account-decoder = { path = "../account-decoder", version = "=1.8.17" }
|
||||
solana-bpf-loader-program = { path = "../programs/bpf_loader", version = "=1.8.17" }
|
||||
solana-clap-utils = { path = "../clap-utils", version = "=1.8.17" }
|
||||
solana-cli-config = { path = "../cli-config", version = "=1.8.17" }
|
||||
solana-cli-output = { path = "../cli-output", version = "=1.8.17" }
|
||||
solana-client = { path = "../client", version = "=1.8.17" }
|
||||
solana-config-program = { path = "../programs/config", version = "=1.8.17" }
|
||||
solana-faucet = { path = "../faucet", version = "=1.8.17" }
|
||||
solana-logger = { path = "../logger", version = "=1.8.17" }
|
||||
solana-net-utils = { path = "../net-utils", version = "=1.8.17" }
|
||||
solana_rbpf = "=0.2.24"
|
||||
solana-remote-wallet = { path = "../remote-wallet", version = "=1.8.17" }
|
||||
solana-sdk = { path = "../sdk", version = "=1.8.17" }
|
||||
solana-transaction-status = { path = "../transaction-status", version = "=1.8.17" }
|
||||
solana-version = { path = "../version", version = "=1.8.17" }
|
||||
solana-vote-program = { path = "../programs/vote", version = "=1.8.17" }
|
||||
spl-memo = { version = "=3.0.1", features = ["no-entrypoint"] }
|
||||
thiserror = "1.0.21"
|
||||
tiny-bip39 = "0.7.0"
|
||||
tiny-bip39 = "0.8.1"
|
||||
url = "2.1.1"
|
||||
|
||||
[dev-dependencies]
|
||||
solana-core = { path = "../core", version = "=1.7.0" }
|
||||
solana-core = { path = "../core", version = "=1.8.17" }
|
||||
solana-streamer = { path = "../streamer", version = "=1.8.17" }
|
||||
tempfile = "3.1.0"
|
||||
|
||||
[[bin]]
|
||||
|
@@ -1,11 +1,13 @@
|
||||
use crate::cli::CliError;
|
||||
use solana_client::{
|
||||
client_error::{ClientError, Result as ClientResult},
|
||||
rpc_client::RpcClient,
|
||||
};
|
||||
use solana_sdk::{
|
||||
commitment_config::CommitmentConfig, fee_calculator::FeeCalculator, message::Message,
|
||||
native_token::lamports_to_sol, pubkey::Pubkey,
|
||||
use {
|
||||
crate::cli::CliError,
|
||||
solana_client::{
|
||||
client_error::{ClientError, Result as ClientResult},
|
||||
rpc_client::RpcClient,
|
||||
},
|
||||
solana_sdk::{
|
||||
commitment_config::CommitmentConfig, fee_calculator::FeeCalculator, message::Message,
|
||||
native_token::lamports_to_sol, pubkey::Pubkey,
|
||||
},
|
||||
};
|
||||
|
||||
pub fn check_account_for_fee(
|
||||
@@ -149,14 +151,16 @@ pub fn check_unique_pubkeys(
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use serde_json::json;
|
||||
use solana_client::{
|
||||
rpc_request::RpcRequest,
|
||||
rpc_response::{Response, RpcResponseContext},
|
||||
use {
|
||||
super::*,
|
||||
serde_json::json,
|
||||
solana_client::{
|
||||
rpc_request::RpcRequest,
|
||||
rpc_response::{Response, RpcResponseContext},
|
||||
},
|
||||
solana_sdk::system_instruction,
|
||||
std::collections::HashMap,
|
||||
};
|
||||
use solana_sdk::system_instruction;
|
||||
use std::collections::HashMap;
|
||||
|
||||
#[test]
|
||||
fn test_check_account_for_fees() {
|
||||
|
204
cli/src/clap_app.rs
Normal file
204
cli/src/clap_app.rs
Normal file
@@ -0,0 +1,204 @@
|
||||
use {
|
||||
crate::{
|
||||
cli::*, cluster_query::*, feature::*, inflation::*, nonce::*, program::*, stake::*,
|
||||
validator_info::*, vote::*, wallet::*,
|
||||
},
|
||||
clap::{App, AppSettings, Arg, ArgGroup, SubCommand},
|
||||
solana_clap_utils::{self, input_validators::*, keypair::*},
|
||||
solana_cli_config::CONFIG_FILE,
|
||||
};
|
||||
|
||||
pub fn get_clap_app<'ab, 'v>(name: &str, about: &'ab str, version: &'v str) -> App<'ab, 'v> {
|
||||
App::new(name)
|
||||
.about(about)
|
||||
.version(version)
|
||||
.setting(AppSettings::SubcommandRequiredElseHelp)
|
||||
.arg({
|
||||
let arg = Arg::with_name("config_file")
|
||||
.short("C")
|
||||
.long("config")
|
||||
.value_name("FILEPATH")
|
||||
.takes_value(true)
|
||||
.global(true)
|
||||
.help("Configuration file to use");
|
||||
if let Some(ref config_file) = *CONFIG_FILE {
|
||||
arg.default_value(config_file)
|
||||
} else {
|
||||
arg
|
||||
}
|
||||
})
|
||||
.arg(
|
||||
Arg::with_name("json_rpc_url")
|
||||
.short("u")
|
||||
.long("url")
|
||||
.value_name("URL_OR_MONIKER")
|
||||
.takes_value(true)
|
||||
.global(true)
|
||||
.validator(is_url_or_moniker)
|
||||
.help(
|
||||
"URL for Solana's JSON RPC or moniker (or their first letter): \
|
||||
[mainnet-beta, testnet, devnet, localhost]",
|
||||
),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("websocket_url")
|
||||
.long("ws")
|
||||
.value_name("URL")
|
||||
.takes_value(true)
|
||||
.global(true)
|
||||
.validator(is_url)
|
||||
.help("WebSocket URL for the solana cluster"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("keypair")
|
||||
.short("k")
|
||||
.long("keypair")
|
||||
.value_name("KEYPAIR")
|
||||
.global(true)
|
||||
.takes_value(true)
|
||||
.help("Filepath or URL to a keypair"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("commitment")
|
||||
.long("commitment")
|
||||
.takes_value(true)
|
||||
.possible_values(&[
|
||||
"processed",
|
||||
"confirmed",
|
||||
"finalized",
|
||||
"recent", // Deprecated as of v1.5.5
|
||||
"single", // Deprecated as of v1.5.5
|
||||
"singleGossip", // Deprecated as of v1.5.5
|
||||
"root", // Deprecated as of v1.5.5
|
||||
"max", // Deprecated as of v1.5.5
|
||||
])
|
||||
.value_name("COMMITMENT_LEVEL")
|
||||
.hide_possible_values(true)
|
||||
.global(true)
|
||||
.help("Return information at the selected commitment level [possible values: processed, confirmed, finalized]"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("verbose")
|
||||
.long("verbose")
|
||||
.short("v")
|
||||
.global(true)
|
||||
.help("Show additional information"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("no_address_labels")
|
||||
.long("no-address-labels")
|
||||
.global(true)
|
||||
.help("Do not use address labels in the output"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("output_format")
|
||||
.long("output")
|
||||
.value_name("FORMAT")
|
||||
.global(true)
|
||||
.takes_value(true)
|
||||
.possible_values(&["json", "json-compact"])
|
||||
.help("Return information in specified output format"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name(SKIP_SEED_PHRASE_VALIDATION_ARG.name)
|
||||
.long(SKIP_SEED_PHRASE_VALIDATION_ARG.long)
|
||||
.global(true)
|
||||
.help(SKIP_SEED_PHRASE_VALIDATION_ARG.help),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("rpc_timeout")
|
||||
.long("rpc-timeout")
|
||||
.value_name("SECONDS")
|
||||
.takes_value(true)
|
||||
.default_value(DEFAULT_RPC_TIMEOUT_SECONDS)
|
||||
.global(true)
|
||||
.hidden(true)
|
||||
.help("Timeout value for RPC requests"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("confirm_transaction_initial_timeout")
|
||||
.long("confirm-timeout")
|
||||
.value_name("SECONDS")
|
||||
.takes_value(true)
|
||||
.default_value(DEFAULT_CONFIRM_TX_TIMEOUT_SECONDS)
|
||||
.global(true)
|
||||
.hidden(true)
|
||||
.help("Timeout value for initial transaction status"),
|
||||
)
|
||||
.cluster_query_subcommands()
|
||||
.feature_subcommands()
|
||||
.inflation_subcommands()
|
||||
.nonce_subcommands()
|
||||
.program_subcommands()
|
||||
.stake_subcommands()
|
||||
.validator_info_subcommands()
|
||||
.vote_subcommands()
|
||||
.wallet_subcommands()
|
||||
.subcommand(
|
||||
SubCommand::with_name("config")
|
||||
.about("Solana command-line tool configuration settings")
|
||||
.aliases(&["get", "set"])
|
||||
.setting(AppSettings::SubcommandRequiredElseHelp)
|
||||
.subcommand(
|
||||
SubCommand::with_name("get")
|
||||
.about("Get current config settings")
|
||||
.arg(
|
||||
Arg::with_name("specific_setting")
|
||||
.index(1)
|
||||
.value_name("CONFIG_FIELD")
|
||||
.takes_value(true)
|
||||
.possible_values(&[
|
||||
"json_rpc_url",
|
||||
"websocket_url",
|
||||
"keypair",
|
||||
"commitment",
|
||||
])
|
||||
.help("Return a specific config setting"),
|
||||
),
|
||||
)
|
||||
.subcommand(
|
||||
SubCommand::with_name("set")
|
||||
.about("Set a config setting")
|
||||
.group(
|
||||
ArgGroup::with_name("config_settings")
|
||||
.args(&["json_rpc_url", "websocket_url", "keypair", "commitment"])
|
||||
.multiple(true)
|
||||
.required(true),
|
||||
),
|
||||
)
|
||||
.subcommand(
|
||||
SubCommand::with_name("import-address-labels")
|
||||
.about("Import a list of address labels")
|
||||
.arg(
|
||||
Arg::with_name("filename")
|
||||
.index(1)
|
||||
.value_name("FILENAME")
|
||||
.takes_value(true)
|
||||
.help("YAML file of address labels"),
|
||||
),
|
||||
)
|
||||
.subcommand(
|
||||
SubCommand::with_name("export-address-labels")
|
||||
.about("Export the current address labels")
|
||||
.arg(
|
||||
Arg::with_name("filename")
|
||||
.index(1)
|
||||
.value_name("FILENAME")
|
||||
.takes_value(true)
|
||||
.help("YAML file to receive the current address labels"),
|
||||
),
|
||||
),
|
||||
)
|
||||
.subcommand(
|
||||
SubCommand::with_name("completion")
|
||||
.about("Generate completion scripts for various shells")
|
||||
.arg(
|
||||
Arg::with_name("shell")
|
||||
.long("shell")
|
||||
.short("s")
|
||||
.takes_value(true)
|
||||
.possible_values(&["bash", "fish", "zsh", "powershell", "elvish"])
|
||||
.default_value("bash")
|
||||
)
|
||||
)
|
||||
}
|
Some files were not shown because too many files have changed in this diff.